diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go index b02970751..7ddfd5c7b 100644 --- a/apis/flowcollector/v1beta1/flowcollector_types.go +++ b/apis/flowcollector/v1beta1/flowcollector_types.go @@ -193,70 +193,86 @@ type EBPFMetrics struct { DisableAlerts []EBPFAgentAlert `json:"disableAlerts"` } -// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering -type EBPFFlowFilter struct { - // Set `enable` to `true` to enable eBPF flow filtering feature. - Enable *bool `json:"enable,omitempty"` - - // CIDR defines the IP CIDR to filter flows by. - // Example: 10.10.10.0/24 or 100:100:100:100::/64 +// `EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule. +type EBPFFlowFilterRule struct { + // `cidr` defines the IP CIDR to filter flows by. + // Examples: `10.10.10.0/24` or `100:100:100:100::/64` CIDR string `json:"cidr,omitempty"` - // Action defines the action to perform on the flows that match the filter. + // `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`. // +kubebuilder:validation:Enum:="Accept";"Reject" Action string `json:"action,omitempty"` - // Protocol defines the protocol to filter flows by. + // `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`. // +kubebuilder:validation:Enum:="TCP";"UDP";"ICMP";"ICMPv6";"SCTP" // +optional Protocol string `json:"protocol,omitempty"` - // Direction defines the direction to filter flows by. + // `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`. // +kubebuilder:validation:Enum:="Ingress";"Egress" // +optional Direction string `json:"direction,omitempty"` - // `tcpFlags` defines the TCP flags to filter flows by. + // `tcpFlags` optionally defines TCP flags to filter flows by. + // In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. // +kubebuilder:validation:Enum:="SYN";"SYN-ACK";"ACK";"FIN";"RST";"URG";"ECE";"CWR";"FIN-ACK";"RST-ACK" // +optional TCPFlags string `json:"tcpFlags,omitempty"` - // SourcePorts defines the source ports to filter flows by. - // To filter a single port, set a single port as an integer value. For example, sourcePorts: 80. - // To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100". + // `sourcePorts` optionally defines the source ports to filter flows by. + // To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + // To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. // To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. // +optional SourcePorts intstr.IntOrString `json:"sourcePorts,omitempty"` - // DestPorts defines the destination ports to filter flows by. - // To filter a single port, set a single port as an integer value. For example, destPorts: 80. - // To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100". + // `destPorts` optionally defines the destination ports to filter flows by. + // To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. 
+ // To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`.
 // To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
 // +optional
 DestPorts intstr.IntOrString `json:"destPorts,omitempty"`

- // Ports defines the ports to filter flows by. it can be user for either source or destination ports.
- // To filter a single port, set a single port as an integer value. For example, ports: 80.
- // To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100".
+ // `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports.
+ // To filter a single port, set a single port as an integer value. For example, `ports: 80`.
+ // To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`.
 // To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
 Ports intstr.IntOrString `json:"ports,omitempty"`

- // PeerIP defines the IP address to filter flows by.
- // Example: 10.10.10.10
+ // `peerIP` optionally defines the remote IP address to filter flows by.
+ // Example: `10.10.10.10`.
 // +optional
 PeerIP string `json:"peerIP,omitempty"`

- // ICMPType defines the ICMP type to filter flows by.
+ // `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
 // +optional
- ICMPType *int `json:"icmpType,omitempty"`
+ ICMPCode *int `json:"icmpCode,omitempty"`

- // ICMPCode defines the ICMP code to filter flows by.
+ // `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
 // +optional
- ICMPCode *int `json:"icmpCode,omitempty"`
+ ICMPType *int `json:"icmpType,omitempty"`

- // `pktDrops`, to filter flows with packet drops
+ // `pktDrops` optionally filters only flows containing packet drops.
 // +optional
 PktDrops *bool `json:"pktDrops,omitempty"`
+
+ // `sampling` defines the sampling rate for the matched flows.
+ // +optional
+ Sampling *uint32 `json:"sampling,omitempty"`
+}
+
+// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering.
+type EBPFFlowFilter struct {
+ // Set `enable` to `true` to enable the eBPF flow filtering feature.
+ Enable *bool `json:"enable,omitempty"`
+
+ // [Deprecated (*)] This setting is no longer used; define filters in `rules` instead.
+ EBPFFlowFilterRule `json:",inline"`
+
+ // `flowFilterRules` defines a list of eBPF agent flow filtering rules.
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:MaxItems:=16
+ FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
}

// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information

diff --git a/apis/flowcollector/v1beta1/zz_generated.conversion.go b/apis/flowcollector/v1beta1/zz_generated.conversion.go
index 7f681eac3..784d3a19e 100644
--- a/apis/flowcollector/v1beta1/zz_generated.conversion.go
+++ b/apis/flowcollector/v1beta1/zz_generated.conversion.go
@@ -88,6 +88,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 }); err != nil {
 return err
 }
+ if err := s.AddGeneratedConversionFunc((*EBPFFlowFilterRule)(nil), (*v1beta2.EBPFFlowFilterRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(a.(*EBPFFlowFilterRule), b.(*v1beta2.EBPFFlowFilterRule), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*v1beta2.EBPFFlowFilterRule)(nil), (*EBPFFlowFilterRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(a.(*v1beta2.EBPFFlowFilterRule), b.(*EBPFFlowFilterRule), scope)
+ }); err != nil {
+ return err
+ }
 if err := s.AddGeneratedConversionFunc((*EBPFMetrics)(nil), (*v1beta2.EBPFMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error {
 return Convert_v1beta1_EBPFMetrics_To_v1beta2_EBPFMetrics(a.(*EBPFMetrics), b.(*v1beta2.EBPFMetrics), scope)
 }); err != nil {
@@ -517,6 +527,33 @@ func Convert_v1beta2_ConsolePluginPortConfig_To_v1beta1_ConsolePluginPortConfig(
 func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFilter, out *v1beta2.EBPFFlowFilter, s conversion.Scope) error {
 out.Enable = (*bool)(unsafe.Pointer(in.Enable))
+ if err := Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
+ return err
+ }
+ out.FlowFilterRules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ return nil
+}
+
+// Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter is an autogenerated conversion function.
+func Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFilter, out *v1beta2.EBPFFlowFilter, s conversion.Scope) error {
+ return autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in, out, s)
+}
+
+func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error {
+ out.Enable = (*bool)(unsafe.Pointer(in.Enable))
+ if err := Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
+ return err
+ }
+ out.FlowFilterRules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ return nil
+}
+
+// Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter is an autogenerated conversion function.
+func Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error { + return autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in, out, s) +} + +func autoConvert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(in *EBPFFlowFilterRule, out *v1beta2.EBPFFlowFilterRule, s conversion.Scope) error { out.CIDR = in.CIDR out.Action = in.Action out.Protocol = in.Protocol @@ -526,19 +563,19 @@ func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFi out.DestPorts = in.DestPorts out.Ports = in.Ports out.PeerIP = in.PeerIP - out.ICMPType = (*int)(unsafe.Pointer(in.ICMPType)) out.ICMPCode = (*int)(unsafe.Pointer(in.ICMPCode)) + out.ICMPType = (*int)(unsafe.Pointer(in.ICMPType)) out.PktDrops = (*bool)(unsafe.Pointer(in.PktDrops)) + out.Sampling = (*uint32)(unsafe.Pointer(in.Sampling)) return nil } -// Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter is an autogenerated conversion function. -func Convert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFilter, out *v1beta2.EBPFFlowFilter, s conversion.Scope) error { - return autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in, out, s) +// Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule is an autogenerated conversion function. +func Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(in *EBPFFlowFilterRule, out *v1beta2.EBPFFlowFilterRule, s conversion.Scope) error { + return autoConvert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(in, out, s) } -func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error { - out.Enable = (*bool)(unsafe.Pointer(in.Enable)) +func autoConvert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(in *v1beta2.EBPFFlowFilterRule, out *EBPFFlowFilterRule, s conversion.Scope) error { out.CIDR = in.CIDR out.Action = in.Action out.Protocol = in.Protocol @@ -551,12 +588,13 @@ func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EB out.ICMPCode = (*int)(unsafe.Pointer(in.ICMPCode)) out.ICMPType = (*int)(unsafe.Pointer(in.ICMPType)) out.PktDrops = (*bool)(unsafe.Pointer(in.PktDrops)) + out.Sampling = (*uint32)(unsafe.Pointer(in.Sampling)) return nil } -// Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter is an autogenerated conversion function. -func Convert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EBPFFlowFilter, out *EBPFFlowFilter, s conversion.Scope) error { - return autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in, out, s) +// Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule is an autogenerated conversion function. 
+func Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(in *v1beta2.EBPFFlowFilterRule, out *EBPFFlowFilterRule, s conversion.Scope) error { + return autoConvert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(in, out, s) } func autoConvert_v1beta1_EBPFMetrics_To_v1beta2_EBPFMetrics(in *EBPFMetrics, out *v1beta2.EBPFMetrics, s conversion.Scope) error { diff --git a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go index b89109301..e79aca3d5 100644 --- a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go +++ b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go @@ -130,16 +130,39 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { *out = new(bool) **out = **in } + in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule) + if in.FlowFilterRules != nil { + in, out := &in.FlowFilterRules, &out.FlowFilterRules + *out = make([]EBPFFlowFilterRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter. +func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter { + if in == nil { + return nil + } + out := new(EBPFFlowFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBPFFlowFilterRule) DeepCopyInto(out *EBPFFlowFilterRule) { + *out = *in out.SourcePorts = in.SourcePorts out.DestPorts = in.DestPorts out.Ports = in.Ports - if in.ICMPType != nil { - in, out := &in.ICMPType, &out.ICMPType + if in.ICMPCode != nil { + in, out := &in.ICMPCode, &out.ICMPCode *out = new(int) **out = **in } - if in.ICMPCode != nil { - in, out := &in.ICMPCode, &out.ICMPCode + if in.ICMPType != nil { + in, out := &in.ICMPType, &out.ICMPType *out = new(int) **out = **in } @@ -148,14 +171,19 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { *out = new(bool) **out = **in } + if in.Sampling != nil { + in, out := &in.Sampling, &out.Sampling + *out = new(uint32) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter. -func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilterRule. +func (in *EBPFFlowFilterRule) DeepCopy() *EBPFFlowFilterRule { if in == nil { return nil } - out := new(EBPFFlowFilter) + out := new(EBPFFlowFilterRule) in.DeepCopyInto(out) return out } diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go index 32f100a17..02057da06 100644 --- a/apis/flowcollector/v1beta2/flowcollector_types.go +++ b/apis/flowcollector/v1beta2/flowcollector_types.go @@ -216,11 +216,8 @@ type EBPFMetrics struct { DisableAlerts []EBPFAgentAlert `json:"disableAlerts"` } -// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering. -type EBPFFlowFilter struct { - // Set `enable` to `true` to enable the eBPF flow filtering feature. - Enable *bool `json:"enable,omitempty"` - +// `EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule. +type EBPFFlowFilterRule struct { // `cidr` defines the IP CIDR to filter flows by. 
 // Examples: `10.10.10.0/24` or `100:100:100:100::/64`
 CIDR string `json:"cidr,omitempty"`
@@ -281,6 +278,24 @@ type EBPFFlowFilter struct {
 // `pktDrops` optionally filters only flows containing packet drops.
 // +optional
 PktDrops *bool `json:"pktDrops,omitempty"`
+
+ // `sampling` defines the sampling rate for the matched flows.
+ // +optional
+ Sampling *uint32 `json:"sampling,omitempty"`
+}
+
+// `EBPFFlowFilter` defines the desired eBPF agent configuration regarding flow filtering.
+type EBPFFlowFilter struct {
+ // Set `enable` to `true` to enable the eBPF flow filtering feature.
+ Enable *bool `json:"enable,omitempty"`
+
+ // [Deprecated (*)] This setting is no longer used; define filters in `rules` instead.
+ EBPFFlowFilterRule `json:",inline"`
+
+ // `flowFilterRules` defines a list of eBPF agent flow filtering rules.
+ // +kubebuilder:validation:MinItems:=1
+ // +kubebuilder:validation:MaxItems:=16
+ FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
 }

 // `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information

diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
index fb79a02aa..a2b6f301f 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
@@ -4,6 +4,7 @@ import (
 "context"
 "errors"
 "fmt"
+ "net"
 "slices"
 "strconv"
 "strings"
@@ -106,32 +107,91 @@ func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollector) (adm
 }
 var errs []error
 if fc.Spec.Agent.EBPF.FlowFilter != nil && fc.Spec.Agent.EBPF.FlowFilter.Enable != nil && *fc.Spec.Agent.EBPF.FlowFilter.Enable {
- hasPorts := fc.Spec.Agent.EBPF.FlowFilter.Ports.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.Ports.StrVal != ""
- if hasPorts {
- if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.Ports); err != nil {
- errs = append(errs, err)
- }
+ for i := range fc.Spec.Agent.EBPF.FlowFilter.FlowFilterRules {
+ errs = append(errs, validateFilter(&fc.Spec.Agent.EBPF.FlowFilter.FlowFilterRules[i])...)
 }
- hasSrcPorts := fc.Spec.Agent.EBPF.FlowFilter.SourcePorts.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.SourcePorts.StrVal != ""
- if hasSrcPorts {
- if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.SourcePorts); err != nil {
- errs = append(errs, err)
- }
+ errs = append(errs, validateFilter(fc.Spec.Agent.EBPF.FlowFilter)...)
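+ // The deprecated inline filter is still validated here, so pre-`rules` configurations keep working.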
+ } + + return warnings, errs +} + +type filter interface { + getCIDR() string + getPorts() intstr.IntOrString + getSrcPorts() intstr.IntOrString + getDstPorts() intstr.IntOrString +} + +func (f *EBPFFlowFilter) getCIDR() string { + return f.CIDR +} + +func (f *EBPFFlowFilter) getPorts() intstr.IntOrString { + return f.Ports +} + +func (f *EBPFFlowFilter) getSrcPorts() intstr.IntOrString { + return f.SourcePorts +} + +func (f *EBPFFlowFilter) getDstPorts() intstr.IntOrString { + return f.DestPorts +} + +func (f *EBPFFlowFilterRule) getCIDR() string { + return f.CIDR +} + +func (f *EBPFFlowFilterRule) getPorts() intstr.IntOrString { + return f.Ports +} + +func (f *EBPFFlowFilterRule) getSrcPorts() intstr.IntOrString { + return f.SourcePorts +} + +func (f *EBPFFlowFilterRule) getDstPorts() intstr.IntOrString { + return f.DestPorts +} + +func validateFilter[T filter](f T) []error { + var errs []error + + cidr := f.getCIDR() + if cidr != "" { + if _, _, err := net.ParseCIDR(cidr); err != nil { + errs = append(errs, err) } - hasDstPorts := fc.Spec.Agent.EBPF.FlowFilter.DestPorts.IntVal > 0 || fc.Spec.Agent.EBPF.FlowFilter.DestPorts.StrVal != "" - if hasDstPorts { - if err := validateFilterPortConfig(fc.Spec.Agent.EBPF.FlowFilter.DestPorts); err != nil { - errs = append(errs, err) - } + } + ports := f.getPorts() + hasPorts := ports.IntVal > 0 || ports.StrVal != "" + if hasPorts { + if err := validateFilterPortConfig(ports); err != nil { + errs = append(errs, err) } - if hasPorts && hasSrcPorts { - errs = append(errs, errors.New("cannot configure agent filter with ports and sourcePorts, they are mutually exclusive")) + } + srcPorts := f.getSrcPorts() + hasSrcPorts := srcPorts.IntVal > 0 || srcPorts.StrVal != "" + if hasSrcPorts { + if err := validateFilterPortConfig(srcPorts); err != nil { + errs = append(errs, err) } - if hasPorts && hasDstPorts { - errs = append(errs, errors.New("cannot configure agent filter with ports and destPorts, they are mutually exclusive")) + } + dstPorts := f.getDstPorts() + hasDstPorts := dstPorts.IntVal > 0 || dstPorts.StrVal != "" + if hasDstPorts { + if err := validateFilterPortConfig(dstPorts); err != nil { + errs = append(errs, err) } } - return warnings, errs + if hasPorts && hasSrcPorts { + errs = append(errs, errors.New("cannot configure agent filter with ports and sourcePorts, they are mutually exclusive")) + } + if hasPorts && hasDstPorts { + errs = append(errs, errors.New("cannot configure agent filter with ports and destPorts, they are mutually exclusive")) + } + return errs } func validateFilterPortConfig(value intstr.IntOrString) error { diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go index 055df760b..e4314f618 100644 --- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go @@ -43,10 +43,14 @@ func TestValidateAgent(t *testing.T) { Privileged: true, Sampling: ptr.To(int32(100)), FlowFilter: &EBPFFlowFilter{ - Enable: ptr.To(true), - Action: "Accept", - CIDR: "0.0.0.0/0", - Direction: "Egress", + Enable: ptr.To(true), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Action: "Accept", + CIDR: "0.0.0.0/0", + Direction: "Egress", + }, + }, }, }, }, @@ -118,11 +122,15 @@ func TestValidateAgent(t *testing.T) { Type: AgentEBPF, EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ - Enable: ptr.To(true), - Action: "Accept", - CIDR: "0.0.0.0/0", - Ports: intstr.FromInt(80), 
- SourcePorts: intstr.FromInt(443), + Enable: ptr.To(true), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Action: "Accept", + CIDR: "0.0.0.0/0", + Ports: intstr.FromInt(80), + SourcePorts: intstr.FromInt(443), + }, + }, }, }, }, @@ -142,7 +150,11 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - Ports: intstr.FromString("abcd"), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Ports: intstr.FromString("abcd"), + }, + }, }, }, }, @@ -162,7 +174,11 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - Ports: intstr.FromString("80-255"), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Ports: intstr.FromString("80-255"), + }, + }, }, }, }, @@ -181,7 +197,11 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - Ports: intstr.FromString("255-80"), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Ports: intstr.FromString("255-80"), + }, + }, }, }, }, @@ -201,7 +221,11 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - Ports: intstr.FromString("80-?"), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Ports: intstr.FromString("80-?"), + }, + }, }, }, }, @@ -221,7 +245,11 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - Ports: intstr.FromString("255,80"), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Ports: intstr.FromString("255,80"), + }, + }, }, }, }, @@ -240,7 +268,11 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - Ports: intstr.FromString("80,100,250"), + FlowFilterRules: []EBPFFlowFilterRule{ + { + Ports: intstr.FromString("80,100,250"), + }, + }, }, }, }, @@ -248,6 +280,30 @@ func TestValidateAgent(t *testing.T) { }, expectedError: "expected two integers", }, + { + name: "FlowFilter expect invalid CIDR", + fc: &FlowCollector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: FlowCollectorSpec{ + Agent: FlowCollectorAgent{ + Type: AgentEBPF, + EBPF: FlowCollectorEBPF{ + FlowFilter: &EBPFFlowFilter{ + Enable: ptr.To(true), + FlowFilterRules: []EBPFFlowFilterRule{ + { + CIDR: "1.1.1.1", + }, + }, + }, + }, + }, + }, + }, + expectedError: "invalid CIDR", + }, } CurrentClusterInfo = &cluster.Info{} diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go index fd6634a9b..6461b82b2 100644 --- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go +++ b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go @@ -289,6 +289,29 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { *out = new(bool) **out = **in } + in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule) + if in.FlowFilterRules != nil { + in, out := &in.FlowFilterRules, &out.FlowFilterRules + *out = make([]EBPFFlowFilterRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter. +func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter { + if in == nil { + return nil + } + out := new(EBPFFlowFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EBPFFlowFilterRule) DeepCopyInto(out *EBPFFlowFilterRule) { + *out = *in out.SourcePorts = in.SourcePorts out.DestPorts = in.DestPorts out.Ports = in.Ports @@ -307,14 +330,19 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { *out = new(bool) **out = **in } + if in.Sampling != nil { + in, out := &in.Sampling, &out.Sampling + *out = new(uint32) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilter. -func (in *EBPFFlowFilter) DeepCopy() *EBPFFlowFilter { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBPFFlowFilterRule. +func (in *EBPFFlowFilterRule) DeepCopy() *EBPFFlowFilterRule { if in == nil { return nil } - out := new(EBPFFlowFilter) + out := new(EBPFFlowFilterRule) in.DeepCopyInto(out) return out } diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index d883e8fe2..7f6791517 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -160,68 +160,72 @@ spec: regarding flow filtering' properties: action: - description: Action defines the action to perform on the - flows that match the filter. + description: '`action` defines the action to perform on + the flows that match the filter. The available options + are `Accept`, which is the default, and `Reject`.' enum: - Accept - Reject type: string cidr: description: |- - CIDR defines the IP CIDR to filter flows by. - Example: 10.10.10.0/24 or 100:100:100:100::/64 + `cidr` defines the IP CIDR to filter flows by. + Examples: `10.10.10.0/24` or `100:100:100:100::/64` type: string destPorts: anyOf: - type: integer - type: string description: |- - DestPorts defines the destination ports to filter flows by. - To filter a single port, set a single port as an integer value. For example, destPorts: 80. - To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100". + `destPorts` optionally defines the destination ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. x-kubernetes-int-or-string: true direction: - description: Direction defines the direction to filter - flows by. + description: '`direction` optionally defines a direction + to filter flows by. The available options are `Ingress` + and `Egress`.' enum: - Ingress - Egress type: string enable: - description: Set `enable` to `true` to enable eBPF flow - filtering feature. + description: Set `enable` to `true` to enable the eBPF + flow filtering feature. type: boolean icmpCode: - description: ICMPCode defines the ICMP code to filter - flows by. + description: '`icmpCode`, for Internet Control Message + Protocol (ICMP) traffic, optionally defines the ICMP + code to filter flows by.' type: integer icmpType: - description: ICMPType defines the ICMP type to filter - flows by. + description: '`icmpType`, for ICMP traffic, optionally + defines the ICMP type to filter flows by.' type: integer peerIP: description: |- - PeerIP defines the IP address to filter flows by. - Example: 10.10.10.10 + `peerIP` optionally defines the remote IP address to filter flows by. + Example: `10.10.10.10`. 
type: string pktDrops: - description: '`pktDrops`, to filter flows with packet - drops' + description: '`pktDrops` optionally filters only flows + containing packet drops.' type: boolean ports: anyOf: - type: integer - type: string description: |- - Ports defines the ports to filter flows by. it can be user for either source or destination ports. - To filter a single port, set a single port as an integer value. For example, ports: 80. - To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100". + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. + To filter a single port, set a single port as an integer value. For example, `ports: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. x-kubernetes-int-or-string: true protocol: - description: Protocol defines the protocol to filter flows - by. + description: '`protocol` optionally defines a protocol + to filter flows by. The available options are `TCP`, + `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.' enum: - TCP - UDP @@ -229,19 +233,139 @@ spec: - ICMPv6 - SCTP type: string + rules: + description: '`flowFilterRules` defines a list of ebpf + agent flow filtering rules' + items: + description: '`EBPFFlowFilterRule` defines the desired + eBPF agent configuration regarding flow filtering + rule.' + properties: + action: + description: '`action` defines the action to perform + on the flows that match the filter. The available + options are `Accept`, which is the default, and + `Reject`.' + enum: + - Accept + - Reject + type: string + cidr: + description: |- + `cidr` defines the IP CIDR to filter flows by. + Examples: `10.10.10.0/24` or `100:100:100:100::/64` + type: string + destPorts: + anyOf: + - type: integer + - type: string + description: |- + `destPorts` optionally defines the destination ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + direction: + description: '`direction` optionally defines a direction + to filter flows by. The available options are + `Ingress` and `Egress`.' + enum: + - Ingress + - Egress + type: string + icmpCode: + description: '`icmpCode`, for Internet Control Message + Protocol (ICMP) traffic, optionally defines the + ICMP code to filter flows by.' + type: integer + icmpType: + description: '`icmpType`, for ICMP traffic, optionally + defines the ICMP type to filter flows by.' + type: integer + peerIP: + description: |- + `peerIP` optionally defines the remote IP address to filter flows by. + Example: `10.10.10.10`. + type: string + pktDrops: + description: '`pktDrops` optionally filters only + flows containing packet drops.' + type: boolean + ports: + anyOf: + - type: integer + - type: string + description: |- + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. + To filter a single port, set a single port as an integer value. For example, `ports: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. + To filter two ports, use a "port1,port2" in string format. 
For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + protocol: + description: '`protocol` optionally defines a protocol + to filter flows by. The available options are + `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.' + enum: + - TCP + - UDP + - ICMP + - ICMPv6 + - SCTP + type: string + sampling: + description: '`sampling` sampling rate for the matched + flow' + format: int32 + type: integer + sourcePorts: + anyOf: + - type: integer + - type: string + description: |- + `sourcePorts` optionally defines the source ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + tcpFlags: + description: |- + `tcpFlags` optionally defines TCP flags to filter flows by. + In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. + enum: + - SYN + - SYN-ACK + - ACK + - FIN + - RST + - URG + - ECE + - CWR + - FIN-ACK + - RST-ACK + type: string + type: object + maxItems: 16 + minItems: 1 + type: array + sampling: + description: '`sampling` sampling rate for the matched + flow' + format: int32 + type: integer sourcePorts: anyOf: - type: integer - type: string description: |- - SourcePorts defines the source ports to filter flows by. - To filter a single port, set a single port as an integer value. For example, sourcePorts: 80. - To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100". + `sourcePorts` optionally defines the source ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. x-kubernetes-int-or-string: true tcpFlags: - description: '`tcpFlags` defines the TCP flags to filter - flows by.' + description: |- + `tcpFlags` optionally defines TCP flags to filter flows by. + In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. enum: - SYN - SYN-ACK @@ -3921,6 +4045,125 @@ spec: - ICMPv6 - SCTP type: string + rules: + description: '`flowFilterRules` defines a list of ebpf + agent flow filtering rules' + items: + description: '`EBPFFlowFilterRule` defines the desired + eBPF agent configuration regarding flow filtering + rule.' + properties: + action: + description: '`action` defines the action to perform + on the flows that match the filter. The available + options are `Accept`, which is the default, and + `Reject`.' + enum: + - Accept + - Reject + type: string + cidr: + description: |- + `cidr` defines the IP CIDR to filter flows by. + Examples: `10.10.10.0/24` or `100:100:100:100::/64` + type: string + destPorts: + anyOf: + - type: integer + - type: string + description: |- + `destPorts` optionally defines the destination ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. 
+ To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + direction: + description: '`direction` optionally defines a direction + to filter flows by. The available options are + `Ingress` and `Egress`.' + enum: + - Ingress + - Egress + type: string + icmpCode: + description: '`icmpCode`, for Internet Control Message + Protocol (ICMP) traffic, optionally defines the + ICMP code to filter flows by.' + type: integer + icmpType: + description: '`icmpType`, for ICMP traffic, optionally + defines the ICMP type to filter flows by.' + type: integer + peerIP: + description: |- + `peerIP` optionally defines the remote IP address to filter flows by. + Example: `10.10.10.10`. + type: string + pktDrops: + description: '`pktDrops` optionally filters only + flows containing packet drops.' + type: boolean + ports: + anyOf: + - type: integer + - type: string + description: |- + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. + To filter a single port, set a single port as an integer value. For example, `ports: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + protocol: + description: '`protocol` optionally defines a protocol + to filter flows by. The available options are + `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.' + enum: + - TCP + - UDP + - ICMP + - ICMPv6 + - SCTP + type: string + sampling: + description: '`sampling` sampling rate for the matched + flow' + format: int32 + type: integer + sourcePorts: + anyOf: + - type: integer + - type: string + description: |- + `sourcePorts` optionally defines the source ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + tcpFlags: + description: |- + `tcpFlags` optionally defines TCP flags to filter flows by. + In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. + enum: + - SYN + - SYN-ACK + - ACK + - FIN + - RST + - URG + - ECE + - CWR + - FIN-ACK + - RST-ACK + type: string + type: object + maxItems: 16 + minItems: 1 + type: array + sampling: + description: '`sampling` sampling rate for the matched + flow' + format: int32 + type: integer sourcePorts: anyOf: - type: integer diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 45e839976..6e348b6f8 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -144,61 +144,61 @@ spec: description: '`flowFilter` defines the eBPF agent configuration regarding flow filtering' properties: action: - description: Action defines the action to perform on the flows that match the filter. + description: '`action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.' enum: - Accept - Reject type: string cidr: description: |- - CIDR defines the IP CIDR to filter flows by. 
- Example: 10.10.10.0/24 or 100:100:100:100::/64 + `cidr` defines the IP CIDR to filter flows by. + Examples: `10.10.10.0/24` or `100:100:100:100::/64` type: string destPorts: anyOf: - type: integer - type: string description: |- - DestPorts defines the destination ports to filter flows by. - To filter a single port, set a single port as an integer value. For example, destPorts: 80. - To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100". + `destPorts` optionally defines the destination ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. x-kubernetes-int-or-string: true direction: - description: Direction defines the direction to filter flows by. + description: '`direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.' enum: - Ingress - Egress type: string enable: - description: Set `enable` to `true` to enable eBPF flow filtering feature. + description: Set `enable` to `true` to enable the eBPF flow filtering feature. type: boolean icmpCode: - description: ICMPCode defines the ICMP code to filter flows by. + description: '`icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.' type: integer icmpType: - description: ICMPType defines the ICMP type to filter flows by. + description: '`icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.' type: integer peerIP: description: |- - PeerIP defines the IP address to filter flows by. - Example: 10.10.10.10 + `peerIP` optionally defines the remote IP address to filter flows by. + Example: `10.10.10.10`. type: string pktDrops: - description: '`pktDrops`, to filter flows with packet drops' + description: '`pktDrops` optionally filters only flows containing packet drops.' type: boolean ports: anyOf: - type: integer - type: string description: |- - Ports defines the ports to filter flows by. it can be user for either source or destination ports. - To filter a single port, set a single port as an integer value. For example, ports: 80. - To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100". + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. + To filter a single port, set a single port as an integer value. For example, `ports: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. x-kubernetes-int-or-string: true protocol: - description: Protocol defines the protocol to filter flows by. + description: '`protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.' enum: - TCP - UDP @@ -206,18 +206,123 @@ spec: - ICMPv6 - SCTP type: string + rules: + description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules' + items: + description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.' + properties: + action: + description: '`action` defines the action to perform on the flows that match the filter. 
The available options are `Accept`, which is the default, and `Reject`.' + enum: + - Accept + - Reject + type: string + cidr: + description: |- + `cidr` defines the IP CIDR to filter flows by. + Examples: `10.10.10.0/24` or `100:100:100:100::/64` + type: string + destPorts: + anyOf: + - type: integer + - type: string + description: |- + `destPorts` optionally defines the destination ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + direction: + description: '`direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.' + enum: + - Ingress + - Egress + type: string + icmpCode: + description: '`icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.' + type: integer + icmpType: + description: '`icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.' + type: integer + peerIP: + description: |- + `peerIP` optionally defines the remote IP address to filter flows by. + Example: `10.10.10.10`. + type: string + pktDrops: + description: '`pktDrops` optionally filters only flows containing packet drops.' + type: boolean + ports: + anyOf: + - type: integer + - type: string + description: |- + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. + To filter a single port, set a single port as an integer value. For example, `ports: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + protocol: + description: '`protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.' + enum: + - TCP + - UDP + - ICMP + - ICMPv6 + - SCTP + type: string + sampling: + description: '`sampling` sampling rate for the matched flow' + format: int32 + type: integer + sourcePorts: + anyOf: + - type: integer + - type: string + description: |- + `sourcePorts` optionally defines the source ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + tcpFlags: + description: |- + `tcpFlags` optionally defines TCP flags to filter flows by. + In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. + enum: + - SYN + - SYN-ACK + - ACK + - FIN + - RST + - URG + - ECE + - CWR + - FIN-ACK + - RST-ACK + type: string + type: object + maxItems: 16 + minItems: 1 + type: array + sampling: + description: '`sampling` sampling rate for the matched flow' + format: int32 + type: integer sourcePorts: anyOf: - type: integer - type: string description: |- - SourcePorts defines the source ports to filter flows by. - To filter a single port, set a single port as an integer value. For example, sourcePorts: 80. 
- To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100". + `sourcePorts` optionally defines the source ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. x-kubernetes-int-or-string: true tcpFlags: - description: '`tcpFlags` defines the TCP flags to filter flows by.' + description: |- + `tcpFlags` optionally defines TCP flags to filter flows by. + In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. enum: - SYN - SYN-ACK @@ -3610,6 +3715,109 @@ spec: - ICMPv6 - SCTP type: string + rules: + description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules' + items: + description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.' + properties: + action: + description: '`action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.' + enum: + - Accept + - Reject + type: string + cidr: + description: |- + `cidr` defines the IP CIDR to filter flows by. + Examples: `10.10.10.0/24` or `100:100:100:100::/64` + type: string + destPorts: + anyOf: + - type: integer + - type: string + description: |- + `destPorts` optionally defines the destination ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + direction: + description: '`direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.' + enum: + - Ingress + - Egress + type: string + icmpCode: + description: '`icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.' + type: integer + icmpType: + description: '`icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.' + type: integer + peerIP: + description: |- + `peerIP` optionally defines the remote IP address to filter flows by. + Example: `10.10.10.10`. + type: string + pktDrops: + description: '`pktDrops` optionally filters only flows containing packet drops.' + type: boolean + ports: + anyOf: + - type: integer + - type: string + description: |- + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. + To filter a single port, set a single port as an integer value. For example, `ports: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + protocol: + description: '`protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.' 
+ enum: + - TCP + - UDP + - ICMP + - ICMPv6 + - SCTP + type: string + sampling: + description: '`sampling` sampling rate for the matched flow' + format: int32 + type: integer + sourcePorts: + anyOf: + - type: integer + - type: string + description: |- + `sourcePorts` optionally defines the source ports to filter flows by. + To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. + To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. + To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + x-kubernetes-int-or-string: true + tcpFlags: + description: |- + `tcpFlags` optionally defines TCP flags to filter flows by. + In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. + enum: + - SYN + - SYN-ACK + - ACK + - FIN + - RST + - URG + - ECE + - CWR + - FIN-ACK + - RST-ACK + type: string + type: object + maxItems: 16 + minItems: 1 + type: array + sampling: + description: '`sampling` sampling rate for the matched flow' + format: int32 + type: integer sourcePorts: anyOf: - type: integer diff --git a/config/samples/flows_v1beta2_flowcollector.yaml b/config/samples/flows_v1beta2_flowcollector.yaml index 07358df69..31df2805f 100644 --- a/config/samples/flows_v1beta2_flowcollector.yaml +++ b/config/samples/flows_v1beta2_flowcollector.yaml @@ -29,9 +29,25 @@ spec: excludeInterfaces: ["lo"] kafkaBatchSize: 1048576 #flowFilter: + # rules: + # - action: Accept + # cidr: 10.128.0.1/24 + # ports: 6443 + # protocol: TCP + # sampling: 10 + # - action: Accept + # cidr: 10.129.0.1/24 + # ports: 53 + # protocol: UDP + # sampling: 20 + # - action: Accept + # cidr: 172.30.0.0/16 + # protocol: TCP + # sampling: 30 + # sourcePorts: 443 # tcpFlags: "SYN" # action: Accept - # cidr: 0.0.0.0/0 + # cidr: 2.2.2.2/24 # protocol: TCP # sourcePorts: 53 # enable: true diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml index 71ad3231c..feed43ab1 100644 --- a/controllers/consoleplugin/config/static-frontend-config.yaml +++ b/controllers/consoleplugin/config/static-frontend-config.yaml @@ -346,6 +346,11 @@ columns: calculated: '[SrcK8S_HostIP,DstK8S_HostIP]' default: false width: 10 + - id: Sampling + name: Sampling + field: Sampling + default: false + width: 10 - id: K8S_HostName name: Node Name calculated: '[SrcK8S_HostName,DstK8S_HostName]' @@ -1239,6 +1244,9 @@ fields: - name: Dscp type: number description: Differentiated Services Code Point (DSCP) value + - name: Sampling + type: number + description: Sampling rate used for this flow - name: IcmpType type: number description: ICMP type diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index a6b511593..7c3ad17d9 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -2,10 +2,13 @@ package ebpf import ( "context" + "encoding/json" "fmt" "strconv" "strings" + "github.com/go-logr/logr" + ebpfconfig "github.com/netobserv/netobserv-ebpf-agent/pkg/agent" flowslatest "github.com/netobserv/network-observability-operator/apis/flowcollector/v1beta2" "github.com/netobserv/network-observability-operator/controllers/constants" "github.com/netobserv/network-observability-operator/controllers/ebpf/internal/permissions" @@ -14,7 +17,6 @@ import ( 
"github.com/netobserv/network-observability-operator/pkg/volumes" "github.com/netobserv/network-observability-operator/pkg/watchers" - "github.com/go-logr/logr" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -64,24 +66,7 @@ const ( envMetricsTLSCertPath = "METRICS_TLS_CERT_PATH" envMetricsTLSKeyPath = "METRICS_TLS_KEY_PATH" envEnableFlowFilter = "ENABLE_FLOW_FILTER" - envFilterIPCIDR = "FILTER_IP_CIDR" - envFilterAction = "FILTER_ACTION" - envFilterDirection = "FILTER_DIRECTION" - envFilterProtocol = "FILTER_PROTOCOL" - envFilterSourcePort = "FILTER_SOURCE_PORT" - envFilterDestPort = "FILTER_DESTINATION_PORT" - envFilterPort = "FILTER_PORT" - envFilterSourcePortRange = "FILTER_SOURCE_PORT_RANGE" - envFilterDestPortRange = "FILTER_DESTINATION_PORT_RANGE" - envFilterPortRange = "FILTER_PORT_RANGE" - envFilterSourcePorts = "FILTER_SOURCE_PORTS" - envFilterDestPorts = "FILTER_DESTINATION_PORTS" - envFilterPorts = "FILTER_PORTS" - envFilterICMPType = "FILTER_ICMP_TYPE" - envFilterICMPCode = "FILTER_ICMP_CODE" - envFilterPeerIPAddress = "FILTER_PEER_IP" - envFilterTCPFlags = "FILTER_TCP_FLAGS" - envFilterPktDrops = "FILTER_DROPS" + envFilterRules = "FLOW_FILTER_RULES" envEnablePacketTranslation = "ENABLE_PKT_TRANSLATION" envEnableEbpfMgr = "EBPF_PROGRAM_MANAGER_MODE" envListSeparator = "," @@ -517,109 +502,126 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC if helper.IsEBFPFlowFilterEnabled(&coll.Spec.Agent.EBPF) { config = append(config, corev1.EnvVar{Name: envEnableFlowFilter, Value: "true"}) - - config = append(config, c.configureFlowFilter(coll.Spec.Agent.EBPF.FlowFilter, config)...) + if len(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules) != 0 { + if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules); filterRules != nil { + config = append(config, filterRules...) + } + } else { + if filter := c.configureFlowFilter(coll.Spec.Agent.EBPF.FlowFilter); filter != nil { + config = append(config, filter...) 
+ } + } } return config, nil } -// nolint:cyclop -func (c *AgentController) configureFlowFilter(filter *flowslatest.EBPFFlowFilter, config []corev1.EnvVar) []corev1.EnvVar { - if filter.CIDR != "" { - config = append(config, corev1.EnvVar{Name: envFilterIPCIDR, - Value: filter.CIDR, - }) +func mapFlowFilterRuleToFilter(rule *flowslatest.EBPFFlowFilterRule) ebpfconfig.FlowFilter { + f := ebpfconfig.FlowFilter{ + FilterIPCIDR: rule.CIDR, + FilterAction: rule.Action, + FilterDirection: rule.Direction, + FilterProtocol: rule.Protocol, } - if filter.Action != "" { - config = append(config, corev1.EnvVar{Name: envFilterAction, - Value: filter.Action, - }) + + if rule.ICMPType != nil && *rule.ICMPType != 0 { + f.FilterICMPType = *rule.ICMPType } - if filter.Direction != "" { - config = append(config, corev1.EnvVar{Name: envFilterDirection, - Value: filter.Direction, - }) + if rule.ICMPCode != nil && *rule.ICMPCode != 0 { + f.FilterICMPCode = *rule.ICMPCode } - if filter.Protocol != "" { - config = append(config, corev1.EnvVar{Name: envFilterProtocol, - Value: filter.Protocol, - }) + + processPorts(rule.SourcePorts, &f.FilterSourcePort, &f.FilterSourcePorts, &f.FilterSourcePortRange) + processPorts(rule.DestPorts, &f.FilterDestinationPort, &f.FilterDestinationPorts, &f.FilterDestinationPortRange) + processPorts(rule.Ports, &f.FilterPort, &f.FilterPorts, &f.FilterPortRange) + + if rule.PeerIP != "" { + f.FilterPeerIP = rule.PeerIP + } + if rule.TCPFlags != "" { + f.FilterTCPFlags = rule.TCPFlags } + if rule.PktDrops != nil && *rule.PktDrops { + f.FilterDrops = *rule.PktDrops + } + if rule.Sampling != nil && *rule.Sampling != 0 { + f.FilterSample = *rule.Sampling + } + + return f +} + +func mapFlowFilterToFilter(filter *flowslatest.EBPFFlowFilter) ebpfconfig.FlowFilter { + f := ebpfconfig.FlowFilter{ + FilterIPCIDR: filter.CIDR, + FilterAction: filter.Action, + FilterDirection: filter.Direction, + FilterProtocol: filter.Protocol, + } + if filter.ICMPType != nil && *filter.ICMPType != 0 { - config = append(config, corev1.EnvVar{Name: envFilterICMPType, - Value: strconv.Itoa(*filter.ICMPType), - }) + f.FilterICMPType = *filter.ICMPType } if filter.ICMPCode != nil && *filter.ICMPCode != 0 { - config = append(config, corev1.EnvVar{Name: envFilterICMPCode, - Value: strconv.Itoa(*filter.ICMPCode)}) + f.FilterICMPCode = *filter.ICMPCode } - if filter.SourcePorts.Type == intstr.String { - if strings.Contains(filter.SourcePorts.String(), "-") { - config = append(config, corev1.EnvVar{Name: envFilterSourcePortRange, - Value: filter.SourcePorts.String(), - }) - } - if strings.Contains(filter.SourcePorts.String(), ",") { - config = append(config, corev1.EnvVar{Name: envFilterSourcePorts, - Value: filter.SourcePorts.String(), - }) - } + + processPorts(filter.SourcePorts, &f.FilterSourcePort, &f.FilterSourcePorts, &f.FilterSourcePortRange) + processPorts(filter.DestPorts, &f.FilterDestinationPort, &f.FilterDestinationPorts, &f.FilterDestinationPortRange) + processPorts(filter.Ports, &f.FilterPort, &f.FilterPorts, &f.FilterPortRange) + + if filter.PeerIP != "" { + f.FilterPeerIP = filter.PeerIP } - if filter.SourcePorts.Type == intstr.Int { - config = append(config, corev1.EnvVar{Name: envFilterSourcePort, - Value: strconv.Itoa(filter.SourcePorts.IntValue()), - }) + if filter.TCPFlags != "" { + f.FilterTCPFlags = filter.TCPFlags } - if filter.DestPorts.Type == intstr.String { - if strings.Contains(filter.DestPorts.String(), "-") { - config = append(config, corev1.EnvVar{Name: envFilterDestPortRange, - Value: 
filter.DestPorts.String(), - }) - } - if strings.Contains(filter.DestPorts.String(), ",") { - config = append(config, corev1.EnvVar{Name: envFilterDestPorts, - Value: filter.DestPorts.String(), - }) - } + if filter.PktDrops != nil && *filter.PktDrops { + f.FilterDrops = *filter.PktDrops } - if filter.DestPorts.Type == intstr.Int { - config = append(config, corev1.EnvVar{Name: envFilterDestPort, - Value: strconv.Itoa(filter.DestPorts.IntValue()), - }) + if filter.Sampling != nil && *filter.Sampling != 0 { + f.FilterSample = *filter.Sampling } - if filter.Ports.Type == intstr.String { - if strings.Contains(filter.Ports.String(), "-") { - config = append(config, corev1.EnvVar{Name: envFilterPortRange, - Value: filter.Ports.String(), - }) + + return f +} + +func processPorts(ports intstr.IntOrString, single *int32, list *string, rangeField *string) { + if ports.Type == intstr.String { + portStr := ports.String() + if strings.Contains(portStr, "-") { + *rangeField = portStr } - if strings.Contains(filter.Ports.String(), ",") { - config = append(config, corev1.EnvVar{Name: envFilterPorts, - Value: filter.Ports.String(), - }) + if strings.Contains(portStr, ",") { + *list = portStr } + } else if ports.Type == intstr.Int { + *single = int32(ports.IntValue()) } - if filter.Ports.Type == intstr.Int { - config = append(config, corev1.EnvVar{Name: envFilterPort, - Value: strconv.Itoa(filter.Ports.IntValue()), - }) - } - if filter.PeerIP != "" { - config = append(config, corev1.EnvVar{Name: envFilterPeerIPAddress, - Value: filter.PeerIP}) +} + +func (c *AgentController) configureFlowFiltersRules(rules []flowslatest.EBPFFlowFilterRule) []corev1.EnvVar { + filters := make([]ebpfconfig.FlowFilter, 0) + for i := range rules { + filters = append(filters, mapFlowFilterRuleToFilter(&rules[i])) } - if filter.TCPFlags != "" { - config = append(config, corev1.EnvVar{Name: envFilterTCPFlags, - Value: filter.TCPFlags, - }) + + jsonData, err := json.Marshal(filters) + if err != nil { + return nil } - if filter.PktDrops != nil && *filter.PktDrops { - config = append(config, corev1.EnvVar{Name: envFilterPktDrops, Value: "true"}) + return []corev1.EnvVar{{Name: envFilterRules, Value: string(jsonData)}} +} + +func (c *AgentController) configureFlowFilter(filter *flowslatest.EBPFFlowFilter) []corev1.EnvVar { + f := mapFlowFilterToFilter(filter) + jsonData, err := json.Marshal([]ebpfconfig.FlowFilter{f}) + if err != nil { + return nil } - return config + + return []corev1.EnvVar{{Name: envFilterRules, Value: string(jsonData)}} } func (c *AgentController) securityContext(coll *flowslatest.FlowCollector) *corev1.SecurityContext { diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index ab9d68e6a..99f6db0f9 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -437,7 +437,7 @@ in edge debug or support scenarios.
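Net effect of the agent_controller.go hunk above: the eighteen per-field `FILTER_*` environment variables collapse into a single JSON-encoded `FLOW_FILTER_RULES` value, one array entry per rule, built by `configureFlowFiltersRules` (rules list) or `configureFlowFilter` (legacy single filter). A minimal sketch of that round trip, using a local stand-in struct since the agent's real `ebpfconfig.FlowFilter` field set and JSON tags are only partially visible in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// flowFilter is a local stand-in for the agent's ebpfconfig.FlowFilter;
// field names mirror the hunk above, and the JSON tags here are an
// assumption -- the real encoding is defined in netobserv-ebpf-agent.
type flowFilter struct {
	FilterIPCIDR          string `json:",omitempty"`
	FilterAction          string `json:",omitempty"`
	FilterSourcePort      int32  `json:",omitempty"`
	FilterSourcePorts     string `json:",omitempty"`
	FilterSourcePortRange string `json:",omitempty"`
	FilterSample          uint32 `json:",omitempty"`
}

// processPorts reproduces the dispatch above: an integer fills the
// single-port field, a "start-end" string the range field, and a
// "port1,port2" string the list field.
func processPorts(ports intstr.IntOrString, single *int32, list, rangeField *string) {
	if ports.Type == intstr.String {
		s := ports.String()
		if strings.Contains(s, "-") {
			*rangeField = s
		}
		if strings.Contains(s, ",") {
			*list = s
		}
	} else if ports.Type == intstr.Int {
		*single = int32(ports.IntValue())
	}
}

func main() {
	f := flowFilter{FilterIPCIDR: "10.10.10.0/24", FilterAction: "Accept", FilterSample: 10}
	processPorts(intstr.FromString("80-100"),
		&f.FilterSourcePort, &f.FilterSourcePorts, &f.FilterSourcePortRange)

	// Every rule lands in one array, serialized into the single env var.
	jsonData, err := json.Marshal([]flowFilter{f})
	if err != nil {
		panic(err)
	}
	fmt.Printf("FLOW_FILTER_RULES=%s\n", jsonData)
}
```

With `omitempty`, zero-valued fields drop out of the JSON, so sparse rules keep the environment variable short.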
action enum - Action defines the action to perform on the flows that match the filter.
+ `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.

Enum: Accept, Reject
@@ -446,17 +446,17 @@ in edge debug or support scenarios.
cidr string - CIDR defines the IP CIDR to filter flows by. -Example: 10.10.10.0/24 or 100:100:100:100::/64
+ `cidr` defines the IP CIDR to filter flows by. +Examples: `10.10.10.0/24` or `100:100:100:100::/64`
false destPorts int or string - DestPorts defines the destination ports to filter flows by. -To filter a single port, set a single port as an integer value. For example, destPorts: 80. -To filter a range of ports, use a "start-end" range in string format. For example, destPorts: "80-100". + `destPorts` optionally defines the destination ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
false @@ -464,7 +464,7 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: direction enum - Direction defines the direction to filter flows by.
+ `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.

Enum: Ingress, Egress
@@ -473,45 +473,45 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: enable boolean - Set `enable` to `true` to enable eBPF flow filtering feature.
+ Set `enable` to `true` to enable the eBPF flow filtering feature.
false icmpCode integer - ICMPCode defines the ICMP code to filter flows by.
+ `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
false icmpType integer - ICMPType defines the ICMP type to filter flows by.
+ `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
false peerIP string - PeerIP defines the IP address to filter flows by. -Example: 10.10.10.10
+ `peerIP` optionally defines the remote IP address to filter flows by. +Example: `10.10.10.10`.
false pktDrops boolean - `pktDrops`, to filter flows with packet drops
+ `pktDrops` optionally filters only flows containing packet drops.
false ports int or string - Ports defines the ports to filter flows by. it can be user for either source or destination ports. -To filter a single port, set a single port as an integer value. For example, ports: 80. -To filter a range of ports, use a "start-end" range in string format. For example, ports: "80-100". + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. +To filter a single port, set a single port as an integer value. For example, `ports: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
false @@ -519,18 +519,167 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: protocol enum - Protocol defines the protocol to filter flows by.
+ `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.

Enum: TCP, UDP, ICMP, ICMPv6, SCTP
false + + rules + []object + + `rules` defines a list of eBPF agent flow filtering rules.
+ + false + + sampling + integer + + `sampling` defines the sampling rate for the matched flows.
+
+ Format: int32
+ + false + + sourcePorts + int or string + + `sourcePorts` optionally defines the source ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+ + false + + tcpFlags + enum + + `tcpFlags` optionally defines TCP flags to filter flows by. +In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+
+ Enum: SYN, SYN-ACK, ACK, FIN, RST, URG, ECE, CWR, FIN-ACK, RST-ACK
+ + false + + + + +### FlowCollector.spec.agent.ebpf.flowFilter.rules[index] +[↩ Parent](#flowcollectorspecagentebpfflowfilter) + + + +`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -538,7 +687,8 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: @@ -8100,6 +8250,155 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: Enum: TCP, UDP, ICMP, ICMPv6, SCTP
+ + + + + + + + + + + + + + + + + + + + + +
Name Type Description Required
actionenum + `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`.
+
+ Enum: Accept, Reject
+
false
cidrstring + `cidr` defines the IP CIDR to filter flows by. +Examples: `10.10.10.0/24` or `100:100:100:100::/64`
+
false
destPortsint or string + `destPorts` optionally defines the destination ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
false
directionenum + `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`.
+
+ Enum: Ingress, Egress
+
false
icmpCodeinteger + `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by.
+
false
icmpTypeinteger + `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by.
+
false
peerIPstring + `peerIP` optionally defines the remote IP address to filter flows by. +Example: `10.10.10.10`.
+
false
pktDropsboolean + `pktDrops` optionally filters only flows containing packet drops.
+
false
portsint or string + `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. +To filter a single port, set a single port as an integer value. For example, `ports: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
false
protocolenum + `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`.
+
+ Enum: TCP, UDP, ICMP, ICMPv6, SCTP
+
false
sampling integer + `sampling` defines the sampling rate for the matched flows.
+
+ Format: int32
+
false
sourcePorts int or string - SourcePorts defines the source ports to filter flows by. -To filter a single port, set a single port as an integer value. For example, sourcePorts: 80. -To filter a range of ports, use a "start-end" range in string format. For example, sourcePorts: "80-100". + `sourcePorts` optionally defines the source ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
falsetcpFlags enum - `tcpFlags` defines the TCP flags to filter flows by.
+ `tcpFlags` optionally defines TCP flags to filter flows by. +In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.

Enum: SYN, SYN-ACK, ACK, FIN, RST, URG, ECE, CWR, FIN-ACK, RST-ACK
false
rules []object + `rules` defines a list of eBPF agent flow filtering rules.
+
false
sampling integer + `sampling` defines the sampling rate for the matched flows.
+
+ Format: int32
+
false
sourcePortsint or string + `sourcePorts` optionally defines the source ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`.
+
false
tcpFlagsenum + `tcpFlags` optionally defines TCP flags to filter flows by. +In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`.
+
+ Enum: SYN, SYN-ACK, ACK, FIN, RST, URG, ECE, CWR, FIN-ACK, RST-ACK
+
false
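Taken together, the tables above mean one `flowFilter` can now carry several `rules`, each with its own action, match fields, and per-rule `sampling`. A minimal sketch of what such a stanza could look like, rendered from Go via sigs.k8s.io/yaml; the keys follow the tables above and every value is illustrative only:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	// Illustrative spec.agent.ebpf.flowFilter stanza: keys come from the
	// field tables above, values are made up for the example.
	flowFilter := map[string]any{
		"enable": true,
		"rules": []map[string]any{
			// Accept TCP to this subnet on ports 80 and 100, sampled 1:10.
			{"action": "Accept", "cidr": "10.10.10.0/24", "protocol": "TCP",
				"ports": "80,100", "sampling": 10},
			// Reject remaining flows that contain packet drops.
			{"action": "Reject", "cidr": "0.0.0.0/0", "pktDrops": true},
		},
	}
	out, err := yaml.Marshal(flowFilter)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```

`ports` is shown in its string form here; a bare integer such as `80` is equally valid for the `int or string` fields.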
+ + +### FlowCollector.spec.agent.ebpf.flowFilter.rules[index] +[↩ Parent](#flowcollectorspecagentebpfflowfilter-1) + + + +`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/go.mod b/go.mod index 1ff9ac009..1f3c92335 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.6.0 github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20241126155124-18e017f67001 + github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20241008130234-a20397fb8f88 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.35.1 github.com/openshift/api v0.0.0-20240722135205-ae4f370f361f @@ -31,44 +32,111 @@ require ( ) require ( + github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect + github.com/agoda-com/opentelemetry-logs-go v0.5.0 // indirect + github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/hub v1.0.1 // indirect + github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cilium/ebpf v0.16.0 // indirect + github.com/containernetworking/cni v1.1.2 // indirect + github.com/containernetworking/plugins v1.2.0 // indirect + github.com/coreos/go-iptables v0.6.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-kit/kit v0.13.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gopacket/gopacket v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb // indirect github.com/imdario/mergo v0.3.16 // indirect + github.com/ip2location/ip2location-go/v9 v9.7.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // 
indirect + github.com/libp2p/go-reuseport v0.3.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/minio/minio-go/v7 v7.0.77 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/netobserv/gopipes v0.3.0 // indirect + github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 // indirect + github.com/netsampler/goflow2 v1.3.7 // indirect + github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 // indirect + github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334 // indirect + github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/pion/dtls/v2 v2.2.4 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/transport/v2 v2.0.0 // indirect + github.com/pion/udp v0.1.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.20.3 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 // indirect + github.com/rs/xid v1.6.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 // indirect + github.com/segmentio/kafka-go v0.4.47 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect + github.com/urfave/cli/v2 v2.27.2 // indirect + github.com/vishvananda/netlink v1.3.0 // indirect + github.com/vishvananda/netns v0.0.4 // indirect + github.com/vmware/go-ipfix v0.9.0 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.22.0 // indirect @@ -78,14 +146,23 @@ require ( golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 
+ google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/gcfg.v1 v1.2.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 // indirect + lukechampine.com/uint128 v1.2.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace github.com/prometheus/common v0.55.0 => github.com/netobserv/prometheus-common v0.55.0-netobserv + +replace github.com/netobserv/netobserv-ebpf-agent => github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b diff --git a/go.sum b/go.sum index c97b326d3..abc0aa6ef 100644 --- a/go.sum +++ b/go.sum @@ -1,189 +1,1331 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod 
h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go 
v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agoda-com/opentelemetry-logs-go v0.5.0 h1:9L6hRUiOX/Laoazk3u2hTdcMDOpkQXi2kMg5YBYA/I4= +github.com/agoda-com/opentelemetry-logs-go v0.5.0/go.mod h1:35B5ypjX5pkVCPJR01i6owJSYWe8cnbWLpEyHgAGD/E= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v1.2.0 h1:1v0TJPDtlhgpW4nJ+GvxCLSlUDC3+gW0CQQvlmfDR/s= +github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bpfman/bpfman-operator v0.5.5-0.20241023163832-0bf84bbd3927 h1:2odrvZ4MI/tfUkJYAz/d35EiDISbEHsv3360CaF7Rco= github.com/bpfman/bpfman-operator v0.5.5-0.20241023163832-0bf84bbd3927/go.mod h1:BREOhrpjbTdN3f/3bRx9/YSJsVwz79eUGafR2njaN4Q= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA= +github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg= +github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= +github.com/cenkalti/rpc2 
v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= 
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gaissmai/cidrtree v0.1.4 h1:/aYnv1LIwjtSDHNr1eNN99WJeh6vLrB+Sgr1tRMhHDc= +github.com/gaissmai/cidrtree v0.1.4/go.mod h1:nrjEeeMZmvoJpLcSvZ3qIVFxw/+9GHKi7wDHHmHKGRI= +github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 h1:Vh7rylVZRZCj6W41lRlP17xPk4Nq260H4Xo/DDYmEZk= +github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= +github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= +github.com/go-kit/log v0.1.0/go.mod 
h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference 
v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.23.0 
h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
+github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopacket/gopacket v1.2.0 h1:eXbzFad7f73P1n2EJHQlsKuvIMJjVXK5tXoSca78I3A=
+github.com/gopacket/gopacket v1.2.0/go.mod h1:BrAKEy5EOGQ76LSqh7DMAr7z0NNPdczWm2GxCG7+I8M=
+github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.15.0/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb h1:tsEKRC3PU9rMw18w/uAptoijhgG4EvlA5kfJPtwrMDk=
+github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb/go.mod h1:NtmN9h8vrTveVQRLHcX2HQ5wIPBDCsZ351TGbZWgg38=
+github.com/hetznercloud/hcloud-go v1.22.0/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
+github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
+github.com/ip2location/ip2location-go/v9 v9.7.0 h1:ipwl67HOWcrw+6GOChkEXcreRQR37NabqBd2ayYa4Q0=
+github.com/ip2location/ip2location-go/v9 v9.7.0/go.mod h1:MPLnsKxwQlvd2lBNcQCsLoyzJLDBFizuO67wXXdzoyI=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
+github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4=
+github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U=
+github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g=
+github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI=
+github.com/k8snetworkplumbingwg/multi-networkpolicy v0.0.0-20200914073308-0f33b9190170 h1:rtPle+U5e7Fia0j44gm+p5QMgOIXXB3A8GtFeCCh8Kk=
+github.com/k8snetworkplumbingwg/multi-networkpolicy v0.0.0-20200914073308-0f33b9190170/go.mod h1:CF9uYILB8GY25A/6Hhi1AWKc29qbyLu8r7Gs+uINGZE=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4=
+github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0/go.mod h1:wxt2YWRVItDtaQmVSmaN5ubE2L1c9CiNoHQwSJnM8Ko=
+github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc h1:v6+jUd70AayPbIRgTYUNpnBLG5cBPTY0+10y80CZeMk=
+github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
+github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw=
+github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f h1:mjj2aCHv9orQy7Y0OGs03wZNtuQHfNgCKY44eOIloe0=
+github.com/mariomac/guara v0.0.0-20220523124851-5fc279816f1f/go.mod h1:Ec37gLe3vH+cnOp7x3qfd+0sz0pnP3CyIXKmQJ2ZOXU=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 h1:ql8x//rJsHMjS+qqEag8n3i4azw1QneKh5PieH9UEbY=
+github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875/go.mod h1:kfOoFJuHWp76v1RgZCb9/gVUc7XdY877S2uVYbNliGc=
+github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
+github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
+github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4=
+github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
+github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8=
+github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
+github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
+github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b h1:E8vhCqGnKNO++9HFup4rXqpl9I1uyTJYpaKetV1elAA=
+github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b/go.mod h1:20e1OPAs7h3k9PvNZWS9D6BnXEtkTk2LlfzD66uhvxY=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20241126155124-18e017f67001 h1:Hb5RxMfFafng2+QbnkiEWTS4TUUz7zcpUS8RiauzWXw=
github.com/netobserv/flowlogs-pipeline v1.7.0-community.0.20241126155124-18e017f67001/go.mod h1:wnCpWttAFkLSSxOcfCkd9zA5pwV/1OcxS5tAfAxNWEc=
+github.com/netobserv/gopipes v0.3.0 h1:IYmPnnAVCdSK7VmHmpFhrVBOEm45qpgbZmJz1sSW+60=
+github.com/netobserv/gopipes v0.3.0/go.mod h1:N7/Gz05EOF0CQQSKWsv3eof22Cj2PB08Pbttw98YFYU=
+github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 h1:RmnoJe/ci5q+QdM7upFdxiU+D8F3L3qTd5wXCwwHefw=
+github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500/go.mod h1:LHXpc5tjKvsfZn0pwLKrvlgEhZcCaw3Di9mUEZGAI4E=
github.com/netobserv/prometheus-common v0.55.0-netobserv h1:Fapr74g0S3gRh/kTTyv9Ytm4DJJfFuUTEToiU/np9eg=
github.com/netobserv/prometheus-common v0.55.0-netobserv/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc=
+github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/openshift/api v0.0.0-20240722135205-ae4f370f361f h1:B+uJ4LmjO+qwMTZP2YhlpMziMPD4MD1++WdCAV2y+GI=
github.com/openshift/api v0.0.0-20240722135205-ae4f370f361f/go.mod h1:OOh6Qopf21pSzqNVCB5gomomBXb8o5sGKZxG2KNpaXM=
+github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb3izbf6JfOEK+pJTYpEvteRR73mCh2g/A=
+github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs=
+github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI=
+github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334 h1:DRWKIJpIDHgp9JWOHOwDywnfBnJOyHolGpg3OioY+dI=
+github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334/go.mod h1:xn0ACVOiv+fi6wJN5y0XeYRbDrAHLgDLCEdageHCObI=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
+github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pion/dtls/v2 v2.2.4 h1:YSfYwDQgrxMYXLBc/m7PFY5BVtWlNm/DN4qoU2CbcWg=
+github.com/pion/dtls/v2 v2.2.4/go.mod h1:WGKfxqhrddne4Kg3p11FUMJrynkOY4lb25zHNO49wuw=
+github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/transport/v2 v2.0.0 h1:bsMYyqHCbkvHwj+eNCFBuxtlKndKfyGI2vaQmM3fIE4=
+github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc=
+github.com/pion/udp v0.1.4 h1:OowsTmu1Od3sD6i3fQUJxJn2fEvJO6L1TidgadtbTI8=
+github.com/pion/udp v0.1.4/go.mod h1:G8LDo56HsFwC24LIcnT4YIDU5qcB6NepqqjP0keL2us=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1 h1:QU2cs0xxKYvF1JfibP/8vs+pFy6OvIpqNR2lYC4jYNU=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1/go.mod h1:Rd8YnCqz+2FYsiGmE2DMlaLjQRB4v2jFNnzCt9YY4IM=
-github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4=
-github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 h1:V/4Cj2GytqdqK7OMEz6c4LNjey3SNyfw3pg5jPKtJvQ=
+github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24/go.mod h1:MDRkz271loM/PrYN+wUNEaTMDGSP760MQzB0yEjdgSQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 h1:q815fjV3G+4JvXNo2VwT2m+/msMU0sUkCK68CgHV9Y8=
+github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91/go.mod h1:qIWCTaK0xQlXNlNlIVoZjKMZFopqfMZcg4JcRqGoYc0=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
+github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
+github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
+github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
+github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vmware/go-ipfix v0.9.0 h1:4/N5eFliqULEaCUQV0lafOpN/1bItPE9OTAPGhrIXus=
+github.com/vmware/go-ipfix v0.9.0/go.mod h1:MYEdL6Uel2ufOZyVCKvIAaw9hwnewK8aPr7rnwRbxMY=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
+github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
+github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
+github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0/go.mod h1:t4BrYLHU450Zo9fnydWlIuswB1bm7rM8havDpWOJeDo=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 h1:xvhQxJ/C9+RTnAj5DpTg7LSM1vbbMTiXt7e9hsfqHNw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0/go.mod h1:Fcvs2Bz1jkDM+Wf5/ozBGmi3tQ/c9zPKLnsipnfhGAo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
+go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
+go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY=
+go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= 
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -192,46 +1334,205 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod 
h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= +google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= +gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.31.2 h1:Uw1zUP2D/4wiSjKWVVzSOcCGLuW/+IdRwjjC0FJooYU= k8s.io/kube-aggregator v0.31.2/go.mod h1:41/VIXH+/Qcg9ERNAY6bRF/WQR6xL1wFgYagdHac1X4= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8 h1:1Wof1cGQgA5pqgo8MxKPtf+qN6Sh/0JzznmeGPm1HnE= k8s.io/kube-openapi v0.0.0-20240808142205-8e686545bdb8/go.mod h1:Os6V6dZwLNii3vxFpxcNaTmH8LJJBkOTg1N0tOA0fvA= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-runtime v0.19.3 
h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E= +sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/vendor/github.com/Knetic/govaluate/.gitignore b/vendor/github.com/Knetic/govaluate/.gitignore new file mode 100644 index 000000000..da210fb31 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +coverage.out + +manual_test.go +*.out +*.err diff --git a/vendor/github.com/Knetic/govaluate/.travis.yml b/vendor/github.com/Knetic/govaluate/.travis.yml new file mode 100644 index 000000000..35ae404ab --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/.travis.yml @@ -0,0 +1,10 @@ +language: go + +script: ./test.sh + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 diff --git a/vendor/github.com/Knetic/govaluate/CONTRIBUTORS b/vendor/github.com/Knetic/govaluate/CONTRIBUTORS new file mode 100644 index 000000000..c1a7fe42d --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/CONTRIBUTORS @@ -0,0 +1,15 @@ +This library was authored by George Lester, and contains contributions from: + +vjeantet (regex support) +iasci (ternary operator) +oxtoacart (parameter structures, deferred parameter retrieval) +wmiller848 (bitwise operators) +prashantv (optimization of bools) +dpaolella (exposure of variables used in an expression) +benpaxton (fix for missing type checks during literal elide process) +abrander (panic-finding testing tool, float32 conversions) +xfennec (fix for dates being parsed in the current Location) +bgaifullin (lifting restriction on complex/struct types) +gautambt (hexadecimal literals) +felixonmars (fix multiple typos in test names) +sambonfire (automatic type conversion for accessor function calls) \ No newline at end of file diff --git a/vendor/github.com/Knetic/govaluate/EvaluableExpression.go b/vendor/github.com/Knetic/govaluate/EvaluableExpression.go new file mode 100644 index 000000000..a5fe50d47 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/EvaluableExpression.go @@ -0,0 +1,276 @@ +package govaluate + +import ( + "errors" + "fmt" +) + +const isoDateFormat string = "2006-01-02T15:04:05.999999999Z0700" +const 
shortCircuitHolder int = -1 + +var DUMMY_PARAMETERS = MapParameters(map[string]interface{}{}) + +/* + EvaluableExpression represents a set of ExpressionTokens which, taken together, + are an expression that can be evaluated down into a single value. +*/ +type EvaluableExpression struct { + + /* + Represents the query format used to output dates. Typically only used when creating SQL or Mongo queries from an expression. + Defaults to the complete ISO8601 format, including nanoseconds. + */ + QueryDateFormat string + + /* + Whether or not to safely check types when evaluating. + If true, this library will return error messages when invalid types are used. + If false, the library will panic when operators encounter types they can't use. + + This is exclusively for users who need to squeeze every ounce of speed out of the library as they can, + and you should only set this to false if you know exactly what you're doing. + */ + ChecksTypes bool + + tokens []ExpressionToken + evaluationStages *evaluationStage + inputExpression string +} + +/* + Parses a new EvaluableExpression from the given [expression] string. + Returns an error if the given expression has invalid syntax. +*/ +func NewEvaluableExpression(expression string) (*EvaluableExpression, error) { + + functions := make(map[string]ExpressionFunction) + return NewEvaluableExpressionWithFunctions(expression, functions) +} + +/* + Similar to [NewEvaluableExpression], except that instead of a string, an already-tokenized expression is given. + This is useful in cases where you may be generating an expression automatically, or using some other parser (e.g., to parse from a query language) +*/ +func NewEvaluableExpressionFromTokens(tokens []ExpressionToken) (*EvaluableExpression, error) { + + var ret *EvaluableExpression + var err error + + ret = new(EvaluableExpression) + ret.QueryDateFormat = isoDateFormat + + err = checkBalance(tokens) + if err != nil { + return nil, err + } + + err = checkExpressionSyntax(tokens) + if err != nil { + return nil, err + } + + ret.tokens, err = optimizeTokens(tokens) + if err != nil { + return nil, err + } + + ret.evaluationStages, err = planStages(ret.tokens) + if err != nil { + return nil, err + } + + ret.ChecksTypes = true + return ret, nil +} + +/* + Similar to [NewEvaluableExpression], except enables the use of user-defined functions. + Functions passed into this will be available to the expression. +*/ +func NewEvaluableExpressionWithFunctions(expression string, functions map[string]ExpressionFunction) (*EvaluableExpression, error) { + + var ret *EvaluableExpression + var err error + + ret = new(EvaluableExpression) + ret.QueryDateFormat = isoDateFormat + ret.inputExpression = expression + + ret.tokens, err = parseTokens(expression, functions) + if err != nil { + return nil, err + } + + err = checkBalance(ret.tokens) + if err != nil { + return nil, err + } + + err = checkExpressionSyntax(ret.tokens) + if err != nil { + return nil, err + } + + ret.tokens, err = optimizeTokens(ret.tokens) + if err != nil { + return nil, err + } + + ret.evaluationStages, err = planStages(ret.tokens) + if err != nil { + return nil, err + } + + ret.ChecksTypes = true + return ret, nil +} + +/* + Same as `Eval`, but automatically wraps a map of parameters into a `govalute.Parameters` structure. 
+*/ +func (this EvaluableExpression) Evaluate(parameters map[string]interface{}) (interface{}, error) { + + if parameters == nil { + return this.Eval(nil) + } + + return this.Eval(MapParameters(parameters)) +} + +/* + Runs the entire expression using the given [parameters]. + e.g., If the expression contains a reference to the variable "foo", it will be taken from `parameters.Get("foo")`. + + This function returns errors if the combination of expression and parameters cannot be run, + such as if a variable in the expression is not present in [parameters]. + + In all non-error circumstances, this returns the single value result of the expression and parameters given. + e.g., if the expression is "1 + 1", this will return 2.0. + e.g., if the expression is "foo + 1" and parameters contains "foo" = 2, this will return 3.0 +*/ +func (this EvaluableExpression) Eval(parameters Parameters) (interface{}, error) { + + if this.evaluationStages == nil { + return nil, nil + } + + if parameters != nil { + parameters = &sanitizedParameters{parameters} + } else { + parameters = DUMMY_PARAMETERS + } + + return this.evaluateStage(this.evaluationStages, parameters) +} + +func (this EvaluableExpression) evaluateStage(stage *evaluationStage, parameters Parameters) (interface{}, error) { + + var left, right interface{} + var err error + + if stage.leftStage != nil { + left, err = this.evaluateStage(stage.leftStage, parameters) + if err != nil { + return nil, err + } + } + + if stage.isShortCircuitable() { + switch stage.symbol { + case AND: + if left == false { + return false, nil + } + case OR: + if left == true { + return true, nil + } + case COALESCE: + if left != nil { + return left, nil + } + + case TERNARY_TRUE: + if left == false { + right = shortCircuitHolder + } + case TERNARY_FALSE: + if left != nil { + right = shortCircuitHolder + } + } + } + + if right != shortCircuitHolder && stage.rightStage != nil { + right, err = this.evaluateStage(stage.rightStage, parameters) + if err != nil { + return nil, err + } + } + + if this.ChecksTypes { + if stage.typeCheck == nil { + + err = typeCheck(stage.leftTypeCheck, left, stage.symbol, stage.typeErrorFormat) + if err != nil { + return nil, err + } + + err = typeCheck(stage.rightTypeCheck, right, stage.symbol, stage.typeErrorFormat) + if err != nil { + return nil, err + } + } else { + // special case where the type check needs to know both sides to determine if the operator can handle it + if !stage.typeCheck(left, right) { + errorMsg := fmt.Sprintf(stage.typeErrorFormat, left, stage.symbol.String()) + return nil, errors.New(errorMsg) + } + } + } + + return stage.operator(left, right, parameters) +} + +func typeCheck(check stageTypeCheck, value interface{}, symbol OperatorSymbol, format string) error { + + if check == nil { + return nil + } + + if check(value) { + return nil + } + + errorMsg := fmt.Sprintf(format, value, symbol.String()) + return errors.New(errorMsg) +} + +/* + Returns an array representing the ExpressionTokens that make up this expression. +*/ +func (this EvaluableExpression) Tokens() []ExpressionToken { + + return this.tokens +} + +/* + Returns the original expression used to create this EvaluableExpression. +*/ +func (this EvaluableExpression) String() string { + + return this.inputExpression +} + +/* + Returns an array representing the variables contained in this EvaluableExpression. 
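+	For example, the expression "foo > bar" yields []string{"foo", "bar"}; one entry is returned
+	per variable occurrence, so repeated variables appear more than once.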
+*/ +func (this EvaluableExpression) Vars() []string { + var varlist []string + for _, val := range this.Tokens() { + if val.Kind == VARIABLE { + varlist = append(varlist, val.Value.(string)) + } + } + return varlist +} diff --git a/vendor/github.com/Knetic/govaluate/EvaluableExpression_sql.go b/vendor/github.com/Knetic/govaluate/EvaluableExpression_sql.go new file mode 100644 index 000000000..7e0ad1c88 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/EvaluableExpression_sql.go @@ -0,0 +1,167 @@ +package govaluate + +import ( + "errors" + "fmt" + "regexp" + "time" +) + +/* + Returns a string representing this expression as if it were written in SQL. + This function assumes that all parameters exist within the same table, and that the table essentially represents + a serialized object of some sort (e.g., hibernate). + If your data model is more normalized, you may need to consider iterating through each actual token given by `Tokens()` + to create your query. + + Boolean values are considered to be "1" for true, "0" for false. + + Times are formatted according to this.QueryDateFormat. +*/ +func (this EvaluableExpression) ToSQLQuery() (string, error) { + + var stream *tokenStream + var transactions *expressionOutputStream + var transaction string + var err error + + stream = newTokenStream(this.tokens) + transactions = new(expressionOutputStream) + + for stream.hasNext() { + + transaction, err = this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + transactions.add(transaction) + } + + return transactions.createString(" "), nil +} + +func (this EvaluableExpression) findNextSQLString(stream *tokenStream, transactions *expressionOutputStream) (string, error) { + + var token ExpressionToken + var ret string + + token = stream.next() + + switch token.Kind { + + case STRING: + ret = fmt.Sprintf("'%v'", token.Value) + case PATTERN: + ret = fmt.Sprintf("'%s'", token.Value.(*regexp.Regexp).String()) + case TIME: + ret = fmt.Sprintf("'%s'", token.Value.(time.Time).Format(this.QueryDateFormat)) + + case LOGICALOP: + switch logicalSymbols[token.Value.(string)] { + + case AND: + ret = "AND" + case OR: + ret = "OR" + } + + case BOOLEAN: + if token.Value.(bool) { + ret = "1" + } else { + ret = "0" + } + + case VARIABLE: + ret = fmt.Sprintf("[%s]", token.Value.(string)) + + case NUMERIC: + ret = fmt.Sprintf("%g", token.Value.(float64)) + + case COMPARATOR: + switch comparatorSymbols[token.Value.(string)] { + + case EQ: + ret = "=" + case NEQ: + ret = "<>" + case REQ: + ret = "RLIKE" + case NREQ: + ret = "NOT RLIKE" + default: + ret = fmt.Sprintf("%s", token.Value.(string)) + } + + case TERNARY: + + switch ternarySymbols[token.Value.(string)] { + + case COALESCE: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("COALESCE(%v, %v)", left, right) + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + return "", errors.New("Ternary operators are unsupported in SQL output") + } + case PREFIX: + switch prefixSymbols[token.Value.(string)] { + + case INVERT: + ret = fmt.Sprintf("NOT") + default: + + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("%s%s", token.Value.(string), right) + } + case MODIFIER: + + switch modifierSymbols[token.Value.(string)] { + + case EXPONENT: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return 
"", err + } + + ret = fmt.Sprintf("POW(%s, %s)", left, right) + case MODULUS: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("MOD(%s, %s)", left, right) + default: + ret = fmt.Sprintf("%s", token.Value.(string)) + } + case CLAUSE: + ret = "(" + case CLAUSE_CLOSE: + ret = ")" + case SEPARATOR: + ret = "," + + default: + errorMsg := fmt.Sprintf("Unrecognized query token '%s' of kind '%s'", token.Value, token.Kind) + return "", errors.New(errorMsg) + } + + return ret, nil +} diff --git a/vendor/github.com/Knetic/govaluate/ExpressionToken.go b/vendor/github.com/Knetic/govaluate/ExpressionToken.go new file mode 100644 index 000000000..f849f3813 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/ExpressionToken.go @@ -0,0 +1,9 @@ +package govaluate + +/* + Represents a single parsed token. +*/ +type ExpressionToken struct { + Kind TokenKind + Value interface{} +} diff --git a/vendor/github.com/Knetic/govaluate/LICENSE b/vendor/github.com/Knetic/govaluate/LICENSE new file mode 100644 index 000000000..24b9b4591 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2016 George Lester + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Knetic/govaluate/MANUAL.md b/vendor/github.com/Knetic/govaluate/MANUAL.md new file mode 100644 index 000000000..e06582851 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/MANUAL.md @@ -0,0 +1,176 @@ +govaluate +==== + +This library contains quite a lot of functionality, this document is meant to be formal documentation on the operators and features of it. +Some of this documentation may duplicate what's in README.md, but should never conflict. + +# Types + +This library only officially deals with four types; `float64`, `bool`, `string`, and arrays. + +All numeric literals, with or without a radix, will be converted to `float64` for evaluation. For instance; in practice, there is no difference between the literals "1.0" and "1", they both end up as `float64`. This matters to users because if you intend to return numeric values from your expressions, then the returned value will be `float64`, not any other numeric type. + +Any string _literal_ (not parameter) which is interpretable as a date will be converted to a `float64` representation of that date's unix time. 
+Any `time.Time` parameters will not be operable with these date literals; such parameters will need to use the `time.Time.Unix()` method to get a numeric representation.
+
+Arrays are untyped, and can be mixed-type. Internally they're all just `interface{}`. Only two operators can interact with arrays, `IN` and `,`. All other operators will refuse to operate on arrays.
+
+# Operators
+
+## Modifiers
+
+### Addition, concatenation `+`
+
+If either the left or right side of the `+` operator is a `string`, then this operator will perform string concatenation and return that result. If neither is a string, then both must be numeric, and this will return a numeric result.
+
+Any other case is invalid.
+
+### Arithmetic `-` `*` `/` `**` `%`
+
+`**` refers to "take to the power of". For instance, `3 ** 4` == 81.
+
+* _Left side_: numeric
+* _Right side_: numeric
+* _Returns_: numeric
+
+### Bitwise shifts, masks `>>` `<<` `|` `&` `^`
+
+All of these operators convert their `float64` left and right sides to `int64`, perform their operation, and then convert back.
+Given how this library assumes numerics are represented (as `float64`), it is unlikely that this behavior will change, even though it may cause havoc with extremely large or small numbers.
+
+* _Left side_: numeric
+* _Right side_: numeric
+* _Returns_: numeric
+
+### Negation `-`
+
+Prefix only. This can never have a left-hand value.
+
+* _Right side_: numeric
+* _Returns_: numeric
+
+### Inversion `!`
+
+Prefix only. This can never have a left-hand value.
+
+* _Right side_: bool
+* _Returns_: bool
+
+### Bitwise NOT `~`
+
+Prefix only. This can never have a left-hand value.
+
+* _Right side_: numeric
+* _Returns_: numeric
+
+## Logical Operators
+
+For all logical operators, this library will short-circuit the operation if the left-hand side is sufficient to determine what to do. For instance, `true || expensiveOperation()` will not actually call `expensiveOperation()`, since it knows the left-hand side is `true`.
+
+### Logical AND/OR `&&` `||`
+
+* _Left side_: bool
+* _Right side_: bool
+* _Returns_: bool
+
+### Ternary true `?`
+
+Checks if the left side is `true`. If so, returns the right side. If the left side is `false`, returns `nil`.
+In practice, this is commonly used with the other ternary operator.
+
+* _Left side_: bool
+* _Right side_: Any type.
+* _Returns_: Right side or `nil`
+
+### Ternary false `:`
+
+Checks if the left side is `nil`. If so, returns the right side. If the left side is non-nil, returns the left side.
+In practice, this is commonly used with the other ternary operator.
+
+* _Left side_: Any type.
+* _Right side_: Any type.
+* _Returns_: Left side or right side
+
+### Null coalescence `??`
+
+Similar to the C# operator. If the left value is non-nil, it returns that. If not, then the right-value is returned.
+
+* _Left side_: Any type.
+* _Right side_: Any type.
+* _Returns_: No specific type - whichever is passed to it.
+
+## Comparators
+
+### Numeric/lexicographic comparators `>` `<` `>=` `<=`
+
+If both sides are numeric, this returns the usual greater/lesser behavior that would be expected.
+If both sides are string, this returns the lexicographic comparison of the strings. This uses Go's standard lexicographic compare.
+
+* _Accepts_: Left and right side must either be both string, or both numeric.
+* _Returns_: bool
+
+### Regex comparators `=~` `!~`
+
+These use Go's standard `regexp` flavor of regex. The left side is expected to be the candidate string, the right side is the pattern.
+`=~` returns whether or not the candidate string matches the regex pattern given on the right. `!~` is the inverted version of the same logic.
+
+* _Left side_: string
+* _Right side_: string
+* _Returns_: bool
+
+## Arrays
+
+### Separator `,`
+
+The separator, always paired with parentheses, creates arrays. It must always have both a left and right-hand value, so for instance `(, 0)` and `(0,)` are invalid uses of it.
+
+Again, this should always be used with parentheses, like `(1, 2, 3, 4)`.
+
+### Membership `IN`
+
+The only operator with a text name, this operator checks the right-hand side array to see if it contains a value that is equal to the left-side value.
+Equality is determined by the use of the `==` operator, and this library doesn't check types between the values. Any two values that, when cast to `interface{}`, can still be checked for equality with `==` will act as expected.
+
+Note that you can use a parameter for the array, but it must be an `[]interface{}`.
+
+* _Left side_: Any type.
+* _Right side_: array
+* _Returns_: bool
+
+# Parameters
+
+Parameters must be passed in every time the expression is evaluated. Parameters can be of any type, but will not cause errors unless actually used in an erroneous way. There is no difference in behavior for any of the above operators for parameters - they are type checked when used.
+
+All `int` and `float` values of any width will be converted to `float64` before use.
+
+At no point is the parameter structure, or any value thereof, modified by this library.
+
+## Alternates to maps
+
+The default form of parameters as a map may not serve your use case. You may have parameters in some other structure, you may want to change the no-parameter-found behavior, or maybe even just have some debugging print statements invoked when a parameter is accessed.
+
+To do this, define a type that implements the `govaluate.Parameters` interface. When you want to evaluate, instead call `EvaluableExpression.Eval` and pass your parameter structure.
+
+# Functions
+
+During expression parsing (_not_ evaluation), a map of functions can be given to `govaluate.NewEvaluableExpressionWithFunctions` (the lengthiest and finest of function names). The resultant expression will be able to invoke those functions during evaluation. Once parsed, an expression cannot have functions added or removed - a new expression will need to be created if you want to change the functions, or the behavior of said functions.
+
+Functions always take the form `name(arguments)`, including the parens. Functions can have an empty list of parameters, like `name()`, but still must have parens.
+
+If the expression contains something that looks like it ought to be a function (such as `foo()`), but no such function was given to it, it will error on parsing.
+
+Functions must be of type `map[string]govaluate.ExpressionFunction`. `ExpressionFunction`, for brevity, has the following signature:
+
+`func(args ...interface{}) (interface{}, error)`
+
+Where `args` is whatever is passed to the function when called. If a non-nil error is returned from a function during evaluation, the evaluation stops and ultimately returns that error to the caller of `Evaluate()` or `Eval()`.
+
+## Built-in functions
+
+There aren't any built-in functions. The author is opposed to maintaining a standard library of functions to be used.
+
+Every use case of this library is different, and even in simple use cases (such as parameters, see above) different users need different behavior, naming, or even functionality.
The author prefers that users make their own decisions about what functions they need, and how they operate. + +# Equality + +The `==` and `!=` operators involve a moderately complex workflow. They use [`reflect.DeepEqual`](https://golang.org/pkg/reflect/#DeepEqual). This is for complicated reasons, but there are some types in Go that cannot be compared with the native `==` operator. Arrays, in particular, cannot be compared - Go will panic if you try. One might assume this could be handled with the type checking system in `govaluate`, but unfortunately without reflection there is no way to know if a variable is a slice/array. Worse, structs can be incomparable if they _contain incomparable types_. + +It's all very complicated. Fortunately, Go includes the `reflect.DeepEqual` function to handle all the edge cases. Currently, `govaluate` uses that for all equality/inequality. diff --git a/vendor/github.com/Knetic/govaluate/OperatorSymbol.go b/vendor/github.com/Knetic/govaluate/OperatorSymbol.go new file mode 100644 index 000000000..4b810658b --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/OperatorSymbol.go @@ -0,0 +1,309 @@ +package govaluate + +/* + Represents the valid symbols for operators. + +*/ +type OperatorSymbol int + +const ( + VALUE OperatorSymbol = iota + LITERAL + NOOP + EQ + NEQ + GT + LT + GTE + LTE + REQ + NREQ + IN + + AND + OR + + PLUS + MINUS + BITWISE_AND + BITWISE_OR + BITWISE_XOR + BITWISE_LSHIFT + BITWISE_RSHIFT + MULTIPLY + DIVIDE + MODULUS + EXPONENT + + NEGATE + INVERT + BITWISE_NOT + + TERNARY_TRUE + TERNARY_FALSE + COALESCE + + FUNCTIONAL + ACCESS + SEPARATE +) + +type operatorPrecedence int + +const ( + noopPrecedence operatorPrecedence = iota + valuePrecedence + functionalPrecedence + prefixPrecedence + exponentialPrecedence + additivePrecedence + bitwisePrecedence + bitwiseShiftPrecedence + multiplicativePrecedence + comparatorPrecedence + ternaryPrecedence + logicalAndPrecedence + logicalOrPrecedence + separatePrecedence +) + +func findOperatorPrecedenceForSymbol(symbol OperatorSymbol) operatorPrecedence { + + switch symbol { + case NOOP: + return noopPrecedence + case VALUE: + return valuePrecedence + case EQ: + fallthrough + case NEQ: + fallthrough + case GT: + fallthrough + case LT: + fallthrough + case GTE: + fallthrough + case LTE: + fallthrough + case REQ: + fallthrough + case NREQ: + fallthrough + case IN: + return comparatorPrecedence + case AND: + return logicalAndPrecedence + case OR: + return logicalOrPrecedence + case BITWISE_AND: + fallthrough + case BITWISE_OR: + fallthrough + case BITWISE_XOR: + return bitwisePrecedence + case BITWISE_LSHIFT: + fallthrough + case BITWISE_RSHIFT: + return bitwiseShiftPrecedence + case PLUS: + fallthrough + case MINUS: + return additivePrecedence + case MULTIPLY: + fallthrough + case DIVIDE: + fallthrough + case MODULUS: + return multiplicativePrecedence + case EXPONENT: + return exponentialPrecedence + case BITWISE_NOT: + fallthrough + case NEGATE: + fallthrough + case INVERT: + return prefixPrecedence + case COALESCE: + fallthrough + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + return ternaryPrecedence + case ACCESS: + fallthrough + case FUNCTIONAL: + return functionalPrecedence + case SEPARATE: + return separatePrecedence + } + + return valuePrecedence +} + +/* + Map of all valid comparators, and their string equivalents. + Used during parsing of expressions to determine if a symbol is, in fact, a comparator. + Also used during evaluation to determine exactly which comparator is being used. 
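+	For example, the string ">=" maps to the GTE symbol.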
+*/ +var comparatorSymbols = map[string]OperatorSymbol{ + "==": EQ, + "!=": NEQ, + ">": GT, + ">=": GTE, + "<": LT, + "<=": LTE, + "=~": REQ, + "!~": NREQ, + "in": IN, +} + +var logicalSymbols = map[string]OperatorSymbol{ + "&&": AND, + "||": OR, +} + +var bitwiseSymbols = map[string]OperatorSymbol{ + "^": BITWISE_XOR, + "&": BITWISE_AND, + "|": BITWISE_OR, +} + +var bitwiseShiftSymbols = map[string]OperatorSymbol{ + ">>": BITWISE_RSHIFT, + "<<": BITWISE_LSHIFT, +} + +var additiveSymbols = map[string]OperatorSymbol{ + "+": PLUS, + "-": MINUS, +} + +var multiplicativeSymbols = map[string]OperatorSymbol{ + "*": MULTIPLY, + "/": DIVIDE, + "%": MODULUS, +} + +var exponentialSymbolsS = map[string]OperatorSymbol{ + "**": EXPONENT, +} + +var prefixSymbols = map[string]OperatorSymbol{ + "-": NEGATE, + "!": INVERT, + "~": BITWISE_NOT, +} + +var ternarySymbols = map[string]OperatorSymbol{ + "?": TERNARY_TRUE, + ":": TERNARY_FALSE, + "??": COALESCE, +} + +// this is defined separately from additiveSymbols et al because it's needed for parsing, not stage planning. +var modifierSymbols = map[string]OperatorSymbol{ + "+": PLUS, + "-": MINUS, + "*": MULTIPLY, + "/": DIVIDE, + "%": MODULUS, + "**": EXPONENT, + "&": BITWISE_AND, + "|": BITWISE_OR, + "^": BITWISE_XOR, + ">>": BITWISE_RSHIFT, + "<<": BITWISE_LSHIFT, +} + +var separatorSymbols = map[string]OperatorSymbol{ + ",": SEPARATE, +} + +/* + Returns true if this operator is contained by the given array of candidate symbols. + False otherwise. +*/ +func (this OperatorSymbol) IsModifierType(candidate []OperatorSymbol) bool { + + for _, symbolType := range candidate { + if this == symbolType { + return true + } + } + + return false +} + +/* + Generally used when formatting type check errors. + We could store the stringified symbol somewhere else and not require a duplicated codeblock to translate + OperatorSymbol to string, but that would require more memory, and another field somewhere. + Adding operators is rare enough that we just stringify it here instead. +*/ +func (this OperatorSymbol) String() string { + + switch this { + case NOOP: + return "NOOP" + case VALUE: + return "VALUE" + case EQ: + return "=" + case NEQ: + return "!=" + case GT: + return ">" + case LT: + return "<" + case GTE: + return ">=" + case LTE: + return "<=" + case REQ: + return "=~" + case NREQ: + return "!~" + case AND: + return "&&" + case OR: + return "||" + case IN: + return "in" + case BITWISE_AND: + return "&" + case BITWISE_OR: + return "|" + case BITWISE_XOR: + return "^" + case BITWISE_LSHIFT: + return "<<" + case BITWISE_RSHIFT: + return ">>" + case PLUS: + return "+" + case MINUS: + return "-" + case MULTIPLY: + return "*" + case DIVIDE: + return "/" + case MODULUS: + return "%" + case EXPONENT: + return "**" + case NEGATE: + return "-" + case INVERT: + return "!" + case BITWISE_NOT: + return "~" + case TERNARY_TRUE: + return "?" + case TERNARY_FALSE: + return ":" + case COALESCE: + return "??" 
+	}
+	return ""
+}
diff --git a/vendor/github.com/Knetic/govaluate/README.md b/vendor/github.com/Knetic/govaluate/README.md
new file mode 100644
index 000000000..2e5716d4f
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/README.md
@@ -0,0 +1,233 @@
+govaluate
+====
+
+[![Build Status](https://travis-ci.org/Knetic/govaluate.svg?branch=master)](https://travis-ci.org/Knetic/govaluate)
+[![Godoc](https://img.shields.io/badge/godoc-reference-5272B4.svg)](https://godoc.org/github.com/Knetic/govaluate)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Knetic/govaluate)](https://goreportcard.com/report/github.com/Knetic/govaluate)
+[![Gocover](https://gocover.io/_badge/github.com/Knetic/govaluate)](https://gocover.io/github.com/Knetic/govaluate)
+
+Provides support for evaluating arbitrary C-like arithmetic/string expressions.
+
+Why can't you just write these expressions in code?
+--
+
+Sometimes, you can't know ahead-of-time what an expression will look like, or you want those expressions to be configurable.
+Perhaps you've got a set of data running through your application, and you want to allow your users to specify some validations to run on it before committing it to a database. Or maybe you've written a monitoring framework which is capable of gathering a bunch of metrics, then evaluating a few expressions to see if any metrics should be alerted upon, but the conditions for alerting are different for each monitor.
+
+A lot of people wind up writing their own half-baked style of evaluation language that fits their needs, but isn't complete. Or they wind up baking the expression into the actual executable, even if they know it's subject to change. These strategies may work, but they take time to implement, time for users to learn, and induce technical debt as requirements change. This library is meant to cover all the normal C-like expressions, so that you don't have to reinvent one of the oldest wheels on a computer.
+
+How do I use it?
+--
+
+You create a new EvaluableExpression, then call "Evaluate" on it.
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("10 > 0");
+	result, err := expression.Evaluate(nil);
+	// result is now set to "true", the bool value.
+```
+
+Cool, but how about with parameters?
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("foo > 0");
+
+	parameters := make(map[string]interface{}, 8)
+	parameters["foo"] = -1;
+
+	result, err := expression.Evaluate(parameters);
+	// result is now set to "false", the bool value.
+```
+
+That's cool, but we can almost certainly have done all that in code. What about a complex use case that involves some math?
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("(requests_made * requests_succeeded / 100) >= 90");
+
+	parameters := make(map[string]interface{}, 8)
+	parameters["requests_made"] = 100;
+	parameters["requests_succeeded"] = 80;
+
+	result, err := expression.Evaluate(parameters);
+	// result is now set to "false", the bool value.
```
+```
+
+Or maybe you want to check the status of an alive check ("smoketest") page, which will be a string?
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("http_response_body == 'service is ok'");
+
+	parameters := make(map[string]interface{}, 8)
+	parameters["http_response_body"] = "service is ok";
+
+	result, err := expression.Evaluate(parameters);
+	// result is now set to "true", the bool value.
+```
+
+These examples have all returned boolean values, but it's equally possible to return numeric ones.
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("(mem_used / total_mem) * 100");
+
+	parameters := make(map[string]interface{}, 8)
+	parameters["total_mem"] = 1024;
+	parameters["mem_used"] = 512;
+
+	result, err := expression.Evaluate(parameters);
+	// result is now set to "50.0", the float64 value.
+```
+
+You can also do date parsing, though the formats are somewhat limited. Stick to RFC3339, ISO8601, unix date, or ruby date formats. If you're having trouble getting a date string to parse, check the list of formats actually used: [parsing.go:258](https://github.com/Knetic/govaluate/blob/0580e9b47a69125afa0e4ebd1cf93c49eb5a43ec/parsing.go#L258).
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("'2014-01-02' > '2014-01-01 23:59:59'");
+	result, err := expression.Evaluate(nil);
+
+	// result is now set to true
+```
+
+Expressions are parsed once, and can be re-used multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once. Like so:
+
+```go
+	expression, err := govaluate.NewEvaluableExpression("response_time <= 100");
+	parameters := make(map[string]interface{}, 8)
+
+	for {
+		parameters["response_time"] = pingSomething();
+		result, err := expression.Evaluate(parameters)
+	}
+```
+
+The normal C-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parentheses to clarify which portions of an expression should be run first.
+
+Escaping characters
+--
+
+Sometimes you'll have parameters that have spaces, slashes, pluses, ampersands or some other character
+that this library interprets as something special. For example, the following expression will not
+act as one might expect:
+
+	"response-time < 100"
+
+As written, the library will parse it as "[response] minus [time] is less than 100". In reality,
+"response-time" is meant to be one variable that just happens to have a dash in it.
+
+There are two ways to work around this. First, you can escape the entire parameter name:
+
+	"[response-time] < 100"
+
+Or you can use backslashes to escape only the minus sign.
+
+	"response\\-time < 100"
+
+Backslashes can be used anywhere in an expression to escape the very next character. Square bracketed parameter names can be used instead of plain parameter names at any time.
+
+Functions
+--
+
+You may have cases where you want to call a function on a parameter during execution of the expression. Perhaps you want to aggregate some set of data, but don't know the exact aggregation you want to use until you're writing the expression itself. Or maybe you have a mathematical operation you want to perform, for which there is no operator, such as `log` or `tan` or `sqrt`. For cases like this, you can provide a map of functions to `NewEvaluableExpressionWithFunctions`, which will then be able to use them during execution.
+For instance:
+```go
+	functions := map[string]govaluate.ExpressionFunction {
+		"strlen": func(args ...interface{}) (interface{}, error) {
+			length := len(args[0].(string))
+			return (float64)(length), nil
+		},
+	}
+
+	expString := "strlen('someReallyLongInputString') <= 16"
+	expression, _ := govaluate.NewEvaluableExpressionWithFunctions(expString, functions)
+
+	result, _ := expression.Evaluate(nil)
+	// result is now "false", the boolean value
+```
+
+Functions can accept any number of arguments, nested function calls are handled correctly, and arguments can be of any type (even if none of this library's operators support evaluation of that type). For instance, each of these usages of functions in an expression is valid (assuming that the appropriate functions and parameters are given):
+
+```go
+"sqrt(x1 ** y1, x2 ** y2)"
+"max(someValue, abs(anotherValue), 10 * lastValue)"
+```
+
+Functions cannot be passed as parameters; they must be known at the time the expression is parsed, and are unchangeable after parsing.
+
+Accessors
+--
+
+If you have structs in your parameters, you can access their fields and methods in the usual way. For instance, given a struct that has a method "Echo", present in the parameters as `foo`, the following is valid:
+
+	"foo.Echo('hello world')"
+
+Fields are accessed in a similar way. Assuming `foo` has a field called "Length":
+
+	"foo.Length > 9000"
+
+Accessors can be nested to any depth, like the following:
+
+	"foo.Bar.Baz.SomeFunction()"
+
+However, it is not _currently_ supported to access values in `map`s. So the following will not work:
+
+	"foo.SomeMap['key']"
+
+This may be convenient, but note that using accessors involves a _lot_ of reflection. This makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system).
+If at all reasonable, the author recommends extracting the values you care about into a parameter map beforehand, or defining a struct that implements the `Parameters` interface, and which grabs fields as required. If there are functions you want to use, it's better to pass them as expression functions (see the above section). These approaches use no reflection, and are designed to be fast and clean.
+
+What operators and types does this support?
+--
+
+* Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<`
+* Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~`
+* Logical ops: `||` `&&`
+* Numeric constants, as 64-bit floating point (`12345.678`)
+* String constants (single quotes: `'foobar'`)
+* Date constants (single quotes, using any permutation of RFC3339, ISO8601, ruby date, or unix date; date parsing is automatically tried with any string constant)
+* Boolean constants: `true` `false`
+* Parentheses to control order of evaluation `(` `)`
+* Arrays (anything separated by `,` within parentheses: `(1, 2, 'foo')`)
+* Prefixes: `!` `-` `~`
+* Ternary conditional: `?` `:`
+* Null coalescence: `??`
+
+See [MANUAL.md](https://github.com/Knetic/govaluate/blob/master/MANUAL.md) for exacting details on what types each operator supports.
+
+Types
+--
+
+Some operators don't make sense when used with some types. For instance, what does it mean to get the modulo of a string? What happens if you check to see if two numbers are logically AND'ed together?
+
+Everyone has a different intuition about the answers to these questions. To prevent confusion, this library will _refuse to operate_ upon types for which there is not an unambiguous meaning for the operation.
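+
+For instance, something like the following will refuse to evaluate, returning an error rather than guessing at a meaning:
+
+```go
+	expression, _ := govaluate.NewEvaluableExpression("1 && true")
+	_, err := expression.Evaluate(nil)
+	// err is non-nil: '1' is not a bool, so '&&' refuses to operate on it.
+```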
+See [MANUAL.md](https://github.com/Knetic/govaluate/blob/master/MANUAL.md) for details about what operators are valid for which types.
+
+Benchmarks
+--
+
+If you're concerned about the overhead of this library, a good range of benchmarks is built into this repo. You can run them with `go test -bench=.`. The library is built with an eye towards being quick, but has not been aggressively profiled and optimized. For most applications, though, it is completely fine.
+
+For a very rough idea of performance, here are the results output from a benchmark run on a 3rd-gen MacBook Pro (Linux Mint 17.1).
+
+```
+BenchmarkSingleParse-12                          1000000      1382 ns/op
+BenchmarkSimpleParse-12                           200000     10771 ns/op
+BenchmarkFullParse-12                              30000     49383 ns/op
+BenchmarkEvaluationSingle-12                    50000000      30.1 ns/op
+BenchmarkEvaluationNumericLiteral-12            10000000       119 ns/op
+BenchmarkEvaluationLiteralModifiers-12          10000000       236 ns/op
+BenchmarkEvaluationParameters-12                 5000000       260 ns/op
+BenchmarkEvaluationParametersModifiers-12        3000000       547 ns/op
+BenchmarkComplexExpression-12                    2000000       963 ns/op
+BenchmarkRegexExpression-12                       100000     20357 ns/op
+BenchmarkConstantRegexExpression-12              1000000      1392 ns/op
+ok
+```
+
+API Breaks
+--
+
+While this library has very few cases which will ever result in an API break, it can happen (and [has](https://github.com/Knetic/govaluate/releases/tag/v2.0.0)). If you are using this in production, vendor the commit you've tested against, or use gopkg.in to redirect your import (e.g., `import "gopkg.in/Knetic/govaluate.v2"`). The master branch _may_ (though infrequently) at some point contain API-breaking changes, and the author will have no way to communicate these to downstreams, other than creating a new major release.
+
+Releases will explicitly state when an API break happens, and if they do not specify an API break it should be safe to upgrade.
+
+License
+--
+
+This project is licensed under the MIT general use license. You're free to integrate, fork, and play with this code as you see fit without consulting the author, as long as you provide proper credit to the author in your works.
diff --git a/vendor/github.com/Knetic/govaluate/TokenKind.go b/vendor/github.com/Knetic/govaluate/TokenKind.go
new file mode 100644
index 000000000..7c9516d2d
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/TokenKind.go
@@ -0,0 +1,75 @@
+package govaluate
+
+/*
+	Represents all valid types of tokens that a token can be.
+*/
+type TokenKind int
+
+const (
+	UNKNOWN TokenKind = iota
+
+	PREFIX
+	NUMERIC
+	BOOLEAN
+	STRING
+	PATTERN
+	TIME
+	VARIABLE
+	FUNCTION
+	SEPARATOR
+	ACCESSOR
+
+	COMPARATOR
+	LOGICALOP
+	MODIFIER
+
+	CLAUSE
+	CLAUSE_CLOSE
+
+	TERNARY
+)
+
+/*
+	String returns a string that describes the given TokenKind.
+	e.g., when passed the NUMERIC TokenKind, this returns the string "NUMERIC".
+*/ +func (kind TokenKind) String() string { + + switch kind { + + case PREFIX: + return "PREFIX" + case NUMERIC: + return "NUMERIC" + case BOOLEAN: + return "BOOLEAN" + case STRING: + return "STRING" + case PATTERN: + return "PATTERN" + case TIME: + return "TIME" + case VARIABLE: + return "VARIABLE" + case FUNCTION: + return "FUNCTION" + case SEPARATOR: + return "SEPARATOR" + case COMPARATOR: + return "COMPARATOR" + case LOGICALOP: + return "LOGICALOP" + case MODIFIER: + return "MODIFIER" + case CLAUSE: + return "CLAUSE" + case CLAUSE_CLOSE: + return "CLAUSE_CLOSE" + case TERNARY: + return "TERNARY" + case ACCESSOR: + return "ACCESSOR" + } + + return "UNKNOWN" +} diff --git a/vendor/github.com/Knetic/govaluate/evaluationStage.go b/vendor/github.com/Knetic/govaluate/evaluationStage.go new file mode 100644 index 000000000..11ea58724 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/evaluationStage.go @@ -0,0 +1,516 @@ +package govaluate + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strings" +) + +const ( + logicalErrorFormat string = "Value '%v' cannot be used with the logical operator '%v', it is not a bool" + modifierErrorFormat string = "Value '%v' cannot be used with the modifier '%v', it is not a number" + comparatorErrorFormat string = "Value '%v' cannot be used with the comparator '%v', it is not a number" + ternaryErrorFormat string = "Value '%v' cannot be used with the ternary operator '%v', it is not a bool" + prefixErrorFormat string = "Value '%v' cannot be used with the prefix '%v'" +) + +type evaluationOperator func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) +type stageTypeCheck func(value interface{}) bool +type stageCombinedTypeCheck func(left interface{}, right interface{}) bool + +type evaluationStage struct { + symbol OperatorSymbol + + leftStage, rightStage *evaluationStage + + // the operation that will be used to evaluate this stage (such as adding [left] to [right] and return the result) + operator evaluationOperator + + // ensures that both left and right values are appropriate for this stage. Returns an error if they aren't operable. + leftTypeCheck stageTypeCheck + rightTypeCheck stageTypeCheck + + // if specified, will override whatever is used in "leftTypeCheck" and "rightTypeCheck". 
+ // primarily used for specific operators that don't care which side a given type is on, but still requires one side to be of a given type + // (like string concat) + typeCheck stageCombinedTypeCheck + + // regardless of which type check is used, this string format will be used as the error message for type errors + typeErrorFormat string +} + +var ( + _true = interface{}(true) + _false = interface{}(false) +) + +func (this *evaluationStage) swapWith(other *evaluationStage) { + + temp := *other + other.setToNonStage(*this) + this.setToNonStage(temp) +} + +func (this *evaluationStage) setToNonStage(other evaluationStage) { + + this.symbol = other.symbol + this.operator = other.operator + this.leftTypeCheck = other.leftTypeCheck + this.rightTypeCheck = other.rightTypeCheck + this.typeCheck = other.typeCheck + this.typeErrorFormat = other.typeErrorFormat +} + +func (this *evaluationStage) isShortCircuitable() bool { + + switch this.symbol { + case AND: + fallthrough + case OR: + fallthrough + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + fallthrough + case COALESCE: + return true + } + + return false +} + +func noopStageRight(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return right, nil +} + +func addStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + // string concat if either are strings + if isString(left) || isString(right) { + return fmt.Sprintf("%v%v", left, right), nil + } + + return left.(float64) + right.(float64), nil +} +func subtractStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) - right.(float64), nil +} +func multiplyStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) * right.(float64), nil +} +func divideStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) / right.(float64), nil +} +func exponentStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return math.Pow(left.(float64), right.(float64)), nil +} +func modulusStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return math.Mod(left.(float64), right.(float64)), nil +} +func gteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) >= right.(string)), nil + } + return boolIface(left.(float64) >= right.(float64)), nil +} +func gtStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) > right.(string)), nil + } + return boolIface(left.(float64) > right.(float64)), nil +} +func lteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) <= right.(string)), nil + } + return boolIface(left.(float64) <= right.(float64)), nil +} +func ltStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) < right.(string)), nil + } + return boolIface(left.(float64) < right.(float64)), nil +} +func equalStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(reflect.DeepEqual(left, right)), nil +} +func notEqualStage(left 
interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(!reflect.DeepEqual(left, right)), nil +} +func andStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(left.(bool) && right.(bool)), nil +} +func orStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(left.(bool) || right.(bool)), nil +} +func negateStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return -right.(float64), nil +} +func invertStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(!right.(bool)), nil +} +func bitwiseNotStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(^int64(right.(float64))), nil +} +func ternaryIfStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if left.(bool) { + return right, nil + } + return nil, nil +} +func ternaryElseStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if left != nil { + return left, nil + } + return right, nil +} + +func regexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + var pattern *regexp.Regexp + var err error + + switch right.(type) { + case string: + pattern, err = regexp.Compile(right.(string)) + if err != nil { + return nil, errors.New(fmt.Sprintf("Unable to compile regexp pattern '%v': %v", right, err)) + } + case *regexp.Regexp: + pattern = right.(*regexp.Regexp) + } + + return pattern.Match([]byte(left.(string))), nil +} + +func notRegexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + ret, err := regexStage(left, right, parameters) + if err != nil { + return nil, err + } + + return !(ret.(bool)), nil +} + +func bitwiseOrStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) | int64(right.(float64))), nil +} +func bitwiseAndStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) & int64(right.(float64))), nil +} +func bitwiseXORStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) ^ int64(right.(float64))), nil +} +func leftShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(uint64(left.(float64)) << uint64(right.(float64))), nil +} +func rightShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(uint64(left.(float64)) >> uint64(right.(float64))), nil +} + +func makeParameterStage(parameterName string) evaluationOperator { + + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + value, err := parameters.Get(parameterName) + if err != nil { + return nil, err + } + + return value, nil + } +} + +func makeLiteralStage(literal interface{}) evaluationOperator { + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return literal, nil + } +} + +func makeFunctionStage(function ExpressionFunction) evaluationOperator { + + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + if right == nil { + return function() + } + + switch right.(type) { + case 
[]interface{}: + return function(right.([]interface{})...) + default: + return function(right) + } + } +} + +func typeConvertParam(p reflect.Value, t reflect.Type) (ret reflect.Value, err error) { + defer func() { + if r := recover(); r != nil { + errorMsg := fmt.Sprintf("Argument type conversion failed: failed to convert '%s' to '%s'", p.Kind().String(), t.Kind().String()) + err = errors.New(errorMsg) + ret = p + } + }() + + return p.Convert(t), nil +} + +func typeConvertParams(method reflect.Value, params []reflect.Value) ([]reflect.Value, error) { + + methodType := method.Type() + numIn := methodType.NumIn() + numParams := len(params) + + if numIn != numParams { + if numIn > numParams { + return nil, fmt.Errorf("Too few arguments to parameter call: got %d arguments, expected %d", len(params), numIn) + } + return nil, fmt.Errorf("Too many arguments to parameter call: got %d arguments, expected %d", len(params), numIn) + } + + for i := 0; i < numIn; i++ { + t := methodType.In(i) + p := params[i] + pt := p.Type() + + if t.Kind() != pt.Kind() { + np, err := typeConvertParam(p, t) + if err != nil { + return nil, err + } + params[i] = np + } + } + + return params, nil +} + +func makeAccessorStage(pair []string) evaluationOperator { + + reconstructed := strings.Join(pair, ".") + + return func(left interface{}, right interface{}, parameters Parameters) (ret interface{}, err error) { + + var params []reflect.Value + + value, err := parameters.Get(pair[0]) + if err != nil { + return nil, err + } + + // while this library generally tries to handle panic-inducing cases on its own, + // accessors are a sticky case which have a lot of possible ways to fail. + // therefore every call to an accessor sets up a defer that tries to recover from panics, converting them to errors. + defer func() { + if r := recover(); r != nil { + errorMsg := fmt.Sprintf("Failed to access '%s': %v", reconstructed, r.(string)) + err = errors.New(errorMsg) + ret = nil + } + }() + + for i := 1; i < len(pair); i++ { + + coreValue := reflect.ValueOf(value) + + var corePtrVal reflect.Value + + // if this is a pointer, resolve it. + if coreValue.Kind() == reflect.Ptr { + corePtrVal = coreValue + coreValue = coreValue.Elem() + } + + if coreValue.Kind() != reflect.Struct { + return nil, errors.New("Unable to access '" + pair[i] + "', '" + pair[i-1] + "' is not a struct") + } + + field := coreValue.FieldByName(pair[i]) + if field != (reflect.Value{}) { + value = field.Interface() + continue + } + + method := coreValue.MethodByName(pair[i]) + if method == (reflect.Value{}) { + if corePtrVal.IsValid() { + method = corePtrVal.MethodByName(pair[i]) + } + if method == (reflect.Value{}) { + return nil, errors.New("No method or field '" + pair[i] + "' present on parameter '" + pair[i-1] + "'") + } + } + + switch right.(type) { + case []interface{}: + + givenParams := right.([]interface{}) + params = make([]reflect.Value, len(givenParams)) + for idx, _ := range givenParams { + params[idx] = reflect.ValueOf(givenParams[idx]) + } + + default: + + if right == nil { + params = []reflect.Value{} + break + } + + params = []reflect.Value{reflect.ValueOf(right.(interface{}))} + } + + params, err = typeConvertParams(method, params) + + if err != nil { + return nil, errors.New("Method call failed - '" + pair[0] + "." + pair[1] + "': " + err.Error()) + } + + returned := method.Call(params) + retLength := len(returned) + + if retLength == 0 { + return nil, errors.New("Method call '" + pair[i-1] + "." 
+ pair[i] + "' did not return any values.") + } + + if retLength == 1 { + + value = returned[0].Interface() + continue + } + + if retLength == 2 { + + errIface := returned[1].Interface() + err, validType := errIface.(error) + + if validType && errIface != nil { + return returned[0].Interface(), err + } + + value = returned[0].Interface() + continue + } + + return nil, errors.New("Method call '" + pair[0] + "." + pair[1] + "' did not return either one value, or a value and an error. Cannot interpret meaning.") + } + + value = castToFloat64(value) + return value, nil + } +} + +func separatorStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + var ret []interface{} + + switch left.(type) { + case []interface{}: + ret = append(left.([]interface{}), right) + default: + ret = []interface{}{left, right} + } + + return ret, nil +} + +func inStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + for _, value := range right.([]interface{}) { + if left == value { + return true, nil + } + } + return false, nil +} + +// + +func isString(value interface{}) bool { + + switch value.(type) { + case string: + return true + } + return false +} + +func isRegexOrString(value interface{}) bool { + + switch value.(type) { + case string: + return true + case *regexp.Regexp: + return true + } + return false +} + +func isBool(value interface{}) bool { + switch value.(type) { + case bool: + return true + } + return false +} + +func isFloat64(value interface{}) bool { + switch value.(type) { + case float64: + return true + } + return false +} + +/* + Addition usually means between numbers, but can also mean string concat. + String concat needs one (or both) of the sides to be a string. +*/ +func additionTypeCheck(left interface{}, right interface{}) bool { + + if isFloat64(left) && isFloat64(right) { + return true + } + if !isString(left) && !isString(right) { + return false + } + return true +} + +/* + Comparison can either be between numbers, or lexicographic between two strings, + but never between the two. +*/ +func comparatorTypeCheck(left interface{}, right interface{}) bool { + + if isFloat64(left) && isFloat64(right) { + return true + } + if isString(left) && isString(right) { + return true + } + return false +} + +func isArray(value interface{}) bool { + switch value.(type) { + case []interface{}: + return true + } + return false +} + +/* + Converting a boolean to an interface{} requires an allocation. + We can use interned bools to avoid this cost. +*/ +func boolIface(b bool) interface{} { + if b { + return _true + } + return _false +} diff --git a/vendor/github.com/Knetic/govaluate/expressionFunctions.go b/vendor/github.com/Knetic/govaluate/expressionFunctions.go new file mode 100644 index 000000000..ac6592b3f --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/expressionFunctions.go @@ -0,0 +1,8 @@ +package govaluate + +/* + Represents a function that can be called from within an expression. + This method must return an error if, for any reason, it is unable to produce exactly one unambiguous result. + An error returned will halt execution of the expression. 
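+
+	For example, a function exposing string length to expressions, as an entry in the
+	functions map passed to NewEvaluableExpressionWithFunctions:
+
+		"strlen": func(args ...interface{}) (interface{}, error) {
+			return (float64)(len(args[0].(string))), nil
+		},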
+*/ +type ExpressionFunction func(arguments ...interface{}) (interface{}, error) diff --git a/vendor/github.com/Knetic/govaluate/expressionOutputStream.go b/vendor/github.com/Knetic/govaluate/expressionOutputStream.go new file mode 100644 index 000000000..88a841639 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/expressionOutputStream.go @@ -0,0 +1,46 @@ +package govaluate + +import ( + "bytes" +) + +/* + Holds a series of "transactions" which represent each token as it is output by an outputter (such as ToSQLQuery()). + Some outputs (such as SQL) require a function call or non-c-like syntax to represent an expression. + To accomplish this, this struct keeps track of each translated token as it is output, and can return and rollback those transactions. +*/ +type expressionOutputStream struct { + transactions []string +} + +func (this *expressionOutputStream) add(transaction string) { + this.transactions = append(this.transactions, transaction) +} + +func (this *expressionOutputStream) rollback() string { + + index := len(this.transactions) - 1 + ret := this.transactions[index] + + this.transactions = this.transactions[:index] + return ret +} + +func (this *expressionOutputStream) createString(delimiter string) string { + + var retBuffer bytes.Buffer + var transaction string + + penultimate := len(this.transactions) - 1 + + for i := 0; i < penultimate; i++ { + + transaction = this.transactions[i] + + retBuffer.WriteString(transaction) + retBuffer.WriteString(delimiter) + } + retBuffer.WriteString(this.transactions[penultimate]) + + return retBuffer.String() +} diff --git a/vendor/github.com/Knetic/govaluate/lexerState.go b/vendor/github.com/Knetic/govaluate/lexerState.go new file mode 100644 index 000000000..6726e909e --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/lexerState.go @@ -0,0 +1,373 @@ +package govaluate + +import ( + "errors" + "fmt" +) + +type lexerState struct { + isEOF bool + isNullable bool + kind TokenKind + validNextKinds []TokenKind +} + +// lexer states. +// Constant for all purposes except compiler. 
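+// Each state describes, for one token kind, which token kinds may legally follow it
+// (validNextKinds) and whether a token of that kind may legally end an expression (isEOF).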
+var validLexerStates = []lexerState{ + + lexerState{ + kind: UNKNOWN, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + PATTERN, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + }, + }, + + lexerState{ + + kind: CLAUSE, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + PATTERN, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + + lexerState{ + + kind: CLAUSE_CLOSE, + isEOF: true, + isNullable: true, + validNextKinds: []TokenKind{ + + COMPARATOR, + MODIFIER, + NUMERIC, + BOOLEAN, + VARIABLE, + STRING, + PATTERN, + TIME, + CLAUSE, + CLAUSE_CLOSE, + LOGICALOP, + TERNARY, + SEPARATOR, + }, + }, + + lexerState{ + + kind: NUMERIC, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: BOOLEAN, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: STRING, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: TIME, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: PATTERN, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: VARIABLE, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: MODIFIER, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + BOOLEAN, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + lexerState{ + + kind: COMPARATOR, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + PATTERN, + }, + }, + lexerState{ + + kind: LOGICALOP, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + lexerState{ + + kind: PREFIX, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + + lexerState{ + + kind: TERNARY, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + STRING, + TIME, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: FUNCTION, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + CLAUSE, + }, + }, + lexerState{ + + kind: ACCESSOR, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + CLAUSE, + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: SEPARATOR, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + STRING, + TIME, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + }, + }, +} + +func (this lexerState) 
canTransitionTo(kind TokenKind) bool { + + for _, validKind := range this.validNextKinds { + + if validKind == kind { + return true + } + } + + return false +} + +func checkExpressionSyntax(tokens []ExpressionToken) error { + + var state lexerState + var lastToken ExpressionToken + var err error + + state = validLexerStates[0] + + for _, token := range tokens { + + if !state.canTransitionTo(token.Kind) { + + // call out a specific error for tokens looking like they want to be functions. + if lastToken.Kind == VARIABLE && token.Kind == CLAUSE { + return errors.New("Undefined function " + lastToken.Value.(string)) + } + + firstStateName := fmt.Sprintf("%s [%v]", state.kind.String(), lastToken.Value) + nextStateName := fmt.Sprintf("%s [%v]", token.Kind.String(), token.Value) + + return errors.New("Cannot transition token types from " + firstStateName + " to " + nextStateName) + } + + state, err = getLexerStateForToken(token.Kind) + if err != nil { + return err + } + + if !state.isNullable && token.Value == nil { + + errorMsg := fmt.Sprintf("Token kind '%v' cannot have a nil value", token.Kind.String()) + return errors.New(errorMsg) + } + + lastToken = token + } + + if !state.isEOF { + return errors.New("Unexpected end of expression") + } + return nil +} + +func getLexerStateForToken(kind TokenKind) (lexerState, error) { + + for _, possibleState := range validLexerStates { + + if possibleState.kind == kind { + return possibleState, nil + } + } + + errorMsg := fmt.Sprintf("No lexer state found for token kind '%v'\n", kind.String()) + return validLexerStates[0], errors.New(errorMsg) +} diff --git a/vendor/github.com/Knetic/govaluate/lexerStream.go b/vendor/github.com/Knetic/govaluate/lexerStream.go new file mode 100644 index 000000000..b72e6bdb1 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/lexerStream.go @@ -0,0 +1,39 @@ +package govaluate + +type lexerStream struct { + source []rune + position int + length int +} + +func newLexerStream(source string) *lexerStream { + + var ret *lexerStream + var runes []rune + + for _, character := range source { + runes = append(runes, character) + } + + ret = new(lexerStream) + ret.source = runes + ret.length = len(runes) + return ret +} + +func (this *lexerStream) readCharacter() rune { + + var character rune + + character = this.source[this.position] + this.position += 1 + return character +} + +func (this *lexerStream) rewind(amount int) { + this.position -= amount +} + +func (this lexerStream) canRead() bool { + return this.position < this.length +} diff --git a/vendor/github.com/Knetic/govaluate/parameters.go b/vendor/github.com/Knetic/govaluate/parameters.go new file mode 100644 index 000000000..6c5b9ecb5 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/parameters.go @@ -0,0 +1,32 @@ +package govaluate + +import ( + "errors" +) + +/* + Parameters is a collection of named parameters that can be used by an EvaluableExpression to retrieve parameters + when an expression tries to use them. +*/ +type Parameters interface { + + /* + Get gets the parameter of the given name, or an error if the parameter is unavailable. + Failure to find the given parameter should be indicated by returning an error. + */ + Get(name string) (interface{}, error) +} + +type MapParameters map[string]interface{} + +func (p MapParameters) Get(name string) (interface{}, error) { + + value, found := p[name] + + if !found { + errorMessage := "No parameter '" + name + "' found." 
+ return nil, errors.New(errorMessage) + } + + return value, nil +} diff --git a/vendor/github.com/Knetic/govaluate/parsing.go b/vendor/github.com/Knetic/govaluate/parsing.go new file mode 100644 index 000000000..40c7ed2c4 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/parsing.go @@ -0,0 +1,526 @@ +package govaluate + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" + "unicode" +) + +func parseTokens(expression string, functions map[string]ExpressionFunction) ([]ExpressionToken, error) { + + var ret []ExpressionToken + var token ExpressionToken + var stream *lexerStream + var state lexerState + var err error + var found bool + + stream = newLexerStream(expression) + state = validLexerStates[0] + + for stream.canRead() { + + token, err, found = readToken(stream, state, functions) + + if err != nil { + return ret, err + } + + if !found { + break + } + + state, err = getLexerStateForToken(token.Kind) + if err != nil { + return ret, err + } + + // append this valid token + ret = append(ret, token) + } + + err = checkBalance(ret) + if err != nil { + return nil, err + } + + return ret, nil +} + +func readToken(stream *lexerStream, state lexerState, functions map[string]ExpressionFunction) (ExpressionToken, error, bool) { + + var function ExpressionFunction + var ret ExpressionToken + var tokenValue interface{} + var tokenTime time.Time + var tokenString string + var kind TokenKind + var character rune + var found bool + var completed bool + var err error + + // numeric is 0-9, or . or 0x followed by digits + // string starts with ' + // variable is alphanumeric, always starts with a letter + // bracket always means variable + // symbols are anything non-alphanumeric + // all others read into a buffer until they reach the end of the stream + for stream.canRead() { + + character = stream.readCharacter() + + if unicode.IsSpace(character) { + continue + } + + kind = UNKNOWN + + // numeric constant + if isNumeric(character) { + + if stream.canRead() && character == '0' { + character = stream.readCharacter() + + if stream.canRead() && character == 'x' { + tokenString, _ = readUntilFalse(stream, false, true, true, isHexDigit) + tokenValueInt, err := strconv.ParseUint(tokenString, 16, 64) + + if err != nil { + errorMsg := fmt.Sprintf("Unable to parse hex value '%v' to uint64\n", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + + kind = NUMERIC + tokenValue = float64(tokenValueInt) + break + } else { + stream.rewind(1) + } + } + + tokenString = readTokenUntilFalse(stream, isNumeric) + tokenValue, err = strconv.ParseFloat(tokenString, 64) + + if err != nil { + errorMsg := fmt.Sprintf("Unable to parse numeric value '%v' to float64\n", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + kind = NUMERIC + break + } + + // comma, separator + if character == ',' { + + tokenValue = "," + kind = SEPARATOR + break + } + + // escaped variable + if character == '[' { + + tokenValue, completed = readUntilFalse(stream, true, false, true, isNotClosingBracket) + kind = VARIABLE + + if !completed { + return ExpressionToken{}, errors.New("Unclosed parameter bracket"), false + } + + // above method normally rewinds us to the closing bracket, which we want to skip. + stream.rewind(-1) + break + } + + // regular variable - or function? + if unicode.IsLetter(character) { + + tokenString = readTokenUntilFalse(stream, isVariableName) + + tokenValue = tokenString + kind = VARIABLE + + // boolean? 
+ if tokenValue == "true" { + + kind = BOOLEAN + tokenValue = true + } else { + + if tokenValue == "false" { + + kind = BOOLEAN + tokenValue = false + } + } + + // textual operator? + if tokenValue == "in" || tokenValue == "IN" { + + // force lower case for consistency + tokenValue = "in" + kind = COMPARATOR + } + + // function? + function, found = functions[tokenString] + if found { + kind = FUNCTION + tokenValue = function + } + + // accessor? + accessorIndex := strings.Index(tokenString, ".") + if accessorIndex > 0 { + + // check that it doesn't end with a hanging period + if tokenString[len(tokenString)-1] == '.' { + errorMsg := fmt.Sprintf("Hanging accessor on token '%s'", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + + kind = ACCESSOR + splits := strings.Split(tokenString, ".") + tokenValue = splits + + // check that none of them are unexported + for i := 1; i < len(splits); i++ { + + firstCharacter := getFirstRune(splits[i]) + + if unicode.ToUpper(firstCharacter) != firstCharacter { + errorMsg := fmt.Sprintf("Unable to access unexported field '%s' in token '%s'", splits[i], tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + } + } + break + } + + if !isNotQuote(character) { + tokenValue, completed = readUntilFalse(stream, true, false, true, isNotQuote) + + if !completed { + return ExpressionToken{}, errors.New("Unclosed string literal"), false + } + + // advance the stream one position, since reading until false assumes the terminator is a real token + stream.rewind(-1) + + // check to see if this can be parsed as a time. + tokenTime, found = tryParseTime(tokenValue.(string)) + if found { + kind = TIME + tokenValue = tokenTime + } else { + kind = STRING + } + break + } + + if character == '(' { + tokenValue = character + kind = CLAUSE + break + } + + if character == ')' { + tokenValue = character + kind = CLAUSE_CLOSE + break + } + + // must be a known symbol + tokenString = readTokenUntilFalse(stream, isNotAlphanumeric) + tokenValue = tokenString + + // quick hack for the case where "-" can mean "prefixed negation" or "minus", which are used + // very differently. + if state.canTransitionTo(PREFIX) { + _, found = prefixSymbols[tokenString] + if found { + + kind = PREFIX + break + } + } + _, found = modifierSymbols[tokenString] + if found { + + kind = MODIFIER + break + } + + _, found = logicalSymbols[tokenString] + if found { + + kind = LOGICALOP + break + } + + _, found = comparatorSymbols[tokenString] + if found { + + kind = COMPARATOR + break + } + + _, found = ternarySymbols[tokenString] + if found { + + kind = TERNARY + break + } + + errorMessage := fmt.Sprintf("Invalid token: '%s'", tokenString) + return ret, errors.New(errorMessage), false + } + + ret.Kind = kind + ret.Value = tokenValue + + return ret, nil, (kind != UNKNOWN) +} + +func readTokenUntilFalse(stream *lexerStream, condition func(rune) bool) string { + + var ret string + + stream.rewind(1) + ret, _ = readUntilFalse(stream, false, true, true, condition) + return ret +} + +/* + Returns the string that was read until the given [condition] was false, or whitespace was broken. + Returns false if the stream ended before whitespace was broken or condition was met. 
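+
+	For example, readTokenUntilFalse(stream, isNumeric) on the input "123+4"
+	consumes "123" and rewinds one position, so that '+' is the next character
+	read when tokenizing resumes.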
+*/
+func readUntilFalse(stream *lexerStream, includeWhitespace bool, breakWhitespace bool, allowEscaping bool, condition func(rune) bool) (string, bool) {
+
+	var tokenBuffer bytes.Buffer
+	var character rune
+	var conditioned bool
+
+	conditioned = false
+
+	for stream.canRead() {
+
+		character = stream.readCharacter()
+
+		// Use backslashes to escape anything
+		if allowEscaping && character == '\\' {
+
+			character = stream.readCharacter()
+			tokenBuffer.WriteString(string(character))
+			continue
+		}
+
+		if unicode.IsSpace(character) {
+
+			if breakWhitespace && tokenBuffer.Len() > 0 {
+				conditioned = true
+				break
+			}
+			if !includeWhitespace {
+				continue
+			}
+		}
+
+		if condition(character) {
+			tokenBuffer.WriteString(string(character))
+		} else {
+			conditioned = true
+			stream.rewind(1)
+			break
+		}
+	}
+
+	return tokenBuffer.String(), conditioned
+}
+
+/*
+	Checks to see if any optimizations can be performed on the given [tokens], which form a complete, valid expression.
+	The returned slice represents the optimized (or unmodified) list of tokens to use.
+*/
+func optimizeTokens(tokens []ExpressionToken) ([]ExpressionToken, error) {
+
+	var token ExpressionToken
+	var symbol OperatorSymbol
+	var err error
+	var index int
+
+	for index, token = range tokens {
+
+		// if we find a regex operator, and the right-hand value is a constant, precompile and replace with a pattern.
+		if token.Kind != COMPARATOR {
+			continue
+		}
+
+		symbol = comparatorSymbols[token.Value.(string)]
+		if symbol != REQ && symbol != NREQ {
+			continue
+		}
+
+		index++
+		token = tokens[index]
+		if token.Kind == STRING {
+
+			token.Kind = PATTERN
+			token.Value, err = regexp.Compile(token.Value.(string))
+
+			if err != nil {
+				return tokens, err
+			}
+
+			tokens[index] = token
+		}
+	}
+	return tokens, nil
+}
+
+/*
+	Checks the balance of tokens which have multiple parts, such as parentheses.
+*/
+func checkBalance(tokens []ExpressionToken) error {
+
+	var stream *tokenStream
+	var token ExpressionToken
+	var parens int
+
+	stream = newTokenStream(tokens)
+
+	for stream.hasNext() {
+
+		token = stream.next()
+		if token.Kind == CLAUSE {
+			parens++
+			continue
+		}
+		if token.Kind == CLAUSE_CLOSE {
+			parens--
+			continue
+		}
+	}
+
+	if parens != 0 {
+		return errors.New("Unbalanced parenthesis")
+	}
+	return nil
+}
+
+func isDigit(character rune) bool {
+	return unicode.IsDigit(character)
+}
+
+func isHexDigit(character rune) bool {
+
+	character = unicode.ToLower(character)
+
+	return unicode.IsDigit(character) ||
+		character == 'a' ||
+		character == 'b' ||
+		character == 'c' ||
+		character == 'd' ||
+		character == 'e' ||
+		character == 'f'
+}
+
+func isNumeric(character rune) bool {
+
+	return unicode.IsDigit(character) || character == '.'
+}
+
+func isNotQuote(character rune) bool {
+
+	return character != '\'' && character != '"'
+}
+
+func isNotAlphanumeric(character rune) bool {
+
+	return !(unicode.IsDigit(character) ||
+		unicode.IsLetter(character) ||
+		character == '(' ||
+		character == ')' ||
+		character == '[' ||
+		character == ']' || // starting to feel like there needs to be an `isOperation` func (#59)
+		!isNotQuote(character))
+}
+
+func isVariableName(character rune) bool {
+
+	return unicode.IsLetter(character) ||
+		unicode.IsDigit(character) ||
+		character == '_' ||
+		character == '.'
+}
+
+func isNotClosingBracket(character rune) bool {
+
+	return character != ']'
+}
+
+/*
+	Attempts to parse the [candidate] as a Time.
+ Tries a series of standardized date formats, returns the Time if one applies, + otherwise returns false through the second return. +*/ +func tryParseTime(candidate string) (time.Time, bool) { + + var ret time.Time + var found bool + + timeFormats := [...]string{ + time.ANSIC, + time.UnixDate, + time.RubyDate, + time.Kitchen, + time.RFC3339, + time.RFC3339Nano, + "2006-01-02", // RFC 3339 + "2006-01-02 15:04", // RFC 3339 with minutes + "2006-01-02 15:04:05", // RFC 3339 with seconds + "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone + "2006-01-02T15Z0700", // ISO8601 with hour + "2006-01-02T15:04Z0700", // ISO8601 with minutes + "2006-01-02T15:04:05Z0700", // ISO8601 with seconds + "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds + } + + for _, format := range timeFormats { + + ret, found = tryParseExactTime(candidate, format) + if found { + return ret, true + } + } + + return time.Now(), false +} + +func tryParseExactTime(candidate string, format string) (time.Time, bool) { + + var ret time.Time + var err error + + ret, err = time.ParseInLocation(format, candidate, time.Local) + if err != nil { + return time.Now(), false + } + + return ret, true +} + +func getFirstRune(candidate string) rune { + + for _, character := range candidate { + return character + } + + return 0 +} diff --git a/vendor/github.com/Knetic/govaluate/sanitizedParameters.go b/vendor/github.com/Knetic/govaluate/sanitizedParameters.go new file mode 100644 index 000000000..28bd795d9 --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/sanitizedParameters.go @@ -0,0 +1,43 @@ +package govaluate + +// sanitizedParameters is a wrapper for Parameters that does sanitization as +// parameters are accessed. +type sanitizedParameters struct { + orig Parameters +} + +func (p sanitizedParameters) Get(key string) (interface{}, error) { + value, err := p.orig.Get(key) + if err != nil { + return nil, err + } + + return castToFloat64(value), nil +} + +func castToFloat64(value interface{}) interface{} { + switch value.(type) { + case uint8: + return float64(value.(uint8)) + case uint16: + return float64(value.(uint16)) + case uint32: + return float64(value.(uint32)) + case uint64: + return float64(value.(uint64)) + case int8: + return float64(value.(int8)) + case int16: + return float64(value.(int16)) + case int32: + return float64(value.(int32)) + case int64: + return float64(value.(int64)) + case int: + return float64(value.(int)) + case float32: + return float64(value.(float32)) + } + + return value +} diff --git a/vendor/github.com/Knetic/govaluate/stagePlanner.go b/vendor/github.com/Knetic/govaluate/stagePlanner.go new file mode 100644 index 000000000..d71ed129d --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/stagePlanner.go @@ -0,0 +1,724 @@ +package govaluate + +import ( + "errors" + "fmt" + "time" +) + +var stageSymbolMap = map[OperatorSymbol]evaluationOperator{ + EQ: equalStage, + NEQ: notEqualStage, + GT: gtStage, + LT: ltStage, + GTE: gteStage, + LTE: lteStage, + REQ: regexStage, + NREQ: notRegexStage, + AND: andStage, + OR: orStage, + IN: inStage, + BITWISE_OR: bitwiseOrStage, + BITWISE_AND: bitwiseAndStage, + BITWISE_XOR: bitwiseXORStage, + BITWISE_LSHIFT: leftShiftStage, + BITWISE_RSHIFT: rightShiftStage, + PLUS: addStage, + MINUS: subtractStage, + MULTIPLY: multiplyStage, + DIVIDE: divideStage, + MODULUS: modulusStage, + EXPONENT: exponentStage, + NEGATE: negateStage, + INVERT: invertStage, + BITWISE_NOT: bitwiseNotStage, + TERNARY_TRUE: ternaryIfStage, + TERNARY_FALSE: 
ternaryElseStage,
+	COALESCE:       ternaryElseStage,
+	SEPARATE:       separatorStage,
+}
+
+/*
+	A "precedent" is a function which will recursively parse new evaluationStages from a given stream of tokens.
+	It's called a `precedent` because it is expected to handle exactly one precedence level of operator,
+	and to defer to other `precedent`s for other operators.
+*/
+type precedent func(stream *tokenStream) (*evaluationStage, error)
+
+/*
+	A convenience struct for specifying the behavior of a `precedent`.
+	Most `precedent` functions can be described by the same function, just with different type checks, symbols, and error formats.
+	This struct is passed to `makePrecedentFromPlanner` to create a `precedent` function.
+*/
+type precedencePlanner struct {
+	validSymbols map[string]OperatorSymbol
+	validKinds   []TokenKind
+
+	typeErrorFormat string
+
+	next      precedent
+	nextRight precedent
+}
+
+var planPrefix precedent
+var planExponential precedent
+var planMultiplicative precedent
+var planAdditive precedent
+var planBitwise precedent
+var planShift precedent
+var planComparator precedent
+var planLogicalAnd precedent
+var planLogicalOr precedent
+var planTernary precedent
+var planSeparator precedent
+
+func init() {
+
+	// all these stages can use the same code (in `planPrecedenceLevel`) to execute,
+	// they simply need different type checks, symbols, and recursive precedents.
+	// While not all precedent phases are listed here, most can be represented this way.
+	planPrefix = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    prefixSymbols,
+		validKinds:      []TokenKind{PREFIX},
+		typeErrorFormat: prefixErrorFormat,
+		nextRight:       planFunction,
+	})
+	planExponential = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    exponentialSymbolsS,
+		validKinds:      []TokenKind{MODIFIER},
+		typeErrorFormat: modifierErrorFormat,
+		next:            planFunction,
+	})
+	planMultiplicative = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    multiplicativeSymbols,
+		validKinds:      []TokenKind{MODIFIER},
+		typeErrorFormat: modifierErrorFormat,
+		next:            planExponential,
+	})
+	planAdditive = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    additiveSymbols,
+		validKinds:      []TokenKind{MODIFIER},
+		typeErrorFormat: modifierErrorFormat,
+		next:            planMultiplicative,
+	})
+	planShift = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    bitwiseShiftSymbols,
+		validKinds:      []TokenKind{MODIFIER},
+		typeErrorFormat: modifierErrorFormat,
+		next:            planAdditive,
+	})
+	planBitwise = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    bitwiseSymbols,
+		validKinds:      []TokenKind{MODIFIER},
+		typeErrorFormat: modifierErrorFormat,
+		next:            planShift,
+	})
+	planComparator = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    comparatorSymbols,
+		validKinds:      []TokenKind{COMPARATOR},
+		typeErrorFormat: comparatorErrorFormat,
+		next:            planBitwise,
+	})
+	planLogicalAnd = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    map[string]OperatorSymbol{"&&": AND},
+		validKinds:      []TokenKind{LOGICALOP},
+		typeErrorFormat: logicalErrorFormat,
+		next:            planComparator,
+	})
+	planLogicalOr = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    map[string]OperatorSymbol{"||": OR},
+		validKinds:      []TokenKind{LOGICALOP},
+		typeErrorFormat: logicalErrorFormat,
+		next:            planLogicalAnd,
+	})
+	planTernary = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols:    ternarySymbols,
+		validKinds:      []TokenKind{TERNARY},
+		typeErrorFormat: ternaryErrorFormat,
+		next:            planLogicalOr,
+	})
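+	// Note the chain: planSeparator (below) defers to planTernary, which
+	// defers to planLogicalOr, then planLogicalAnd, planComparator,
+	// planBitwise, planShift, planAdditive, planMultiplicative, and
+	// planExponential, which bottoms out at planFunction. Parsing therefore
+	// descends from the loosest-binding operators to the tightest.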
+	planSeparator = makePrecedentFromPlanner(&precedencePlanner{
+		validSymbols: separatorSymbols,
+		validKinds:   []TokenKind{SEPARATOR},
+		next:         planTernary,
+	})
+}
+
+/*
+	Given a planner, creates a function which will evaluate a specific precedence level of operators,
+	and link it to other `precedent`s which recurse to parse other precedence levels.
+*/
+func makePrecedentFromPlanner(planner *precedencePlanner) precedent {
+
+	var generated precedent
+	var nextRight precedent
+
+	generated = func(stream *tokenStream) (*evaluationStage, error) {
+		return planPrecedenceLevel(
+			stream,
+			planner.typeErrorFormat,
+			planner.validSymbols,
+			planner.validKinds,
+			nextRight,
+			planner.next,
+		)
+	}
+
+	if planner.nextRight != nil {
+		nextRight = planner.nextRight
+	} else {
+		nextRight = generated
+	}
+
+	return generated
+}
+
+/*
+	Creates an `evaluationStageList` object which represents an execution plan (or tree)
+	which is used to completely evaluate a set of tokens at evaluation-time.
+	The three stages of evaluation can be thought of as parsing strings to tokens, then tokens to a stage list, then evaluation with parameters.
+*/
+func planStages(tokens []ExpressionToken) (*evaluationStage, error) {
+
+	stream := newTokenStream(tokens)
+
+	stage, err := planTokens(stream)
+	if err != nil {
+		return nil, err
+	}
+
+	// the expression is now fully planned, but same-precedence operators still need to be re-ordered.
+	// this could probably be avoided with a different planning method
+	reorderStages(stage)
+
+	stage = elideLiterals(stage)
+	return stage, nil
+}
+
+func planTokens(stream *tokenStream) (*evaluationStage, error) {
+
+	if !stream.hasNext() {
+		return nil, nil
+	}
+
+	return planSeparator(stream)
+}
+
+/*
+	The most common method of parsing an evaluation stage for a given precedence.
+	Most stages use the same logic.
+*/
+func planPrecedenceLevel(
+	stream *tokenStream,
+	typeErrorFormat string,
+	validSymbols map[string]OperatorSymbol,
+	validKinds []TokenKind,
+	rightPrecedent precedent,
+	leftPrecedent precedent) (*evaluationStage, error) {
+
+	var token ExpressionToken
+	var symbol OperatorSymbol
+	var leftStage, rightStage *evaluationStage
+	var checks typeChecks
+	var err error
+	var keyFound bool
+
+	if leftPrecedent != nil {
+
+		leftStage, err = leftPrecedent(stream)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for stream.hasNext() {
+
+		token = stream.next()
+
+		if len(validKinds) > 0 {
+
+			keyFound = false
+			for _, kind := range validKinds {
+				if kind == token.Kind {
+					keyFound = true
+					break
+				}
+			}
+
+			if !keyFound {
+				break
+			}
+		}
+
+		if validSymbols != nil {
+
+			if !isString(token.Value) {
+				break
+			}
+
+			symbol, keyFound = validSymbols[token.Value.(string)]
+			if !keyFound {
+				break
+			}
+		}
+
+		if rightPrecedent != nil {
+			rightStage, err = rightPrecedent(stream)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		checks = findTypeChecks(symbol)
+
+		return &evaluationStage{
+
+			symbol:     symbol,
+			leftStage:  leftStage,
+			rightStage: rightStage,
+			operator:   stageSymbolMap[symbol],
+
+			leftTypeCheck:   checks.left,
+			rightTypeCheck:  checks.right,
+			typeCheck:       checks.combined,
+			typeErrorFormat: typeErrorFormat,
+		}, nil
+	}
+
+	stream.rewind()
+	return leftStage, nil
+}
+
+/*
+	A special case where functions need to be of higher precedence than values, and need a special wrapped execution stage operator.
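+
+	For example, in "foo(1, 2)" the FUNCTION token for "foo" is consumed here
+	and the parenthesized argument clause is parsed (via planAccessor and
+	planValue) into the right-hand side of a FUNCTIONAL stage.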
+*/
+func planFunction(stream *tokenStream) (*evaluationStage, error) {
+
+	var token ExpressionToken
+	var rightStage *evaluationStage
+	var err error
+
+	token = stream.next()
+
+	if token.Kind != FUNCTION {
+		stream.rewind()
+		return planAccessor(stream)
+	}
+
+	rightStage, err = planAccessor(stream)
+	if err != nil {
+		return nil, err
+	}
+
+	return &evaluationStage{
+
+		symbol:          FUNCTIONAL,
+		rightStage:      rightStage,
+		operator:        makeFunctionStage(token.Value.(ExpressionFunction)),
+		typeErrorFormat: "Unable to run function '%v': %v",
+	}, nil
+}
+
+func planAccessor(stream *tokenStream) (*evaluationStage, error) {
+
+	var token, otherToken ExpressionToken
+	var rightStage *evaluationStage
+	var err error
+
+	if !stream.hasNext() {
+		return nil, nil
+	}
+
+	token = stream.next()
+
+	if token.Kind != ACCESSOR {
+		stream.rewind()
+		return planValue(stream)
+	}
+
+	// check if this is meant to be a function or a field.
+	// fields have a clause next to them, functions do not.
+	// if it's a function, parse the arguments. Otherwise leave the right stage null.
+	if stream.hasNext() {
+
+		otherToken = stream.next()
+		if otherToken.Kind == CLAUSE {
+
+			stream.rewind()
+
+			rightStage, err = planTokens(stream)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			stream.rewind()
+		}
+	}
+
+	return &evaluationStage{
+
+		symbol:          ACCESS,
+		rightStage:      rightStage,
+		operator:        makeAccessorStage(token.Value.([]string)),
+		typeErrorFormat: "Unable to access parameter field or method '%v': %v",
+	}, nil
+}
+
+/*
+	A truly special precedence function, this handles all the "lowest-case" errata of the process, including literals, parameters,
+	clauses, and prefixes.
+*/
+func planValue(stream *tokenStream) (*evaluationStage, error) {
+
+	var token ExpressionToken
+	var symbol OperatorSymbol
+	var ret *evaluationStage
+	var operator evaluationOperator
+	var err error
+
+	if !stream.hasNext() {
+		return nil, nil
+	}
+
+	token = stream.next()
+
+	switch token.Kind {
+
+	case CLAUSE:
+
+		ret, err = planTokens(stream)
+		if err != nil {
+			return nil, err
+		}
+
+		// advance past the CLAUSE_CLOSE token. We know that it's a CLAUSE_CLOSE, because at parse-time we check for unbalanced parens.
+		stream.next()
+
+		// the stage we got represents all of the logic contained within the parens
+		// but for technical reasons, we need to wrap this stage in a "noop" stage which breaks long chains of precedence.
+		// see github #33.
+		ret = &evaluationStage{
+			rightStage: ret,
+			operator:   noopStageRight,
+			symbol:     NOOP,
+		}
+
+		return ret, nil
+
+	case CLAUSE_CLOSE:
+
+		// when functions have empty params, this will be hit. In this case, we don't have any evaluation stage to do,
+		// so we just return nil so that the stage planner continues on its way.
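+		// For example, "foo()" lexes to FUNCTION, CLAUSE, CLAUSE_CLOSE; the
+		// argument clause is empty, so this case rewinds and reports no stage.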
+ stream.rewind() + return nil, nil + + case VARIABLE: + operator = makeParameterStage(token.Value.(string)) + + case NUMERIC: + fallthrough + case STRING: + fallthrough + case PATTERN: + fallthrough + case BOOLEAN: + symbol = LITERAL + operator = makeLiteralStage(token.Value) + case TIME: + symbol = LITERAL + operator = makeLiteralStage(float64(token.Value.(time.Time).Unix())) + + case PREFIX: + stream.rewind() + return planPrefix(stream) + } + + if operator == nil { + errorMsg := fmt.Sprintf("Unable to plan token kind: '%s', value: '%v'", token.Kind.String(), token.Value) + return nil, errors.New(errorMsg) + } + + return &evaluationStage{ + symbol: symbol, + operator: operator, + }, nil +} + +/* + Convenience function to pass a triplet of typechecks between `findTypeChecks` and `planPrecedenceLevel`. + Each of these members may be nil, which indicates that type does not matter for that value. +*/ +type typeChecks struct { + left stageTypeCheck + right stageTypeCheck + combined stageCombinedTypeCheck +} + +/* + Maps a given [symbol] to a set of typechecks to be used during runtime. +*/ +func findTypeChecks(symbol OperatorSymbol) typeChecks { + + switch symbol { + case GT: + fallthrough + case LT: + fallthrough + case GTE: + fallthrough + case LTE: + return typeChecks{ + combined: comparatorTypeCheck, + } + case REQ: + fallthrough + case NREQ: + return typeChecks{ + left: isString, + right: isRegexOrString, + } + case AND: + fallthrough + case OR: + return typeChecks{ + left: isBool, + right: isBool, + } + case IN: + return typeChecks{ + right: isArray, + } + case BITWISE_LSHIFT: + fallthrough + case BITWISE_RSHIFT: + fallthrough + case BITWISE_OR: + fallthrough + case BITWISE_AND: + fallthrough + case BITWISE_XOR: + return typeChecks{ + left: isFloat64, + right: isFloat64, + } + case PLUS: + return typeChecks{ + combined: additionTypeCheck, + } + case MINUS: + fallthrough + case MULTIPLY: + fallthrough + case DIVIDE: + fallthrough + case MODULUS: + fallthrough + case EXPONENT: + return typeChecks{ + left: isFloat64, + right: isFloat64, + } + case NEGATE: + return typeChecks{ + right: isFloat64, + } + case INVERT: + return typeChecks{ + right: isBool, + } + case BITWISE_NOT: + return typeChecks{ + right: isFloat64, + } + case TERNARY_TRUE: + return typeChecks{ + left: isBool, + } + + // unchecked cases + case EQ: + fallthrough + case NEQ: + return typeChecks{} + case TERNARY_FALSE: + fallthrough + case COALESCE: + fallthrough + default: + return typeChecks{} + } +} + +/* + During stage planning, stages of equal precedence are parsed such that they'll be evaluated in reverse order. + For commutative operators like "+" or "-", it's no big deal. But for order-specific operators, it ruins the expected result. +*/ +func reorderStages(rootStage *evaluationStage) { + + // traverse every rightStage until we find multiples in a row of the same precedence. 
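+	// For example, "1 - 2 - 3" is initially planned so that "2 - 3" evaluates
+	// first (yielding 1 - (-1) = 2 instead of the expected -4); mirroring the
+	// run of equal-precedence stages restores left-to-right evaluation.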
+ var identicalPrecedences []*evaluationStage + var currentStage, nextStage *evaluationStage + var precedence, currentPrecedence operatorPrecedence + + nextStage = rootStage + precedence = findOperatorPrecedenceForSymbol(rootStage.symbol) + + for nextStage != nil { + + currentStage = nextStage + nextStage = currentStage.rightStage + + // left depth first, since this entire method only looks for precedences down the right side of the tree + if currentStage.leftStage != nil { + reorderStages(currentStage.leftStage) + } + + currentPrecedence = findOperatorPrecedenceForSymbol(currentStage.symbol) + + if currentPrecedence == precedence { + identicalPrecedences = append(identicalPrecedences, currentStage) + continue + } + + // precedence break. + // See how many in a row we had, and reorder if there's more than one. + if len(identicalPrecedences) > 1 { + mirrorStageSubtree(identicalPrecedences) + } + + identicalPrecedences = []*evaluationStage{currentStage} + precedence = currentPrecedence + } + + if len(identicalPrecedences) > 1 { + mirrorStageSubtree(identicalPrecedences) + } +} + +/* + Performs a "mirror" on a subtree of stages. + This mirror functionally inverts the order of execution for all members of the [stages] list. + That list is assumed to be a root-to-leaf (ordered) list of evaluation stages, where each is a right-hand stage of the last. +*/ +func mirrorStageSubtree(stages []*evaluationStage) { + + var rootStage, inverseStage, carryStage, frontStage *evaluationStage + + stagesLength := len(stages) + + // reverse all right/left + for _, frontStage = range stages { + + carryStage = frontStage.rightStage + frontStage.rightStage = frontStage.leftStage + frontStage.leftStage = carryStage + } + + // end left swaps with root right + rootStage = stages[0] + frontStage = stages[stagesLength-1] + + carryStage = frontStage.leftStage + frontStage.leftStage = rootStage.rightStage + rootStage.rightStage = carryStage + + // for all non-root non-end stages, right is swapped with inverse stage right in list + for i := 0; i < (stagesLength-2)/2+1; i++ { + + frontStage = stages[i+1] + inverseStage = stages[stagesLength-i-1] + + carryStage = frontStage.rightStage + frontStage.rightStage = inverseStage.rightStage + inverseStage.rightStage = carryStage + } + + // swap all other information with inverse stages + for i := 0; i < stagesLength/2; i++ { + + frontStage = stages[i] + inverseStage = stages[stagesLength-i-1] + frontStage.swapWith(inverseStage) + } +} + +/* + Recurses through all operators in the entire tree, eliding operators where both sides are literals. +*/ +func elideLiterals(root *evaluationStage) *evaluationStage { + + if root.leftStage != nil { + root.leftStage = elideLiterals(root.leftStage) + } + + if root.rightStage != nil { + root.rightStage = elideLiterals(root.rightStage) + } + + return elideStage(root) +} + +/* + Elides a specific stage, if possible. + Returns the unmodified [root] stage if it cannot or should not be elided. + Otherwise, returns a new stage representing the condensed value from the elided stages. +*/ +func elideStage(root *evaluationStage) *evaluationStage { + + var leftValue, rightValue, result interface{} + var err error + + // right side must be a non-nil value. Left side must be nil or a value. 
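+	// (More precisely, both sides must be non-nil LITERAL stages for the
+	// elision below to apply.) For example, a stage for "2 * 5" inside a
+	// larger expression is computed once here at plan time and replaced by a
+	// LITERAL stage holding 10.0, so it is not re-evaluated on every call.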
+	if root.rightStage == nil ||
+		root.rightStage.symbol != LITERAL ||
+		root.leftStage == nil ||
+		root.leftStage.symbol != LITERAL {
+		return root
+	}
+
+	// don't elide some operators
+	switch root.symbol {
+	case SEPARATE:
+		fallthrough
+	case IN:
+		return root
+	}
+
+	// both sides are values, get their actual values.
+	// errors should be near-impossible here. If we encounter them, just abort this optimization.
+	leftValue, err = root.leftStage.operator(nil, nil, nil)
+	if err != nil {
+		return root
+	}
+
+	rightValue, err = root.rightStage.operator(nil, nil, nil)
+	if err != nil {
+		return root
+	}
+
+	// typecheck, since the grammar checker is a bit loose with which operator symbols go together.
+	err = typeCheck(root.leftTypeCheck, leftValue, root.symbol, root.typeErrorFormat)
+	if err != nil {
+		return root
+	}
+
+	err = typeCheck(root.rightTypeCheck, rightValue, root.symbol, root.typeErrorFormat)
+	if err != nil {
+		return root
+	}
+
+	if root.typeCheck != nil && !root.typeCheck(leftValue, rightValue) {
+		return root
+	}
+
+	// pre-calculate, and return a new stage representing the result.
+	result, err = root.operator(leftValue, rightValue, nil)
+	if err != nil {
+		return root
+	}
+
+	return &evaluationStage{
+		symbol:   LITERAL,
+		operator: makeLiteralStage(result),
+	}
+}
diff --git a/vendor/github.com/Knetic/govaluate/test.sh b/vendor/github.com/Knetic/govaluate/test.sh
new file mode 100644
index 000000000..11aa8b332
--- /dev/null
+++ b/vendor/github.com/Knetic/govaluate/test.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Script that runs tests, code coverage, and benchmarks all at once.
+# Builds a symlink in /tmp, mostly to avoid messing with GOPATH at the user's shell level.
+
+TEMPORARY_PATH="/tmp/govaluate_test"
+SRC_PATH="${TEMPORARY_PATH}/src"
+FULL_PATH="${TEMPORARY_PATH}/src/govaluate"
+
+# set up temporary directory
+rm -rf "${FULL_PATH}"
+mkdir -p "${SRC_PATH}"
+
+ln -s $(pwd) "${FULL_PATH}"
+export GOPATH="${TEMPORARY_PATH}"
+
+pushd "${TEMPORARY_PATH}/src/govaluate"
+
+# run the actual tests.
+export GOVALUATE_TORTURE_TEST="true"
+go test -bench=. -benchmem #-coverprofile coverage.out
+status=$?
+
+if [ "${status}" != 0 ];
+then
+	exit $status
+fi
+
+# coverage
+# disabled because travis go1.4 seems not to support it suddenly?
+#go tool cover -func=coverage.out + +popd diff --git a/vendor/github.com/Knetic/govaluate/tokenStream.go b/vendor/github.com/Knetic/govaluate/tokenStream.go new file mode 100644 index 000000000..d0029209d --- /dev/null +++ b/vendor/github.com/Knetic/govaluate/tokenStream.go @@ -0,0 +1,36 @@ +package govaluate + +type tokenStream struct { + tokens []ExpressionToken + index int + tokenLength int +} + +func newTokenStream(tokens []ExpressionToken) *tokenStream { + + var ret *tokenStream + + ret = new(tokenStream) + ret.tokens = tokens + ret.tokenLength = len(tokens) + return ret +} + +func (this *tokenStream) rewind() { + this.index -= 1 +} + +func (this *tokenStream) next() ExpressionToken { + + var token ExpressionToken + + token = this.tokens[this.index] + + this.index += 1 + return token +} + +func (this tokenStream) hasNext() bool { + + return this.index < this.tokenLength +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore b/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore new file mode 100644 index 000000000..ef8fdbd5f --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/.gitignore @@ -0,0 +1,28 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +coverage.html +coverage.txt + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# Intellij +*.iml +.idea/ + diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md b/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md new file mode 100644 index 000000000..2b5df641e --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/CHANGELOG.md @@ -0,0 +1,102 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [v0.5.0] 2024-04-21 + +### Changed + +- opentelemetry updated to 1.25.0 + +### Removed + +- Drop support for [Go 1.20](https://go.dev/doc/go1.20) + +## [v0.4.3] 2023-11-02 + +### Fixed + +- fix: race on batch processing (#30) + +## [v0.4.2] 2023-10-30 + +### Fixed + +- fix: accept any 2xx status code in otlplogshttp client (#26) +- fix: show the error body when status code is unknown (#27) +- fix: grpc rapid-reset vulnerability (#28) + +## [v0.4.1] 2023-10-13 + +### Fixed + +- autoconfiguration always emit error message on initialization (#23) +- fix variables and private methods names (#22) +- merge the logRecord resources with those provided by the logProvider (#21) + +## [v0.4.0] 2023-10-02 + +### Changed + +- opentelemetry updated to 1.19.0 +- drop compatibility guarantee of Go [1.19](https://go.dev/doc/go1.19) + +## [v0.3.0] 2023-09-13 + +### Changed + +- opentelemetry updated to 1.18.0 + +### Fixed + +- stdoutlogs writer parameter was ignored + +## [v0.2.0] 2023-08-30 + +### Changed + +- opentelemetry updated to 1.17.0 +- `github.com/golang/protobuf/proto` replaced with `google.golang.org/protobuf` +- `otlp/internal` package moved to `otlp/otlplogs/internal` +- more unit tests added + +## [v0.1.2] 2023-08-05 + +### Fixed + +- reverted to all-in-one package +- inconsistent v0.1.0 go package + +## [v0.1.0] 2023-08-05 + +### Added + +- otlplogsgrpc exporter with `grpc` protocol +- `http/json` protocol supported in otlplogshttp exporter +- `stdout` logs logger +- Package split into separate `otel`, `sdk`, `exporters/otlp/otlplogs` and `exporters/stdout/stdoutlogs` packages +- `OTEL_EXPORTER_OTLP_PROTOCOL` env variable to configure `grpc`, `http/protobuf` and `http/json` otlp formats with OTEL + logs exporter +- `autoconfigure` sdk package with `OTEL_LOGS_EXPORTER` env variable support with `none`,`otlp` and `logging` options to + autoconfigure logger provider + +## [v0.0.1] 2023-07-25 + +### Added + +- implementation of [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api) with Stable API and SDK + API interfaces. 
+- Package all-in-one for logs `github.com/agoda-com/opentelemetry-logs-go` +- Package module `semconv` + with [Logs Exceptions Semantic Conventions](https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#attributes) +- Package module `logs` + with [Stable Log Model](https://opentelemetry.io/docs/specs/otel/logs/data-model), [Logger](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#logger) + and [LoggerProvider](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#loggerprovider) interfaces +- Package module `sdk` with [Logger SDK](https://opentelemetry.io/docs/specs/otel/logs/sdk/) implementation +- Package module `exporters` + with [Built-in processors](https://opentelemetry.io/docs/specs/otel/logs/sdk/#built-in-processors), `otlp` interface + and `noop` and `http/protobuf` exporters diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS b/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS new file mode 100644 index 000000000..9f8212f99 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/CODEOWNERS @@ -0,0 +1,13 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @chameleon82 + +CODEOWNERS @chameleon82 diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE b/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile b/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile new file mode 100644 index 000000000..df59b790c --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/Makefile @@ -0,0 +1,9 @@ +.PHONY: test-coverage + +test-coverage: + go test -coverprofile=coverage.out ./... + go tool cover -html=coverage.out -o coverage.html + +.PHONY: test-race +test-race: + go test -race ./... 
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md b/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md
new file mode 100644
index 000000000..69ff62d10
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/README.md
@@ -0,0 +1,121 @@
+# OpenTelemetry-Logs-Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/agoda-com/opentelemetry-logs-go.svg)](https://pkg.go.dev/github.com/agoda-com/opentelemetry-logs-go)
+[![codecov](https://codecov.io/github/agoda-com/opentelemetry-logs-go/graph/badge.svg?token=F1NW0R0W75)](https://codecov.io/github/agoda-com/opentelemetry-logs-go)
+
+OpenTelemetry-Logs-Go is the [Go](https://golang.org) implementation of [OpenTelemetry](https://opentelemetry.io/) Logs.
+It provides an API to send logging data directly to observability platforms. It is an extension of the official
+[open-telemetry/opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) to support Logs.
+
+## Project Life Cycle
+
+This project was created due to the log module freeze in the
+official [opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go) repository:
+
+```
+The Logs signal development is halted for this project while we stablize the Metrics SDK.
+No Logs Pull Requests are currently being accepted.
+```
+
+This project will be deprecated once the official [opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go)
+repository's Logs module reaches "Stable" status.
+
+## Compatibility
+
+Minimal supported Go version: `1.21`
+
+## Project packages
+
+| Packages                         | Description                                                                |
+|----------------------------------|----------------------------------------------------------------------------|
+| [autoconfigure](./autoconfigure) | Autoconfiguration SDK. Allows configuring log exporters with env variables |
+| [sdk](./sdk)                     | OpenTelemetry Logs SDK                                                     |
+| [exporters/otlp](./exporters)    | OTLP format exporter                                                       |
+| [exporters/stdout](./exporters)  | Console exporter                                                           |
+
+## Getting Started
+
+This is an implementation of the [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/) and is not
+intended to be used by developers directly. It is provided for logging library authors to build log appenders, which use
+this API to bridge between existing logging libraries and the OpenTelemetry log data model.
+
+The example below shows how a logging library can be instrumented with the current API:
+
+```go
+package myInstrumentedLogger
+
+import (
+	otel "github.com/agoda-com/opentelemetry-logs-go"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+const (
+	instrumentationName    = "otel/zap"
+	instrumentationVersion = "0.0.1"
+)
+
+var (
+	logger = otel.GetLoggerProvider().Logger(
+		instrumentationName,
+		logs.WithInstrumentationVersion(instrumentationVersion),
+		logs.WithSchemaURL(semconv.SchemaURL),
+	)
+)
+
+func (c otlpCore) Write(ent zapcore.Entry, fields []zapcore.Field) error {
+
+	lrc := logs.LogRecordConfig{
+		Body: &ent.Message,
+		...
+ } + logRecord := logs.NewLogRecord(lrc) + logger.Emit(logRecord) +} +``` + +and application initialization code: + +```go +package main + +import ( + "os" + "context" + "github.com/agoda-com/opentelemetry-logs-go" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs" +) + +func newResource() *resource.Resource { + host, _ := os.Hostname() + return resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceName("otlplogs-example"), + semconv.ServiceVersion("0.0.1"), + semconv.HostName(host), + ) +} + +func main() { + ctx := context.Background() + + exporter, _ := otlplogs.NewExporter(ctx, otlplogs.WithClient(otlplogshttp.NewClient())) + loggerProvider := sdk.NewLoggerProvider( + sdk.WithBatcher(exporter), + sdk.WithResource(newResource()), + ) + otel.SetLoggerProvider(loggerProvider) + + myInstrumentedLogger.Info("Hello OpenTelemetry") +} +``` + +## References + +Logger Bridge API implementations for `zap`, `slog`, `zerolog` and other +loggers can be found in https://github.com/agoda-com/opentelemetry-go + diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go new file mode 100644 index 000000000..11886f1ed --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/clients.go @@ -0,0 +1,50 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlplogs + +import ( + "context" + logspb "go.opentelemetry.io/proto/otlp/logs/v1" +) + +type Client interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Start should establish connection(s) to endpoint(s). It is + // called just once by the exporter, so the implementation + // does not need to worry about idempotence and locking. + Start(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Stop should close the connections. The function is called + // only once by the exporter, so the implementation does not + // need to worry about idempotence, but it may be called + // concurrently with UploadLogs, so proper + // locking is required. The function serves as a + // synchronization point - after the function returns, the + // process of closing connections is assumed to be finished. + Stop(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // UploadLogs should transform the passed logs to the wire + // format and send it to the collector. May be called + // concurrently. 
+ UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go new file mode 100644 index 000000000..8b59295ad --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/exporter.go @@ -0,0 +1,112 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlplogs + +import ( + "context" + "errors" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform" + logssdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs" + "sync" +) + +var ( + errAlreadyStarted = errors.New("already started") +) + +type Exporter struct { + client Client + + mu sync.RWMutex + started bool + + startOnce sync.Once + stopOnce sync.Once +} + +// Start establishes a connection to the receiving endpoint. +func (e *Exporter) Start(ctx context.Context) error { + var err = errAlreadyStarted + e.startOnce.Do(func() { + e.mu.Lock() + e.started = true + e.mu.Unlock() + err = e.client.Start(ctx) + }) + + return err +} + +func (e *Exporter) Shutdown(ctx context.Context) error { + e.mu.RLock() + started := e.started + e.mu.RUnlock() + + if !started { + return nil + } + + var err error + + e.stopOnce.Do(func() { + err = e.client.Stop(ctx) + e.mu.Lock() + e.started = false + e.mu.Unlock() + }) + + return err +} + +// Export exports a batch of logs. +func (e *Exporter) Export(ctx context.Context, ll []logssdk.ReadableLogRecord) error { + protoLogs := logstransform.Logs(ll) + if len(protoLogs) == 0 { + return nil + } + + return e.client.UploadLogs(ctx, protoLogs) +} + +// New creates a new exporter with the given client. +// Deprecated: Use NewExporter instead. Will be removed in v0.1.0 +func New(ctx context.Context, client Client) (*Exporter, error) { + return NewExporter(ctx, WithClient(client)) +} + +// NewExporter creates a new Exporter. +func NewExporter(ctx context.Context, options ...ExporterOption) (*Exporter, error) { + // NewExporterConfig applies the options and falls back to a default + // client configured from environment variables when none is provided, + // so the options must not be applied a second time here. + config := NewExporterConfig(options...) + + exp := &Exporter{ + client: config.client, + } + + if err := exp.Start(ctx); err != nil { + return nil, err + } + return exp, nil +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go new file mode 100644 index 000000000..36009a6ba --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig/envconfig.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package envconfig + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/agoda-com/opentelemetry-logs-go/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. +func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. +func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return + } + fn(u) + } + } +} + +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. 
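+// For example (hypothetical path), with a Namespace of "OTEL_EXPORTER_OTLP", WithCertPool("CERTIFICATE", fn) reads OTEL_EXPORTER_OTLP_CERTIFICATE=/etc/ssl/ca.pem and passes the CertPool parsed from that PEM file to fn.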
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return + } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. +func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + n, v, found := strings.Cut(header, "=") + if !found { + global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } + name, err := url.QueryUnescape(n) + if err != nil { + global.Error(err, "escape header key", "key", n) + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.QueryUnescape(v) + if err != nil { + global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go new file mode 100644 index 000000000..04b78fcae --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/attribute.go @@ -0,0 +1,160 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logstransform + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. 
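+// For example, attribute.String("service.name", "checkout") is transformed into a commonpb.KeyValue whose Value is an AnyValue_StringValue (see Value below); the key and value here are arbitrary examples.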
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// Iterator transforms an attribute iterator into OTLP key-values. +func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// ResourceAttributes transforms a Resource OTLP key-values. +func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { + return Iterator(res.Iter()) +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. +func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue { + return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue. +func Value(v attribute.Value) *commonpb.AnyValue { + av := new(commonpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &commonpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &commonpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &commonpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go 
b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go new file mode 100644 index 000000000..8dc278f22 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform/logs.go @@ -0,0 +1,126 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logstransform + +import ( + sdk "github.com/agoda-com/opentelemetry-logs-go/sdk/logs" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" + logspb "go.opentelemetry.io/proto/otlp/logs/v1" + resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" + "time" +) + +// Logs transforms OpenTelemetry LogRecord's into a OTLP ResourceLogs +func Logs(sdl []sdk.ReadableLogRecord) []*logspb.ResourceLogs { + + var resourceLogs []*logspb.ResourceLogs + + for _, sd := range sdl { + + lr := logRecord(sd) + + var is *commonpb.InstrumentationScope + var schemaURL = "" + if sd.InstrumentationScope() != nil { + is = &commonpb.InstrumentationScope{ + Name: sd.InstrumentationScope().Name, + Version: sd.InstrumentationScope().Version, + } + schemaURL = sd.InstrumentationScope().SchemaURL + } + + // Create a log resource + resourceLog := &logspb.ResourceLogs{ + Resource: &resourcepb.Resource{ + Attributes: KeyValues(sd.Resource().Attributes()), + }, + // provide a resource description if available + ScopeLogs: []*logspb.ScopeLogs{ + { + Scope: is, + SchemaUrl: schemaURL, + LogRecords: []*logspb.LogRecord{lr}, + }, + }, + } + + resourceLogs = append(resourceLogs, resourceLog) + } + + return resourceLogs +} + +func logRecord(record sdk.ReadableLogRecord) *logspb.LogRecord { + var body *commonpb.AnyValue = nil + if record.Body() != nil { + body = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_StringValue{ + StringValue: *record.Body(), + }, + } + } + + var traceIDBytes []byte + if record.TraceId() != nil { + tid := *record.TraceId() + traceIDBytes = tid[:] + } + var spanIDBytes []byte + if record.SpanId() != nil { + sid := *record.SpanId() + spanIDBytes = sid[:] + } + var traceFlags byte = 0 + if record.TraceFlags() != nil { + tf := *record.TraceFlags() + traceFlags = byte(tf) + } + var ts time.Time + if record.Timestamp() != nil { + ts = *record.Timestamp() + } else { + ts = record.ObservedTimestamp() + } + + var kv []*commonpb.KeyValue + if record.Attributes() != nil { + kv = KeyValues(*record.Attributes()) + } + + var st = "" + if record.SeverityText() != nil { + st = *record.SeverityText() + } + + var sn = logspb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED + if record.SeverityNumber() != nil { + sn = logspb.SeverityNumber(*record.SeverityNumber()) + } + + logRecord := &logspb.LogRecord{ + TimeUnixNano: uint64(ts.UnixNano()), + ObservedTimeUnixNano: uint64(record.ObservedTimestamp().UnixNano()), + TraceId: traceIDBytes, // provide the associated trace ID if available + SpanId: spanIDBytes, // provide the associated span ID if available + Flags: uint32(traceFlags), // provide the associated trace flags + Body: body, // provide the associated 
log body if available + Attributes: kv, // provide additional log attributes if available + SeverityText: st, + SeverityNumber: sn, + } + return logRecord +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go new file mode 100644 index 000000000..0827da0c6 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/envconfig.go @@ -0,0 +1,192 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlpconfig + +import ( + "crypto/tls" + "crypto/x509" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig" + "net/url" + "os" + "path" + "strings" + "time" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +func stringToProtocol(u string) Protocol { + switch strings.ToLower(u) { + case string(ExporterProtocolGrpc): + return ExporterProtocolGrpc + case string(ExporterProtocolHttpProtobuf): + return ExporterProtocolHttpProtobuf + case string(ExporterProtocolHttpJson): + return ExporterProtocolHttpJson + default: + return ExporterProtocolHttpProtobuf + } +} + +// ApplyEnvProtocol Apply Protocol from environment to provided default value +// This function is subject to change or removal in future versions. +func ApplyEnvProtocol(protocol Protocol) Protocol { + DefaultEnvOptionsReader.Apply( + envconfig.WithString("PROTOCOL", func(s string) { + protocol = stringToProtocol(s) + }), + envconfig.WithString("LOGS_PROTOCOL", func(s string) { + protocol = stringToProtocol(s) + }), + ) + + return protocol +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Logs.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. 
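+				// For example, OTEL_EXPORTER_OTLP_ENDPOINT=https://collector:4318 ("collector" is a placeholder host) results in logs being sent to https://collector:4318/v1/logs (DefaultLogsPath).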
+ cfg.Logs.URLPath = path.Join(u.Path, DefaultLogsPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("LOGS_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Logs.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. + path := u.Path + if path == "" { + path = "/" + } + cfg.Logs.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithString("PROTOCOL", func(s string) { + opts = append(opts, withProtocol(s)) + }), + envconfig.WithString("LOGS_PROTOCOL", func(s string) { + opts = append(opts, withProtocol(s)) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("LOGS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("LOGS_CLIENT_CERTIFICATE", "LOGS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("LOGS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("LOGS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("LOGS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("LOGS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) + + return opts +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Logs.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
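+// For example, OTEL_EXPORTER_OTLP_COMPRESSION=gzip selects GzipCompression; any other value falls back to NoCompression.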
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} + +func withProtocol(b string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Protocol = stringToProtocol(b) + return cfg + }) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go new file mode 100644 index 000000000..83112bba7 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/options.go @@ -0,0 +1,348 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlpconfig + +import ( + "crypto/tls" + "fmt" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry" + "go.opentelemetry.io/otel" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + "path" + "strings" + "time" +) + +const ( + // DefaultLogsPath is a default URL path for endpoint that + // receives logs. + DefaultLogsPath string = "/v1/logs" + // DefaultTimeout is a default max waiting time for the backend to process + // each logs batch. + DefaultTimeout time.Duration = 10 * time.Second +) + +type ( + SignalConfig struct { + Endpoint string + Protocol Protocol + Insecure bool + TLSCfg *tls.Config + Headers map[string]string + Compression Compression + Timeout time.Duration + URLPath string + + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + } + + Config struct { + // Signal specific configurations + Logs SignalConfig + + RetryConfig retry.Config + + // gRPC configurations + ReconnectionPeriod time.Duration + ServiceConfig string + DialOptions []grpc.DialOption + GRPCConn *grpc.ClientConn + } +) + +// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead. +func CleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." 
{ + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} + +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. +func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ + Logs: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), + URLPath: DefaultLogsPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + } + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Logs.URLPath = CleanPath(cfg.Logs.URLPath, DefaultLogsPath) + return cfg +} + +func GetUserAgentHeader() string { + return "OTel OTLP Exporter Go/" + otel.Version() +} + +// cleanPath returns a path with all spaces trimmed and all redundancies +// removed. If urlPath is empty or cleaning it results in an empty string, +// defaultPath is returned instead. +func cleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." { + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} + +// NewGRPCConfig returns a new Config with all settings applied from opts and +// any unset setting using the default gRPC config values. +func NewGRPCConfig(opts ...GRPCOption) Config { + cfg := Config{ + Logs: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), + URLPath: DefaultLogsPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + DialOptions: []grpc.DialOption{grpc.WithUserAgent(GetUserAgentHeader())}, + } + cfg = ApplyGRPCEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + + if cfg.ServiceConfig != "" { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) + } + // Priroritize GRPCCredentials over Insecure (passing both is an error). + if cfg.Logs.GRPCCredentials != nil { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Logs.GRPCCredentials)) + } else if cfg.Logs.Insecure { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + // Default to using the host's root CA. + creds := credentials.NewTLS(nil) + cfg.Logs.GRPCCredentials = creds + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) + } + if cfg.Logs.Compression == GzipCompression { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) + } + if len(cfg.DialOptions) != 0 { + cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...) + } + if cfg.ReconnectionPeriod != 0 { + p := grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: cfg.ReconnectionPeriod, + } + cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) + } + + return cfg +} + +type ( + // GenericOption applies an option to the HTTP or gRPC driver. + GenericOption interface { + ApplyHTTPOption(Config) Config + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // HTTPOption applies an option to the HTTP driver. 
+ HTTPOption interface { + ApplyHTTPOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // GRPCOption applies an option to the gRPC driver. + GRPCOption interface { + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } +) + +// genericOption is an option that applies the same logic +// for both gRPC and HTTP. +type genericOption struct { + fn func(Config) Config +} + +func (g *genericOption) ApplyGRPCOption(cfg Config) Config { + return g.fn(cfg) +} + +func (g *genericOption) ApplyHTTPOption(cfg Config) Config { + return g.fn(cfg) +} + +func (genericOption) private() {} + +func newGenericOption(fn func(cfg Config) Config) GenericOption { + return &genericOption{fn: fn} +} + +// splitOption is an option that applies different logics +// for gRPC and HTTP. +type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. 
+type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Endpoint = endpoint + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Logs.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Logs.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Timeout = duration + return cfg + }) +} + +func WithProtocol(protocol Protocol) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Logs.Protocol = protocol + return cfg + }) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go new file mode 100644 index 000000000..d361b0383 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/optiontypes.go @@ -0,0 +1,58 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlpconfig + +const ( + // DefaultCollectorGRPCPort is the default gRPC port of the collector. + DefaultCollectorGRPCPort uint16 = 4317 + // DefaultCollectorHTTPPort is the default HTTP port of the collector. + DefaultCollectorHTTPPort uint16 = 4318 + // DefaultCollectorHost is the host address the Exporter will attempt + // connect to if no collector address is provided. 
+ DefaultCollectorHost string = "localhost" +) + +type Protocol string + +const ( + ExporterProtocolGrpc Protocol = "grpc" + ExporterProtocolHttpProtobuf Protocol = "http/protobuf" + ExporterProtocolHttpJson Protocol = "http/json" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression int + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression Compression = iota + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression +) + +// Marshaler describes the kind of message format sent to the collector. +type Marshaler int + +const ( + // MarshalProto tells the driver to send using the protobuf binary format. + MarshalProto Marshaler = iota + // MarshalJSON tells the driver to send using json format. + MarshalJSON +) diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go new file mode 100644 index 000000000..88cf551ed --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig/tls.go @@ -0,0 +1,20 @@ +package otlpconfig + +import ( + "crypto/tls" + "crypto/x509" + "errors" +) + +// CreateTLSConfig creates a tls.Config from a raw certificate bytes +// to verify a server certificate. +func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + + return &tls.Config{ + RootCAs: cp, + }, nil +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go new file mode 100644 index 000000000..39a421963 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/partialsuccess.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "fmt" + +// PartialSuccess represents the underlying error for all handling +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +var _ error = PartialSuccess{} + +// LogRecordPartialSuccessError returns an error describing a partial success +// response for the log signal. 
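+// For example, LogRecordPartialSuccessError(5, "") produces an error whose message reads "OTLP partial success: empty message (5 logs rejected)".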
+func LogRecordPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "logs", + } +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go new file mode 100644 index 000000000..11c76e53c --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry/retry.go @@ -0,0 +1,152 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig are the recommended defaults to use. +var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to not retry sending batches in case of + // export failure. + Enabled bool + // InitialInterval the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`. + MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. +type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). 
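+		// With DefaultConfig, for example, a failed export is retried roughly 5s after the first failure, with the delay growing exponentially up to 30s per attempt, until at most one minute has elapsed in total.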
+ b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go new file mode 100644 index 000000000..1ce4df527 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/options.go @@ -0,0 +1,67 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package otlplogs + +import ( + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp" +) + +type ExporterConfig struct { + client Client +} + +type ExporterOption interface { + apply(ExporterConfig) ExporterConfig +} + +type exporterOptionFunc func(ExporterConfig) ExporterConfig + +func (fn exporterOptionFunc) apply(config ExporterConfig) ExporterConfig { + return fn(config) +} + +// NewExporterConfig creates new configuration for exporter +func NewExporterConfig(options ...ExporterOption) ExporterConfig { + + config := ExporterConfig{} + + for _, option := range options { + config = option.apply(config) + } + + if config.client == nil { + // Default is http/protobuf client + protocol := otlpconfig.ApplyEnvProtocol(otlpconfig.ExporterProtocolHttpProtobuf) + + if protocol == otlpconfig.ExporterProtocolGrpc { + config.client = otlplogsgrpc.NewClient() + } else { + config.client = otlplogshttp.NewClient() + } + } + + return config +} + +func WithClient(client Client) ExporterOption { + return exporterOptionFunc(func(cfg ExporterConfig) ExporterConfig { + cfg.client = client + return cfg + }) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go new file mode 100644 index 000000000..bd4b19cd8 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/client.go @@ -0,0 +1,292 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlplogsgrpc + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry" + + "go.opentelemetry.io/otel" + collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logspb "go.opentelemetry.io/proto/otlp/logs/v1" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +type grpcClient struct { + endpoint string + dialOpts []grpc.DialOption + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // stopCtx is used as a parent context for all exports. Therefore, when it + // is canceled with the stopFunc all exports are canceled. + stopCtx context.Context + // stopFunc cancels stopCtx, stopping any active exports. + stopFunc context.CancelFunc + + // ourConn keeps track of where conn was created: true if created here on + // Start, or false if passed with an option. 
This is important on Shutdown + // as the conn should only be closed if created here on start. Otherwise, + // it is up to the processes that passed the conn to close it. + ourConn bool + conn *grpc.ClientConn + tscMu sync.RWMutex + tsc collogspb.LogsServiceClient +} + +func NewClient(opts ...Option) *grpcClient { + cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) + + ctx, cancel := context.WithCancel(context.Background()) + + c := &grpcClient{ + endpoint: cfg.Logs.Endpoint, + exportTimeout: cfg.Logs.Timeout, + requestFunc: cfg.RetryConfig.RequestFunc(retryable), + dialOpts: cfg.DialOptions, + stopCtx: ctx, + stopFunc: cancel, + conn: cfg.GRPCConn, + } + + if len(cfg.Logs.Headers) > 0 { + c.metadata = metadata.New(cfg.Logs.Headers) + } + + return c +} + +// Start establishes a gRPC connection to the collector. +func (c *grpcClient) Start(ctx context.Context) error { + if c.conn == nil { + // If the caller did not provide a ClientConn when the grpcClient was + // created, create one using the configuration they did provide. + conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...) + if err != nil { + return err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn + } + + // The otlplogs.Client interface states this method is called just once, + // so no need to check if already started. + c.tscMu.Lock() + c.tsc = collogspb.NewLogsServiceClient(c.conn) + c.tscMu.Unlock() + + return nil +} + +var errAlreadyStopped = errors.New("the grpcClient is already stopped") + +// Stop shuts down the grpcClient. +// +// Any active connections to a remote endpoint are closed if they were created +// by the grpcClient. Any gRPC connection passed during creation using +// WithGRPCConn will not be closed. It is the caller's responsibility to +// handle cleanup of that resource. +// +// This method synchronizes with the UploadLogs method of the grpcClient. It +// will wait for any active calls to that method to complete unimpeded, or it +// will cancel any active calls if ctx expires. If ctx expires, the context +// error will be forwarded as the returned error. All grpcClient held resources +// will still be released in this situation. +// +// If the grpcClient has already stopped, an error will be returned describing +// this. +func (c *grpcClient) Stop(ctx context.Context) error { + // Make sure to return context error if the context is done when calling this method. + err := ctx.Err() + + // Acquire the c.tscMu lock within the ctx lifetime. + acquired := make(chan struct{}) + go func() { + c.tscMu.Lock() + close(acquired) + }() + + select { + case <-ctx.Done(): + // The Stop timeout is reached. Kill any remaining exports to force + // the clear of the lock and save the timeout error to return and + // signal the shutdown timed out before cleanly stopping. + c.stopFunc() + err = ctx.Err() + + // To ensure the grpcClient is not left in a dirty state c.tsc needs to be + // set to nil. To avoid the race condition when doing this, ensure + // that all the exports are killed (initiated by c.stopFunc). + <-acquired + case <-acquired: + } + // Hold the tscMu lock for the rest of the function to ensure no new + // exports are started. + defer c.tscMu.Unlock() + + // The otlplogs.Client interface states this method is called only + // once, but there is no guarantee it is called after Start. Ensure the + // grpcClient is started before doing anything and let the called know if they + // made a mistake. 
+ if c.tsc == nil { + return errAlreadyStopped + } + + // Clear c.tsc to signal the grpcClient is stopped. + c.tsc = nil + + if c.ourConn { + closeErr := c.conn.Close() + // A context timeout error takes precedence over this error. + if err == nil && closeErr != nil { + err = closeErr + } + } + return err +} + +var errShutdown = errors.New("the grpcClient is shutdown") + +// UploadLogs sends log records. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the grpcClient was created with. +func (c *grpcClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error { + // Hold a read lock to ensure a shut down initiated after this starts does + // not abandon the export. This read lock acquire has less priority than a + // write lock acquire (i.e. Stop), meaning if the grpcClient is shutting down + // this will come after the shut down. + c.tscMu.RLock() + defer c.tscMu.RUnlock() + + if c.tsc == nil { + return errShutdown + } + + ctx, cancel := c.exportContext(ctx) + defer cancel() + + return c.requestFunc(ctx, func(iCtx context.Context) error { + resp, err := c.tsc.Export(iCtx, &collogspb.ExportLogsServiceRequest{ + ResourceLogs: protoLogs, + }) + if resp != nil && resp.PartialSuccess != nil { + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedLogRecords() + if n != 0 || msg != "" { + err := internal.LogRecordPartialSuccessError(n, msg) + otel.Handle(err) + } + } + // nil is converted to OK. + if status.Code(err) == codes.OK { + // Success. + return nil + } + return err + }) +} + +// exportContext returns a copy of parent with an appropriate deadline and +// cancellation function. +// +// It is the callers responsibility to cancel the returned context once its +// use is complete, via the parent or directly with the returned CancelFunc, to +// ensure all resources are correctly released. +func (c *grpcClient) exportContext(parent context.Context) (context.Context, context.CancelFunc) { + var ( + ctx context.Context + cancel context.CancelFunc + ) + + if c.exportTimeout > 0 { + ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + } else { + ctx, cancel = context.WithCancel(parent) + } + + if c.metadata.Len() > 0 { + ctx = metadata.NewOutgoingContext(ctx, c.metadata) + } + + // Unify the grpcClient stopCtx with the parent. + go func() { + select { + case <-ctx.Done(): + case <-c.stopCtx.Done(): + // Cancel the export as the shutdown has timed out. + cancel() + } + }() + + return ctx, cancel +} + +// retryable returns if err identifies a request that can be retried and a +// duration to wait for if an explicit throttle time is included in err. +func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + switch s.Code() { + case codes.Canceled, + codes.DeadlineExceeded, + codes.ResourceExhausted, + codes.Aborted, + codes.OutOfRange, + codes.Unavailable, + codes.DataLoss: + return true, throttleDelay(s) + } + + // Not a retry-able error. + return false, 0 +} + +// throttleDelay returns a duration to wait for if an explicit throttle time +// is included in the response status. +func throttleDelay(s *status.Status) time.Duration { + for _, detail := range s.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return t.RetryDelay.AsDuration() + } + } + return 0 +} + +// MarshalLog is the marshaling function used by the logging system to represent this Client. 
+func (c *grpcClient) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + }{ + Type: "otlphttpgrpc", + Endpoint: c.endpoint, + } +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go new file mode 100644 index 000000000..a049ad1f5 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc/options.go @@ -0,0 +1,191 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlplogsgrpc + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry" + "go.opentelemetry.io/otel" +) + +// Option applies an option to the gRPC driver. +type Option interface { + applyGRPCOption(otlpconfig.Config) otlpconfig.Config +} + +func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { + converted := make([]otlpconfig.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying export of logs batches that +// failed to be received by the target endpoint. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + otlpconfig.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables grpcClient transport security for the exporter's gRPC +// connection just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by +// default, grpcClient security is required unless WithInsecure is used. +// +// This option has no effect if WithGRPCConn is used. +func WithInsecure() Option { + return wrappedOption{otlpconfig.WithInsecure()} +} + +// WithEndpoint sets the target endpoint the exporter will connect to. If +// unset, localhost:4317 will be used as a default. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{otlpconfig.WithEndpoint(endpoint)} +} + +// WithReconnectionPeriod set the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. 
+func WithReconnectionPeriod(rp time.Duration) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ReconnectionPeriod = rp + return cfg + })} +} + +func compressorToCompression(compressor string) otlpconfig.Compression { + if compressor == "gzip" { + return otlpconfig.GzipCompression + } + + otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) + return otlpconfig.NoCompression +} + +// WithCompressor sets the compressor for the gRPC grpcClient to use when sending +// requests. It is the responsibility of the caller to ensure that the +// compressor set has been registered with google.golang.org/grpc/encoding. +// This can be done by encoding.RegisterCompressor. Some compressors +// auto-register on import, such as gzip, which can be registered by calling +// `import _ "google.golang.org/grpc/encoding/gzip"`. +// +// This option has no effect if WithGRPCConn is used. +func WithCompressor(compressor string) Option { + return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} +} + +// WithHeaders will send the provided headers with each gRPC requests. +func WithHeaders(headers map[string]string) Option { + return wrappedOption{otlpconfig.WithHeaders(headers)} +} + +// WithTLSCredentials allows the connection to use TLS credentials when +// talking to the server. It takes in grpc.TransportCredentials instead of say +// a Certificate file or a tls.Certificate, because the retrieving of these +// credentials can be done in many ways e.g. plain file, in code tls.Config or +// by certificate rotation, so it is up to the caller to decide what to use. +// +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(creds credentials.TransportCredentials) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.Logs.GRPCCredentials = creds + return cfg + })} +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ServiceConfig = serviceConfig + return cfg + })} +} + +// WithDialOption sets explicit grpc.DialOptions to use when making a +// connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.DialOptions = opts + return cfg + })} +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the callers responsibility to close the passed conn. The grpcClient +// Shutdown method will not close this connection. 
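+//
+// A typical configuration combining these options might look as follows
+// (an illustrative sketch; it assumes this package's NewClient constructor
+// and the parent otlplogs package's NewExporter/WithClient helpers):
+//
+//	client := NewClient(
+//		WithEndpoint("collector:4317"),
+//		WithTLSCredentials(creds),
+//		WithRetry(RetryConfig{Enabled: true}),
+//	)
+//	exporter, err := otlplogs.NewExporter(ctx, otlplogs.WithClient(client))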
+func WithGRPCConn(conn *grpc.ClientConn) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.GRPCConn = conn + return cfg + })} +} + +// WithTimeout sets the max amount of time a grpcClient will attempt to export a +// batch of logs. This takes precedence over any retry settings defined with +// WithRetry, once this time limit has been reached the export is abandoned +// and the batch of logs is dropped. +// +// If unset, the default timeout will be set to 10 seconds. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{otlpconfig.WithTimeout(duration)} +} + +// WithRetry sets the retry policy for transient retryable errors that may be +// returned by the target endpoint when exporting a batch of logs. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response. That time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. +func WithRetry(settings RetryConfig) Option { + return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go new file mode 100644 index 000000000..c25d0a051 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/client.go @@ -0,0 +1,371 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlplogshttp + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry" + "go.opentelemetry.io/otel" + collogspb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logspb "go.opentelemetry.io/proto/otlp/logs/v1" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "io" + "net" + "net/http" + "net/url" + "strconv" + "sync" + "time" +) + +const contentTypeProto = "application/x-protobuf" +const contentTypeJson = "application/json" + +var gzPool = sync.Pool{ + New: func() interface{} { + w := gzip.NewWriter(io.Discard) + return w + }, +} + +// Keep it in sync with golang's DefaultTransport from net/http! We +// have our own copy to avoid handling a situation where the +// DefaultTransport is overwritten with some different implementation +// of http.RoundTripper or it's modified by other package. 
+var ourTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, +} + +type httpClient struct { + name string + cfg otlpconfig.SignalConfig + generalCfg otlpconfig.Config + requestFunc retry.RequestFunc + client *http.Client + stopCh chan struct{} + stopOnce sync.Once +} + +// NewClient creates a new HTTP logs httpClient. +func NewClient(opts ...Option) *httpClient { + + cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) + + // Fix Protocol to Default if incorrect one was provided + if cfg.Logs.Protocol != otlpconfig.ExporterProtocolHttpJson && cfg.Logs.Protocol != otlpconfig.ExporterProtocolHttpProtobuf { + cfg.Logs.Protocol = otlpconfig.ExporterProtocolHttpProtobuf + } + + client := &http.Client{ + Transport: ourTransport, + Timeout: cfg.Logs.Timeout, + } + if cfg.Logs.TLSCfg != nil { + transport := ourTransport.Clone() + transport.TLSClientConfig = cfg.Logs.TLSCfg + client.Transport = transport + } + + stopCh := make(chan struct{}) + return &httpClient{ + name: "logs", + cfg: cfg.Logs, + generalCfg: cfg, + requestFunc: cfg.RetryConfig.RequestFunc(evaluate), + stopCh: stopCh, + client: client, + } +} + +// Start does nothing in a HTTP httpClient. +func (d *httpClient) Start(ctx context.Context) error { + // nothing to do + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil +} + +// Stop shuts down the httpClient and interrupt any in-flight request. +func (d *httpClient) Stop(ctx context.Context) error { + d.stopOnce.Do(func() { + close(d.stopCh) + }) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil +} + +// retryableError represents a request failure that can be retried. +type retryableError struct { + throttle int64 +} + +// evaluate returns if err is retry-able. If it is and it includes an explicit +// throttling delay, that delay is also returned. +func evaluate(err error) (bool, time.Duration) { + if err == nil { + return false, 0 + } + + rErr, ok := err.(retryableError) + if !ok { + return false, 0 + } + + return true, time.Duration(rErr.throttle) +} + +func (d *httpClient) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) { + // Unify the parent context Done signal with the httpClient's stop + // channel. + ctx, cancel := context.WithCancel(ctx) + go func(ctx context.Context, cancel context.CancelFunc) { + select { + case <-ctx.Done(): + // Nothing to do, either cancelled or deadline + // happened. 
+ case <-d.stopCh: + cancel() + } + }(ctx, cancel) + return ctx, cancel +} + +func (d *httpClient) newRequest(body []byte) (request, error) { + u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} + r, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return request{Request: r}, err + } + + r.Header.Set("User-Agent", otlpconfig.GetUserAgentHeader()) + + for k, v := range d.cfg.Headers { + r.Header.Set(k, v) + } + switch d.cfg.Protocol { + case otlpconfig.ExporterProtocolHttpJson: + r.Header.Set("Content-Type", contentTypeJson) + default: + r.Header.Set("Content-Type", contentTypeProto) + } + + req := request{Request: r} + switch Compression(d.cfg.Compression) { + case NoCompression: + r.ContentLength = (int64)(len(body)) + req.bodyReader = bodyReader(body) + case GzipCompression: + // Ensure the content length is not used. + r.ContentLength = -1 + r.Header.Set("Content-Encoding", "gzip") + + gz := gzPool.Get().(*gzip.Writer) + defer gzPool.Put(gz) + + var b bytes.Buffer + gz.Reset(&b) + + if _, err := gz.Write(body); err != nil { + return req, err + } + // Close needs to be called to ensure body if fully written. + if err := gz.Close(); err != nil { + return req, err + } + + req.bodyReader = bodyReader(b.Bytes()) + } + + return req, nil +} + +// bodyReader returns a closure returning a new reader for buf. +func bodyReader(buf []byte) func() io.ReadCloser { + return func() io.ReadCloser { + return io.NopCloser(bytes.NewReader(buf)) + } +} + +func (d *httpClient) getScheme() string { + if d.cfg.Insecure { + return "http" + } + return "https" +} + +// request wraps an http.Request with a resettable body reader. +type request struct { + *http.Request + + // bodyReader allows the same body to be used for multiple requests. + bodyReader func() io.ReadCloser +} + +// reset reinitializes the request Body and uses ctx for the request. +func (r *request) reset(ctx context.Context) { + r.Body = r.bodyReader() + r.Request = r.Request.WithContext(ctx) +} + +// newResponseError returns a retryableError and will extract any explicit +// throttle delay contained in headers. 
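+// For example, a 429 or 503 response carrying the header "Retry-After: 30"
+// yields a retryableError whose throttle field is 30, which evaluate (above)
+// then surfaces alongside the retryable verdict (a brief sketch):
+//
+//	h := http.Header{}
+//	h.Set("Retry-After", "30")
+//	err := newResponseError(h)
+//	ok, _ := evaluate(err) // ok == true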
+func newResponseError(header http.Header) error {
+	var rErr retryableError
+	if s, ok := header["Retry-After"]; ok {
+		if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
+			rErr.throttle = t
+		}
+	}
+	return rErr
+}
+
+func (e retryableError) Error() string {
+	return "retry-able request failure"
+}
+
+func (d *httpClient) UploadLogs(ctx context.Context, protoLogs []*logspb.ResourceLogs) error {
+
+	// Build the OTLP export request for the log records.
+	exportLogs := &collogspb.ExportLogsServiceRequest{
+		ResourceLogs: protoLogs,
+	}
+
+	// Serialize the OTLP logs payload.
+	var rawRequest []byte
+	var err error
+	switch d.cfg.Protocol {
+	case otlpconfig.ExporterProtocolHttpJson:
+		rawRequest, err = protojson.MarshalOptions{
+			UseProtoNames: false,
+		}.Marshal(exportLogs)
+	default:
+		rawRequest, err = proto.Marshal(exportLogs)
+	}
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := d.contextWithStop(ctx)
+	defer cancel()
+
+	request, err := d.newRequest(rawRequest)
+	if err != nil {
+		return err
+	}
+
+	return d.requestFunc(ctx, func(ctx context.Context) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		request.reset(ctx)
+		resp, err := d.client.Do(request.Request)
+		if err != nil {
+			return err
+		}
+
+		if resp != nil && resp.Body != nil {
+			defer func() {
+				if err := resp.Body.Close(); err != nil {
+					otel.Handle(err)
+				}
+			}()
+		}
+
+		switch sc := resp.StatusCode; {
+		case sc >= 200 && sc <= 299:
+			// Success, do not retry.
+			// Read the partial success message, if any.
+			var respData bytes.Buffer
+			if _, err := io.Copy(&respData, resp.Body); err != nil {
+				return err
+			}
+
+			if respData.Len() != 0 {
+				var respProto collogspb.ExportLogsServiceResponse
+				switch d.cfg.Protocol {
+				case otlpconfig.ExporterProtocolHttpJson:
+					if err := protojson.Unmarshal(respData.Bytes(), &respProto); err != nil {
+						return err
+					}
+				default:
+					if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
+						return err
+					}
+				}
+
+				// TODO: partialsuccess can't be handled properly by OTEL as current otlp.internal.PartialSuccess is custom
+				// need to have that interface in official OTEL otlp.internal package
+				if respProto.PartialSuccess != nil {
+					msg := respProto.PartialSuccess.GetErrorMessage()
+					n := respProto.PartialSuccess.GetRejectedLogRecords()
+					if n != 0 || msg != "" {
+						err := internal.LogRecordPartialSuccessError(n, msg)
+						otel.Handle(err)
+					}
+				}
+			}
+			return nil
+		case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+			// Retry-able failures. Drain the body to reuse the connection.
+			if _, err := io.Copy(io.Discard, resp.Body); err != nil {
+				otel.Handle(err)
+			}
+			return newResponseError(resp.Header)
+		default:
+			// Read at most 4 KiB of the body for the error message, and use
+			// only the bytes actually read (len(buffer) is always 4096).
+			buffer := make([]byte, 4096)
+			n, _ := resp.Body.Read(buffer)
+			if n == 0 {
+				return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status)
+			}
+			return fmt.Errorf("failed to send to %s: %s\n%s", request.URL, resp.Status, buffer[:n])
+		}
+	})
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (d *httpClient) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + Insecure bool + }{ + Type: string(d.cfg.Protocol), + Endpoint: d.cfg.Endpoint, + Insecure: d.cfg.Insecure, + } +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go new file mode 100644 index 000000000..7d0e4da00 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp/options.go @@ -0,0 +1,127 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otlplogshttp + +import ( + "crypto/tls" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry" + "time" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression otlpconfig.Compression + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression = Compression(otlpconfig.NoCompression) + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression = Compression(otlpconfig.GzipCompression) +) + +// Option applies an option to the HTTP httpClient. +type Option interface { + applyHTTPOption(otlpconfig.Config) otlpconfig.Config +} + +func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption { + converted := make([]otlpconfig.HTTPOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption) + } + return converted +} + +// RetryConfig defines configuration for retrying batches in case of export +// failure using an exponential backoff. +type RetryConfig retry.Config + +type wrappedOption struct { + otlpconfig.HTTPOption +} + +func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config { + return w.ApplyHTTPOption(cfg) +} + +// WithEndpoint allows one to set the address of the collector +// endpoint that the driver will use to send logs. If +// unset, it will instead try to use +// the default endpoint (localhost:4318). Note that the endpoint +// must not contain any URL path. +func WithEndpoint(endpoint string) Option { + return wrappedOption{otlpconfig.WithEndpoint(endpoint)} +} + +// WithJsonProtocol will apply http/json protocol to Http client +func WithJsonProtocol() Option { + return wrappedOption{otlpconfig.WithProtocol(otlpconfig.ExporterProtocolHttpJson)} +} + +// WithProtobufProtocol will apply http/protobuf protocol to Http client +func WithProtobufProtocol() Option { + return wrappedOption{otlpconfig.WithProtocol(otlpconfig.ExporterProtocolHttpProtobuf)} +} + +// WithCompression tells the driver to compress the sent data. 
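+//
+// Putting the HTTP options together (an illustrative sketch; NewClient is
+// defined in this package's client.go, and pairing the client with an
+// exporter via otlplogs.NewExporter/WithClient is assumed from the parent
+// package):
+//
+//	client := NewClient(
+//		WithEndpoint("collector:4318"),
+//		WithURLPath("/v1/logs"),
+//		WithProtobufProtocol(),
+//		WithCompression(GzipCompression),
+//	)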
+func WithCompression(compression Compression) Option { + return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))} +} + +// WithURLPath allows one to override the default URL path used +// for sending logs. If unset, default ("/v1/logs") will be used. +func WithURLPath(urlPath string) Option { + return wrappedOption{otlpconfig.WithURLPath(urlPath)} +} + +// WithTLSClientConfig can be used to set up a custom TLS +// configuration for the httpClient used to send payloads to the +// collector. Use it if you want to use a custom certificate. +func WithTLSClientConfig(tlsCfg *tls.Config) Option { + return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)} +} + +// WithInsecure tells the driver to connect to the collector using the +// HTTP scheme, instead of HTTPS. +func WithInsecure() Option { + return wrappedOption{otlpconfig.WithInsecure()} +} + +// WithHeaders allows one to tell the driver to send additional HTTP +// headers with the payloads. Specifying headers like Content-Length, +// Content-Encoding and Content-Type may result in a broken driver. +func WithHeaders(headers map[string]string) Option { + return wrappedOption{otlpconfig.WithHeaders(headers)} +} + +// WithTimeout tells the driver the max waiting time for the backend to process +// each logs batch. If unset, the default will be 10 seconds. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{otlpconfig.WithTimeout(duration)} +} + +// WithRetry configures the retry policy for transient errors that may occurs +// when exporting logs. An exponential back-off algorithm is used to ensure +// endpoints are not overwhelmed with retries. If unset, the default retry +// policy will retry after 5 seconds and increase exponentially after each +// error for a total of 1 minute. +func WithRetry(rc RetryConfig) Option { + return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))} +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go new file mode 100644 index 000000000..801f40f20 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/internal_logging.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // Package global import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + "unsafe" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} + +// SetLogger overrides the globalLogger with l. 
+// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + getLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + getLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + getLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + getLogger().V(1).Info(msg, keysAndValues...) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go new file mode 100644 index 000000000..ab78fd29b --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/logs.go @@ -0,0 +1,128 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package global + +import ( + "github.com/agoda-com/opentelemetry-logs-go/logs" + "sync" + "sync/atomic" +) + +// loggerProvider is a placeholder for a configured SDK LoggerProvider. +// +// All LoggerProvider functionality is forwarded to a delegate once +// configured. +type loggerProvider struct { + mtx sync.Mutex + loggers map[il]*logger + delegate logs.LoggerProvider +} + +// Compile-time guarantee that loggerProvider implements the LoggerProvider +// interface. +var _ logs.LoggerProvider = &loggerProvider{} + +// setDelegate configures p to delegate all LoggerProvider functionality to +// provider. +// +// All Loggers provided prior to this function call are switched out to be +// Loggers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *loggerProvider) setDelegate(provider logs.LoggerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.loggers) == 0 { + return + } + + for _, t := range p.loggers { + t.setDelegate(provider) + } + + p.loggers = nil +} + +// Logger implements LoggerProvider. +func (p *loggerProvider) Logger(name string, opts ...logs.LoggerOption) logs.Logger { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Logger(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the logger in the loggers map. 
+ + c := logs.NewLoggerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.loggers == nil { + p.loggers = make(map[il]*logger) + } + + if val, ok := p.loggers[key]; ok { + return val + } + + t := &logger{name: name, opts: opts, provider: p} + p.loggers[key] = t + return t +} + +type il struct { + name string + version string +} + +// logger is a placeholder for a logs.Logger. +// +// All Logger functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopLogger. +type logger struct { + name string + opts []logs.LoggerOption + provider *loggerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that logger implements the logs.Logger interface. +var _ logs.Logger = &logger{} + +func (t *logger) Emit(logRecord logs.LogRecord) { + delegate := t.delegate.Load() + if delegate != nil { + delegate.(logs.Logger).Emit(logRecord) + } +} + +// setDelegate configures t to delegate all Logger functionality to Loggers +// created by provider. +// +// All subsequent calls to the Logger methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *logger) setDelegate(provider logs.LoggerProvider) { + t.delegate.Store(provider.Logger(t.name, t.opts...)) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go new file mode 100644 index 000000000..53219030f --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/internal/global/state.go @@ -0,0 +1,66 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package global + +import ( + "errors" + "github.com/agoda-com/opentelemetry-logs-go/logs" + "sync" + "sync/atomic" +) + +type ( + loggerProviderHolder struct { + lp logs.LoggerProvider + } +) + +var ( + globalOtelLogger = defaultLoggerValue() + + delegateLoggerOnce sync.Once +) + +// LoggerProvider is the internal implementation for global.LoggerProvider. +func LoggerProvider() logs.LoggerProvider { + return globalOtelLogger.Load().(loggerProviderHolder).lp +} + +// SetLoggerProvider is the internal implementation for global.SetLoggerProvider. +func SetLoggerProvider(lp logs.LoggerProvider) { + current := LoggerProvider() + + if _, cOk := current.(*loggerProvider); cOk { + if _, tpOk := lp.(*loggerProvider); tpOk && current == lp { + // Do not assign the default delegating LoggerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in logger provider"), + "Setting logger provider to it's current value. 
No delegate will be configured", + ) + return + } + } + + globalOtelLogger.Store(loggerProviderHolder{lp: lp}) +} + +func defaultLoggerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(loggerProviderHolder{lp: &loggerProvider{}}) + return v +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go new file mode 100644 index 000000000..490ba903f --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs.go @@ -0,0 +1,35 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package otel + +import ( + "github.com/agoda-com/opentelemetry-logs-go/internal/global" + "github.com/agoda-com/opentelemetry-logs-go/logs" +) + +// GetLoggerProvider returns the registered global logger provider. +// If none is registered then an instance of NoopLoggerProvider is returned. +// +// loggerProvider := otel.GetLoggerProvider() +func GetLoggerProvider() logs.LoggerProvider { + return global.LoggerProvider() +} + +// SetLoggerProvider registers `lp` as the global logger provider. +func SetLoggerProvider(lp logs.LoggerProvider) { + global.SetLoggerProvider(lp) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go new file mode 100644 index 000000000..b8ca22273 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/config.go @@ -0,0 +1,89 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs // Package logs import "github.com/agoda-com/opentelemetry-logs-go/logs" + +import "go.opentelemetry.io/otel/attribute" + +// LoggerConfig is a group of options for a Logger. +type LoggerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Logger. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *LoggerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *LoggerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Logger. +func (t *LoggerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewLoggerConfig applies all the options to a returned LoggerConfig. 
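+//
+// For example (an illustrative sketch; the option constructors are defined
+// below in this file):
+//
+//	cfg := NewLoggerConfig(
+//		WithInstrumentationVersion("0.1.0"),
+//		WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
+//	)
+//	version := cfg.InstrumentationVersion() // "0.1.0"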
+func NewLoggerConfig(options ...LoggerOption) LoggerConfig { + var config LoggerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// LoggerOption applies an option to a LoggerConfig. +type LoggerOption interface { + apply(LoggerConfig) LoggerConfig +} + +type loggerOptionFunc func(LoggerConfig) LoggerConfig + +func (fn loggerOptionFunc) apply(cfg LoggerConfig) LoggerConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) LoggerOption { + return loggerOptionFunc(func(cfg LoggerConfig) LoggerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) LoggerOption { + return loggerOptionFunc(func(config LoggerConfig) LoggerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Logger. +func WithSchemaURL(schemaURL string) LoggerOption { + return loggerOptionFunc(func(cfg LoggerConfig) LoggerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go new file mode 100644 index 000000000..db5f43546 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/doc.go @@ -0,0 +1,68 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package logs provides an implementation of the logging part of the +OpenTelemetry API. + +This package defines a log backend API. The API is not intended to be called by application developers directly. +It is provided for logging library authors to build log appenders, which use this API to bridge between existing +logging libraries and the OpenTelemetry log data model. + +To participate in logging a LogRecord needs to be created for the +operation being performed as part of a logging workflow. In its simplest form: + + var logger logger.Logger + + func init() { + logger = otel.Logger() + } + + func operation(ctx context.Context) { + logRecord := logger.NewLogRecord(..) + logger.Emit(logRecord) + } + +A Logger is unique to the instrumentation and is used to create Logs. +Instrumentation should be designed to accept a LoggerProvider from which it +can create its own unique Logger. Alternatively, the registered global +LoggerProvider from the github.com/agoda-com/opentelemetry-logs-go package can be used as +a default. 
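+
+For example, a logger can be obtained from the registered global provider
+(a brief sketch; GetLoggerProvider is exported by the root
+github.com/agoda-com/opentelemetry-logs-go package, referred to here as otel):
+
+	lp := otel.GetLoggerProvider()
+	logger := lp.Logger(
+		"instrumentation/package/name",
+		logs.WithInstrumentationVersion("0.1.0"),
+	)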
+
+	const (
+		name    = "instrumentation/package/name"
+		version = "0.1.0"
+	)
+
+	type Instrumentation struct {
+		logger logs.Logger
+	}
+
+	func NewInstrumentation(lp logs.LoggerProvider) *Instrumentation {
+		if lp == nil {
+			lp = otel.GetLoggerProvider()
+		}
+		return &Instrumentation{
+			logger: lp.Logger(name, logs.WithInstrumentationVersion(version)),
+		}
+	}
+
+	func operation(ctx context.Context, inst *Instrumentation) {
+
+		// ...
+	}
+*/
+package logs // import "github.com/agoda-com/opentelemetry-logs-go/logs"
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go
new file mode 100644
index 000000000..cfe454e4b
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/logs.go
@@ -0,0 +1,167 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+	"time"
+)
+
+// LogRecordConfig contains mutable fields usable for constructing
+// an immutable LogRecord.
+type LogRecordConfig struct {
+	Timestamp            *time.Time
+	ObservedTimestamp    time.Time
+	TraceId              *trace.TraceID
+	SpanId               *trace.SpanID
+	TraceFlags           *trace.TraceFlags
+	SeverityText         *string
+	SeverityNumber       *SeverityNumber
+	Body                 *string
+	Resource             *resource.Resource
+	InstrumentationScope *instrumentation.Scope
+	Attributes           *[]attribute.KeyValue
+}
+
+// NewLogRecord constructs a LogRecord using values from the provided
+// LogRecordConfig.
+func NewLogRecord(config LogRecordConfig) LogRecord {
+	return LogRecord{
+		timestamp:            config.Timestamp,
+		observedTimestamp:    config.ObservedTimestamp,
+		traceId:              config.TraceId,
+		spanId:               config.SpanId,
+		traceFlags:           config.TraceFlags,
+		severityText:         config.SeverityText,
+		severityNumber:       config.SeverityNumber,
+		body:                 config.Body,
+		resource:             config.Resource,
+		instrumentationScope: config.InstrumentationScope,
+		attributes:           config.Attributes,
+	}
+}
+
+// LogRecord is an implementation of the OpenTelemetry Log API
+// representing the individual component of a log.
+// see https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition +type LogRecord struct { + timestamp *time.Time + observedTimestamp time.Time + traceId *trace.TraceID + spanId *trace.SpanID + traceFlags *trace.TraceFlags + severityText *string + severityNumber *SeverityNumber + body *string + resource *resource.Resource + instrumentationScope *instrumentation.Scope + attributes *[]attribute.KeyValue +} + +func (l LogRecord) Timestamp() *time.Time { return l.timestamp } +func (l LogRecord) ObservedTimestamp() time.Time { return l.observedTimestamp } +func (l LogRecord) TraceId() *trace.TraceID { return l.traceId } +func (l LogRecord) SpanId() *trace.SpanID { return l.spanId } +func (l LogRecord) TraceFlags() *trace.TraceFlags { return l.traceFlags } +func (l LogRecord) SeverityText() *string { return l.severityText } +func (l LogRecord) SeverityNumber() *SeverityNumber { return l.severityNumber } +func (l LogRecord) Body() *string { return l.body } +func (l LogRecord) Resource() *resource.Resource { return l.resource } +func (l LogRecord) InstrumentationScope() *instrumentation.Scope { return l.instrumentationScope } +func (l LogRecord) Attributes() *[]attribute.KeyValue { return l.attributes } +func (l LogRecord) private() {} + +// SeverityNumber Possible values for LogRecord.SeverityNumber. +type SeverityNumber int32 + +const ( + // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. + UNSPECIFIED SeverityNumber = 0 + TRACE SeverityNumber = 1 + TRACE2 SeverityNumber = 2 + TRACE3 SeverityNumber = 3 + TRACE4 SeverityNumber = 4 + DEBUG SeverityNumber = 5 + DEBUG2 SeverityNumber = 6 + DEBUG3 SeverityNumber = 7 + DEBUG4 SeverityNumber = 8 + INFO SeverityNumber = 9 + INFO2 SeverityNumber = 10 + INFO3 SeverityNumber = 11 + INFO4 SeverityNumber = 12 + WARN SeverityNumber = 13 + WARN2 SeverityNumber = 14 + WARN3 SeverityNumber = 15 + WARN4 SeverityNumber = 16 + ERROR SeverityNumber = 17 + ERROR2 SeverityNumber = 18 + ERROR3 SeverityNumber = 19 + ERROR4 SeverityNumber = 20 + FATAL SeverityNumber = 21 + FATAL2 SeverityNumber = 22 + FATAL3 SeverityNumber = 23 + FATAL4 SeverityNumber = 24 +) + +// Logger is the creator of Logs +type Logger interface { + // Emit emits a log record + Emit(logRecord LogRecord) +} + +// LoggerProvider provides Loggers that are used by instrumentation code to +// log computational workflows. +// +// A LoggerProvider is the collection destination of logs +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Logs are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Loggers to instrument code. +type LoggerProvider interface { + // Logger returns a unique Logger scoped to be used by instrumentation code + // to log computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. 
Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using logs to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Loggers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Logger + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Logger or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Logger. All implementations will ensure any LoggerProvider + // configuration changes are propagated to all provided Loggers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Logger(name string, options ...LoggerOption) Logger +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go new file mode 100644 index 000000000..3f4bb1d2d --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/logs/noop.go @@ -0,0 +1,38 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +// NewNoopLoggerProvider returns an implementation of LoggerProvider that +// performs no operations. The Logger created from the returned +// LoggerProvider also perform no operations. +func NewNoopLoggerProvider() LoggerProvider { + return noopLoggerProvider{} +} + +type noopLoggerProvider struct{} + +var _ LoggerProvider = noopLoggerProvider{} + +func (p noopLoggerProvider) Logger(string, ...LoggerOption) Logger { + return noopLogger{} +} + +type noopLogger struct{} + +var _ Logger = noopLogger{} + +func (n noopLogger) Emit(logRecord LogRecord) {} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go new file mode 100644 index 000000000..80fff0675 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env/env.go @@ -0,0 +1,105 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package env + +import ( + "os" + "strconv" +) + +// Environment variable names. +const ( + // BatchLogsProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). 
+ BatchLogsProcessorScheduleDelayKey = "OTEL_BLRP_SCHEDULE_DELAY" + // BatchLogsProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). + BatchLogsProcessorExportTimeoutKey = "OTEL_BLRP_EXPORT_TIMEOUT" + // BatchLogsProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). + BatchLogsProcessorMaxQueueSizeKey = "OTEL_BLRP_MAX_QUEUE_SIZE" + // BatchLogsProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // EnvBatchLogsProcessorMaxQueueSize. + BatchLogsProcessorMaxExportBatchSizeKey = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE" +) + +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. +func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value := os.Getenv(key) + if value == "" { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + //envconfig.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. +func IntEnvOr(key string, defaultValue int) int { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + //global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchLogsProcessorScheduleDelay returns the environment variable value for +// the OTEL_BLRP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchLogsProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchLogsProcessorScheduleDelayKey, defaultValue) +} + +// BatchLogsProcessorExportTimeout returns the environment variable value for +// the OTEL_BLRP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchLogsProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchLogsProcessorExportTimeoutKey, defaultValue) +} + +// BatchLogsProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BLRP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchLogsProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchLogsProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchLogsProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BLRP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. +func BatchLogsProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchLogsProcessorMaxExportBatchSizeKey, defaultValue) +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go new file mode 100644 index 000000000..0691f6ac6 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/batch_log_record_processor.go @@ -0,0 +1,435 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "context" + "github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env" + "go.opentelemetry.io/otel" + "runtime" + "sync" + "sync/atomic" + "time" +) + +// Defaults for BatchLogRecordProcessorOptions. +const ( + DefaultMaxQueueSize = 2048 + DefaultScheduleDelay = 5000 + DefaultExportTimeout = 30000 + DefaultMaxExportBatchSize = 512 +) + +// BatchLogRecordProcessorOption configures a BatchLogsProcessor. +type BatchLogRecordProcessorOption func(o *BatchLogRecordProcessorOptions) + +// BatchLogRecordProcessorOptions is configuration settings for a +// BatchLogsProcessor. +type BatchLogRecordProcessorOptions struct { + // MaxQueueSize is the maximum queue size to buffer logs for delayed processing. If the + // queue gets full it drops the logs. Use BlockOnQueueFull to change this behavior. + // The default value of MaxQueueSize is 2048. + MaxQueueSize int + + // BatchTimeout is the maximum duration for constructing a batch. Processor + // forcefully sends available logs when timeout is reached. + // The default value of BatchTimeout is 5000 msec. + BatchTimeout time.Duration + + // ExportTimeout specifies the maximum duration for exporting logs. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + + // MaxExportBatchSize is the maximum number of logs to process in a single batch. + // If there are more than one batch worth of logs then it processes multiple batches + // of logs one batch after the other without any delay. + // The default value of MaxExportBatchSize is 512. + MaxExportBatchSize int + + // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full + // AND if BlockOnQueueFull is set to true. + // Blocking option should be used carefully as it can severely affect the performance of an + // application. + BlockOnQueueFull bool +} + +// WithMaxQueueSize returns a BatchLogRecordProcessorOption that configures the +// maximum queue size allowed for a BatchLogRecordProcessor. +func WithMaxQueueSize(size int) BatchLogRecordProcessorOption { + return func(o *BatchLogRecordProcessorOptions) { + o.MaxQueueSize = size + } +} + +// WithMaxExportBatchSize returns a BatchLogRecordProcessorOption that configures +// the maximum export batch size allowed for a BatchLogRecordProcessor. +func WithMaxExportBatchSize(size int) BatchLogRecordProcessorOption { + return func(o *BatchLogRecordProcessorOptions) { + o.MaxExportBatchSize = size + } +} + +// WithBatchTimeout returns a BatchLogRecordProcessorOption that configures the +// maximum delay allowed for a BatchLogRecordProcessor before it will export any +// held log (whether the queue is full or not). +func WithBatchTimeout(delay time.Duration) BatchLogRecordProcessorOption { + return func(o *BatchLogRecordProcessorOptions) { + o.BatchTimeout = delay + } +} + +// WithExportTimeout returns a BatchLogRecordProcessorOption that configures the +// amount of time a BatchLogRecordProcessor waits for an exporter to export before +// abandoning the export. 
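+//
+// The options above are combined when constructing a processor, for example
+// (an illustrative sketch; NewBatchLogRecordProcessor is defined later in
+// this file):
+//
+//	processor := NewBatchLogRecordProcessor(exporter,
+//		WithMaxQueueSize(4096),
+//		WithMaxExportBatchSize(512),
+//		WithBatchTimeout(2*time.Second),
+//		WithExportTimeout(15*time.Second),
+//	)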
+func WithExportTimeout(timeout time.Duration) BatchLogRecordProcessorOption { + return func(o *BatchLogRecordProcessorOptions) { + o.ExportTimeout = timeout + } +} + +// WithBlocking returns a BatchLogRecordProcessorOption that configures a +// BatchLogRecordProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. +func WithBlocking() BatchLogRecordProcessorOption { + return func(o *BatchLogRecordProcessorOptions) { + o.BlockOnQueueFull = true + } +} + +// batchLogRecordProcessor is a LogRecordProcessor that batches asynchronously-received +// logs and sends them to a logs.Exporter when complete. +type batchLogRecordProcessor struct { + e LogRecordExporter + o BatchLogRecordProcessorOptions + + queue chan ReadableLogRecord + dropped uint32 + + batch []ReadableLogRecord + batchMutex sync.Mutex + timer *time.Timer + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} + stopped atomic.Bool +} + +func (lrp *batchLogRecordProcessor) Shutdown(ctx context.Context) error { + var err error + lrp.stopOnce.Do(func() { + lrp.stopped.Store(true) + wait := make(chan struct{}) + go func() { + close(lrp.stopCh) + lrp.stopWait.Wait() + if lrp.e != nil { + if err := lrp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } + close(wait) + }() + // Wait until the wait group is done or the context is cancelled + select { + case <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err +} + +var _ LogRecordProcessor = (*batchLogRecordProcessor)(nil) + +// NewBatchLogRecordProcessor creates a new LogRecordProcessor that will send completed +// log batches to the exporter with the supplied options. +// +// If the exporter is nil, the logs processor will perform no action. +// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#batching-processor +func NewBatchLogRecordProcessor(exporter LogRecordExporter, options ...BatchLogRecordProcessorOption) LogRecordProcessor { + maxQueueSize := env.BatchLogsProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchLogsProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) + + if maxExportBatchSize > maxQueueSize { + if DefaultMaxExportBatchSize > maxQueueSize { + maxExportBatchSize = maxQueueSize + } else { + maxExportBatchSize = DefaultMaxExportBatchSize + } + } + + o := BatchLogRecordProcessorOptions{ + BatchTimeout: time.Duration(env.BatchLogsProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, + ExportTimeout: time.Duration(env.BatchLogsProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, + MaxQueueSize: maxQueueSize, + MaxExportBatchSize: maxExportBatchSize, + } + for _, opt := range options { + opt(&o) + } + blp := &batchLogRecordProcessor{ + e: exporter, + o: o, + batch: make([]ReadableLogRecord, 0, o.MaxExportBatchSize), + timer: time.NewTimer(o.BatchTimeout), + queue: make(chan ReadableLogRecord, o.MaxQueueSize), + stopCh: make(chan struct{}), + } + + blp.stopWait.Add(1) + go func() { + defer blp.stopWait.Done() + blp.processQueue() + blp.drainQueue() + }() + + return blp +} + +func (lrp *batchLogRecordProcessor) OnEmit(rol ReadableLogRecord) { + + // Do not enqueue spans after Shutdown. + if lrp.stopped.Load() { + return + } + // Do not enqueue logs if we are just going to drop them. + if lrp.e == nil { + return + } + + lrp.enqueue(rol) +} + +type forceFlushLogs struct { + ReadableLogRecord + flushed chan struct{} +} + +// processQueue removes logs from the `queue` channel until processor +// is shut down. 
It calls the exporter in batches of up to MaxExportBatchSize
+// waiting up to BatchTimeout to form a batch.
+func (lrp *batchLogRecordProcessor) processQueue() {
+	defer lrp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-lrp.stopCh:
+			return
+		case <-lrp.timer.C:
+			if err := lrp.exportLogs(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-lrp.queue:
+			if ffs, ok := sd.(forceFlushLogs); ok {
+				close(ffs.flushed)
+				continue
+			}
+			lrp.batchMutex.Lock()
+			lrp.batch = append(lrp.batch, sd)
+			shouldExport := len(lrp.batch) >= lrp.o.MaxExportBatchSize
+			lrp.batchMutex.Unlock()
+			if shouldExport {
+				if !lrp.timer.Stop() {
+					<-lrp.timer.C
+				}
+				if err := lrp.exportLogs(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue waits for any caller that had started an enqueue to finish,
+// then exports the final batch.
+func (lrp *batchLogRecordProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-lrp.queue:
+			if sd == nil {
+				if err := lrp.exportLogs(ctx); err != nil {
+					otel.Handle(err)
+				}
+				return
+			}
+
+			lrp.batchMutex.Lock()
+			lrp.batch = append(lrp.batch, sd)
+			shouldExport := len(lrp.batch) == lrp.o.MaxExportBatchSize
+			lrp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := lrp.exportLogs(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			close(lrp.queue)
+		}
+	}
+}
+
+// exportLogs is a subroutine of processing and draining the queue.
+func (lrp *batchLogRecordProcessor) exportLogs(ctx context.Context) error {
+	lrp.timer.Reset(lrp.o.BatchTimeout)
+
+	lrp.batchMutex.Lock()
+	defer lrp.batchMutex.Unlock()
+
+	if lrp.o.ExportTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, lrp.o.ExportTimeout)
+		defer cancel()
+	}
+
+	if l := len(lrp.batch); l > 0 {
+		//global.Debug("exporting logs", "count", len(lrp.batch), "total_dropped", atomic.LoadUint32(&lrp.dropped))
+		err := lrp.e.Export(ctx, lrp.batch)
+
+		// A new batch is always created after exporting, even if the batch failed to be exported.
+		//
+		// It is up to the exporter to implement any type of retry logic if a batch is failing
+		// to be exported, since it is specific to the protocol and backend being sent to.
+		lrp.batch = lrp.batch[:0]
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (lrp *batchLogRecordProcessor) enqueue(sd ReadableLogRecord) {
+	ctx := context.TODO()
+	if lrp.o.BlockOnQueueFull {
+		lrp.enqueueBlockOnQueueFull(ctx, sd)
+	} else {
+		lrp.enqueueDrop(ctx, sd)
+	}
+}
+
+// ForceFlush exports all enqueued logs that have not yet been exported.
+func (lrp *batchLogRecordProcessor) ForceFlush(ctx context.Context) error {
+
+	// Interrupt if context is already canceled.
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	// Do nothing after Shutdown.
+	// Do not enqueue logs after Shutdown.
+ if lrp.stopped.Load() { + return nil + } + + var err error + if lrp.e != nil { + flushCh := make(chan struct{}) + if lrp.enqueueBlockOnQueueFull(ctx, forceFlushLogs{flushed: flushCh}) { + select { + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): + return ctx.Err() + } + } + + wait := make(chan error) + go func() { + wait <- lrp.exportLogs(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err +} + +func recoverSendOnClosedChan() { + x := recover() + switch err := x.(type) { + case nil: + return + case runtime.Error: + if err.Error() == "send on closed channel" { + return + } + } + panic(x) +} + +func (lrp *batchLogRecordProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadableLogRecord) bool { + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-lrp.stopCh: + return false + default: + } + + select { + case lrp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (lrp *batchLogRecordProcessor) enqueueDrop(ctx context.Context, ld ReadableLogRecord) bool { + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-lrp.stopCh: + return false + default: + } + + select { + case lrp.queue <- ld: + return true + default: + atomic.AddUint32(&lrp.dropped, 1) + } + return false +} + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (lrp *batchLogRecordProcessor) MarshalLog() interface{} { + return struct { + Type string + LogRecordExporter LogRecordExporter + Config BatchLogRecordProcessorOptions + }{ + Type: "BatchLogRecordProcessor", + LogRecordExporter: lrp.e, + Config: lrp.o, + } +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go new file mode 100644 index 000000000..295270847 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_exporter.go @@ -0,0 +1,53 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "context" +) + +// LogRecordExporter Interface for various logs exporters +// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter +type LogRecordExporter interface { + + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Export exports a batch of logs. + // + // This function is called synchronously, so there is no concurrency + // safety requirement. However, due to the synchronous calling pattern, + // it is critical that all timeouts and cancellations contained in the + // passed context must be honored. 
+ // + // Any retry logic must be contained in this function. The SDK that + // calls this function will not implement any retry logic. All errors + // returned by this function are considered unrecoverable and will be + // reported to a configured error Handler. + Export(ctx context.Context, batch []ReadableLogRecord) error + + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The + // exporter is expected to perform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go new file mode 100644 index 000000000..c924ccef2 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/log_record_processor.go @@ -0,0 +1,67 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "context" + "sync" +) + +// LogRecordProcessor is an interface which allows hooks for LogRecord emitting. +// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor +type LogRecordProcessor interface { + + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // OnEmit is called when logs sent. It is called synchronously and + // hence not block. + OnEmit(rol ReadableLogRecord) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown is called when the SDK shuts down. Any cleanup or release of + // resources held by the processor should be done in this call. + // + // Calls to Process, or ForceFlush after this has been called + // should be ignored. + // + // All timeouts and cancellations contained in ctx must be honored, this + // should not block indefinitely. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush exports all ended logs to the configured Exporter that have not yet + // been exported. It should only be called when absolutely necessary, such as when + // using a FaaS provider that may suspend the process after an invocation, but before + // the Processor can export the completed logs. + ForceFlush(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + +type logRecordProcessorState struct { + lp LogRecordProcessor + state sync.Once +} + +func newLogsProcessorState(lp LogRecordProcessor) *logRecordProcessorState { + return &logRecordProcessorState{lp: lp} +} + +type logRecordProcessorStates []*logRecordProcessorState diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go new file mode 100644 index 000000000..37eeb0675 --- /dev/null +++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/logger.go @@ -0,0 +1,191 @@ +/* +Copyright Agoda Services Co.,Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logs + +import ( + "github.com/agoda-com/opentelemetry-logs-go/logs" + "github.com/agoda-com/opentelemetry-logs-go/semconv" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" + "sync" + "time" +) + +type logger struct { + provider *LoggerProvider + instrumentationScope instrumentation.Scope +} + +var _ logs.Logger = &logger{} + +func (l logger) Emit(logRecord logs.LogRecord) { + lps := l.provider.getLogRecordProcessorStates() + if len(lps) == 0 { + return + } + + pr, err := resource.Merge(l.provider.resource, logRecord.Resource()) + if err != nil { + return + } + + elr := &exportableLogRecord{ + timestamp: logRecord.Timestamp(), + observedTimestamp: logRecord.ObservedTimestamp(), + traceId: logRecord.TraceId(), + spanId: logRecord.SpanId(), + traceFlags: logRecord.TraceFlags(), + severityText: logRecord.SeverityText(), + severityNumber: logRecord.SeverityNumber(), + body: logRecord.Body(), + resource: pr, + instrumentationScope: logRecord.InstrumentationScope(), + attributes: logRecord.Attributes(), + } + + for _, lp := range lps { + lp.lp.OnEmit(elr) + } +} + +// ReadableLogRecord Log structure +// see https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition +// see https://opentelemetry.io/docs/specs/otel/logs/sdk/#readablelogrecord +type ReadableLogRecord interface { + // Timestamp Time when the event occurred. + Timestamp() *time.Time + // ObservedTimestamp Time when the event was observed. + ObservedTimestamp() time.Time + // TraceId Request trace id. + TraceId() *trace.TraceID + // SpanId Request span id. + SpanId() *trace.SpanID + // TraceFlags W3C trace flag. + TraceFlags() *trace.TraceFlags + // SeverityText This is the original string representation of the severityNumber as it is known at the source + SeverityText() *string + // SeverityNumber Numerical value of the severityNumber. + SeverityNumber() *logs.SeverityNumber + // Body The body of the log record. + Body() *string + // Resource Describes the source of the log. + Resource() *resource.Resource + // InstrumentationScope returns information about the instrumentation + // scope that created the log. + InstrumentationScope() *instrumentation.Scope + // Attributes describe the aspects of the event. 
+	Attributes() *[]attribute.KeyValue
+
+	// A private method to prevent users implementing the
+	// interface and so future additions to it will not
+	// violate compatibility.
+	private()
+}
+
+type ReadWriteLogRecord interface {
+	SetResource(resource *resource.Resource)
+	// RecordException records exception details: message, stacktrace, and type
+	RecordException(*string, *string, *string)
+	ReadableLogRecord
+}
+
+// exportableLogRecord is an implementation of the OpenTelemetry Log API
+// representing the individual component of a log.
+type exportableLogRecord struct {
+	// mu protects the contents of this log.
+	mu                   sync.Mutex
+	timestamp            *time.Time
+	observedTimestamp    time.Time
+	traceId              *trace.TraceID
+	spanId               *trace.SpanID
+	traceFlags           *trace.TraceFlags
+	severityText         *string
+	severityNumber       *logs.SeverityNumber
+	body                 *string
+	resource             *resource.Resource
+	instrumentationScope *instrumentation.Scope
+	attributes           *[]attribute.KeyValue
+}
+
+// newReadWriteLogRecord creates a new ReadWriteLogRecord.
+// This method may change in the future
+func newReadWriteLogRecord(
+	ctx *trace.SpanContext,
+	body *string,
+	severityText *string,
+	severityNumber *logs.SeverityNumber,
+	resource *resource.Resource,
+	instrumentationScope *instrumentation.Scope,
+	attributes *[]attribute.KeyValue,
+	timestamp *time.Time) ReadWriteLogRecord {
+
+	traceId := ctx.TraceID()
+	spanId := ctx.SpanID()
+	traceFlags := ctx.TraceFlags()
+
+	return &exportableLogRecord{
+		timestamp:            timestamp,
+		observedTimestamp:    time.Now(),
+		traceId:              &traceId,
+		spanId:               &spanId,
+		traceFlags:           &traceFlags,
+		severityText:         severityText,
+		severityNumber:       severityNumber,
+		body:                 body,
+		resource:             resource,
+		instrumentationScope: instrumentationScope,
+		attributes:           attributes,
+	}
+}
+
+func (r *exportableLogRecord) SetResource(resource *resource.Resource) { r.resource = resource }
+
+// RecordException is a helper to add exception-related information as attributes of a log record
+// see https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#recording-an-exception
+func (r *exportableLogRecord) RecordException(message *string, stacktrace *string, exceptionType *string) {
+	if message == nil && exceptionType == nil {
+		// at least one of the fields must be present
+		return
+	}
+	if message != nil {
+		*r.attributes = append(*r.attributes, semconv.ExceptionMessage(*message))
+	}
+	if stacktrace != nil {
+		*r.attributes = append(*r.attributes, semconv.ExceptionStacktrace(*stacktrace))
+	}
+	if exceptionType != nil {
+		*r.attributes = append(*r.attributes, semconv.ExceptionType(*exceptionType))
+	}
+}
+
+func (r *exportableLogRecord) Timestamp() *time.Time        { return r.timestamp }
+func (r *exportableLogRecord) ObservedTimestamp() time.Time { return r.observedTimestamp }
+func (r *exportableLogRecord) TraceId() *trace.TraceID      { return r.traceId }
+
+func (r *exportableLogRecord) SpanId() *trace.SpanID       { return r.spanId }
+func (r *exportableLogRecord) TraceFlags() *trace.TraceFlags { return r.traceFlags }
+func (r *exportableLogRecord) InstrumentationScope() *instrumentation.Scope {
+	return r.instrumentationScope
+}
+func (r *exportableLogRecord) SeverityText() *string                { return r.severityText }
+func (r *exportableLogRecord) SeverityNumber() *logs.SeverityNumber { return r.severityNumber }
+func (r *exportableLogRecord) Body() *string                        { return r.body }
+func (r *exportableLogRecord) Resource() *resource.Resource         { return r.resource }
+func (r *exportableLogRecord) Attributes() *[]attribute.KeyValue    { return r.attributes }
+func (r *exportableLogRecord) private()                             {}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go
new file mode 100644
index 000000000..c0e41e7e2
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider.go
@@ -0,0 +1,274 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs // import "github.com/agoda-com/opentelemetry-logs-go/sdk/logs"
+
+import (
+	"context"
+	"fmt"
+	"github.com/agoda-com/opentelemetry-logs-go/internal/global"
+	"github.com/agoda-com/opentelemetry-logs-go/logs"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"sync"
+	"sync/atomic"
+)
+
+const (
+	defaultLoggerName = "github.com/agoda-com/opentelemetry-logs-go/sdk/logs/provider"
+)
+
+// loggerProviderConfig holds the configuration for a LoggerProvider.
+type loggerProviderConfig struct {
+	processors []LogRecordProcessor
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// LoggerProviderOption configures a LoggerProvider.
+type LoggerProviderOption interface {
+	apply(loggerProviderConfig) loggerProviderConfig
+}
+type loggerProviderOptionFunc func(loggerProviderConfig) loggerProviderConfig
+
+func (fn loggerProviderOptionFunc) apply(cfg loggerProviderConfig) loggerProviderConfig {
+	return fn(cfg)
+}
+
+// WithLogRecordProcessor registers the given processor to process emitted logs.
+func WithLogRecordProcessor(logsProcessor LogRecordProcessor) LoggerProviderOption {
+	return loggerProviderOptionFunc(func(cfg loggerProviderConfig) loggerProviderConfig {
+		cfg.processors = append(cfg.processors, logsProcessor)
+		return cfg
+	})
+}
+
+// WithSyncer registers the exporter with the LoggerProvider using a
+// SimpleLogRecordProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleLogRecordProcessor that wraps the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it is slow and has a
+// high computation resource usage overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e LogRecordExporter) LoggerProviderOption {
+	return WithLogRecordProcessor(NewSimpleLogRecordProcessor(e))
+}
+
+// WithBatcher registers the exporter with the LoggerProvider using a
+// BatchLogRecordProcessor configured with the passed opts.
+func WithBatcher(e LogRecordExporter, opts ...BatchLogRecordProcessorOption) LoggerProviderOption {
+	return WithLogRecordProcessor(NewBatchLogRecordProcessor(e, opts...))
+}
+
+// WithResource configures the provider with common resource attributes.
+//
+// Parameters:
+// r (*resource.Resource) resource attributes that are added to every log as resource-level tags
+func WithResource(r *resource.Resource) LoggerProviderOption {
+	return loggerProviderOptionFunc(func(cfg loggerProviderConfig) loggerProviderConfig {
+		var err error
+		cfg.resource, err = resource.Merge(resource.Environment(), r)
+		if err != nil {
+			otel.Handle(err)
+		}
+		return cfg
+	})
+}
+
+// LoggerProvider provides access to Loggers. The API is not intended to be called by application developers directly.
+// see https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#loggerprovider
+type LoggerProvider struct {
+	mu          sync.Mutex
+	namedLogger map[instrumentation.Scope]*logger
+	//cfg loggerProviderConfig
+
+	logProcessors atomic.Pointer[logRecordProcessorStates]
+	isShutdown    atomic.Bool
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the LoggerProvider.
+	resource *resource.Resource
+}
+
+var _ logs.LoggerProvider = &LoggerProvider{}
+
+func (lp *LoggerProvider) Logger(name string, opts ...logs.LoggerOption) logs.Logger {
+	if lp.isShutdown.Load() {
+		return logs.NewNoopLoggerProvider().Logger(name, opts...)
+	}
+
+	c := logs.NewLoggerConfig(opts...)
+
+	if name == "" {
+		name = defaultLoggerName
+	}
+
+	is := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	t, ok := func() (logs.Logger, bool) {
+		lp.mu.Lock()
+		defer lp.mu.Unlock()
+		// Must check the flag after acquiring the mutex to avoid returning a valid logger if Shutdown() ran
+		// after the first check above but before we acquired the mutex.
+		if lp.isShutdown.Load() {
+			return logs.NewNoopLoggerProvider().Logger(name, opts...), true
+		}
+
+		t, ok := lp.namedLogger[is]
+		if !ok {
+			t = &logger{
+				provider:             lp,
+				instrumentationScope: is,
+			}
+			lp.namedLogger[is] = t
+		}
+		return t, ok
+	}()
+	if !ok {
+		// This code is outside the mutex to not hold the lock while calling third party logging code:
+		// - That code may do slow things like I/O, which would prolong the duration the lock is held,
+		//   slowing down all logging consumers.
+		// - Logging code may be instrumented with logging and deadlock because it could try
+		//   acquiring the same non-reentrant mutex.
+		global.Info("Logger created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL)
+	}
+	return t
+}
+
+func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider {
+	o := loggerProviderConfig{}
+
+	o = applyLoggerProviderEnvConfigs(o)
+
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+
+	o = ensureValidLoggerProviderConfig(o)
+
+	lp := &LoggerProvider{
+		namedLogger: make(map[instrumentation.Scope]*logger),
+		resource:    o.resource,
+	}
+
+	global.Info("LoggerProvider created", "config", o)
+
+	lrpss := make(logRecordProcessorStates, 0, len(o.processors))
+	for _, lrp := range o.processors {
+		lrpss = append(lrpss, newLogsProcessorState(lrp))
+	}
+	lp.logProcessors.Store(&lrpss)
+
+	return lp
+}
+
+func (p *LoggerProvider) getLogRecordProcessorStates() logRecordProcessorStates {
+	return *(p.logProcessors.Load())
+}
+
+func (p *LoggerProvider) Shutdown(ctx context.Context) error {
+	// This check prevents deadlocks in case of recursive shutdown.
+	if p.isShutdown.Load() {
+		return nil
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown has already been done concurrently.
+	if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+		return nil
+	}
+
+	var retErr error
+	for _, lrps := range p.getLogRecordProcessorStates() {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		lrps.state.Do(func() {
+			err = lrps.lp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%v; %v", retErr, err)
+			}
+		}
+	}
+	p.logProcessors.Store(&logRecordProcessorStates{})
+	return retErr
+}
+
+// ForceFlush immediately exports all logs that have not yet been exported for
+// all the registered log processors.
+func (p *LoggerProvider) ForceFlush(ctx context.Context) error {
+	lrpss := p.getLogRecordProcessorStates()
+	if len(lrpss) == 0 {
+		return nil
+	}
+
+	for _, lrps := range lrpss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := lrps.lp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func applyLoggerProviderEnvConfigs(cfg loggerProviderConfig) loggerProviderConfig {
+	for _, opt := range loggerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func loggerProviderOptionsFromEnv() []LoggerProviderOption {
+	var opts []LoggerProviderOption
+
+	return opts
+}
+
+// ensureValidLoggerProviderConfig ensures that the given loggerProviderConfig is valid.
+func ensureValidLoggerProviderConfig(cfg loggerProviderConfig) loggerProviderConfig {
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+
+	return cfg
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go
new file mode 100644
index 000000000..453da09e1
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/sdk/logs/simple_log_record_processor.go
@@ -0,0 +1,78 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logs
+
+import (
+	"context"
+	"go.opentelemetry.io/otel"
+	"log"
+	"sync"
+)
+
+type simpleLogRecordProcessor struct {
+	exporterMu sync.Mutex
+	stopOnce   sync.Once
+	exporter   LogRecordExporter
+}
+
+func (lrp *simpleLogRecordProcessor) Shutdown(ctx context.Context) error {
+	return nil
+}
+
+func (lrp *simpleLogRecordProcessor) ForceFlush(ctx context.Context) error {
+	return nil
+}
+
+var _ LogRecordProcessor = (*simpleLogRecordProcessor)(nil)
+
+// NewSimpleLogRecordProcessor returns a new LogRecordProcessor that will synchronously
+// send completed logs to the exporter immediately.
+//
+// This LogRecordProcessor is not recommended for production use. The synchronous
+// nature of this LogRecordProcessor makes it good for testing, debugging, or
+// showing examples of other features, but it is slow and has a high
+// computation resource usage overhead. The BatchLogRecordProcessor is recommended
+// for production use instead.
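+//
+// A minimal wiring sketch (illustrative; exp stands for any LogRecordExporter):
+//
+//	provider := NewLoggerProvider(WithSyncer(exp))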
+func NewSimpleLogRecordProcessor(exporter LogRecordExporter) LogRecordProcessor {
+	slp := &simpleLogRecordProcessor{
+		exporter: exporter,
+	}
+	log.Printf("SimpleLogRecordProcessor is not recommended for production use, consider using BatchLogRecordProcessor instead.")
+
+	return slp
+}
+
+// OnEmit immediately exports a LogRecord
+func (lrp *simpleLogRecordProcessor) OnEmit(rol ReadableLogRecord) {
+	lrp.exporterMu.Lock()
+	defer lrp.exporterMu.Unlock()
+
+	if err := lrp.exporter.Export(context.Background(), []ReadableLogRecord{rol}); err != nil {
+		otel.Handle(err)
+	}
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this LogRecordProcessor.
+func (lrp *simpleLogRecordProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter LogRecordExporter
+	}{
+		Type:     "SimpleLogRecordProcessor",
+		Exporter: lrp.exporter,
+	}
+}
diff --git a/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go b/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go
new file mode 100644
index 000000000..4f7215d71
--- /dev/null
+++ b/vendor/github.com/agoda-com/opentelemetry-logs-go/semconv/attribute_group.go
@@ -0,0 +1,70 @@
+/*
+Copyright Agoda Services Co.,Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semconv
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes Log Record attributes.
+// see also https://opentelemetry.io/docs/specs/otel/logs/semantic_conventions/exceptions/#attributes
+const (
+	// ExceptionMessageKey is the attribute Key conforming to the "exception.message"
+	// semantic conventions. It represents the exception message.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the "exception.stacktrace"
+	// semantic conventions. It represents the stack trace of the exception.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+// Examples: Division by zero; Can't convert 'int' object to str implicitly
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents the exception
+// stack trace.
+// Examples: Exception in thread "main" java.lang.RuntimeException: ...
+func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the exception type +// Examples: java.net.ConnectException; OSError +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} diff --git a/vendor/github.com/benbjohnson/clock/LICENSE b/vendor/github.com/benbjohnson/clock/LICENSE new file mode 100644 index 000000000..ce212cb1c --- /dev/null +++ b/vendor/github.com/benbjohnson/clock/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ben Johnson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/benbjohnson/clock/README.md b/vendor/github.com/benbjohnson/clock/README.md new file mode 100644 index 000000000..4f1f82fc6 --- /dev/null +++ b/vendor/github.com/benbjohnson/clock/README.md @@ -0,0 +1,105 @@ +clock +===== + +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/benbjohnson/clock) + +Clock is a small library for mocking time in Go. It provides an interface +around the standard library's [`time`][time] package so that the application +can use the realtime clock while tests can use the mock clock. + +The module is currently maintained by @djmitche. + +[time]: https://pkg.go.dev/github.com/benbjohnson/clock + +## Usage + +### Realtime Clock + +Your application can maintain a `Clock` variable that will allow realtime and +mock clocks to be interchangeable. For example, if you had an `Application` type: + +```go +import "github.com/benbjohnson/clock" + +type Application struct { + Clock clock.Clock +} +``` + +You could initialize it to use the realtime clock like this: + +```go +var app Application +app.Clock = clock.New() +... +``` + +Then all timers and time-related functionality should be performed from the +`Clock` variable. + + +### Mocking time + +In your tests, you will want to use a `Mock` clock: + +```go +import ( + "testing" + + "github.com/benbjohnson/clock" +) + +func TestApplication_DoSomething(t *testing.T) { + mock := clock.NewMock() + app := Application{Clock: mock} + ... +} +``` + +Now that you've initialized your application to use the mock clock, you can +adjust the time programmatically. The mock clock always starts from the Unix +epoch (midnight UTC on Jan 1, 1970). 
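+
+You can also jump the mock clock straight to a specific instant with `Set()`
+(an illustrative sketch):
+
+```go
+mock.Set(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
+```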
+
+
+### Controlling time
+
+The mock clock provides the same functions that the standard library's `time`
+package provides. For example, to find the current time, you use the `Now()`
+function:
+
+```go
+mock := clock.NewMock()
+
+// Find the current time.
+mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
+
+// Move the clock forward.
+mock.Add(2 * time.Hour)
+
+// Check the time again. It's 2 hours later!
+mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
+```
+
+Timers and Tickers are also controlled by this same mock clock. They will only
+execute when the clock is moved forward:
+
+```go
+mock := clock.NewMock()
+count := 0
+
+// Kick off a timer to increment every 1 mock second.
+go func() {
+	ticker := mock.Ticker(1 * time.Second)
+	for {
+		<-ticker.C
+		count++
+	}
+}()
+runtime.Gosched()
+
+// Move the clock forward 10 seconds.
+mock.Add(10 * time.Second)
+
+// This prints 10.
+fmt.Println(count)
+```
diff --git a/vendor/github.com/benbjohnson/clock/clock.go b/vendor/github.com/benbjohnson/clock/clock.go
new file mode 100644
index 000000000..14ddc0795
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/clock.go
@@ -0,0 +1,422 @@
+package clock
+
+import (
+	"context"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Re-export of time.Duration
+type Duration = time.Duration
+
+// Clock represents an interface to the functions in the standard library time
+// package. Two implementations are available in the clock package. The first
+// is a real-time clock which simply wraps the time package's functions. The
+// second is a mock clock which will only change when
+// programmatically adjusted.
+type Clock interface {
+	After(d time.Duration) <-chan time.Time
+	AfterFunc(d time.Duration, f func()) *Timer
+	Now() time.Time
+	Since(t time.Time) time.Duration
+	Until(t time.Time) time.Duration
+	Sleep(d time.Duration)
+	Tick(d time.Duration) <-chan time.Time
+	Ticker(d time.Duration) *Ticker
+	Timer(d time.Duration) *Timer
+	WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc)
+	WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc)
+}
+
+// New returns an instance of a real-time clock.
+func New() Clock {
+	return &clock{}
+}
+
+// clock implements a real-time clock by simply wrapping the time package functions.
+type clock struct{}
+
+func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
+	return &Timer{timer: time.AfterFunc(d, f)}
+}
+
+func (c *clock) Now() time.Time { return time.Now() }
+
+func (c *clock) Since(t time.Time) time.Duration { return time.Since(t) }
+
+func (c *clock) Until(t time.Time) time.Duration { return time.Until(t) }
+
+func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
+
+func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
+
+func (c *clock) Ticker(d time.Duration) *Ticker {
+	t := time.NewTicker(d)
+	return &Ticker{C: t.C, ticker: t}
+}
+
+func (c *clock) Timer(d time.Duration) *Timer {
+	t := time.NewTimer(d)
+	return &Timer{C: t.C, timer: t}
+}
+
+func (c *clock) WithDeadline(parent context.Context, d time.Time) (context.Context, context.CancelFunc) {
+	return context.WithDeadline(parent, d)
+}
+
+func (c *clock) WithTimeout(parent context.Context, t time.Duration) (context.Context, context.CancelFunc) {
+	return context.WithTimeout(parent, t)
+}
+
+// Mock represents a mock clock that only moves forward programmatically.
+// It can be preferable to a real-time clock when testing time-based functionality.
+type Mock struct {
+	// mu protects all other fields in this struct, and the data that they
+	// point to.
+	mu sync.Mutex
+
+	now    time.Time   // current time
+	timers clockTimers // tickers & timers
+}
+
+// NewMock returns an instance of a mock clock.
+// The current time of the mock clock on initialization is the Unix epoch.
+func NewMock() *Mock {
+	return &Mock{now: time.Unix(0, 0)}
+}
+
+// Add moves the current time of the mock clock forward by the specified duration.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Add(d time.Duration) {
+	// Calculate the final current time.
+	m.mu.Lock()
+	t := m.now.Add(d)
+	m.mu.Unlock()
+
+	// Continue to execute timers until there are no more before the new time.
+	for {
+		if !m.runNextTimer(t) {
+			break
+		}
+	}
+
+	// Ensure that we end with the new time.
+	m.mu.Lock()
+	m.now = t
+	m.mu.Unlock()
+
+	// Give a small buffer to make sure that other goroutines get handled.
+	gosched()
+}
+
+// Set sets the current time of the mock clock to a specific one.
+// This should only be called from a single goroutine at a time.
+func (m *Mock) Set(t time.Time) {
+	// Continue to execute timers until there are no more before the new time.
+	for {
+		if !m.runNextTimer(t) {
+			break
+		}
+	}
+
+	// Ensure that we end with the new time.
+	m.mu.Lock()
+	m.now = t
+	m.mu.Unlock()
+
+	// Give a small buffer to make sure that other goroutines get handled.
+	gosched()
+}
+
+// WaitForAllTimers advances the clock until all timers have expired.
+func (m *Mock) WaitForAllTimers() time.Time {
+	// Continue to execute timers until there are no more
+	for {
+		m.mu.Lock()
+		if len(m.timers) == 0 {
+			m.mu.Unlock()
+			return m.Now()
+		}
+
+		sort.Sort(m.timers)
+		next := m.timers[len(m.timers)-1].Next()
+		m.mu.Unlock()
+		m.Set(next)
+	}
+}
+
+// runNextTimer executes the next timer in chronological order and moves the
+// current time to the timer's next tick time. The timer is not executed if
+// its next tick time is after the max time. Returns true if a timer was executed.
+func (m *Mock) runNextTimer(max time.Time) bool {
+	m.mu.Lock()
+
+	// Sort timers by time.
+	sort.Sort(m.timers)
+
+	// If we have no more timers then exit.
+	if len(m.timers) == 0 {
+		m.mu.Unlock()
+		return false
+	}
+
+	// Retrieve next timer. Exit if next tick is after new time.
+	t := m.timers[0]
+	if t.Next().After(max) {
+		m.mu.Unlock()
+		return false
+	}
+
+	// Move "now" forward and unlock clock.
+	m.now = t.Next()
+	now := m.now
+	m.mu.Unlock()
+
+	// Execute timer.
+	t.Tick(now)
+	return true
+}
+
+// After waits for the duration to elapse and then sends the current time on the returned channel.
+func (m *Mock) After(d time.Duration) <-chan time.Time {
+	return m.Timer(d).C
+}
+
+// AfterFunc waits for the duration to elapse and then executes a function in its own goroutine.
+// A Timer is returned that can be stopped.
+func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	ch := make(chan time.Time, 1)
+	t := &Timer{
+		c:       ch,
+		fn:      f,
+		mock:    m,
+		next:    m.now.Add(d),
+		stopped: false,
+	}
+	m.timers = append(m.timers, (*internalTimer)(t))
+	return t
+}
+
+// Now returns the current wall time on the mock clock.
+func (m *Mock) Now() time.Time {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	return m.now
+}
+
+// Since returns time since `t` using the mock clock's wall time.
+func (m *Mock) Since(t time.Time) time.Duration {
+	return m.Now().Sub(t)
+}
+
+// Until returns time until `t` using the mock clock's wall time.
+func (m *Mock) Until(t time.Time) time.Duration {
+	return t.Sub(m.Now())
+}
+
+// Sleep pauses the goroutine for the given duration on the mock clock.
+// The clock must be moved forward in a separate goroutine.
+func (m *Mock) Sleep(d time.Duration) {
+	<-m.After(d)
+}
+
+// Tick is a convenience function for Ticker().
+// It will return a ticker channel that cannot be stopped.
+func (m *Mock) Tick(d time.Duration) <-chan time.Time {
+	return m.Ticker(d).C
+}
+
+// Ticker creates a new instance of Ticker.
+func (m *Mock) Ticker(d time.Duration) *Ticker {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	ch := make(chan time.Time, 1)
+	t := &Ticker{
+		C:    ch,
+		c:    ch,
+		mock: m,
+		d:    d,
+		next: m.now.Add(d),
+	}
+	m.timers = append(m.timers, (*internalTicker)(t))
+	return t
+}
+
+// Timer creates a new instance of Timer.
+func (m *Mock) Timer(d time.Duration) *Timer {
+	m.mu.Lock()
+	ch := make(chan time.Time, 1)
+	t := &Timer{
+		C:       ch,
+		c:       ch,
+		mock:    m,
+		next:    m.now.Add(d),
+		stopped: false,
+	}
+	m.timers = append(m.timers, (*internalTimer)(t))
+	now := m.now
+	m.mu.Unlock()
+	m.runNextTimer(now)
+	return t
+}
+
+// removeClockTimer removes a timer from m.timers. m.mu MUST be held
+// when this method is called.
+func (m *Mock) removeClockTimer(t clockTimer) {
+	for i, timer := range m.timers {
+		if timer == t {
+			copy(m.timers[i:], m.timers[i+1:])
+			m.timers[len(m.timers)-1] = nil
+			m.timers = m.timers[:len(m.timers)-1]
+			break
+		}
+	}
+	sort.Sort(m.timers)
+}
+
+// clockTimer represents an object with an associated start time.
+type clockTimer interface {
+	Next() time.Time
+	Tick(time.Time)
+}
+
+// clockTimers represents a list of sortable timers.
+type clockTimers []clockTimer
+
+func (a clockTimers) Len() int           { return len(a) }
+func (a clockTimers) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) }
+
+// Timer represents a single event.
+// The current time will be sent on C, unless the timer was created by AfterFunc.
+type Timer struct {
+	C       <-chan time.Time
+	c       chan time.Time
+	timer   *time.Timer // realtime impl, if set
+	next    time.Time   // next tick time
+	mock    *Mock       // mock clock, if set
+	fn      func()      // AfterFunc function, if set
+	stopped bool        // True if stopped, false if running
+}
+
+// Stop turns off the timer.
+func (t *Timer) Stop() bool {
+	if t.timer != nil {
+		return t.timer.Stop()
+	}
+
+	t.mock.mu.Lock()
+	registered := !t.stopped
+	t.mock.removeClockTimer((*internalTimer)(t))
+	t.stopped = true
+	t.mock.mu.Unlock()
+	return registered
+}
+
+// Reset changes the expiry time of the timer.
+func (t *Timer) Reset(d time.Duration) bool {
+	if t.timer != nil {
+		return t.timer.Reset(d)
+	}
+
+	t.mock.mu.Lock()
+	t.next = t.mock.now.Add(d)
+	defer t.mock.mu.Unlock()
+
+	registered := !t.stopped
+	if t.stopped {
+		t.mock.timers = append(t.mock.timers, (*internalTimer)(t))
+	}
+
+	t.stopped = false
+	return registered
+}
+
+type internalTimer Timer
+
+func (t *internalTimer) Next() time.Time { return t.next }
+func (t *internalTimer) Tick(now time.Time) {
+	// a gosched() after ticking, to allow any consequences of the
+	// tick to complete
+	defer gosched()
+
+	t.mock.mu.Lock()
+	if t.fn != nil {
+		// defer function execution until the lock is released
+		defer func() { go t.fn() }()
+	} else {
+		t.c <- now
+	}
+	t.mock.removeClockTimer((*internalTimer)(t))
+	t.stopped = true
+	t.mock.mu.Unlock()
+}
+
+// Ticker holds a channel that receives "ticks" at regular intervals.
+type Ticker struct {
+	C       <-chan time.Time
+	c       chan time.Time
+	ticker  *time.Ticker  // realtime impl, if set
+	next    time.Time     // next tick time
+	mock    *Mock         // mock clock, if set
+	d       time.Duration // time between ticks
+	stopped bool          // True if stopped, false if running
+}
+
+// Stop turns off the ticker.
+func (t *Ticker) Stop() {
+	if t.ticker != nil {
+		t.ticker.Stop()
+	} else {
+		t.mock.mu.Lock()
+		t.mock.removeClockTimer((*internalTicker)(t))
+		t.stopped = true
+		t.mock.mu.Unlock()
+	}
+}
+
+// Reset resets the ticker to a new duration.
+func (t *Ticker) Reset(dur time.Duration) {
+	if t.ticker != nil {
+		t.ticker.Reset(dur)
+		return
+	}
+
+	t.mock.mu.Lock()
+	defer t.mock.mu.Unlock()
+
+	if t.stopped {
+		t.mock.timers = append(t.mock.timers, (*internalTicker)(t))
+		t.stopped = false
+	}
+
+	t.d = dur
+	t.next = t.mock.now.Add(dur)
+}
+
+type internalTicker Ticker
+
+func (t *internalTicker) Next() time.Time { return t.next }
+func (t *internalTicker) Tick(now time.Time) {
+	select {
+	case t.c <- now:
+	default:
+	}
+	t.mock.mu.Lock()
+	t.next = now.Add(t.d)
+	t.mock.mu.Unlock()
+	gosched()
+}
+
+// Sleep momentarily so that other goroutines can process.
+func gosched() { time.Sleep(1 * time.Millisecond) }
+
+var (
+	// type checking
+	_ Clock = &Mock{}
+)
diff --git a/vendor/github.com/benbjohnson/clock/context.go b/vendor/github.com/benbjohnson/clock/context.go
new file mode 100644
index 000000000..eb67594f2
--- /dev/null
+++ b/vendor/github.com/benbjohnson/clock/context.go
@@ -0,0 +1,86 @@
+package clock
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+)
+
+func (m *Mock) WithTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+	return m.WithDeadline(parent, m.Now().Add(timeout))
+}
+
+func (m *Mock) WithDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) {
+	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+		// The current deadline is already sooner than the new one.
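+		// A plain cancelable context is sufficient here: the parent's
+		// earlier deadline will fire first and cancel the child.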
+ return context.WithCancel(parent) + } + ctx := &timerCtx{clock: m, parent: parent, deadline: deadline, done: make(chan struct{})} + propagateCancel(parent, ctx) + dur := m.Until(deadline) + if dur <= 0 { + ctx.cancel(context.DeadlineExceeded) // deadline has already passed + return ctx, func() {} + } + ctx.Lock() + defer ctx.Unlock() + if ctx.err == nil { + ctx.timer = m.AfterFunc(dur, func() { + ctx.cancel(context.DeadlineExceeded) + }) + } + return ctx, func() { ctx.cancel(context.Canceled) } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent context.Context, child *timerCtx) { + if parent.Done() == nil { + return // parent is never canceled + } + go func() { + select { + case <-parent.Done(): + child.cancel(parent.Err()) + case <-child.Done(): + } + }() +} + +type timerCtx struct { + sync.Mutex + + clock Clock + parent context.Context + deadline time.Time + done chan struct{} + + err error + timer *Timer +} + +func (c *timerCtx) cancel(err error) { + c.Lock() + defer c.Unlock() + if c.err != nil { + return // already canceled + } + c.err = err + close(c.done) + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { return c.deadline, true } + +func (c *timerCtx) Done() <-chan struct{} { return c.done } + +func (c *timerCtx) Err() error { return c.err } + +func (c *timerCtx) Value(key interface{}) interface{} { return c.parent.Value(key) } + +func (c *timerCtx) String() string { + return fmt.Sprintf("clock.WithDeadline(%s [%s])", c.deadline, c.deadline.Sub(c.clock.Now())) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 000000000..50d95c548 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 000000000..9433004a2
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,30 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 000000000..3676ee405
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	//	duration := backoff.NextBackOff()
+	//	if duration == backoff.Stop {
+	//		// Do not retry operation.
+	//	} else {
+	//		// Sleep for duration and retry operation.
+	//	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop, when returned by NextBackOff(), indicates that no more retries
+// should be made.
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 000000000..48482330e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 000000000..aac99f196 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,216 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. 
+
+If the time elapsed since an ExponentialBackOff instance was created exceeds
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Using the default arguments below, and assuming we go over the
+MaxElapsedTime on the 10th try, the sequence for 10 tries will be:
+
+ Request #  RetryInterval (seconds)  Randomized Interval (seconds)
+
+  1          0.5                     [0.25,   0.75]
+  2          0.75                    [0.375,  1.125]
+  3          1.125                   [0.562,  1.687]
+  4          1.687                   [0.8435, 2.53]
+  5          2.53                    [1.265,  3.795]
+  6          3.795                   [1.897,  5.692]
+  7          5.692                   [2.846,  8.538]
+  8          8.538                   [4.269, 12.807]
+  9         12.807                   [6.403, 19.210]
+ 10         19.210                   backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+    InitialInterval     time.Duration
+    RandomizationFactor float64
+    Multiplier          float64
+    MaxInterval         time.Duration
+    // After MaxElapsedTime the ExponentialBackOff returns Stop.
+    // It never stops if MaxElapsedTime == 0.
+    MaxElapsedTime time.Duration
+    Stop           time.Duration
+    Clock          Clock
+
+    currentInterval time.Duration
+    startTime       time.Time
+}
+
+// Clock is an interface that returns the current time for BackOff.
+type Clock interface {
+    Now() time.Time
+}
+
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
+// Default values for ExponentialBackOff.
+const (
+    DefaultInitialInterval     = 500 * time.Millisecond
+    DefaultRandomizationFactor = 0.5
+    DefaultMultiplier          = 1.5
+    DefaultMaxInterval         = 60 * time.Second
+    DefaultMaxElapsedTime      = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
+    b := &ExponentialBackOff{
+        InitialInterval:     DefaultInitialInterval,
+        RandomizationFactor: DefaultRandomizationFactor,
+        Multiplier:          DefaultMultiplier,
+        MaxInterval:         DefaultMaxInterval,
+        MaxElapsedTime:      DefaultMaxElapsedTime,
+        Stop:                Stop,
+        Clock:               SystemClock,
+    }
+    for _, fn := range opts {
+        fn(b)
+    }
+    b.Reset()
+    return b
+}
+
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.InitialInterval = duration
+    }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.RandomizationFactor = randomizationFactor
+    }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.Multiplier = multiplier
+    }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.MaxInterval = duration
+    }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.MaxElapsedTime = duration
+    }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.Stop = duration
+    }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+    return func(ebo *ExponentialBackOff) {
+        ebo.Clock = clock
+    }
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+    return time.Now()
+}
+
+// SystemClock implements Clock interface that uses time.Now().
+var SystemClock = systemClock{}
+
+// Reset the interval back to the initial retry interval and restart the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+    b.currentInterval = b.InitialInterval
+    b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// 	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+    // Make sure we have not gone over the maximum elapsed time.
+    elapsed := b.GetElapsedTime()
+    next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+    b.incrementCurrentInterval()
+    if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+        return b.Stop
+    }
+    return next
+}
+
+// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
+// was created; it is reset when Reset() is called.
+//
+// The elapsed time is computed using time.Now().UnixNano(). It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+    return b.Clock.Now().Sub(b.startTime)
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+    // Check for overflow; if overflow is detected, set the current interval to the max interval.
+    if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+        b.currentInterval = b.MaxInterval
+    } else {
+        b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+    }
+}
+
+// Returns a random value from the following interval:
+// 	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+    if randomizationFactor == 0 {
+        return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+    }
+    var delta = randomizationFactor * float64(currentInterval)
+    var minInterval = float64(currentInterval) - delta
+    var maxInterval = float64(currentInterval) + delta
+
+    // Get a random value from the range [minInterval, maxInterval].
+    // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+    // we want a 33% chance for selecting either 1, 2 or 3.
+    return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 000000000..b9c0c51cd
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+    "errors"
+    "time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+    return func() (struct{}, error) {
+        return struct{}{}, o()
+    }
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy states that retrying should stop,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return an error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+    return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+    return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+    return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+    return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls the notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses the system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+    _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+    return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+    return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+    var (
+        err  error
+        next time.Duration
+        res  T
+    )
+    if t == nil {
+        t = &defaultTimer{}
+    }
+
+    defer func() {
+        t.Stop()
+    }()
+
+    ctx := getContext(b)
+
+    b.Reset()
+    for {
+        res, err = operation()
+        if err == nil {
+            return res, nil
+        }
+
+        var permanent *PermanentError
+        if errors.As(err, &permanent) {
+            return res, permanent.Err
+        }
+
+        if next = b.NextBackOff(); next == Stop {
+            if cerr := ctx.Err(); cerr != nil {
+                return res, cerr
+            }
+
+            return res, err
+        }
+
+        if notify != nil {
+            notify(err, next)
+        }
+
+        t.Start(next)
+
+        select {
+        case <-ctx.Done():
+            return res, ctx.Err()
+        case <-t.C():
+        }
+    }
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+    Err error
+}
+
+func (e *PermanentError) Error() string {
+    return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+    return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+    _, ok := target.(*PermanentError)
+    return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 000000000..df9d68bce --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 000000000..8120d0213 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 000000000..28d58ca37 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml new file mode 100644 index 000000000..b05e4c53f --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: 1.2 + diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
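Taken together, the backoff files above form one small, composable API: `NewExponentialBackOff` (or `ConstantBackOff`) produces the delays, `WithMaxRetries` and `WithContext` bound them, `Retry`/`RetryNotifyWithData` drive the loop, and `Permanent` short-circuits it. As a hedged sketch of how a consumer of this vendored package might wire the pieces together (the URL, the retry limit of 5, and the timeout are illustrative assumptions, not taken from this change):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Exponential delays, capped at 5 retries and bounded by ctx.
	policy := backoff.WithContext(
		backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5), ctx)

	// Hypothetical operation: retry transport errors, but treat a 4xx
	// status as permanent so the loop stops immediately.
	fetch := func() (int, error) {
		resp, err := http.Get("http://127.0.0.1:8080/healthz") // assumed endpoint
		if err != nil {
			return 0, err // transient: retried with backoff
		}
		defer resp.Body.Close()
		if resp.StatusCode >= 400 && resp.StatusCode < 500 {
			return 0, backoff.Permanent(errors.New(resp.Status))
		}
		return resp.StatusCode, nil
	}

	status, err := backoff.RetryNotifyWithData(fetch, policy,
		func(err error, next time.Duration) {
			fmt.Printf("attempt failed (%v), retrying in %s\n", err, next)
		})
	fmt.Println(status, err)
}
```

Because `doRetryNotify` consults the context both when computing the next delay and while sleeping on the timer, cancelling `ctx` aborts the loop promptly instead of waiting out the current interval.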
diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md new file mode 100644 index 000000000..d3f211818 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/README.md @@ -0,0 +1,5 @@ +hub +=== + +[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub) +[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub) diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go new file mode 100644 index 000000000..24c5efa86 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/hub.go @@ -0,0 +1,82 @@ +// Package hub provides a simple event dispatcher for publish/subscribe pattern. +package hub + +import "sync" + +type Kind int + +// Event is an interface for published events. +type Event interface { + Kind() Kind +} + +// Hub is an event dispatcher, publishes events to the subscribers +// which are subscribed for a specific event type. +// Optimized for publish calls. +// The handlers may be called in order different than they are registered. +type Hub struct { + subscribers map[Kind][]handler + m sync.RWMutex + seq uint64 +} + +type handler struct { + f func(Event) + id uint64 +} + +// Subscribe registers f for the event of a specific kind. +func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) { + var cancelled bool + h.m.Lock() + h.seq++ + id := h.seq + if h.subscribers == nil { + h.subscribers = make(map[Kind][]handler) + } + h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f}) + h.m.Unlock() + return func() { + h.m.Lock() + if cancelled { + h.m.Unlock() + return + } + cancelled = true + a := h.subscribers[kind] + for i, f := range a { + if f.id == id { + a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1] + break + } + } + if len(a) == 0 { + delete(h.subscribers, kind) + } + h.m.Unlock() + } +} + +// Publish an event to the subscribers. +func (h *Hub) Publish(e Event) { + h.m.RLock() + if handlers, ok := h.subscribers[e.Kind()]; ok { + for _, h := range handlers { + h.f(e) + } + } + h.m.RUnlock() +} + +// DefaultHub is the default Hub used by Publish and Subscribe. +var DefaultHub Hub + +// Subscribe registers f for the event of a specific kind in the DefaultHub. +func Subscribe(kind Kind, f func(Event)) (cancel func()) { + return DefaultHub.Subscribe(kind, f) +} + +// Publish an event to the subscribers in DefaultHub. 
+func Publish(e Event) {
+    DefaultHub.Publish(e)
+}
diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore
new file mode 100644
index 000000000..836562412
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml
new file mode 100644
index 000000000..ae8233c2b
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.15
+  - tip
+
+arch:
+  - amd64
+  - ppc64le
diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE
new file mode 100644
index 000000000..d565b1b1f
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md
new file mode 100644
index 000000000..3dffd26e4
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/README.md
@@ -0,0 +1,82 @@
+rpc2
+====
+
+[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2)
+[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2)
+
+rpc2 is a fork of the net/rpc package in the standard library.
+The main goal is to add bi-directional support to calls,
+which means that the server can call the methods of the client.
+This is not possible with the net/rpc package.
+To make this possible, rpc2 adds a `*Client` argument to method signatures.
+
+Install
+--------
+
+    go get github.com/cenkalti/rpc2
+
+Example server
+---------------
+
+```go
+package main
+
+import (
+    "fmt"
+    "net"
+
+    "github.com/cenkalti/rpc2"
+)
+
+type Args struct{ A, B int }
+type Reply int
+
+func main() {
+    srv := rpc2.NewServer()
+    srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error {
+
+        // Reversed call (server to client)
+        var rep Reply
+        client.Call("mult", Args{2, 3}, &rep)
+        fmt.Println("mult result:", rep)
+
+        *reply = Reply(args.A + args.B)
+        return nil
+    })
+
+    lis, _ := net.Listen("tcp", "127.0.0.1:5000")
+    srv.Accept(lis)
+}
+```
+
+Example client
+---------------
+
+```go
+package main
+
+import (
+    "fmt"
+    "net"
+
+    "github.com/cenkalti/rpc2"
+)
+
+type Args struct{ A, B int }
+type Reply int
+
+func main() {
+    conn, _ := net.Dial("tcp", "127.0.0.1:5000")
+
+    clt := rpc2.NewClient(conn)
+    clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error {
+        *reply = Reply(args.A * args.B)
+        return nil
+    })
+    go clt.Run()
+
+    var rep Reply
+    clt.Call("add", Args{1, 2}, &rep)
+    fmt.Println("add result:", rep)
+}
+```
diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go
new file mode 100644
index 000000000..cc9956976
--- /dev/null
+++ b/vendor/github.com/cenkalti/rpc2/client.go
@@ -0,0 +1,364 @@
+// Package rpc2 provides bi-directional RPC client and server similar to net/rpc.
+package rpc2
+
+import (
+    "context"
+    "errors"
+    "io"
+    "log"
+    "reflect"
+    "sync"
+)
+
+// Client represents an RPC Client.
+// There may be multiple outstanding Calls associated
+// with a single Client, and a Client may be used by
+// multiple goroutines simultaneously.
+type Client struct {
+    mutex      sync.Mutex // protects pending, seq, request
+    sending    sync.Mutex
+    request    Request // temp area used in send()
+    seq        uint64
+    pending    map[uint64]*Call
+    closing    bool
+    shutdown   bool
+    server     bool
+    codec      Codec
+    handlers   map[string]*handler
+    disconnect chan struct{}
+    State      *State // additional information to associate with client
+    blocking   bool   // whether to block request handling
+}
+
+// NewClient returns a new Client to handle requests to the
+// set of services at the other end of the connection.
+// It adds a buffer to the write side of the connection so
+// the header and payload are sent as a unit.
+func NewClient(conn io.ReadWriteCloser) *Client {
+    return NewClientWithCodec(NewGobCodec(conn))
+}
+
+// NewClientWithCodec is like NewClient but uses the specified
+// codec to encode requests and decode responses.
+func NewClientWithCodec(codec Codec) *Client {
+    return &Client{
+        codec:      codec,
+        pending:    make(map[uint64]*Call),
+        handlers:   make(map[string]*handler),
+        disconnect: make(chan struct{}),
+        seq:        1, // 0 means notification.
+    }
+}
+
+// SetBlocking puts the client in blocking mode.
+// In blocking mode, received requests are processed synchronously.
+// If you have methods that may take a long time, subsequent requests may time out.
+func (c *Client) SetBlocking(blocking bool) {
+    c.blocking = blocking
+}
+
+// Run the client's read loop.
+// You must run this method before calling any methods on the server.
+func (c *Client) Run() {
+    c.readLoop()
+}
+
+// DisconnectNotify returns a channel that is closed
+// when the client connection has gone away.
+func (c *Client) DisconnectNotify() chan struct{} {
+    return c.disconnect
+}
+
+// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics.
+func (c *Client) Handle(method string, handlerFunc interface{}) {
+    addHandler(c.handlers, method, handlerFunc)
+}
+
+// readLoop reads messages from the codec.
+// It reads a request or a response to a previous request.
+// If the message is a request, it calls the handler function.
+// If the message is a response, it sends the reply to the associated call.
+func (c *Client) readLoop() {
+    var err error
+    var req Request
+    var resp Response
+    for err == nil {
+        req = Request{}
+        resp = Response{}
+        if err = c.codec.ReadHeader(&req, &resp); err != nil {
+            break
+        }
+
+        if req.Method != "" {
+            // request comes to server
+            if err = c.readRequest(&req); err != nil {
+                debugln("rpc2: error reading request:", err.Error())
+            }
+        } else {
+            // response comes to client
+            if err = c.readResponse(&resp); err != nil {
+                debugln("rpc2: error reading response:", err.Error())
+            }
+        }
+    }
+    // Terminate pending calls.
+    c.sending.Lock()
+    c.mutex.Lock()
+    c.shutdown = true
+    closing := c.closing
+    if err == io.EOF {
+        if closing {
+            err = ErrShutdown
+        } else {
+            err = io.ErrUnexpectedEOF
+        }
+    }
+    for _, call := range c.pending {
+        call.Error = err
+        call.done()
+    }
+    c.mutex.Unlock()
+    c.sending.Unlock()
+    if err != io.EOF && !closing && !c.server {
+        debugln("rpc2: client protocol error:", err)
+    }
+    close(c.disconnect)
+    if !closing {
+        c.codec.Close()
+    }
+}
+
+func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) {
+    // Invoke the method, providing a new value for the reply.
+    replyv := reflect.New(method.replyType.Elem())
+
+    returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv})
+
+    // Do not send response if request is a notification.
+    if req.Seq == 0 {
+        return
+    }
+
+    // The return value for the method is an error.
+    errInter := returnValues[0].Interface()
+    errmsg := ""
+    if errInter != nil {
+        errmsg = errInter.(error).Error()
+    }
+    resp := &Response{
+        Seq:   req.Seq,
+        Error: errmsg,
+    }
+    if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil {
+        debugln("rpc2: error writing response:", err.Error())
+    }
+}
+
+func (c *Client) readRequest(req *Request) error {
+    method, ok := c.handlers[req.Method]
+    if !ok {
+        resp := &Response{
+            Seq:   req.Seq,
+            Error: "rpc2: can't find method " + req.Method,
+        }
+        return c.codec.WriteResponse(resp, resp)
+    }
+
+    // Decode the argument value.
+    var argv reflect.Value
+    argIsValue := false // if true, need to indirect before calling.
+    if method.argType.Kind() == reflect.Ptr {
+        argv = reflect.New(method.argType.Elem())
+    } else {
+        argv = reflect.New(method.argType)
+        argIsValue = true
+    }
+    // argv guaranteed to be a pointer now.
+    if err := c.codec.ReadRequestBody(argv.Interface()); err != nil {
+        return err
+    }
+    if argIsValue {
+        argv = argv.Elem()
+    }
+
+    if c.blocking {
+        c.handleRequest(*req, method, argv)
+    } else {
+        go c.handleRequest(*req, method, argv)
+    }
+
+    return nil
+}
+
+func (c *Client) readResponse(resp *Response) error {
+    seq := resp.Seq
+    c.mutex.Lock()
+    call := c.pending[seq]
+    delete(c.pending, seq)
+    c.mutex.Unlock()
+
+    var err error
+    switch {
+    case call == nil:
+        // We've got no pending call. That usually means that
+        // WriteRequest partially failed, and call was already
+        // removed; response is a server telling us about an
+        // error reading request body. We should still attempt
+        // to read error body, but there's no one to give it to.
+ err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + case resp.Error != "": + // We've got an error response. Give this to the request; + // any subsequent requests will get the ReadResponseBody + // error if there is one. + call.Error = ServerError(resp.Error) + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + call.done() + default: + err = c.codec.ReadResponseBody(call.Reply) + if err != nil { + call.Error = errors.New("reading body " + err.Error()) + } + call.done() + } + + return err +} + +// Close waits for active calls to finish and closes the codec. +func (c *Client) Close() error { + c.mutex.Lock() + if c.shutdown || c.closing { + c.mutex.Unlock() + return ErrShutdown + } + c.closing = true + c.mutex.Unlock() + return c.codec.Close() +} + +// Go invokes the function asynchronously. It returns the Call structure representing +// the invocation. The done channel will signal when the call is complete by returning +// the same Call object. If done is nil, Go will allocate a new channel. +// If non-nil, done must be buffered or Go will deliberately crash. +func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call { + call := new(Call) + call.Method = method + call.Args = args + call.Reply = reply + if done == nil { + done = make(chan *Call, 10) // buffered. + } else { + // If caller passes done != nil, it must arrange that + // done has enough buffer for the number of simultaneous + // RPCs that will be using that channel. If the channel + // is totally unbuffered, it's best not to run at all. + if cap(done) == 0 { + log.Panic("rpc2: done channel is unbuffered") + } + } + call.Done = done + c.send(call) + return call +} + +// CallWithContext invokes the named function, waits for it to complete, and +// returns its error status, or an error from Context timeout. +func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error { + call := c.Go(method, args, reply, make(chan *Call, 1)) + select { + case <-call.Done: + return call.Error + case <-ctx.Done(): + return ctx.Err() + } + return nil +} + +// Call invokes the named function, waits for it to complete, and returns its error status. +func (c *Client) Call(method string, args interface{}, reply interface{}) error { + return c.CallWithContext(context.Background(), method, args, reply) +} + +func (call *Call) done() { + select { + case call.Done <- call: + // ok + default: + // We don't want to block here. It is the caller's responsibility to make + // sure the channel has enough buffer space. See comment in Go(). + debugln("rpc2: discarding Call reply due to insufficient Done chan capacity") + } +} + +// ServerError represents an error that has been returned from +// the remote side of the RPC connection. +type ServerError string + +func (e ServerError) Error() string { + return string(e) +} + +// ErrShutdown is returned when the connection is closing or closed. +var ErrShutdown = errors.New("connection is shut down") + +// Call represents an active RPC. +type Call struct { + Method string // The name of the service and method to call. + Args interface{} // The argument to the function (*struct). + Reply interface{} // The reply from the function (*struct). + Error error // After completion, the error status. + Done chan *Call // Strobes when call is complete. 
+} + +func (c *Client) send(call *Call) { + c.sending.Lock() + defer c.sending.Unlock() + + // Register this call. + c.mutex.Lock() + if c.shutdown || c.closing { + call.Error = ErrShutdown + c.mutex.Unlock() + call.done() + return + } + seq := c.seq + c.seq++ + c.pending[seq] = call + c.mutex.Unlock() + + // Encode and send the request. + c.request.Seq = seq + c.request.Method = call.Method + err := c.codec.WriteRequest(&c.request, call.Args) + if err != nil { + c.mutex.Lock() + call = c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + if call != nil { + call.Error = err + call.done() + } + } +} + +// Notify sends a request to the receiver but does not wait for a return value. +func (c *Client) Notify(method string, args interface{}) error { + c.sending.Lock() + defer c.sending.Unlock() + + if c.shutdown || c.closing { + return ErrShutdown + } + + c.request.Seq = 0 + c.request.Method = method + return c.codec.WriteRequest(&c.request, args) +} diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go new file mode 100644 index 000000000..b097d9aaa --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/codec.go @@ -0,0 +1,125 @@ +package rpc2 + +import ( + "bufio" + "encoding/gob" + "io" + "sync" +) + +// A Codec implements reading and writing of RPC requests and responses. +// The client calls ReadHeader to read a message header. +// The implementation must populate either Request or Response argument. +// Depending on which argument is populated, ReadRequestBody or +// ReadResponseBody is called right after ReadHeader. +// ReadRequestBody and ReadResponseBody may be called with a nil +// argument to force the body to be read and then discarded. +type Codec interface { + // ReadHeader must read a message and populate either the request + // or the response by inspecting the incoming message. + ReadHeader(*Request, *Response) error + + // ReadRequestBody into args argument of handler function. + ReadRequestBody(interface{}) error + + // ReadResponseBody into reply argument of handler function. + ReadResponseBody(interface{}) error + + // WriteRequest must be safe for concurrent use by multiple goroutines. + WriteRequest(*Request, interface{}) error + + // WriteResponse must be safe for concurrent use by multiple goroutines. + WriteResponse(*Response, interface{}) error + + // Close is called when client/server finished with the connection. + Close() error +} + +// Request is a header written before every RPC call. +type Request struct { + Seq uint64 // sequence number chosen by client + Method string +} + +// Response is a header written before every RPC return. +type Response struct { + Seq uint64 // echoes that of the request + Error string // error, if any. +} + +type gobCodec struct { + rwc io.ReadWriteCloser + dec *gob.Decoder + enc *gob.Encoder + encBuf *bufio.Writer + mutex sync.Mutex +} + +type message struct { + Seq uint64 + Method string + Error string +} + +// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn. 
+func NewGobCodec(conn io.ReadWriteCloser) Codec { + buf := bufio.NewWriter(conn) + return &gobCodec{ + rwc: conn, + dec: gob.NewDecoder(conn), + enc: gob.NewEncoder(buf), + encBuf: buf, + } +} + +func (c *gobCodec) ReadHeader(req *Request, resp *Response) error { + var msg message + if err := c.dec.Decode(&msg); err != nil { + return err + } + + if msg.Method != "" { + req.Seq = msg.Seq + req.Method = msg.Method + } else { + resp.Seq = msg.Seq + resp.Error = msg.Error + } + return nil +} + +func (c *gobCodec) ReadRequestBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) ReadResponseBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) Close() error { + return c.rwc.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go new file mode 100644 index 000000000..ec1b62521 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/debug.go @@ -0,0 +1,12 @@ +package rpc2 + +import "log" + +// DebugLog controls the printing of internal and I/O errors. +var DebugLog = false + +func debugln(v ...interface{}) { + if DebugLog { + log.Println(v...) + } +} diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go new file mode 100644 index 000000000..87e116887 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go @@ -0,0 +1,226 @@ +// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package. +// +// Beside struct types, JSONCodec allows using positional arguments. +// Use []interface{} as the type of argument when sending and receiving methods. +// +// Positional arguments example: +// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error { +// *result = args[0].(float64) + args[1].(float64) +// return nil +// }) +// +// var result float64 +// client.Call("add", []interface{}{1, 2}, &result) +// +package jsonrpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sync" + + "github.com/cenkalti/rpc2" +) + +type jsonCodec struct { + dec *json.Decoder // for reading JSON values + enc *json.Encoder // for writing JSON values + c io.Closer + + // temporary work space + msg message + serverRequest serverRequest + clientResponse clientResponse + + // JSON-RPC clients can use arbitrary json values as request IDs. + // Package rpc expects uint64 request IDs. + // We assign uint64 sequence numbers to incoming requests + // but save the original request ID in the pending map. + // When rpc responds, we use the sequence number in + // the response to find the original request ID. + mutex sync.Mutex // protects seq, pending + pending map[uint64]*json.RawMessage + seq uint64 +} + +// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn. 
+func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec { + return &jsonCodec{ + dec: json.NewDecoder(conn), + enc: json.NewEncoder(conn), + c: conn, + pending: make(map[uint64]*json.RawMessage), + } +} + +// serverRequest and clientResponse combined +type message struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// Unmarshal to +type serverRequest struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` +} +type clientResponse struct { + Id uint64 `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// to Marshal +type serverResponse struct { + Id *json.RawMessage `json:"id"` + Result interface{} `json:"result"` + Error interface{} `json:"error"` +} +type clientRequest struct { + Method string `json:"method"` + Params interface{} `json:"params"` + Id *uint64 `json:"id"` +} + +func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error { + c.msg = message{} + if err := c.dec.Decode(&c.msg); err != nil { + return err + } + + if c.msg.Method != "" { + // request comes to server + c.serverRequest.Id = c.msg.Id + c.serverRequest.Method = c.msg.Method + c.serverRequest.Params = c.msg.Params + + req.Method = c.serverRequest.Method + + // JSON request id can be any JSON value; + // RPC package expects uint64. Translate to + // internal uint64 and save JSON on the side. + if c.serverRequest.Id == nil { + // Notification + } else { + c.mutex.Lock() + c.seq++ + c.pending[c.seq] = c.serverRequest.Id + c.serverRequest.Id = nil + req.Seq = c.seq + c.mutex.Unlock() + } + } else { + // response comes to client + err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id) + if err != nil { + return err + } + c.clientResponse.Result = c.msg.Result + c.clientResponse.Error = c.msg.Error + + resp.Error = "" + resp.Seq = c.clientResponse.Id + if c.clientResponse.Error != nil || c.clientResponse.Result == nil { + x, ok := c.clientResponse.Error.(string) + if !ok { + return fmt.Errorf("invalid error %v", c.clientResponse.Error) + } + if x == "" { + x = "unspecified error" + } + resp.Error = x + } + } + return nil +} + +var errMissingParams = errors.New("jsonrpc: request body missing params") + +func (c *jsonCodec) ReadRequestBody(x interface{}) error { + if x == nil { + return nil + } + if c.serverRequest.Params == nil { + return errMissingParams + } + + var err error + + // Check if x points to a slice of any kind + rt := reflect.TypeOf(x) + if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice { + // If it's a slice, unmarshal as is + err = json.Unmarshal(*c.serverRequest.Params, x) + } else { + // Anything else unmarshal into a slice containing x + params := &[]interface{}{x} + err = json.Unmarshal(*c.serverRequest.Params, params) + } + + return err +} + +func (c *jsonCodec) ReadResponseBody(x interface{}) error { + if x == nil { + return nil + } + return json.Unmarshal(*c.clientResponse.Result, x) +} + +func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error { + req := &clientRequest{Method: r.Method} + + // Check if param is a slice of any kind + if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice { + // If it's a slice, leave as is + req.Params = param + } else { + // Put anything else into a slice + req.Params = []interface{}{param} + } + + if r.Seq == 0 { + // Notification + req.Id = nil + } else { + 
seq := r.Seq + req.Id = &seq + } + return c.enc.Encode(req) +} + +var null = json.RawMessage([]byte("null")) + +func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error { + c.mutex.Lock() + b, ok := c.pending[r.Seq] + if !ok { + c.mutex.Unlock() + return errors.New("invalid sequence number in response") + } + delete(c.pending, r.Seq) + c.mutex.Unlock() + + if b == nil { + // Invalid request so no id. Use JSON null. + b = &null + } + resp := serverResponse{Id: b} + if r.Error == "" { + resp.Result = x + } else { + resp.Error = r.Error + } + return c.enc.Encode(resp) +} + +func (c *jsonCodec) Close() error { + return c.c.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go new file mode 100644 index 000000000..2a5be7ed6 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/server.go @@ -0,0 +1,181 @@ +package rpc2 + +import ( + "io" + "log" + "net" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/cenkalti/hub" +) + +// Precompute the reflect type for error. Can't use error directly +// because Typeof takes an empty interface value. This is annoying. +var typeOfError = reflect.TypeOf((*error)(nil)).Elem() +var typeOfClient = reflect.TypeOf((*Client)(nil)) + +const ( + clientConnected hub.Kind = iota + clientDisconnected +) + +// Server responds to RPC requests made by Client. +type Server struct { + handlers map[string]*handler + eventHub *hub.Hub +} + +type handler struct { + fn reflect.Value + argType reflect.Type + replyType reflect.Type +} + +type connectionEvent struct { + Client *Client +} + +type disconnectionEvent struct { + Client *Client +} + +func (connectionEvent) Kind() hub.Kind { return clientConnected } +func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected } + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + handlers: make(map[string]*handler), + eventHub: &hub.Hub{}, + } +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (s *Server) Handle(method string, handlerFunc interface{}) { + addHandler(s.handlers, method, handlerFunc) +} + +func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) { + if _, ok := handlers[mname]; ok { + panic("rpc2: multiple registrations for " + mname) + } + + method := reflect.ValueOf(handlerFunc) + mtype := method.Type() + // Method needs three ins: *client, *args, *reply. + if mtype.NumIn() != 3 { + log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn()) + } + // First arg must be a pointer to rpc2.Client. + clientType := mtype.In(0) + if clientType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "client type not a pointer:", clientType) + } + if clientType != typeOfClient { + log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client") + } + // Second arg need not be a pointer. + argType := mtype.In(1) + if !isExportedOrBuiltinType(argType) { + log.Panicln(mname, "argument type not exported:", argType) + } + // Third arg must be a pointer. + replyType := mtype.In(2) + if replyType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "reply type not a pointer:", replyType) + } + // Reply type must be exported. + if !isExportedOrBuiltinType(replyType) { + log.Panicln("method", mname, "reply type not exported:", replyType) + } + // Method needs one out. 
+ if mtype.NumOut() != 1 { + log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut()) + } + // The return type of the method must be error. + if returnType := mtype.Out(0); returnType != typeOfError { + log.Panicln("method", mname, "returns", returnType.String(), "not error") + } + handlers[mname] = &handler{ + fn: method, + argType: argType, + replyType: replyType, + } +} + +// Is this type exported or a builtin? +func isExportedOrBuiltinType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} + +// Is this an exported - upper case - name? +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// OnConnect registers a function to run when a client connects. +func (s *Server) OnConnect(f func(*Client)) { + s.eventHub.Subscribe(clientConnected, func(e hub.Event) { + go f(e.(connectionEvent).Client) + }) +} + +// OnDisconnect registers a function to run when a client disconnects. +func (s *Server) OnDisconnect(f func(*Client)) { + s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) { + go f(e.(disconnectionEvent).Client) + }) +} + +// Accept accepts connections on the listener and serves requests +// for each incoming connection. Accept blocks; the caller typically +// invokes it in a go statement. +func (s *Server) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Print("rpc.Serve: accept:", err.Error()) + return + } + go s.ServeConn(conn) + } +} + +// ServeConn runs the server on a single connection. +// ServeConn blocks, serving the connection until the client hangs up. +// The caller typically invokes ServeConn in a go statement. +// ServeConn uses the gob wire format (see package gob) on the +// connection. To use an alternate codec, use ServeCodec. +func (s *Server) ServeConn(conn io.ReadWriteCloser) { + s.ServeCodec(NewGobCodec(conn)) +} + +// ServeCodec is like ServeConn but uses the specified codec to +// decode requests and encode responses. +func (s *Server) ServeCodec(codec Codec) { + s.ServeCodecWithState(codec, NewState()) +} + +// ServeCodecWithState is like ServeCodec but also gives the ability to +// associate a state variable with the client that persists across RPC calls. +func (s *Server) ServeCodecWithState(codec Codec, state *State) { + defer codec.Close() + + // Client also handles the incoming connections. 
+ c := NewClientWithCodec(codec) + c.server = true + c.handlers = s.handlers + c.State = state + + s.eventHub.Publish(connectionEvent{c}) + c.Run() + s.eventHub.Publish(disconnectionEvent{c}) +} diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go new file mode 100644 index 000000000..7a4f23e6d --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/state.go @@ -0,0 +1,25 @@ +package rpc2 + +import "sync" + +type State struct { + store map[string]interface{} + m sync.RWMutex +} + +func NewState() *State { + return &State{store: make(map[string]interface{})} +} + +func (s *State) Get(key string) (value interface{}, ok bool) { + s.m.RLock() + value, ok = s.store[key] + s.m.RUnlock() + return +} + +func (s *State) Set(key string, value interface{}) { + s.m.Lock() + s.store[key] = value + s.m.Unlock() +} diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format new file mode 100644 index 000000000..0ff425760 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.clang-format @@ -0,0 +1,25 @@ +--- +Language: Cpp +BasedOnStyle: LLVM +AlignAfterOpenBracket: DontAlign +AlignConsecutiveAssignments: true +AlignEscapedNewlines: DontAlign +# mkdocs annotations in source code are written as trailing comments +# and alignment pushes these really far away from the content. +AlignTrailingComments: false +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: false +BreakBeforeBraces: Attach +IndentWidth: 4 +KeepEmptyLinesAtTheStartOfBlocks: false +TabWidth: 4 +UseTab: ForContinuationAndIndentation +ColumnLimit: 1000 +# Go compiler comments need to stay unindented. +CommentPragmas: '^go:.*' +# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64 +# and sorting makes this impossible. +SortIncludes: false +... 
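Stepping back to the rpc2 package completed above: beyond the `Handle`/`Call` flow shown in its README, the server exposes `OnConnect`/`OnDisconnect` hooks (dispatched through the `hub` event package vendored earlier), a per-connection `State` store that persists across calls, and fire-and-forget `Notify` messages (sequence number 0, so no response is written). A rough sketch under stated assumptions: the listen address and the `hello` notification method are invented for illustration.

```go
package main

import (
	"log"
	"net"

	"github.com/cenkalti/rpc2"
)

type Args struct{ A, B int }
type Reply int

func main() {
	srv := rpc2.NewServer()

	// Connection lifecycle hooks, backed by the hub dispatcher.
	srv.OnConnect(func(c *rpc2.Client) {
		c.State.Set("calls", 0)
		// One-way message; the peer sends no reply (hypothetical method).
		if err := c.Notify("hello", Args{}); err != nil {
			log.Println("notify:", err)
		}
	})
	srv.OnDisconnect(func(c *rpc2.Client) {
		log.Println("client disconnected")
	})

	srv.Handle("add", func(c *rpc2.Client, args *Args, reply *Reply) error {
		// State persists for this connection across RPC calls.
		if v, ok := c.State.Get("calls"); ok {
			c.State.Set("calls", v.(int)+1)
		}
		*reply = Reply(args.A + args.B)
		return nil
	})

	lis, err := net.Listen("tcp", "127.0.0.1:5000") // assumed address
	if err != nil {
		log.Fatal(err)
	}
	srv.Accept(lis)
}
```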
diff --git a/vendor/github.com/cilium/ebpf/.gitattributes b/vendor/github.com/cilium/ebpf/.gitattributes new file mode 100644 index 000000000..113f97b98 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitattributes @@ -0,0 +1 @@ +internal/sys/types.go linguist-generated=false diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore new file mode 100644 index 000000000..b46162b8e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.o +!*_bpf*.o + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml new file mode 100644 index 000000000..65f91b910 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -0,0 +1,13 @@ +--- +linters: + disable-all: true + enable: + - goimports + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - typecheck + - unused + - gofmt diff --git a/vendor/github.com/cilium/ebpf/.vimto.toml b/vendor/github.com/cilium/ebpf/.vimto.toml new file mode 100644 index 000000000..49a12dbc0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.vimto.toml @@ -0,0 +1,12 @@ +kernel="ghcr.io/cilium/ci-kernels:stable" +smp="cpus=2" +memory="1G" +user="root" +setup=[ + "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup", + "/bin/sh -c 'modprobe bpf_testmod || true'", + "dmesg --clear", +] +teardown=[ + "dmesg --read-clear", +] diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS new file mode 100644 index 000000000..ca65d23c0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -0,0 +1,11 @@ +* @cilium/ebpf-lib-maintainers + +features/ @rgo3 +link/ @mmat11 + +perf/ @florianl +ringbuf/ @florianl + +btf/ @dylandreimerink + +cmd/bpf2go/ @mejedi diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..8e42838c5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md new file mode 100644 index 000000000..673a9ac29 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing to ebpf-go + +Want to contribute to ebpf-go? There are a few things you need to know. + +We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started. 
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE new file mode 100644 index 000000000..c637ae99c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 000000000..a56a03e39 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,3 @@ +# Maintainers + +Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md) diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile new file mode 100644 index 000000000..d355eea71 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -0,0 +1,112 @@ +# The development version of clang is distributed as the 'clang' binary, +# while stable/released versions have a version number attached. +# Pin the default clang to a stable version. +CLANG ?= clang-17 +STRIP ?= llvm-strip-17 +OBJCOPY ?= llvm-objcopy-17 +CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ + +# Obtain an absolute path to the directory of the Makefile. +# Assume the Makefile is in the root of the repository. +REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) +UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) + +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. 
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") + +IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) +VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) + +TARGETS := \ + testdata/loader-clang-11 \ + testdata/loader-clang-14 \ + testdata/loader-$(CLANG) \ + testdata/manyprogs \ + testdata/btf_map_init \ + testdata/invalid_map \ + testdata/raw_tracepoint \ + testdata/invalid_map_static \ + testdata/invalid_btf_map_init \ + testdata/strings \ + testdata/freplace \ + testdata/fentry_fexit \ + testdata/iproute2_map_compat \ + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ + testdata/kconfig \ + testdata/kconfig_config \ + testdata/kfunc \ + testdata/invalid-kfunc \ + testdata/kfunc-kmod \ + testdata/constants \ + testdata/errors \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt \ + btf/testdata/relocs_enum \ + cmd/bpf2go/testdata/minimal + +.PHONY: all clean container-all container-shell generate + +.DEFAULT_TARGET = container-all + +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + +${CONTAINER_ENGINE} run --rm -t ${CONTAINER_RUN_ARGS} \ + -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ + --env HOME="/tmp" \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + "${IMAGE}:${VERSION}" \ + make all + +# (debug) Drop the user into a shell inside the container as root. +# Set BPF2GO_ envs to make 'make generate' just work. +container-shell: + ${CONTAINER_ENGINE} run --rm -ti \ + -v "${REPODIR}":/ebpf -w /ebpf \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + "${IMAGE}:${VERSION}" + +clean: + find "$(CURDIR)" -name "*.elf" -delete + find "$(CURDIR)" -name "*.o" -delete + +format: + find . -type f -name "*.c" | xargs clang-format -i + +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate + ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf + ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf + +generate: + go generate -run "internal/cmd/gentypes" ./... + go generate -skip "internal/cmd/gentypes" ./... + +testdata/loader-%-el.elf: testdata/loader.c + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +testdata/loader-%-eb.elf: testdata/loader.c + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +%-el.elf: %.c + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +%-eb.elf : %.c + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +.PHONY: update-kernel-deps +update-kernel-deps: export KERNEL_VERSION?=6.8 +update-kernel-deps: + ./testdata/sh/update-kernel-deps.sh + $(MAKE) container-all diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md new file mode 100644 index 000000000..85871db1a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/README.md @@ -0,0 +1,72 @@ +# eBPF + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) + +![HoneyGopher](docs/ebpf/ebpf-go.png) + +ebpf-go is a pure Go library that provides utilities for loading, compiling, and +debugging eBPF programs. It has minimal external dependencies and is intended to +be used in long running processes. + +See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF +ecosystem. 
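For orientation, the typical entry point into the library vendored here is parsing a compiled BPF ELF and loading its maps and programs into the kernel. A minimal sketch, assuming a placeholder object file `bpf_prog.o` and program name `my_prog` (neither is part of this change):

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/rlimit"
)

func main() {
	// On kernels before 5.11, lift the locked-memory limit first.
	if err := rlimit.RemoveMemlock(); err != nil {
		log.Fatal(err)
	}

	// Parse an ELF produced by clang -target bpf.
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o")
	if err != nil {
		log.Fatal(err)
	}

	// Load all maps and programs into the kernel.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	log.Println("loaded program:", coll.Programs["my_prog"])
}
```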
+ +## Getting Started + +Please take a look at our [Getting Started] guide. + +[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of +eBPF and the library, and help shape the future of the project. + +## Getting Help + +The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page. +Please search for existing threads before starting a new one. Refrain from +opening issues on the bug tracker if you're just starting out or if you're not +sure if something is a bug in the library code. + +Alternatively, [join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you +have other questions regarding the project. Note that this channel is ephemeral +and has its history erased past a certain point, which is less helpful for +others running into the same problem later. + +## Packages + +This library includes the following packages: + +* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic + assembler, allowing you to write eBPF assembly instructions directly + within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) +* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows + compiling and embedding eBPF programs written in C within Go code. As well as + compiling the C code, it auto-generates Go code for loading and manipulating + the eBPF program and map objects. +* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF + to various hooks. +* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a + `PERF_EVENT_ARRAY`. +* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a + `BPF_MAP_TYPE_RINGBUF` map. +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. +* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. +* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format. + +## Requirements + +* A version of Go that is [supported by + upstream](https://golang.org/doc/devel/release.html#policy) +* CI is run against kernel.org LTS releases; kernels >= 4.4 should work, but EOL'ed versions + are not supported. + +## License + +MIT + +### eBPF Gopher + +The eBPF honeygopher is based on the Go gopher designed by Renee French. + +[Getting Started]: https://ebpf-go.dev/guides/getting-started/ diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go new file mode 100644 index 000000000..282233d32 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu.go @@ -0,0 +1,180 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output alu_string.go -type=Source,Endianness,ALUOp + +// Source of ALU / ALU64 / Branch operations +// +// msb lsb +// +------------+-+---+ +// | op |S|cls| +// +------------+-+---+ +type Source uint16 + +const sourceMask OpCode = 0x0008 + +// Source bitmask +const ( + // InvalidSource is returned by getters when invoked + // on non ALU / branch OpCodes. + InvalidSource Source = 0xffff + // ImmSource src is from constant + ImmSource Source = 0x0000 + // RegSource src is from register + RegSource Source = 0x0008 +) + +// The Endianness of a byte swap instruction.
+type Endianness uint8 + +const endianMask = sourceMask + +// Endian flags +const ( + InvalidEndian Endianness = 0xff + // Convert to little endian + LE Endianness = 0x00 + // Convert to big endian + BE Endianness = 0x08 +) + +// ALUOp is an ALU / ALU64 operation +// +// msb lsb +// +-------+----+-+---+ +// | EXT | OP |s|cls| +// +-------+----+-+---+ +type ALUOp uint16 + +const aluMask OpCode = 0x3ff0 + +const ( + // InvalidALUOp is returned by getters when invoked + // on non ALU OpCodes + InvalidALUOp ALUOp = 0xffff + // Add - addition + Add ALUOp = 0x0000 + // Sub - subtraction + Sub ALUOp = 0x0010 + // Mul - multiplication + Mul ALUOp = 0x0020 + // Div - division + Div ALUOp = 0x0030 + // SDiv - signed division + SDiv ALUOp = Div + 0x0100 + // Or - bitwise or + Or ALUOp = 0x0040 + // And - bitwise and + And ALUOp = 0x0050 + // LSh - bitwise shift left + LSh ALUOp = 0x0060 + // RSh - bitwise shift right + RSh ALUOp = 0x0070 + // Neg - arithmetic negation (dst = -dst) + Neg ALUOp = 0x0080 + // Mod - modulo + Mod ALUOp = 0x0090 + // SMod - signed modulo + SMod ALUOp = Mod + 0x0100 + // Xor - bitwise xor + Xor ALUOp = 0x00a0 + // Mov - move value from one place to another + Mov ALUOp = 0x00b0 + // MovSX8 - move lower 8 bits, sign extended upper bits of target + MovSX8 ALUOp = Mov + 0x0100 + // MovSX16 - move lower 16 bits, sign extended upper bits of target + MovSX16 ALUOp = Mov + 0x0200 + // MovSX32 - move lower 32 bits, sign extended upper bits of target + MovSX32 ALUOp = Mov + 0x0300 + // ArSh - arithmetic shift + ArSh ALUOp = 0x00c0 + // Swap - endian conversions + Swap ALUOp = 0x00d0 +) + +// HostTo converts from host to another endianness. +func HostTo(endian Endianness, dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)), + Dst: dst, + Constant: imm, + } +} + +// BSwap unconditionally reverses the order of bytes in a register. +func BSwap(dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALU64Class).SetALUOp(Swap), + Dst: dst, + Constant: imm, + } +} + +// Op returns the OpCode for an ALU operation with a given source. +func (op ALUOp) Op(source Source) OpCode { + return OpCode(ALU64Class).SetALUOp(op).SetSource(source) +} + +// Reg emits `dst (op) src`. +func (op ALUOp) Reg(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm emits `dst (op) value`. +func (op ALUOp) Imm(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op(ImmSource), + Dst: dst, + Constant: int64(value), + } +} + +// Op32 returns the OpCode for a 32-bit ALU operation with a given source. +func (op ALUOp) Op32(source Source) OpCode { + return OpCode(ALUClass).SetALUOp(op).SetSource(source) +} + +// Reg32 emits `dst (op) src`, zeroing the upper 32 bits of dst. +func (op ALUOp) Reg32(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op32(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm32 emits `dst (op) value`, zeroing the upper 32 bits of dst.
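+// For example (illustrative), Add.Imm32(R0, 1) emits a 32-bit `w0 += 1`.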
+func (op ALUOp) Imm32(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op32(ImmSource), + Dst: dst, + Constant: int64(value), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go new file mode 100644 index 000000000..35b406bf3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -0,0 +1,117 @@ +// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSource-65535] + _ = x[ImmSource-0] + _ = x[RegSource-8] +} + +const ( + _Source_name_0 = "ImmSource" + _Source_name_1 = "RegSource" + _Source_name_2 = "InvalidSource" +) + +func (i Source) String() string { + switch { + case i == 0: + return _Source_name_0 + case i == 8: + return _Source_name_1 + case i == 65535: + return _Source_name_2 + default: + return "Source(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidEndian-255] + _ = x[LE-0] + _ = x[BE-8] +} + +const ( + _Endianness_name_0 = "LE" + _Endianness_name_1 = "BE" + _Endianness_name_2 = "InvalidEndian" +) + +func (i Endianness) String() string { + switch { + case i == 0: + return _Endianness_name_0 + case i == 8: + return _Endianness_name_1 + case i == 255: + return _Endianness_name_2 + default: + return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidALUOp-65535] + _ = x[Add-0] + _ = x[Sub-16] + _ = x[Mul-32] + _ = x[Div-48] + _ = x[SDiv-304] + _ = x[Or-64] + _ = x[And-80] + _ = x[LSh-96] + _ = x[RSh-112] + _ = x[Neg-128] + _ = x[Mod-144] + _ = x[SMod-400] + _ = x[Xor-160] + _ = x[Mov-176] + _ = x[MovSX8-432] + _ = x[MovSX16-688] + _ = x[MovSX32-944] + _ = x[ArSh-192] + _ = x[Swap-208] +} + +const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp" + +var _ALUOp_map = map[ALUOp]string{ + 0: _ALUOp_name[0:3], + 16: _ALUOp_name[3:6], + 32: _ALUOp_name[6:9], + 48: _ALUOp_name[9:12], + 64: _ALUOp_name[12:14], + 80: _ALUOp_name[14:17], + 96: _ALUOp_name[17:20], + 112: _ALUOp_name[20:23], + 128: _ALUOp_name[23:26], + 144: _ALUOp_name[26:29], + 160: _ALUOp_name[29:32], + 176: _ALUOp_name[32:35], + 192: _ALUOp_name[35:39], + 208: _ALUOp_name[39:43], + 304: _ALUOp_name[43:47], + 400: _ALUOp_name[47:51], + 432: _ALUOp_name[51:57], + 688: _ALUOp_name[57:64], + 944: _ALUOp_name[64:71], + 65535: _ALUOp_name[71:83], +} + +func (i ALUOp) String() string { + if str, ok := _ALUOp_map[i]; ok { + return str + } + return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go new file mode 100644 index 000000000..7031bdc27 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/doc.go @@ -0,0 +1,2 @@ +// Package asm is an assembler for eBPF bytecode. 
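+//
+// A minimal, illustrative sketch of a program body built with this package:
+//
+//	asm.Instructions{
+//		asm.Mov.Imm(asm.R0, 0),
+//		asm.Return(),
+//	}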
+package asm diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go new file mode 100644 index 000000000..84a40b227 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -0,0 +1,250 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc + +// BuiltinFunc is a built-in eBPF function. +type BuiltinFunc int32 + +func (_ BuiltinFunc) Max() BuiltinFunc { + return maxBuiltinFunc - 1 +} + +// eBPF built-in functions +// +// You can regenerate this list using the following gawk script: +// +// /FN\(.+\),/ { +// match($1, /\(([a-z_0-9]+),/, r) +// split(r[1], p, "_") +// printf "Fn" +// for (i in p) { +// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) +// } +// print "" +// } +// +// The script expects include/uapi/linux/bpf.h as its input. +const ( + FnUnspec BuiltinFunc = iota + FnMapLookupElem + FnMapUpdateElem + FnMapDeleteElem + FnProbeRead + FnKtimeGetNs + FnTracePrintk + FnGetPrandomU32 + FnGetSmpProcessorId + FnSkbStoreBytes + FnL3CsumReplace + FnL4CsumReplace + FnTailCall + FnCloneRedirect + FnGetCurrentPidTgid + FnGetCurrentUidGid + FnGetCurrentComm + FnGetCgroupClassid + FnSkbVlanPush + FnSkbVlanPop + FnSkbGetTunnelKey + FnSkbSetTunnelKey + FnPerfEventRead + FnRedirect + FnGetRouteRealm + FnPerfEventOutput + FnSkbLoadBytes + FnGetStackid + FnCsumDiff + FnSkbGetTunnelOpt + FnSkbSetTunnelOpt + FnSkbChangeProto + FnSkbChangeType + FnSkbUnderCgroup + FnGetHashRecalc + FnGetCurrentTask + FnProbeWriteUser + FnCurrentTaskUnderCgroup + FnSkbChangeTail + FnSkbPullData + FnCsumUpdate + FnSetHashInvalid + FnGetNumaNodeId + FnSkbChangeHead + FnXdpAdjustHead + FnProbeReadStr + FnGetSocketCookie + FnGetSocketUid + FnSetHash + FnSetsockopt + FnSkbAdjustRoom + FnRedirectMap + FnSkRedirectMap + FnSockMapUpdate + FnXdpAdjustMeta + FnPerfEventReadValue + FnPerfProgReadValue + FnGetsockopt + FnOverrideReturn + FnSockOpsCbFlagsSet + FnMsgRedirectMap + FnMsgApplyBytes + FnMsgCorkBytes + FnMsgPullData + FnBind + FnXdpAdjustTail + FnSkbGetXfrmState + FnGetStack + FnSkbLoadBytesRelative + FnFibLookup + FnSockHashUpdate + FnMsgRedirectHash + FnSkRedirectHash + FnLwtPushEncap + FnLwtSeg6StoreBytes + FnLwtSeg6AdjustSrh + FnLwtSeg6Action + FnRcRepeat + FnRcKeydown + FnSkbCgroupId + FnGetCurrentCgroupId + FnGetLocalStorage + FnSkSelectReuseport + FnSkbAncestorCgroupId + FnSkLookupTcp + FnSkLookupUdp + FnSkRelease + FnMapPushElem + FnMapPopElem + FnMapPeekElem + FnMsgPushData + FnMsgPopData + FnRcPointerRel + FnSpinLock + FnSpinUnlock + FnSkFullsock + FnTcpSock + FnSkbEcnSetCe + FnGetListenerSock + FnSkcLookupTcp + FnTcpCheckSyncookie + FnSysctlGetName + FnSysctlGetCurrentValue + FnSysctlGetNewValue + FnSysctlSetNewValue + FnStrtol + FnStrtoul + FnSkStorageGet + FnSkStorageDelete + FnSendSignal + FnTcpGenSyncookie + FnSkbOutput + FnProbeReadUser + FnProbeReadKernel + FnProbeReadUserStr + FnProbeReadKernelStr + FnTcpSendAck + FnSendSignalThread + FnJiffies64 + FnReadBranchRecords + FnGetNsCurrentPidTgid + FnXdpOutput + FnGetNetnsCookie + FnGetCurrentAncestorCgroupId + FnSkAssign + FnKtimeGetBootNs + FnSeqPrintf + FnSeqWrite + FnSkCgroupId + FnSkAncestorCgroupId + FnRingbufOutput + FnRingbufReserve + FnRingbufSubmit + FnRingbufDiscard + FnRingbufQuery + FnCsumLevel + FnSkcToTcp6Sock + FnSkcToTcpSock + FnSkcToTcpTimewaitSock + FnSkcToTcpRequestSock + FnSkcToUdp6Sock + FnGetTaskStack + FnLoadHdrOpt + FnStoreHdrOpt + FnReserveHdrOpt + FnInodeStorageGet + FnInodeStorageDelete + FnDPath +
FnCopyFromUser + FnSnprintfBtf + FnSeqPrintfBtf + FnSkbCgroupClassid + FnRedirectNeigh + FnPerCpuPtr + FnThisCpuPtr + FnRedirectPeer + FnTaskStorageGet + FnTaskStorageDelete + FnGetCurrentTaskBtf + FnBprmOptsSet + FnKtimeGetCoarseNs + FnImaInodeHash + FnSockFromFile + FnCheckMtu + FnForEachMapElem + FnSnprintf + FnSysBpf + FnBtfFindByNameKind + FnSysClose + FnTimerInit + FnTimerSetCallback + FnTimerStart + FnTimerCancel + FnGetFuncIp + FnGetAttachCookie + FnTaskPtRegs + FnGetBranchSnapshot + FnTraceVprintk + FnSkcToUnixSock + FnKallsymsLookupName + FnFindVma + FnLoop + FnStrncmp + FnGetFuncArg + FnGetFuncRet + FnGetFuncArgCnt + FnGetRetval + FnSetRetval + FnXdpGetBuffLen + FnXdpLoadBytes + FnXdpStoreBytes + FnCopyFromUserTask + FnSkbSetTstamp + FnImaFileHash + FnKptrXchg + FnMapLookupPercpuElem + FnSkcToMptcpSock + FnDynptrFromMem + FnRingbufReserveDynptr + FnRingbufSubmitDynptr + FnRingbufDiscardDynptr + FnDynptrRead + FnDynptrWrite + FnDynptrData + FnTcpRawGenSyncookieIpv4 + FnTcpRawGenSyncookieIpv6 + FnTcpRawCheckSyncookieIpv4 + FnTcpRawCheckSyncookieIpv6 + FnKtimeGetTaiNs + FnUserRingbufDrain + FnCgrpStorageGet + FnCgrpStorageDelete + + maxBuiltinFunc +) + +// Call emits a function call. +func (fn BuiltinFunc) Call() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Constant: int64(fn), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go new file mode 100644 index 000000000..47150bc4f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -0,0 +1,235 @@ +// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[FnUnspec-0] + _ = x[FnMapLookupElem-1] + _ = x[FnMapUpdateElem-2] + _ = x[FnMapDeleteElem-3] + _ = x[FnProbeRead-4] + _ = x[FnKtimeGetNs-5] + _ = x[FnTracePrintk-6] + _ = x[FnGetPrandomU32-7] + _ = x[FnGetSmpProcessorId-8] + _ = x[FnSkbStoreBytes-9] + _ = x[FnL3CsumReplace-10] + _ = x[FnL4CsumReplace-11] + _ = x[FnTailCall-12] + _ = x[FnCloneRedirect-13] + _ = x[FnGetCurrentPidTgid-14] + _ = x[FnGetCurrentUidGid-15] + _ = x[FnGetCurrentComm-16] + _ = x[FnGetCgroupClassid-17] + _ = x[FnSkbVlanPush-18] + _ = x[FnSkbVlanPop-19] + _ = x[FnSkbGetTunnelKey-20] + _ = x[FnSkbSetTunnelKey-21] + _ = x[FnPerfEventRead-22] + _ = x[FnRedirect-23] + _ = x[FnGetRouteRealm-24] + _ = x[FnPerfEventOutput-25] + _ = x[FnSkbLoadBytes-26] + _ = x[FnGetStackid-27] + _ = x[FnCsumDiff-28] + _ = x[FnSkbGetTunnelOpt-29] + _ = x[FnSkbSetTunnelOpt-30] + _ = x[FnSkbChangeProto-31] + _ = x[FnSkbChangeType-32] + _ = x[FnSkbUnderCgroup-33] + _ = x[FnGetHashRecalc-34] + _ = x[FnGetCurrentTask-35] + _ = x[FnProbeWriteUser-36] + _ = x[FnCurrentTaskUnderCgroup-37] + _ = x[FnSkbChangeTail-38] + _ = x[FnSkbPullData-39] + _ = x[FnCsumUpdate-40] + _ = x[FnSetHashInvalid-41] + _ = x[FnGetNumaNodeId-42] + _ = x[FnSkbChangeHead-43] + _ = x[FnXdpAdjustHead-44] + _ = x[FnProbeReadStr-45] + _ = x[FnGetSocketCookie-46] + _ = x[FnGetSocketUid-47] + _ = x[FnSetHash-48] + _ = x[FnSetsockopt-49] + _ = x[FnSkbAdjustRoom-50] + _ = x[FnRedirectMap-51] + _ = x[FnSkRedirectMap-52] + _ = x[FnSockMapUpdate-53] + _ = x[FnXdpAdjustMeta-54] + _ = x[FnPerfEventReadValue-55] + _ = x[FnPerfProgReadValue-56] + _ = x[FnGetsockopt-57] + _ = x[FnOverrideReturn-58] + _ = x[FnSockOpsCbFlagsSet-59] + _ = x[FnMsgRedirectMap-60] + _ = x[FnMsgApplyBytes-61] + _ = x[FnMsgCorkBytes-62] + _ = x[FnMsgPullData-63] + _ = x[FnBind-64] + _ = x[FnXdpAdjustTail-65] + _ = x[FnSkbGetXfrmState-66] + _ = x[FnGetStack-67] + _ = x[FnSkbLoadBytesRelative-68] + _ = x[FnFibLookup-69] + _ = x[FnSockHashUpdate-70] + _ = x[FnMsgRedirectHash-71] + _ = x[FnSkRedirectHash-72] + _ = x[FnLwtPushEncap-73] + _ = x[FnLwtSeg6StoreBytes-74] + _ = x[FnLwtSeg6AdjustSrh-75] + _ = x[FnLwtSeg6Action-76] + _ = x[FnRcRepeat-77] + _ = x[FnRcKeydown-78] + _ = x[FnSkbCgroupId-79] + _ = x[FnGetCurrentCgroupId-80] + _ = x[FnGetLocalStorage-81] + _ = x[FnSkSelectReuseport-82] + _ = x[FnSkbAncestorCgroupId-83] + _ = x[FnSkLookupTcp-84] + _ = x[FnSkLookupUdp-85] + _ = x[FnSkRelease-86] + _ = x[FnMapPushElem-87] + _ = x[FnMapPopElem-88] + _ = x[FnMapPeekElem-89] + _ = x[FnMsgPushData-90] + _ = x[FnMsgPopData-91] + _ = x[FnRcPointerRel-92] + _ = x[FnSpinLock-93] + _ = x[FnSpinUnlock-94] + _ = x[FnSkFullsock-95] + _ = x[FnTcpSock-96] + _ = x[FnSkbEcnSetCe-97] + _ = x[FnGetListenerSock-98] + _ = x[FnSkcLookupTcp-99] + _ = x[FnTcpCheckSyncookie-100] + _ = x[FnSysctlGetName-101] + _ = x[FnSysctlGetCurrentValue-102] + _ = x[FnSysctlGetNewValue-103] + _ = x[FnSysctlSetNewValue-104] + _ = x[FnStrtol-105] + _ = x[FnStrtoul-106] + _ = x[FnSkStorageGet-107] + _ = x[FnSkStorageDelete-108] + _ = x[FnSendSignal-109] + _ = x[FnTcpGenSyncookie-110] + _ = x[FnSkbOutput-111] + _ = x[FnProbeReadUser-112] + _ = x[FnProbeReadKernel-113] + _ = x[FnProbeReadUserStr-114] + _ = x[FnProbeReadKernelStr-115] + _ = x[FnTcpSendAck-116] + _ = x[FnSendSignalThread-117] + _ = x[FnJiffies64-118] + _ = x[FnReadBranchRecords-119] + _ = x[FnGetNsCurrentPidTgid-120] + _ = x[FnXdpOutput-121] + _ = x[FnGetNetnsCookie-122] + _ = x[FnGetCurrentAncestorCgroupId-123] + _ = x[FnSkAssign-124] + _ = x[FnKtimeGetBootNs-125] + _ = 
x[FnSeqPrintf-126] + _ = x[FnSeqWrite-127] + _ = x[FnSkCgroupId-128] + _ = x[FnSkAncestorCgroupId-129] + _ = x[FnRingbufOutput-130] + _ = x[FnRingbufReserve-131] + _ = x[FnRingbufSubmit-132] + _ = x[FnRingbufDiscard-133] + _ = x[FnRingbufQuery-134] + _ = x[FnCsumLevel-135] + _ = x[FnSkcToTcp6Sock-136] + _ = x[FnSkcToTcpSock-137] + _ = x[FnSkcToTcpTimewaitSock-138] + _ = x[FnSkcToTcpRequestSock-139] + _ = x[FnSkcToUdp6Sock-140] + _ = x[FnGetTaskStack-141] + _ = x[FnLoadHdrOpt-142] + _ = x[FnStoreHdrOpt-143] + _ = x[FnReserveHdrOpt-144] + _ = x[FnInodeStorageGet-145] + _ = x[FnInodeStorageDelete-146] + _ = x[FnDPath-147] + _ = x[FnCopyFromUser-148] + _ = x[FnSnprintfBtf-149] + _ = x[FnSeqPrintfBtf-150] + _ = x[FnSkbCgroupClassid-151] + _ = x[FnRedirectNeigh-152] + _ = x[FnPerCpuPtr-153] + _ = x[FnThisCpuPtr-154] + _ = x[FnRedirectPeer-155] + _ = x[FnTaskStorageGet-156] + _ = x[FnTaskStorageDelete-157] + _ = x[FnGetCurrentTaskBtf-158] + _ = x[FnBprmOptsSet-159] + _ = x[FnKtimeGetCoarseNs-160] + _ = x[FnImaInodeHash-161] + _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[FnTcpRawGenSyncookieIpv4-204] + _ = x[FnTcpRawGenSyncookieIpv6-205] + _ = x[FnTcpRawCheckSyncookieIpv4-206] + _ = x[FnTcpRawCheckSyncookieIpv6-207] + _ = x[FnKtimeGetTaiNs-208] + _ = x[FnUserRingbufDrain-209] + _ = x[FnCgrpStorageGet-210] + _ = x[FnCgrpStorageDelete-211] + _ = x[maxBuiltinFunc-212] +} + +const _BuiltinFunc_name = 
"FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc" + +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 
1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179} + +func (i BuiltinFunc) String() string { + if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { + return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go new file mode 100644 index 000000000..67cd39d6f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -0,0 +1,954 @@ +package asm + +import ( + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "sort" + "strings" + + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// InstructionSize is the size of a BPF instruction in bytes +const InstructionSize = 8 + +// RawInstructionOffset is an offset in units of raw BPF instructions. +type RawInstructionOffset uint64 + +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + +// Bytes returns the offset of an instruction in bytes. +func (rio RawInstructionOffset) Bytes() uint64 { + return uint64(rio) * InstructionSize +} + +// Instruction is a single eBPF instruction. +type Instruction struct { + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Metadata contains optional metadata about this instruction. + Metadata Metadata +} + +// Unmarshal decodes a BPF instruction. +func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { + return 0, err + } + + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) + } + + ins.Offset = int16(bo.Uint16(data[2:4])) + + if ins.OpCode.Class().IsALU() { + switch ins.OpCode.ALUOp() { + case Div: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SDiv) + ins.Offset = 0 + } + case Mod: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SMod) + ins.Offset = 0 + } + case Mov: + switch ins.Offset { + case 8: + ins.OpCode = ins.OpCode.SetALUOp(MovSX8) + ins.Offset = 0 + case 16: + ins.OpCode = ins.OpCode.SetALUOp(MovSX16) + ins.Offset = 0 + case 32: + ins.OpCode = ins.OpCode.SetALUOp(MovSX32) + ins.Offset = 0 + } + } + } + + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. 
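+ // For example, the raw bytes 0xffffffff must decode to -1, not 4294967295.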
+ ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if !ins.OpCode.IsDWordLoad() { + return InstructionSize, nil + } + + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { + // No Wrap, to avoid io.EOF clash + return 0, errors.New("64bit immediate is missing second half") + } + + // Require that all fields other than the value are zero. + if bo.Uint32(data[0:4]) != 0 { + return 0, errors.New("64bit immediate has non-zero fields") + } + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) + + return 2 * InstructionSize, nil +} + +// Marshal encodes a BPF instruction. +func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { + if ins.OpCode == InvalidOpCode { + return 0, errors.New("invalid opcode") + } + + isDWordLoad := ins.OpCode.IsDWordLoad() + + cons := int32(ins.Constant) + if isDWordLoad { + // Encode least significant 32bit first for 64bit operations. + cons = int32(uint32(ins.Constant)) + } + + regs, err := newBPFRegisters(ins.Dst, ins.Src, bo) + if err != nil { + return 0, fmt.Errorf("can't marshal registers: %s", err) + } + + if ins.OpCode.Class().IsALU() { + newOffset := int16(0) + switch ins.OpCode.ALUOp() { + case SDiv: + ins.OpCode = ins.OpCode.SetALUOp(Div) + newOffset = 1 + case SMod: + ins.OpCode = ins.OpCode.SetALUOp(Mod) + newOffset = 1 + case MovSX8: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 8 + case MovSX16: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 16 + case MovSX32: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 32 + } + if newOffset != 0 && ins.Offset != 0 { + return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) + } + ins.Offset = newOffset + } + + op, err := ins.OpCode.bpfOpCode() + if err != nil { + return 0, err + } + + data := make([]byte, InstructionSize) + data[0] = op + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + if !isDWordLoad { + return InstructionSize, nil + } + + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. + bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + return 2 * InstructionSize, nil +} + +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. +// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + +// RewriteMapPtr changes an instruction to use a new map fd. +// +// Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. +func (ins *Instruction) RewriteMapPtr(fd int) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { + // Preserve the offset value for direct map loads. 
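+ // The Constant field packs the value offset into its upper 32 bits and
+ // the map file descriptor into its lower 32 bits.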
+ offset := uint64(ins.Constant) & (math.MaxUint32 << 32) + rawFd := uint64(uint32(fd)) + ins.Constant = int64(offset | rawFd) +} + +// MapPtr returns the map fd for this instruction. +// +// The result is undefined if the instruction is not a load from a map, +// see IsLoadFromMap. +// +// Deprecated: use Map() instead. +func (ins *Instruction) MapPtr() int { + // If there is a map associated with the instruction, return its FD. + if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) +} + +// RewriteMapOffset changes the offset of a direct load from a map. +// +// Returns an error if the instruction is not a direct load. +func (ins *Instruction) RewriteMapOffset(offset uint32) error { + if !ins.OpCode.IsDWordLoad() { + return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) + } + + if ins.Src != PseudoMapValue { + return errors.New("not a direct load from a map") + } + + fd := uint64(ins.Constant) & math.MaxUint32 + ins.Constant = int64(uint64(offset)<<32 | fd) + return nil +} + +func (ins *Instruction) mapOffset() uint32 { + return uint32(uint64(ins.Constant) >> 32) +} + +// IsLoadFromMap returns true if the instruction loads from a map. +// +// This covers both loading the map pointer and direct map value loads. +func (ins *Instruction) IsLoadFromMap() bool { + return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue) +} + +// IsFunctionCall returns true if the instruction calls another BPF function. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsFunctionCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall +} + +// IsKfuncCall returns true if the instruction calls a kfunc. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsKfuncCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall +} + +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + +// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. +func (ins *Instruction) IsBuiltinCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 +} + +// IsConstantLoad returns true if the instruction loads a constant of the +// given size. +func (ins *Instruction) IsConstantLoad(size Size) bool { + return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0 +} + +// Format implements fmt.Formatter. 
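+// Only the %v verb is supported; any other verb prints an UNRECOGNIZED marker.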
+func (ins Instruction) Format(f fmt.State, c rune) { + if c != 'v' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) + return + } + + op := ins.OpCode + + if op == InvalidOpCode { + fmt.Fprint(f, "INVALID") + return + } + + // Omit trailing space for Exit + if op.JumpOp() == Exit { + fmt.Fprint(f, op) + return + } + + if ins.IsLoadFromMap() { + fd := ins.mapFd() + m := ins.Map() + switch ins.Src { + case PseudoMapFD: + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } + + case PseudoMapValue: + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } + } + + goto ref + } + + switch cls := op.Class(); { + case cls.isLoadOrStore(): + fmt.Fprintf(f, "%v ", op) + switch op.Mode() { + case ImmMode: + fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) + case AbsMode: + fmt.Fprintf(f, "imm: %d", ins.Constant) + case IndMode: + fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) + case MemMode, MemSXMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) + case XAddMode: + fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + } + + case cls.IsALU(): + fmt.Fprintf(f, "%v", op) + if op == Swap.Op(ImmSource) { + fmt.Fprintf(f, "%d", ins.Constant) + } + + fmt.Fprintf(f, " dst: %s ", ins.Dst) + switch { + case op.ALUOp() == Swap: + break + case op.Source() == ImmSource: + fmt.Fprintf(f, "imm: %d", ins.Constant) + default: + fmt.Fprintf(f, "src: %s", ins.Src) + } + + case cls.IsJump(): + fmt.Fprintf(f, "%v ", op) + switch jop := op.JumpOp(); jop { + case Call: + switch ins.Src { + case PseudoCall: + // bpf-to-bpf call + fmt.Fprint(f, ins.Constant) + case PseudoKfuncCall: + // kfunc call + fmt.Fprintf(f, "Kfunc(%d)", ins.Constant) + default: + fmt.Fprint(f, BuiltinFunc(ins.Constant)) + } + + case Ja: + if ins.OpCode.Class() == Jump32Class { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "off: %d", ins.Offset) + } + + default: + fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) + if op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + } + default: + fmt.Fprintf(f, "%v ", op) + } + +ref: + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) + } +} + +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + +// WithMetadata sets the given Metadata on the Instruction. e.g. to copy +// Metadata from another Instruction when replacing it. +func (ins Instruction) WithMetadata(meta Metadata) Instruction { + ins.Metadata = meta + return ins +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. 
+func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. it references an existing, +// pinned map that was opened during ELF loading. +func (ins Instruction) Map() FDer { + fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) + return fd +} + +type sourceMeta struct{} + +// WithSource adds source information about the Instruction. +func (ins Instruction) WithSource(src fmt.Stringer) Instruction { + ins.Metadata.Set(sourceMeta{}, src) + return ins +} + +// Source returns source information about the Instruction. The field is +// present when the compiler emits BTF line info about the Instruction and +// usually contains the line of source code responsible for it. +func (ins Instruction) Source() fmt.Stringer { + str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) + return str +} + +// A Comment can be passed to Instruction.WithSource to add a comment +// to an instruction. +type Comment string + +func (s Comment) String() string { + return string(s) +} + +// FDer represents a resource tied to an underlying file descriptor. +// Used as a stand-in for e.g. ebpf.Map since that type cannot be +// imported here and FD() is the only method we rely on. +type FDer interface { + FD() int +} + +// Instructions is an eBPF program. +type Instructions []Instruction + +// Unmarshal unmarshals an Instructions from a binary instruction stream. +// All instructions in insns are replaced by instructions decoded from r. +func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { + if len(*insns) > 0 { + *insns = nil + } + + var offset uint64 + for { + var ins Instruction + n, err := ins.Unmarshal(r, bo) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("offset %d: %w", offset, err) + } + + *insns = append(*insns, ins) + offset += n + } + + return nil +} + +// Name returns the name of the function insns belongs to, if any. +func (insns Instructions) Name() string { + if len(insns) == 0 { + return "" + } + return insns[0].Symbol() +} + +func (insns Instructions) String() string { + return fmt.Sprint(insns) +} + +// Size returns the amount of bytes insns would occupy in binary form. +func (insns Instructions) Size() uint64 { + var sum uint64 + for _, ins := range insns { + sum += ins.Size() + } + return sum +} + +// AssociateMap updates all Instructions that Reference the given symbol +// to point to an existing Map m instead. +// +// Returns ErrUnreferencedSymbol error if no references to symbol are found +// in insns. If symbol is anything else than the symbol name of map (e.g. +// a bpf2bpf subprogram), an error is returned. 
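+//
+// For example (illustrative), insns.AssociateMap("events", m) repoints
+// every instruction that references the "events" map symbol at m.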
+func (insns Instructions) AssociateMap(symbol string, m FDer) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if err := ins.AssociateMap(m); err != nil { + return err + } + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. +// +// Returns ErrUnreferencedSymbol if the symbol isn't used. +// +// Deprecated: use AssociateMap instead. +func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// SymbolOffsets returns the set of symbols and their offset in +// the instructions. +func (insns Instructions) SymbolOffsets() (map[string]int, error) { + offsets := make(map[string]int) + + for i, ins := range insns { + if ins.Symbol() == "" { + continue + } + + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + offsets[ins.Symbol()] = i + } + + return offsets, nil +} + +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. + continue + } + + if ins.Reference() == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) + } + + sort.Strings(result) + return result +} + +// ReferenceOffsets returns the set of references and their offset in +// the instructions. +func (insns Instructions) ReferenceOffsets() map[string][]int { + offsets := make(map[string][]int) + + for i, ins := range insns { + if ins.Reference() == "" { + continue + } + + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) + } + + return offsets +} + +// Format implements fmt.Formatter. +// +// You can control indentation of symbols by +// specifying a width. Setting a precision controls the indentation of +// instructions. +// The default character is a tab, which can be overridden by specifying +// the ' ' space flag. +func (insns Instructions) Format(f fmt.State, c rune) { + if c != 's' && c != 'v' { + fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c) + return + } + + // Precision is better in this case, because it allows + // specifying 0 padding easily. + padding, ok := f.Precision() + if !ok { + padding = 1 + } + + indent := strings.Repeat("\t", padding) + if f.Flag(' ') { + indent = strings.Repeat(" ", padding) + } + + symPadding, ok := f.Width() + if !ok { + symPadding = padding - 1 + } + if symPadding < 0 { + symPadding = 0 + } + + symIndent := strings.Repeat("\t", symPadding) + if f.Flag(' ') { + symIndent = strings.Repeat(" ", symPadding) + } + + // Guess how many digits we need at most, by assuming that all instructions + // are double wide. 
+ highestOffset := len(insns) * 2 + offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset)))) + + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } + } + fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) + } +} + +// Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. +func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + + for i, ins := range insns { + if _, err := ins.Marshal(w, bo); err != nil { + return fmt.Errorf("instruction %d: %w", i, err) + } + } + return nil +} + +// Tag calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in the kernel and so can be compared +// to ProgramInfo.Tag to figure out whether a loaded program matches +// certain instructions. +func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { + h := sha1.New() + for i, ins := range insns { + if ins.IsLoadFromMap() { + ins.Constant = 0 + } + _, err := ins.Marshal(h, bo) + if err != nil { + return "", fmt.Errorf("instruction %d: %w", i, err) + } + } + return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil +} + +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. +// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. + symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. 
+ iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1, + ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. +func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil +} + +// Iterate allows iterating a BPF program while keeping track of +// various offsets. +// +// Modifying the instruction slice will lead to undefined behaviour. +func (insns Instructions) Iterate() *InstructionIterator { + return &InstructionIterator{insns: insns} +} + +// InstructionIterator iterates over a BPF program. +type InstructionIterator struct { + insns Instructions + // The instruction in question. + Ins *Instruction + // The index of the instruction in the original instruction slice. + Index int + // The offset of the instruction in raw BPF instructions. This accounts + // for double-wide instructions. + Offset RawInstructionOffset +} + +// Next returns true as long as there are any instructions remaining. +func (iter *InstructionIterator) Next() bool { + if len(iter.insns) == 0 { + return false + } + + if iter.Ins != nil { + iter.Index++ + iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions()) + } + iter.Ins = &iter.insns[0] + iter.insns = iter.insns[1:] + return true +} + +type bpfRegisters uint8 + +func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { + switch bo { + case binary.LittleEndian: + return bpfRegisters((src << 4) | (dst & 0xF)), nil + case binary.BigEndian: + return bpfRegisters((dst << 4) | (src & 0xF)), nil + default: + return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) + } +} + +// IsUnreferencedSymbol returns true if err was caused by +// an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). +func IsUnreferencedSymbol(err error) bool { + return errors.Is(err, ErrUnreferencedSymbol) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go new file mode 100644 index 000000000..2738d736b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -0,0 +1,135 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output jump_string.go -type=JumpOp + +// JumpOp affects control flow.
+// +// msb lsb +// +----+-+---+ +// |OP |s|cls| +// +----+-+---+ +type JumpOp uint8 + +const jumpMask OpCode = 0xf0 + +const ( + // InvalidJumpOp is returned by getters when invoked + // on non branch OpCodes + InvalidJumpOp JumpOp = 0xff + // Ja jumps by offset unconditionally + Ja JumpOp = 0x00 + // JEq jumps by offset if r == imm + JEq JumpOp = 0x10 + // JGT jumps by offset if r > imm + JGT JumpOp = 0x20 + // JGE jumps by offset if r >= imm + JGE JumpOp = 0x30 + // JSet jumps by offset if r & imm + JSet JumpOp = 0x40 + // JNE jumps by offset if r != imm + JNE JumpOp = 0x50 + // JSGT jumps by offset if signed r > signed imm + JSGT JumpOp = 0x60 + // JSGE jumps by offset if signed r >= signed imm + JSGE JumpOp = 0x70 + // Call builtin or user defined function from imm + Call JumpOp = 0x80 + // Exit ends execution, with value in r0 + Exit JumpOp = 0x90 + // JLT jumps by offset if r < imm + JLT JumpOp = 0xa0 + // JLE jumps by offset if r <= imm + JLE JumpOp = 0xb0 + // JSLT jumps by offset if signed r < signed imm + JSLT JumpOp = 0xc0 + // JSLE jumps by offset if signed r <= signed imm + JSLE JumpOp = 0xd0 +) + +// Return emits an exit instruction. +// +// Requires a return value in R0. +func Return() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Exit), + } +} + +// Op returns the OpCode for a given jump source. +func (op JumpOp) Op(source Source) OpCode { + return OpCode(JumpClass).SetJumpOp(op).SetSource(source) +} + +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Reg(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Reg32(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call { + return InvalidOpCode + } + + return OpCode(class).SetJumpOp(op).SetSource(source) +} + +// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1]. +func LongJump(label string) Instruction { + return Instruction{ + OpCode: Ja.opCode(Jump32Class, ImmSource), + Constant: -1, + }.WithReference(label) +} + +// Label adjusts PC to the address of the label. 
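+//
+// For example (illustrative), Ja.Label("out") emits an unconditional jump
+// to the instruction marked WithSymbol("out").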
+func (op JumpOp) Label(label string) Instruction { + if op == Call { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go new file mode 100644 index 000000000..85a4aaffa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidJumpOp-255] + _ = x[Ja-0] + _ = x[JEq-16] + _ = x[JGT-32] + _ = x[JGE-48] + _ = x[JSet-64] + _ = x[JNE-80] + _ = x[JSGT-96] + _ = x[JSGE-112] + _ = x[Call-128] + _ = x[Exit-144] + _ = x[JLT-160] + _ = x[JLE-176] + _ = x[JSLT-192] + _ = x[JSLE-208] +} + +const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" + +var _JumpOp_map = map[JumpOp]string{ + 0: _JumpOp_name[0:2], + 16: _JumpOp_name[2:5], + 32: _JumpOp_name[5:8], + 48: _JumpOp_name[8:11], + 64: _JumpOp_name[11:15], + 80: _JumpOp_name[15:18], + 96: _JumpOp_name[18:22], + 112: _JumpOp_name[22:26], + 128: _JumpOp_name[26:30], + 144: _JumpOp_name[30:34], + 160: _JumpOp_name[34:37], + 176: _JumpOp_name[37:40], + 192: _JumpOp_name[40:44], + 208: _JumpOp_name[44:48], + 255: _JumpOp_name[48:61], +} + +func (i JumpOp) String() string { + if str, ok := _JumpOp_map[i]; ok { + return str + } + return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go new file mode 100644 index 000000000..cdb5c5cfa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -0,0 +1,225 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size + +// Mode for load and store operations +// +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ +type Mode uint8 + +const modeMask OpCode = 0xe0 + +const ( + // InvalidMode is returned by getters when invoked + // on non load / store OpCodes + InvalidMode Mode = 0xff + // ImmMode - immediate value + ImmMode Mode = 0x00 + // AbsMode - immediate value + offset + AbsMode Mode = 0x20 + // IndMode - indirect (imm+src) + IndMode Mode = 0x40 + // MemMode - load from memory + MemMode Mode = 0x60 + // MemSXMode - load from memory, sign extension + MemSXMode Mode = 0x80 + // XAddMode - add atomically across processors. + XAddMode Mode = 0xc0 +) + +// Size of load and store operations +// +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ +type Size uint8 + +const sizeMask OpCode = 0x18 + +const ( + // InvalidSize is returned by getters when invoked + // on non load / store OpCodes + InvalidSize Size = 0xff + // DWord - double word; 64 bits + DWord Size = 0x18 + // Word - word; 32 bits + Word Size = 0x00 + // Half - half-word; 16 bits + Half Size = 0x08 + // Byte - byte; 8 bits + Byte Size = 0x10 +) + +// Sizeof returns the size in bytes. 
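+// For example, Word.Sizeof() returns 4; invalid sizes return -1.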
+func (s Size) Sizeof() int { + switch s { + case DWord: + return 8 + case Word: + return 4 + case Half: + return 2 + case Byte: + return 1 + default: + return -1 + } +} + +// LoadMemOp returns the OpCode to load a value of given size from memory. +func LoadMemOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemMode).SetSize(size) +} + +// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended. +func LoadMemSXOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size) +} + +// LoadMem emits `dst = *(size *)(src + offset)`. +func LoadMem(dst, src Register, offset int16, size Size) Instruction { + return Instruction{ + OpCode: LoadMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst. +func LoadMemSX(dst, src Register, offset int16, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadMemSXOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadImmOp returns the OpCode to load an immediate of given size. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImmOp(size Size) OpCode { + return OpCode(LdClass).SetMode(ImmMode).SetSize(size) +} + +// LoadImm emits `dst = (size)value`. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImm(dst Register, value int64, size Size) Instruction { + return Instruction{ + OpCode: LoadImmOp(size), + Dst: dst, + Constant: value, + } +} + +// LoadMapPtr stores a pointer to a map in dst. +func LoadMapPtr(dst Register, fd int) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapFD, + Constant: int64(uint32(fd)), + } +} + +// LoadMapValue stores a pointer to the value at a certain offset of a map. +func LoadMapValue(dst Register, fd int, offset uint32) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd)) + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapValue, + Constant: int64(fdAndOffset), + } +} + +// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadIndOp(size Size) OpCode { + return OpCode(LdClass).SetMode(IndMode).SetSize(size) +} + +// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. +func LoadInd(dst, src Register, offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadIndOp(size), + Dst: dst, + Src: src, + Constant: int64(offset), + } +} + +// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadAbsOp(size Size) OpCode { + return OpCode(LdClass).SetMode(AbsMode).SetSize(size) +} + +// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. +func LoadAbs(offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadAbsOp(size), + Dst: R0, + Constant: int64(offset), + } +} + +// StoreMemOp returns the OpCode for storing a register of given size in memory. 
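+//
+// As an illustrative example (not additional API), spilling r1 to the eBPF
+// stack through the frame pointer looks like:
+//
+//	// *(u64 *)(r10 - 8) = r1
+//	asm.StoreMem(asm.RFP, -8, asm.R1, asm.DWord)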
+func StoreMemOp(size Size) OpCode { + return OpCode(StXClass).SetMode(MemMode).SetSize(size) +} + +// StoreMem emits `*(size *)(dst + offset) = src` +func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// StoreImmOp returns the OpCode for storing an immediate of given size in memory. +func StoreImmOp(size Size) OpCode { + return OpCode(StClass).SetMode(MemMode).SetSize(size) +} + +// StoreImm emits `*(size *)(dst + offset) = value`. +func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + return Instruction{ + OpCode: StoreImmOp(size), + Dst: dst, + Offset: offset, + Constant: value, + } +} + +// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. +func StoreXAddOp(size Size) OpCode { + return OpCode(StXClass).SetMode(XAddMode).SetSize(size) +} + +// StoreXAdd atomically adds src to *dst. +func StoreXAdd(dst, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreXAddOp(size), + Dst: dst, + Src: src, + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go new file mode 100644 index 000000000..c48080327 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -0,0 +1,84 @@ +// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidMode-255] + _ = x[ImmMode-0] + _ = x[AbsMode-32] + _ = x[IndMode-64] + _ = x[MemMode-96] + _ = x[MemSXMode-128] + _ = x[XAddMode-192] +} + +const ( + _Mode_name_0 = "ImmMode" + _Mode_name_1 = "AbsMode" + _Mode_name_2 = "IndMode" + _Mode_name_3 = "MemMode" + _Mode_name_4 = "MemSXMode" + _Mode_name_5 = "XAddMode" + _Mode_name_6 = "InvalidMode" +) + +func (i Mode) String() string { + switch { + case i == 0: + return _Mode_name_0 + case i == 32: + return _Mode_name_1 + case i == 64: + return _Mode_name_2 + case i == 96: + return _Mode_name_3 + case i == 128: + return _Mode_name_4 + case i == 192: + return _Mode_name_5 + case i == 255: + return _Mode_name_6 + default: + return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSize-255] + _ = x[DWord-24] + _ = x[Word-0] + _ = x[Half-8] + _ = x[Byte-16] +} + +const ( + _Size_name_0 = "Word" + _Size_name_1 = "Half" + _Size_name_2 = "Byte" + _Size_name_3 = "DWord" + _Size_name_4 = "InvalidSize" +) + +func (i Size) String() string { + switch { + case i == 0: + return _Size_name_0 + case i == 8: + return _Size_name_1 + case i == 16: + return _Size_name_2 + case i == 24: + return _Size_name_3 + case i == 255: + return _Size_name_4 + default: + return "Size(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 000000000..dd368a936 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. 
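+//
+// Keys are compared with ==, so an unexported key type keeps entries from
+// colliding across packages. A minimal usage sketch (lineKey is invented
+// for the example):
+//
+//	type lineKey struct{}
+//
+//	var m Metadata
+//	m.Set(lineKey{}, 42)
+//	_ = m.Get(lineKey{}) // 42
+//	m.Set(lineKey{}, nil) // removes the entry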
+type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. +// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. +func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. +func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go new file mode 100644 index 000000000..1dfd0b171 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -0,0 +1,303 @@ +package asm + +import ( + "fmt" + "strings" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output opcode_string.go -type=Class + +// Class of operations +// +// msb lsb +// +---+--+---+ +// | ?? |CLS| +// +---+--+---+ +type Class uint8 + +const classMask OpCode = 0x07 + +const ( + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. + LdClass Class = 0x00 + // LdXClass loads memory into registers. + LdXClass Class = 0x01 + // StClass stores immediate values to memory. + StClass Class = 0x02 + // StXClass stores registers to memory. + StXClass Class = 0x03 + // ALUClass describes arithmetic operators. + ALUClass Class = 0x04 + // JumpClass describes jump operators. + JumpClass Class = 0x05 + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. + Jump32Class Class = 0x06 + // ALU64Class describes arithmetic operators in 64-bit mode. + ALU64Class Class = 0x07 +) + +// IsLoad checks if this is either LdClass or LdXClass. +func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. 
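+// For example, ALU64Class.IsALU() is true, while JumpClass.IsALU() is false.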
+func (cls Class) IsALU() bool {
+ return cls == ALUClass || cls == ALU64Class
+}
+
+// IsJump checks if this is either JumpClass or Jump32Class.
+func (cls Class) IsJump() bool {
+ return cls == JumpClass || cls == Jump32Class
+}
+
+func (cls Class) isJumpOrALU() bool {
+ return cls.IsJump() || cls.IsALU()
+}
+
+// OpCode represents a single operation.
+// It is not a 1:1 mapping to real eBPF opcodes.
+//
+// The encoding varies based on a 3-bit Class:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// ??? | CLS
+//
+// For ALUClass and ALU64Class:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// OPC |S| CLS
+//
+// For LdClass, LdXClass, StClass and StXClass:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// 0 | MDE |SIZ| CLS
+//
+// For JumpClass, Jump32Class:
+//
+// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
+// 0 | OPC |S| CLS
+type OpCode uint16
+
+// InvalidOpCode is returned by setters on OpCode.
+const InvalidOpCode OpCode = 0xffff
+
+// bpfOpCode returns the actual BPF opcode.
+func (op OpCode) bpfOpCode() (byte, error) {
+ const opCodeMask = 0xff
+
+ if !valid(op, opCodeMask) {
+ return 0, fmt.Errorf("invalid opcode %x", op)
+ }
+
+ return byte(op & opCodeMask), nil
+}
+
+// rawInstructions returns the number of BPF instructions required
+// to encode this opcode.
+func (op OpCode) rawInstructions() int {
+ if op.IsDWordLoad() {
+ return 2
+ }
+ return 1
+}
+
+func (op OpCode) IsDWordLoad() bool {
+ return op == LoadImmOp(DWord)
+}
+
+// Class returns the class of operation.
+func (op OpCode) Class() Class {
+ return Class(op & classMask)
+}
+
+// Mode returns the mode for load and store operations.
+func (op OpCode) Mode() Mode {
+ if !op.Class().isLoadOrStore() {
+ return InvalidMode
+ }
+ return Mode(op & modeMask)
+}
+
+// Size returns the size for load and store operations.
+func (op OpCode) Size() Size {
+ if !op.Class().isLoadOrStore() {
+ return InvalidSize
+ }
+ return Size(op & sizeMask)
+}
+
+// Source returns the source for branch and ALU operations.
+func (op OpCode) Source() Source {
+ if !op.Class().isJumpOrALU() || op.ALUOp() == Swap {
+ return InvalidSource
+ }
+ return Source(op & sourceMask)
+}
+
+// ALUOp returns the ALUOp.
+func (op OpCode) ALUOp() ALUOp {
+ if !op.Class().IsALU() {
+ return InvalidALUOp
+ }
+ return ALUOp(op & aluMask)
+}
+
+// Endianness returns the Endianness for a byte swap instruction.
+func (op OpCode) Endianness() Endianness {
+ if op.ALUOp() != Swap {
+ return InvalidEndian
+ }
+ return Endianness(op & endianMask)
+}
+
+// JumpOp returns the JumpOp.
+// Returns InvalidJumpOp if it doesn't encode a jump.
+func (op OpCode) JumpOp() JumpOp {
+ if !op.Class().IsJump() {
+ return InvalidJumpOp
+ }
+
+ jumpOp := JumpOp(op & jumpMask)
+
+ // Some JumpOps are only supported by JumpClass, not Jump32Class.
+ if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) {
+ return InvalidJumpOp
+ }
+
+ return jumpOp
+}
+
+// SetMode sets the mode on load and store operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetMode(mode Mode) OpCode {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) {
+ return InvalidOpCode
+ }
+ return (op & ^modeMask) | OpCode(mode)
+}
+
+// SetSize sets the size on load and store operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
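+//
+// Together with SetMode, this composes load and store opcodes. As an
+// illustrative sketch, the following expression rebuilds the opcode
+// returned by LoadMemOp(Word):
+//
+//	OpCode(LdXClass).SetMode(MemMode).SetSize(Word)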
+func (op OpCode) SetSize(size Size) OpCode {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) {
+ return InvalidOpCode
+ }
+ return (op & ^sizeMask) | OpCode(size)
+}
+
+// SetSource sets the source on jump and ALU operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetSource(source Source) OpCode {
+ if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) {
+ return InvalidOpCode
+ }
+ return (op & ^sourceMask) | OpCode(source)
+}
+
+// SetALUOp sets the ALUOp on ALU operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetALUOp(alu ALUOp) OpCode {
+ if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) {
+ return InvalidOpCode
+ }
+ return (op & ^aluMask) | OpCode(alu)
+}
+
+// SetJumpOp sets the JumpOp on jump operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
+ if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) {
+ return InvalidOpCode
+ }
+
+ newOp := (op & ^jumpMask) | OpCode(jump)
+
+ // Check newOp is legal.
+ if newOp.JumpOp() == InvalidJumpOp {
+ return InvalidOpCode
+ }
+
+ return newOp
+}
+
+func (op OpCode) String() string {
+ var f strings.Builder
+
+ switch class := op.Class(); {
+ case class.isLoadOrStore():
+ f.WriteString(strings.TrimSuffix(class.String(), "Class"))
+
+ mode := op.Mode()
+ f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))
+
+ switch op.Size() {
+ case DWord:
+ f.WriteString("DW")
+ case Word:
+ f.WriteString("W")
+ case Half:
+ f.WriteString("H")
+ case Byte:
+ f.WriteString("B")
+ }
+
+ case class.IsALU():
+ if op.ALUOp() == Swap && op.Class() == ALU64Class {
+ // B to make BSwap, unconditional byte swap
+ f.WriteString("B")
+ }
+
+ f.WriteString(op.ALUOp().String())
+
+ if op.ALUOp() == Swap {
+ if op.Class() == ALUClass {
+ // Width for Endian is controlled by Constant
+ f.WriteString(op.Endianness().String())
+ }
+ } else {
+ f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
+
+ if class == ALUClass {
+ f.WriteString("32")
+ }
+ }
+
+ case class.IsJump():
+ f.WriteString(op.JumpOp().String())
+
+ if class == Jump32Class {
+ f.WriteString("32")
+ }
+
+ if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja {
+ f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
+ }
+
+ default:
+ fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
+ }
+
+ return f.String()
+}
+
+// valid returns true if all bits in value are covered by mask.
+func valid(value, mask OpCode) bool {
+ return value & ^mask == 0
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
new file mode 100644
index 000000000..58bc3e7e7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[LdClass-0] + _ = x[LdXClass-1] + _ = x[StClass-2] + _ = x[StXClass-3] + _ = x[ALUClass-4] + _ = x[JumpClass-5] + _ = x[Jump32Class-6] + _ = x[ALU64Class-7] +} + +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" + +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} + +func (i Class) String() string { + if i >= Class(len(_Class_index)-1) { + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Class_name[_Class_index[i]:_Class_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go new file mode 100644 index 000000000..457a3b8a8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/register.go @@ -0,0 +1,51 @@ +package asm + +import ( + "fmt" +) + +// Register is the source or destination of most operations. +type Register uint8 + +// R0 contains return values. +const R0 Register = 0 + +// Registers for function arguments. +const ( + R1 Register = R0 + 1 + iota + R2 + R3 + R4 + R5 +) + +// Callee saved registers preserved by function calls. +const ( + R6 Register = R5 + 1 + iota + R7 + R8 + R9 +) + +// Read-only frame pointer to access stack. +const ( + R10 Register = R9 + 1 + RFP = R10 +) + +// Pseudo registers used by 64bit loads and jumps +const ( + PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD + PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE + PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC + PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL +) + +func (r Register) String() string { + v := uint8(r) + if v == 10 { + return "rfp" + } + return fmt.Sprintf("r%d", v) +} diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 000000000..bece896bb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,79 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] + _ = x[AttachTraceKprobeMulti-42] + _ = x[AttachLSMCgroup-43] + _ = x[AttachStructOps-44] + _ = x[AttachNetfilter-45] + _ = x[AttachTCXIngress-46] + _ = x[AttachTCXEgress-47] + _ = x[AttachTraceUprobeMulti-48] + _ = x[AttachCgroupUnixConnect-49] + _ = x[AttachCgroupUnixSendmsg-50] + _ = x[AttachCgroupUnixRecvmsg-51] + _ = x[AttachCgroupUnixGetpeername-52] + _ = x[AttachCgroupUnixGetsockname-53] + _ = x[AttachNetkitPrimary-54] + _ = x[AttachNetkitPeer-55] +} + +const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeer" + +var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804} + +func (i AttachType) String() string { + if i >= AttachType(len(_AttachType_index)-1) { + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 000000000..671f680b2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,699 @@ +package btf + +import ( + 
"bufio" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. +var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") + ErrMultipleMatches = errors.New("multiple matching types") +) + +// ID represents the unique ID of a BTF object. +type ID = sys.BTFID + +// immutableTypes is a set of types which musn't be changed. +type immutableTypes struct { + // All types contained by the spec, not including types from the base in + // case the spec was parsed from split BTF. + types []Type + + // Type IDs indexed by type. + typeIDs map[Type]TypeID + + // The ID of the first type in types. + firstTypeID TypeID + + // Types indexed by essential name. + // Includes all struct flavors and types with the same name. + namedTypes map[essentialName][]TypeID + + // Byte order of the types. This affects things like struct member order + // when using bitfields. + byteOrder binary.ByteOrder +} + +func (s *immutableTypes) typeByID(id TypeID) (Type, bool) { + if id < s.firstTypeID { + return nil, false + } + + index := int(id - s.firstTypeID) + if index >= len(s.types) { + return nil, false + } + + return s.types[index], true +} + +// mutableTypes is a set of types which may be changed. +type mutableTypes struct { + imm immutableTypes + mu sync.RWMutex // protects copies below + copies map[Type]Type // map[orig]copy + copiedTypeIDs map[Type]TypeID // map[copy]origID +} + +// add a type to the set of mutable types. +// +// Copies type and all of its children once. Repeated calls with the same type +// do not copy again. +func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type { + mt.mu.RLock() + cpy, ok := mt.copies[typ] + mt.mu.RUnlock() + + if ok { + // Fast path: the type has been copied before. + return cpy + } + + // modifyGraphPreorder copies the type graph node by node, so we can't drop + // the lock in between. + mt.mu.Lock() + defer mt.mu.Unlock() + + return copyType(typ, typeIDs, mt.copies, mt.copiedTypeIDs) +} + +// copy a set of mutable types. +func (mt *mutableTypes) copy() *mutableTypes { + if mt == nil { + return nil + } + + mtCopy := &mutableTypes{ + mt.imm, + sync.RWMutex{}, + make(map[Type]Type, len(mt.copies)), + make(map[Type]TypeID, len(mt.copiedTypeIDs)), + } + + // Prevent concurrent modification of mt.copiedTypeIDs. + mt.mu.RLock() + defer mt.mu.RUnlock() + + copiesOfCopies := make(map[Type]Type, len(mt.copies)) + for orig, copy := range mt.copies { + // NB: We make a copy of copy, not orig, so that changes to mutable types + // are preserved. + copyOfCopy := copyType(copy, mt.copiedTypeIDs, copiesOfCopies, mtCopy.copiedTypeIDs) + mtCopy.copies[orig] = copyOfCopy + } + + return mtCopy +} + +func (mt *mutableTypes) typeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. 
+ return 0, nil
+ }
+
+ mt.mu.RLock()
+ defer mt.mu.RUnlock()
+
+ id, ok := mt.copiedTypeIDs[typ]
+ if !ok {
+ return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+ }
+
+ return id, nil
+}
+
+func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) {
+ immT, ok := mt.imm.typeByID(id)
+ if !ok {
+ return nil, false
+ }
+
+ return mt.add(immT, mt.imm.typeIDs), true
+}
+
+func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) {
+ immTypes := mt.imm.namedTypes[newEssentialName(name)]
+ if len(immTypes) == 0 {
+ return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+ }
+
+ // Return a copy to prevent changes to namedTypes.
+ result := make([]Type, 0, len(immTypes))
+ for _, id := range immTypes {
+ immT, ok := mt.imm.typeByID(id)
+ if !ok {
+ return nil, fmt.Errorf("no type with ID %d", id)
+ }
+
+ // Match against the full name, not just the essential one
+ // in case the type being looked up is a struct flavor.
+ if immT.TypeName() == name {
+ result = append(result, mt.add(immT, mt.imm.typeIDs))
+ }
+ }
+ return result, nil
+}
+
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
+type Spec struct {
+ *mutableTypes
+
+ // String table from ELF.
+ strings *stringTable
+}
+
+// LoadSpec opens file and calls LoadSpecFromReader on it.
+func LoadSpec(file string) (*Spec, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ return LoadSpecFromReader(fh)
+}
+
+// LoadSpecFromReader reads from an ELF or a raw BTF blob.
+//
+// Returns ErrNotFound if reading from an ELF which contains no BTF.
+func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ if bo := guessRawBTFByteOrder(rd); bo != nil {
+ return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
+ }
+
+ return nil, err
+ }
+
+ return loadSpecFromELF(file)
+}
+
+// LoadSpecAndExtInfosFromReader reads from an ELF.
+//
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
+// Returns ErrNotFound if the ELF contains no BTF.
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ spec, err := loadSpecFromELF(file)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ extInfos, err := loadExtInfosFromELF(file, spec)
+ if err != nil && !errors.Is(err, ErrNotFound) {
+ return nil, nil, err
+ }
+
+ return spec, extInfos, nil
+}
+
+// symbolOffsets extracts all symbol offsets from an ELF and indexes them by
+// section and variable name.
+//
+// References to variables in BTF data sections carry unsigned 32-bit offsets.
+// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
+// beyond this range. Since these symbols cannot be described by BTF info,
+// ignore them here.
+func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
+ symbols, err := file.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("can't read symbols: %v", err)
+ }
+
+ offsets := make(map[symbol]uint32)
+ for _, sym := range symbols {
+ if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+ // Ignore things like SHN_ABS
+ continue
+ }
+
+ if sym.Value > math.MaxUint32 {
+ // VarSecinfo offset is u32, cannot reference symbols in higher regions.
+ continue + } + + if int(sym.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section) + } + + secName := file.Sections[sym.Section].Name + offsets[symbol{secName, sym.Name}] = uint32(sym.Value) + } + + return offsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + offsets, err := symbolOffsets(file) + if err != nil { + return nil, err + } + + if btfSection.ReaderAt == nil { + return nil, fmt.Errorf("compressed BTF is not supported") + } + + spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil) + if err != nil { + return nil, err + } + + err = fixupDatasec(spec.imm.types, sectionSizes, offsets) + if err != nil { + return nil, err + } + + return spec, nil +} + +func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) { + var ( + baseStrings *stringTable + firstTypeID TypeID + err error + ) + + if base != nil { + if base.imm.firstTypeID != 0 { + return nil, fmt.Errorf("can't use split BTF as base") + } + + baseStrings = base.strings + + firstTypeID, err = base.nextTypeID() + if err != nil { + return nil, err + } + } + + types, rawStrings, err := parseBTF(btf, bo, baseStrings, base) + if err != nil { + return nil, err + } + + typeIDs, typesByName := indexTypes(types, firstTypeID) + + return &Spec{ + &mutableTypes{ + immutableTypes{ + types, + typeIDs, + firstTypeID, + typesByName, + bo, + }, + sync.RWMutex{}, + make(map[Type]Type), + make(map[Type]TypeID), + }, + rawStrings, + }, nil +} + +func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) { + namedTypes := 0 + for _, typ := range types { + if typ.TypeName() != "" { + // Do a pre-pass to figure out how big types by name has to be. + // Most types have unique names, so it's OK to ignore essentialName + // here. + namedTypes++ + } + } + + typeIDs := make(map[Type]TypeID, len(types)) + typesByName := make(map[essentialName][]TypeID, namedTypes) + + for i, typ := range types { + id := firstTypeID + TypeID(i) + typeIDs[typ] = id + + if name := newEssentialName(typ.TypeName()); name != "" { + typesByName[name] = append(typesByName[name], id) + } + } + + return typeIDs, typesByName +} + +func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { + buf := new(bufio.Reader) + for _, bo := range []binary.ByteOrder{ + binary.LittleEndian, + binary.BigEndian, + } { + buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64)) + if _, err := parseBTFHeader(buf, bo); err == nil { + return bo + } + } + + return nil +} + +// parseBTF reads a .BTF section into memory and parses it into a list of +// raw types and a string table. 
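+//
+// The section layout follows btfHeader: the header is validated first, then
+// the string table is read from stringStart() and the type data from
+// typeStart().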
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable, base *Spec) ([]Type, *stringTable, error) {
+ buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
+ header, err := parseBTFHeader(buf, bo)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
+ }
+
+ rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)),
+ baseStrings)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read type names: %w", err)
+ }
+
+ buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
+ types, err := readAndInflateTypes(buf, bo, header.TypeLen, rawStrings, base)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return types, rawStrings, nil
+}
+
+type symbol struct {
+ section string
+ name string
+}
+
+// fixupDatasec attempts to patch up missing info in Datasecs and their members by
+// supplementing them with information from the ELF headers and symbol table.
+func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
+ for _, typ := range types {
+ ds, ok := typ.(*Datasec)
+ if !ok {
+ continue
+ }
+
+ name := ds.Name
+
+ // Some Datasecs are virtual and don't have corresponding ELF sections.
+ switch name {
+ case ".ksyms":
+ // .ksyms describes forward declarations of kfunc signatures.
+ // Nothing to fix up, all sizes and offsets are 0.
+ for _, vsi := range ds.Vars {
+ _, ok := vsi.Type.(*Func)
+ if !ok {
+ // Only Funcs are supported in the .ksyms Datasec.
+ return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
+ }
+ }
+
+ continue
+ case ".kconfig":
+ // .kconfig has a size of 0 and has all members' offsets set to 0.
+ // Fix up all offsets and set the Datasec's size.
+ if err := fixupDatasecLayout(ds); err != nil {
+ return err
+ }
+
+ // Fix up extern to global linkage to avoid a BTF verifier error.
+ for _, vsi := range ds.Vars {
+ vsi.Type.(*Var).Linkage = GlobalVar
+ }
+
+ continue
+ }
+
+ if ds.Size != 0 {
+ continue
+ }
+
+ ds.Size, ok = sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ for i := range ds.Vars {
+ symName := ds.Vars[i].Type.TypeName()
+ ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
+ }
+ }
+ }
+
+ return nil
+}
+
+// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
+// alignment. It also calculates and sets ds.Size.
+func fixupDatasecLayout(ds *Datasec) error {
+ var off uint32
+
+ for i, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
+ }
+
+ size, err := Sizeof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
+ }
+ align, err := alignof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
+ }
+
+ // Align the current member based on the offset of the end of the previous
+ // member and the alignment of the current member.
+ off = internal.Align(off, uint32(align))
+
+ ds.Vars[i].Offset = off
+
+ off += uint32(size)
+ }
+
+ ds.Size = off
+
+ return nil
+}
+
+// Copy creates a copy of Spec.
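+//
+// The copy shares the immutable type data and the string table with the
+// original, while per-type mutable state is duplicated, so changes made
+// through one Spec don't leak into the other.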
+func (s *Spec) Copy() *Spec {
+ if s == nil {
+ return nil
+ }
+
+ return &Spec{
+ s.mutableTypes.copy(),
+ s.strings,
+ }
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+ if len(p) != len(sw) {
+ return 0, errors.New("size doesn't match")
+ }
+
+ return copy(sw, p), nil
+}
+
+// nextTypeID returns the next unallocated type ID or an error if there are no
+// more type IDs.
+func (s *Spec) nextTypeID() (TypeID, error) {
+ id := s.imm.firstTypeID + TypeID(len(s.imm.types))
+ if id < s.imm.firstTypeID {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+ return id, nil
+}
+
+// TypeByID returns the BTF Type with the given type ID.
+//
+// Returns an error wrapping ErrNotFound if a Type with the given ID
+// does not exist in the Spec.
+func (s *Spec) TypeByID(id TypeID) (Type, error) {
+ typ, ok := s.typeByID(id)
+ if !ok {
+ return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound)
+ }
+
+ return typ, nil
+}
+
+// TypeID returns the ID for a given Type.
+//
+// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec.
+func (s *Spec) TypeID(typ Type) (TypeID, error) {
+ return s.mutableTypes.typeID(typ)
+}
+
+// AnyTypesByName returns a list of BTF Types with the given name.
+//
+// If the BTF blob describes multiple compilation units like vmlinux, multiple
+// Types with the same name and kind can exist, but might not describe the same
+// data structure.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
+ return s.mutableTypes.anyTypesByName(name)
+}
+
+// AnyTypeByName returns a Type with the given name.
+//
+// Returns an error if multiple types of that name exist.
+func (s *Spec) AnyTypeByName(name string) (Type, error) {
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(types) > 1 {
+ return nil, fmt.Errorf("found multiple types: %v", types)
+ }
+
+ return types[0], nil
+}
+
+// TypeByName searches for a Type with a specific name. Since multiple Types
+// with the same name can exist, the parameter typ is taken to narrow down the
+// search in case of a clash.
+//
+// typ must be a non-nil pointer to an implementation of a Type. On success, the
+// address of the found Type will be copied to typ.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+// Returns an error wrapping ErrMultipleMatches if multiple candidates are found.
+func (s *Spec) TypeByName(name string, typ interface{}) error {
+ typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
+
+ // typ may be **T or *Type
+ typValue := reflect.ValueOf(typ)
+ if typValue.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", typ)
+ }
+
+ typPtr := typValue.Elem()
+ if !typPtr.CanSet() {
+ return fmt.Errorf("%T cannot be set", typ)
+ }
+
+ wanted := typPtr.Type()
+ if wanted == typeInterface {
+ // This is *Type. Unwrap the value's type.
+ wanted = typPtr.Elem().Type() + } + + if !wanted.AssignableTo(typeInterface) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + return loadRawSpec(r, internal.NativeEndian, base) +} + +// TypesIterator iterates over types of a given spec. +type TypesIterator struct { + spec *Spec + id TypeID + done bool + // The last visited type in the spec. + Type Type +} + +// Iterate returns the types iterator. +func (s *Spec) Iterate() *TypesIterator { + return &TypesIterator{spec: s, id: s.imm.firstTypeID} +} + +// Next returns true as long as there are any remaining types. +func (iter *TypesIterator) Next() bool { + if iter.done { + return false + } + + var ok bool + iter.Type, ok = iter.spec.typeByID(iter.id) + iter.id++ + iter.done = !ok + return !iter.done +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go new file mode 100644 index 000000000..f0e327abc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -0,0 +1,519 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind + +// btfKind describes a Type. +type btfKind uint8 + +// Equivalents of the BTF_KIND_* constants. +const ( + kindUnknown btfKind = iota // Unknown + kindInt // Int + kindPointer // Pointer + kindArray // Array + kindStruct // Struct + kindUnion // Union + kindEnum // Enum + kindForward // Forward + kindTypedef // Typedef + kindVolatile // Volatile + kindConst // Const + kindRestrict // Restrict + // Added ~4.20 + kindFunc // Func + kindFuncProto // FuncProto + // Added ~5.1 + kindVar // Var + kindDatasec // Datasec + // Added ~5.13 + kindFloat // Float + // Added 5.16 + kindDeclTag // DeclTag + kindTypeTag // TypeTag + // Added 6.0 + kindEnum64 // Enum64 +) + +// FuncLinkage describes BTF function linkage metadata. +type FuncLinkage int + +// Equivalent of enum btf_func_linkage. +const ( + StaticFunc FuncLinkage = iota // static + GlobalFunc // global + ExternFunc // extern +) + +// VarLinkage describes BTF variable linkage metadata. 
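+//
+// Equivalent of the BTF_VAR_STATIC, BTF_VAR_GLOBAL_ALLOCATED and
+// BTF_VAR_GLOBAL_EXTERN constants.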
+type VarLinkage int + +const ( + StaticVar VarLinkage = iota // static + GlobalVar // global + ExternVar // extern +) + +const ( + btfTypeKindShift = 24 + btfTypeKindLen = 5 + btfTypeVlenShift = 0 + btfTypeVlenMask = 16 + btfTypeKindFlagShift = 31 + btfTypeKindFlagMask = 1 +) + +var btfHeaderLen = binary.Size(&btfHeader{}) + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// typeStart returns the offset from the beginning of the .BTF section +// to the start of its type entries. +func (h *btfHeader) typeStart() int64 { + return int64(h.HdrLen + h.TypeOff) +} + +// stringStart returns the offset from the beginning of the .BTF section +// to the start of its string table. +func (h *btfHeader) stringStart() int64 { + return int64(h.HdrLen + h.StringOff) +} + +// parseBTFHeader parses the header of the .BTF section. +func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { + var header btfHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, errors.New("header length shorter than btfHeader size") + } + + if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { + return nil, fmt.Errorf("header padding: %v", err) + } + + return &header, nil +} + +var btfTypeLen = binary.Size(btfType{}) + +// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. +type btfType struct { + NameOff uint32 + /* "info" bits arrangement + * bits 0-15: vlen (e.g. # of struct's members), linkage + * bits 16-23: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + Info uint32 + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. 
+ */ + SizeType uint32 +} + +var btfTypeSize = int(unsafe.Sizeof(btfType{})) + +func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to unmarshal btfType") + } + + bt.NameOff = bo.Uint32(b[0:]) + bt.Info = bo.Uint32(b[4:]) + bt.SizeType = bo.Uint32(b[8:]) + return btfTypeSize, nil +} + +func mask(len uint32) uint32 { + return (1 << len) - 1 +} + +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + +func (bt *btfType) info(len, shift uint32) uint32 { + return readBits(bt.Info, len, shift) +} + +func (bt *btfType) setInfo(value, len, shift uint32) { + bt.Info = writeBits(bt.Info, len, shift, value) +} + +func (bt *btfType) Kind() btfKind { + return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) +} + +func (bt *btfType) SetKind(kind btfKind) { + bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) +} + +func (bt *btfType) Vlen() int { + return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetVlen(vlen int) { + bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) kindFlagBool() bool { + return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 +} + +func (bt *btfType) setKindFlagBool(set bool) { + var value uint32 + if set { + value = 1 + } + bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +// Bitfield returns true if the struct or union contain a bitfield. +func (bt *btfType) Bitfield() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetBitfield(isBitfield bool) { + bt.setKindFlagBool(isBitfield) +} + +func (bt *btfType) FwdKind() FwdKind { + return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift)) +} + +func (bt *btfType) SetFwdKind(kind FwdKind) { + bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +func (bt *btfType) Signed() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetSigned(signed bool) { + bt.setKindFlagBool(signed) +} + +func (bt *btfType) Linkage() FuncLinkage { + return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetLinkage(linkage FuncLinkage) { + bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) Type() TypeID { + // TODO: Panic here if wrong kind? + return TypeID(bt.SizeType) +} + +func (bt *btfType) SetType(id TypeID) { + bt.SizeType = uint32(id) +} + +func (bt *btfType) Size() uint32 { + // TODO: Panic here if wrong kind? + return bt.SizeType +} + +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + +func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error { + buf := make([]byte, unsafe.Sizeof(*bt)) + bo.PutUint32(buf[0:], bt.NameOff) + bo.PutUint32(buf[4:], bt.Info) + bo.PutUint32(buf[8:], bt.SizeType) + _, err := w.Write(buf) + return err +} + +type rawType struct { + btfType + data interface{} +} + +func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := rt.btfType.Marshal(w, bo); err != nil { + return err + } + + if rt.data == nil { + return nil + } + + return binary.Write(w, bo, rt.data) +} + +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
+// b = bits (bitfields) +type btfInt struct { + Raw uint32 +} + +const ( + btfIntEncodingLen = 4 + btfIntEncodingShift = 24 + btfIntOffsetLen = 8 + btfIntOffsetShift = 16 + btfIntBitsLen = 8 + btfIntBitsShift = 0 +) + +var btfIntLen = int(unsafe.Sizeof(btfInt{})) + +func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfIntLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfInt") + } + + bi.Raw = bo.Uint32(b[0:]) + return btfIntLen, nil +} + +func (bi btfInt) Encoding() IntEncoding { + return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) +} + +func (bi *btfInt) SetEncoding(e IntEncoding) { + bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) +} + +func (bi btfInt) Offset() Bits { + return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) +} + +func (bi *btfInt) SetOffset(offset uint32) { + bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) +} + +func (bi btfInt) Bits() Bits { + return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) +} + +func (bi *btfInt) SetBits(bits byte) { + bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) +} + +type btfArray struct { + Type TypeID + IndexType TypeID + Nelems uint32 +} + +var btfArrayLen = int(unsafe.Sizeof(btfArray{})) + +func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfArrayLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfArray") + } + + ba.Type = TypeID(bo.Uint32(b[0:])) + ba.IndexType = TypeID(bo.Uint32(b[4:])) + ba.Nelems = bo.Uint32(b[8:]) + return btfArrayLen, nil +} + +type btfMember struct { + NameOff uint32 + Type TypeID + Offset uint32 +} + +var btfMemberLen = int(unsafe.Sizeof(btfMember{})) + +func unmarshalBtfMembers(members []btfMember, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range members { + if off+btfMemberLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfMember %d", i) + } + + members[i].NameOff = bo.Uint32(b[off+0:]) + members[i].Type = TypeID(bo.Uint32(b[off+4:])) + members[i].Offset = bo.Uint32(b[off+8:]) + + off += btfMemberLen + } + + return off, nil +} + +type btfVarSecinfo struct { + Type TypeID + Offset uint32 + Size uint32 +} + +var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) + +func unmarshalBtfVarSecInfos(secinfos []btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range secinfos { + if off+btfVarSecinfoLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo %d", i) + } + + secinfos[i].Type = TypeID(bo.Uint32(b[off+0:])) + secinfos[i].Offset = bo.Uint32(b[off+4:]) + secinfos[i].Size = bo.Uint32(b[off+8:]) + + off += btfVarSecinfoLen + } + + return off, nil +} + +type btfVariable struct { + Linkage uint32 +} + +var btfVariableLen = int(unsafe.Sizeof(btfVariable{})) + +func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVariableLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable") + } + + bv.Linkage = bo.Uint32(b[0:]) + return btfVariableLen, nil +} + +type btfEnum struct { + NameOff uint32 + Val uint32 +} + +var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) + +func unmarshalBtfEnums(enums []btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum %d", i) + } + + enums[i].NameOff = 
bo.Uint32(b[off+0:]) + enums[i].Val = bo.Uint32(b[off+4:]) + + off += btfEnumLen + } + + return off, nil +} + +type btfEnum64 struct { + NameOff uint32 + ValLo32 uint32 + ValHi32 uint32 +} + +var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) + +func unmarshalBtfEnums64(enums []btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnum64Len > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64 %d", i) + } + + enums[i].NameOff = bo.Uint32(b[off+0:]) + enums[i].ValLo32 = bo.Uint32(b[off+4:]) + enums[i].ValHi32 = bo.Uint32(b[off+8:]) + + off += btfEnum64Len + } + + return off, nil +} + +type btfParam struct { + NameOff uint32 + Type TypeID +} + +var btfParamLen = int(unsafe.Sizeof(btfParam{})) + +func unmarshalBtfParams(params []btfParam, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range params { + if off+btfParamLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam %d", i) + } + + params[i].NameOff = bo.Uint32(b[off+0:]) + params[i].Type = TypeID(bo.Uint32(b[off+4:])) + + off += btfParamLen + } + + return off, nil +} + +type btfDeclTag struct { + ComponentIdx uint32 +} + +var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{})) + +func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfDeclTagLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag") + } + + bdt.ComponentIdx = bo.Uint32(b[0:]) + return btfDeclTagLen, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go new file mode 100644 index 000000000..b7a1b80d1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go @@ -0,0 +1,80 @@ +// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT. + +package btf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticFunc-0] + _ = x[GlobalFunc-1] + _ = x[ExternFunc-2] +} + +const _FuncLinkage_name = "staticglobalextern" + +var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i FuncLinkage) String() string { + if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { + return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticVar-0] + _ = x[GlobalVar-1] + _ = x[ExternVar-2] +} + +const _VarLinkage_name = "staticglobalextern" + +var _VarLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i VarLinkage) String() string { + if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { + return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{}
+ _ = x[kindUnknown-0]
+ _ = x[kindInt-1]
+ _ = x[kindPointer-2]
+ _ = x[kindArray-3]
+ _ = x[kindStruct-4]
+ _ = x[kindUnion-5]
+ _ = x[kindEnum-6]
+ _ = x[kindForward-7]
+ _ = x[kindTypedef-8]
+ _ = x[kindVolatile-9]
+ _ = x[kindConst-10]
+ _ = x[kindRestrict-11]
+ _ = x[kindFunc-12]
+ _ = x[kindFuncProto-13]
+ _ = x[kindVar-14]
+ _ = x[kindDatasec-15]
+ _ = x[kindFloat-16]
+ _ = x[kindDeclTag-17]
+ _ = x[kindTypeTag-18]
+ _ = x[kindEnum64-19]
+}
+
+const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
+
+var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
+
+func (i btfKind) String() string {
+ if i >= btfKind(len(_btfKind_index)-1) {
+ return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
new file mode 100644
index 000000000..ee89f9833
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -0,0 +1,1261 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "slices"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
+// A constant used when CO-RE relocation has to remove instructions.
+//
+// Taken from libbpf.
+const COREBadRelocationSentinel = 0xbad2310
+
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+ kind coreKind
+ local uint64
+ target uint64
+ // True if there is no valid fixup. The instruction is replaced with an
+ // invalid dummy.
+ poison bool
+ // True if the validation of the local value should be skipped. Used by
+ // some kinds of bitfield relocations.
+ skipLocalValidation bool
+}
+
+func (f *COREFixup) equal(other COREFixup) bool {
+ return f.local == other.local && f.target == other.target
+}
+
+func (f *COREFixup) String() string {
+ if f.poison {
+ return fmt.Sprintf("%s=poison", f.kind)
+ }
+ return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
+}
+
+func (f *COREFixup) Apply(ins *asm.Instruction) error {
+ if f.poison {
+ // Relocation is poisoned, replace the instruction with an invalid one.
+ if ins.OpCode.IsDWordLoad() {
+ // Replace a dword load with an invalid dword load to preserve instruction size.
+ *ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord)
+ } else {
+ // Replace all single-size instructions with an invalid call instruction.
+ *ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call()
+ }
+
+ // Add context to the kernel verifier output.
+ if source := ins.Source(); source != nil { + *ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source))) + } else { + *ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE")) + } + + return nil + } + + switch class := ins.OpCode.Class(); class { + case asm.LdXClass, asm.StClass, asm.StXClass: + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) + } + + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) + } + + ins.Offset = int16(f.target) + + case asm.LdClass: + if !ins.IsConstantLoad(asm.DWord) { + return fmt.Errorf("not a dword-sized immediate load") + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) + } + + ins.Constant = int64(f.target) + + case asm.ALUClass: + if ins.OpCode.ALUOp() == asm.Swap { + return fmt.Errorf("relocation against swap") + } + + fallthrough + + case asm.ALU64Class: + if src := ins.OpCode.Source(); src != asm.ImmSource { + return fmt.Errorf("invalid source %s", src) + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) + } + + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) + } + + ins.Constant = int64(f.target) + + default: + return fmt.Errorf("invalid class %s", class) + } + + return nil +} + +func (f COREFixup) isNonExistant() bool { + return f.kind.checksForExistence() && f.target == 0 +} + +// coreKind is the type of CO-RE relocation as specified in BPF source code. 
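+//
+// For example, clang typically emits reloFieldByteOffset for field accesses
+// wrapped in __builtin_preserve_access_index (the BPF_CORE_READ macro), and
+// reloEnumvalValue for bpf_core_enum_value().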
+type coreKind uint32
+
+const (
+ reloFieldByteOffset coreKind = iota /* field byte offset */
+ reloFieldByteSize /* field size in bytes */
+ reloFieldExists /* field existence in target kernel */
+ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
+ reloFieldLShiftU64 /* bitfield-specific left bitshift */
+ reloFieldRShiftU64 /* bitfield-specific right bitshift */
+ reloTypeIDLocal /* type ID in local BPF object */
+ reloTypeIDTarget /* type ID in target kernel */
+ reloTypeExists /* type existence in target kernel */
+ reloTypeSize /* type size in bytes */
+ reloEnumvalExists /* enum value existence in target kernel */
+ reloEnumvalValue /* enum value integer value */
+ reloTypeMatches /* type matches kernel type */
+)
+
+func (k coreKind) checksForExistence() bool {
+ return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches
+}
+
+func (k coreKind) String() string {
+ switch k {
+ case reloFieldByteOffset:
+ return "byte_off"
+ case reloFieldByteSize:
+ return "byte_sz"
+ case reloFieldExists:
+ return "field_exists"
+ case reloFieldSigned:
+ return "signed"
+ case reloFieldLShiftU64:
+ return "lshift_u64"
+ case reloFieldRShiftU64:
+ return "rshift_u64"
+ case reloTypeIDLocal:
+ return "local_type_id"
+ case reloTypeIDTarget:
+ return "target_type_id"
+ case reloTypeExists:
+ return "type_exists"
+ case reloTypeSize:
+ return "type_size"
+ case reloEnumvalExists:
+ return "enumval_exists"
+ case reloEnumvalValue:
+ return "enumval_value"
+ case reloTypeMatches:
+ return "type_matches"
+ default:
+ return fmt.Sprintf("unknown (%d)", k)
+ }
+}
+
+// CORERelocate calculates changes needed to adjust eBPF instructions for differences
+// in types.
+//
+// targets forms the set of types to relocate against. The first element has to be
+// BTF for vmlinux, the following elements must be types for kernel modules.
+//
+// resolveLocalTypeID is called for each local type which requires a stable TypeID.
+// Calling the function with the same type multiple times must produce the same
+// result. It is the caller's responsibility to ensure that the relocated instructions
+// are loaded with matching BTF.
+//
+// Returns a list of fixups which can be applied to instructions to make them
+// match the target type(s).
+//
+// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
+// for relos[i].
+func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
+ if len(targets) == 0 {
+ // Explicitly check for nil here since the argument used to be optional.
+ return nil, fmt.Errorf("targets must be provided")
+ }
+
+ // We can't encode type IDs that aren't for vmlinux into instructions at the
+ // moment.
+ resolveTargetTypeID := targets[0].TypeID
+
+ for _, target := range targets {
+ if bo != target.imm.byteOrder {
+ return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
+ }
+ }
+
+ type reloGroup struct {
+ relos []*CORERelocation
+ // Position of each relocation in relos.
+ indices []int
+ }
+
+ // Split relocations into per-Type lists.
+ relosByType := make(map[Type]*reloGroup)
+ result := make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind == reloTypeIDLocal {
+ // Filtering out reloTypeIDLocal here makes our lives a lot easier
+ // down the line, since it doesn't have a target at all.
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) + } + + id, err := resolveLocalTypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint64(relo.id), + target: uint64(id), + } + continue + } + + group, ok := relosByType[relo.typ] + if !ok { + group = &reloGroup{} + relosByType[relo.typ] = group + } + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) + } + + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { + return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) + } + + essentialName := newEssentialName(localTypeName) + + var targetTypes []Type + for _, target := range targets { + namedTypeIDs := target.imm.namedTypes[essentialName] + targetTypes = slices.Grow(targetTypes, len(namedTypeIDs)) + for _, id := range namedTypeIDs { + typ, err := target.TypeByID(id) + if err != nil { + return nil, err + } + + targetTypes = append(targetTypes, typ) + } + } + + fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("relocate %s: %w", localType, err) + } + + for j, index := range group.indices { + result[index] = fixups[j] + } + } + + return result, nil +} + +var errAmbiguousRelocation = errors.New("ambiguous relocation") +var errImpossibleRelocation = errors.New("impossible relocation") +var errIncompatibleTypes = errors.New("incompatible types") + +// coreCalculateFixups finds the target type that best matches all relocations. +// +// All relos must target the same type. +// +// The best target is determined by scoring: the less poisoning we have to do +// the better the target is. +func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + bestScore := len(relos) + var bestFixups []COREFixup + for _, target := range targets { + score := 0 // lower is better + fixups := make([]COREFixup, 0, len(relos)) + for _, relo := range relos { + fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err) + } + if fixup.poison || fixup.isNonExistant() { + score++ + } + fixups = append(fixups, fixup) + } + + if score > bestScore { + // We have a better target already, ignore this one. + continue + } + + if score < bestScore { + // This is the best target yet, use it. + bestScore = score + bestFixups = fixups + continue + } + + // Some other target has the same score as the current one. Make sure + // the fixups agree with each other. + for i, fixup := range bestFixups { + if !fixup.equal(fixups[i]) { + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) + } + } + } + + if bestFixups == nil { + // Nothing at all matched, probably because there are no suitable + // targets at all. + // + // Poison everything except checksForExistence. 
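+ // Existence checks resolve to "exists locally, missing in the target"
+ // (local=1, target=0) so programs can branch on the result, while all
+ // other kinds poison the instruction outright.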
+ bestFixups = make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } + } + } + + return bestFixups, nil +} + +var errNoSignedness = errors.New("no signedness") + +// coreCalculateFixup calculates the fixup given a relocation and a target type. +func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) { + fixup := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target}, nil + } + fixupWithoutValidation := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil + } + poison := func() (COREFixup, error) { + if relo.kind.checksForExistence() { + return fixup(1, 0) + } + return COREFixup{kind: relo.kind, poison: true}, nil + } + zero := COREFixup{} + + local := relo.typ + + switch relo.kind { + case reloTypeMatches: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := coreTypesMatch(local, target, nil) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + return fixup(1, 1) + + case reloTypeIDTarget, reloTypeSize, reloTypeExists: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := CheckTypeCompatibility(local, target) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloTypeExists: + return fixup(1, 1) + + case reloTypeIDTarget: + targetID, err := resolveTargetTypeID(target) + if errors.Is(err, ErrNotFound) { + // Probably a relocation trying to get the ID + // of a type from a kmod. + return poison() + } + if err != nil { + return zero, err + } + return fixup(uint64(relo.id), uint64(targetID)) + + case reloTypeSize: + localSize, err := Sizeof(local) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(target) + if err != nil { + return zero, err + } + + return fixup(uint64(localSize), uint64(targetSize)) + } + + case reloEnumvalValue, reloEnumvalExists: + localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloEnumvalExists: + return fixup(1, 1) + + case reloEnumvalValue: + return fixup(localValue.Value, targetValue.Value) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned: + if _, ok := As[*Fwd](target); ok { + // We can't relocate fields using a forward declaration, so + // skip it. If a non-forward declaration is present in the BTF + // we'll find it in one of the other iterations. 
+ return poison()
+ }
+
+ localField, targetField, err := coreFindField(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
+ f.skipLocalValidation = localField.bitfieldSize > 0
+ return f, err
+ }
+
+ switch relo.kind {
+ case reloFieldExists:
+ return fixup(1, 1)
+
+ case reloFieldByteOffset:
+ return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset)))
+
+ case reloFieldByteSize:
+ localSize, err := Sizeof(localField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+ return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize)))
+
+ case reloFieldLShiftU64:
+ var target uint64
+ if bo == binary.LittleEndian {
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint64(64 - targetField.bitfieldOffset - targetSize)
+ } else {
+ loadWidth, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
+ }
+ return fixupWithoutValidation(0, target)
+
+ case reloFieldRShiftU64:
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ return fixupWithoutValidation(0, uint64(64-targetSize))
+
+ case reloFieldSigned:
+ switch local := UnderlyingType(localField.Type).(type) {
+ case *Enum:
+ target, ok := As[*Enum](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
+ }
+
+ return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed))
+ case *Int:
+ target, ok := As[*Int](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
+ }
+
+ return fixup(
+ uint64(local.Encoding&Signed),
+ uint64(target.Encoding&Signed),
+ )
+ default:
+ return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
+ }
+ }
+ }
+
+ return zero, ErrNotSupported
+}
+
+func boolToUint64(val bool) uint64 {
+ if val {
+ return 1
+ }
+ return 0
+}
+
+/* coreAccessor contains a path through a struct. It contains at least one index.
+ *
+ * The interpretation depends on the kind of the relocation. The following is
+ * taken from struct bpf_core_relo in libbpf_internal.h:
+ *
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, string is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
+type coreAccessor []int
+
+func parseCOREAccessor(accessor string) (coreAccessor, error) {
+ if accessor == "" {
+ return nil, fmt.Errorf("empty accessor")
+ }
+
+ parts := strings.Split(accessor, ":")
+ result := make(coreAccessor, 0, len(parts))
+ for _, part := range parts {
+ // 31 bits to avoid overflowing int on 32 bit platforms.
+ index, err := strconv.ParseUint(part, 10, 31)
+ if err != nil {
+ return nil, fmt.Errorf("accessor index %q: %s", part, err)
+ }
+
+ result = append(result, int(index))
+ }
+
+ return result, nil
+}
+
+func (ca coreAccessor) String() string {
+ strs := make([]string, 0, len(ca))
+ for _, i := range ca {
+ strs = append(strs, strconv.Itoa(i))
+ }
+ return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+ e, ok := As[*Enum](t)
+ if !ok {
+ return nil, fmt.Errorf("not an enum: %s", t)
+ }
+
+ if len(ca) > 1 {
+ return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+ }
+
+ i := ca[0]
+ if i >= len(e.Values) {
+ return nil, fmt.Errorf("invalid index %d for %s", i, e)
+ }
+
+ return &e.Values[i], nil
+}
+
+// coreField represents the position of a "child" of a composite type from the
+// start of that type.
+//
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field end of field -/
+type coreField struct {
+ Type Type
+
+ // The position of the field from the start of the composite type in bytes.
+ offset uint32
+
+ // The offset of the bitfield in bits from the start of the field.
+ bitfieldOffset Bits
+
+ // The size of the bitfield in bits.
+ //
+ // Zero if the field is not a bitfield.
+ bitfieldSize Bits
+}
+
+func (cf *coreField) adjustOffsetToNthElement(n int) error {
+ if n == 0 {
+ return nil
+ }
+
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ cf.offset += uint32(n) * uint32(size)
+ return nil
+}
+
+func (cf *coreField) adjustOffsetBits(offset Bits) error {
+ align, err := alignof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ // We can compute the load offset by:
+ // 1) converting the bit offset to bytes with a flooring division.
+ // 2) dividing and multiplying that offset by the alignment, yielding the
+ // load size aligned offset.
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
+
+ // The number of bits remaining is the bit offset less the number of bits
+ // we can "skip" with the aligned offset.
+ cf.bitfieldOffset = offset - Bits(offsetBytes*8)
+
+ // We know that cf.offset is aligned to at least align since we get it
+ // from the compiler via BTF. Adding an aligned offsetBytes preserves the
+ // alignment.
+ cf.offset += offsetBytes
+ return nil
+}
+
+func (cf *coreField) sizeBits() (Bits, error) {
+ if cf.bitfieldSize > 0 {
+ return cf.bitfieldSize, nil
+ }
+
+ // Someone is trying to access a non-bitfield via a bit shift relocation.
+ // This happens when a field changes from a bitfield to a regular field
+ // between kernel versions. Synthesise the size to make the shifts work.
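+ // For example, a plain 4-byte int is reported as 32 bits, so the
+ // generated left/right shifts extract exactly those 32 bits from the
+ // wider load.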
+ size, err := Sizeof(cf.Type) + if err != nil { + return 0, err + } + return Bits(size * 8), nil +} + +// coreFindField descends into the local type using the accessor and tries to +// find an equivalent field in target at each step. +// +// Returns the field and the offset of the field from the start of +// target in bits. +func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { + local := coreField{Type: localT} + target := coreField{Type: targetT} + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) + } + + // The first index is used to offset a pointer of the base type like + // when accessing an array. + if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + var localMaybeFlex, targetMaybeFlex bool + for i, acc := range localAcc[1:] { + switch localType := UnderlyingType(local.Type).(type) { + case composite: + // For composite types acc is used to find the field in the local type, + // and then we try to find a field in target with the same name. + localMembers := localType.members() + if acc >= len(localMembers) { + return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) + } + + localMember := localMembers[acc] + if localMember.Name == "" { + localMemberType, ok := As[composite](localMember.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) + } + + // This is an anonymous struct or union, ignore it. + local = coreField{ + Type: localMemberType, + offset: local.offset + localMember.Offset.Bytes(), + } + localMaybeFlex = false + continue + } + + targetType, ok := As[composite](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) + } + + targetMember, last, err := coreFindMember(targetType, localMember.Name) + if err != nil { + return coreField{}, coreField{}, err + } + + local = coreField{ + Type: localMember.Type, + offset: local.offset, + bitfieldSize: localMember.BitfieldSize, + } + localMaybeFlex = acc == len(localMembers)-1 + + target = coreField{ + Type: targetMember.Type, + offset: target.offset, + bitfieldSize: targetMember.BitfieldSize, + } + targetMaybeFlex = last + + if local.bitfieldSize == 0 && target.bitfieldSize == 0 { + local.offset += localMember.Offset.Bytes() + target.offset += targetMember.Offset.Bytes() + break + } + + // Either of the members is a bitfield. Make sure we're at the + // end of the accessor. + if next := i + 1; next < len(localAcc[1:]) { + return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") + } + + if err := local.adjustOffsetBits(localMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetBits(targetMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + case *Array: + // For arrays, acc is the index in the target. 
+ targetType, ok := As[*Array](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) + } + + if localType.Nelems == 0 && !localMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") + } + if targetType.Nelems == 0 && !targetMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") + } + + if localType.Nelems > 0 && acc >= int(localType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) + } + if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) + } + + local = coreField{ + Type: localType.Type, + offset: local.offset, + } + localMaybeFlex = false + + if err := local.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + target = coreField{ + Type: targetType.Type, + offset: target.offset, + } + targetMaybeFlex = false + + if err := target.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + default: + return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) + } + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, err + } + } + + return local, target, nil +} + +// coreFindMember finds a member in a composite type while handling anonymous +// structs and unions. +func coreFindMember(typ composite, name string) (Member, bool, error) { + if name == "" { + return Member{}, false, errors.New("can't search for anonymous member") + } + + type offsetTarget struct { + composite + offset Bits + } + + targets := []offsetTarget{{typ, 0}} + visited := make(map[composite]bool) + + for i := 0; i < len(targets); i++ { + target := targets[i] + + // Only visit targets once to prevent infinite recursion. + if visited[target] { + continue + } + if len(visited) >= maxResolveDepth { + // This check is different than libbpf, which restricts the entire + // path to BPF_CORE_SPEC_MAX_LEN items. + return Member{}, false, fmt.Errorf("type is nested too deep") + } + visited[target] = true + + members := target.members() + for j, member := range members { + if member.Name == name { + // NB: This is safe because member is a copy. + member.Offset += target.offset + return member, j == len(members)-1, nil + } + + // The names don't match, but this member could be an anonymous struct + // or union. + if member.Name != "" { + continue + } + + comp, ok := As[composite](member.Type) + if !ok { + return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) + } + + targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) + } + } + + return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) +} + +// coreFindEnumValue follows localAcc to find the equivalent enum value in target. 
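+// The comparison uses essential names, so a local value FOO___v1 matches a
+// target value named FOO. The accessor is a single index into the local
+// enum, e.g. the accessor "1" selects Values[1].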
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+ localValue, err := localAcc.enumValue(local)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ targetEnum, ok := As[*Enum](target)
+ if !ok {
+ return nil, nil, errImpossibleRelocation
+ }
+
+ localName := newEssentialName(localValue.Name)
+ for i, targetValue := range targetEnum.Values {
+ if newEssentialName(targetValue.Name) != localName {
+ continue
+ }
+
+ return localValue, &targetEnum.Values[i], nil
+ }
+
+ return nil, nil, errImpossibleRelocation
+}
+
+// CheckTypeCompatibility checks local and target types for compatibility according to CO-RE rules.
+//
+// Only layout compatibility is checked, ignoring names of the root type.
+func CheckTypeCompatibility(localType Type, targetType Type) error {
+ return coreAreTypesCompatible(localType, targetType, nil)
+}
+
+type pair struct {
+ A, B Type
+}
+
+/* The comment below is from bpf_core_types_are_compat in libbpf.c:
+ *
+ * Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follows slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errIncompatibleTypes if types are not compatible.
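+ *
+ * For example, `struct a { int x; }` is compatible with
+ * `struct b { long y[4]; }` (both are structs; members are not compared
+ * here), while a struct is never compatible with a union.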
+ */ +func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := localType.(type) { + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: + return nil + + case *Pointer: + tv := targetType.(*Pointer) + return coreAreTypesCompatible(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil { + return err + } + + return coreAreTypesCompatible(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil { + return err + } + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, localParam := range lv.Params { + targetParam := tv.Params[i] + if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil { + return err + } + } + + return nil + + default: + return fmt.Errorf("unsupported type %T", localType) + } +} + +/* coreAreMembersCompatible checks two types for field-based relocation compatibility. + * + * The comment below is from bpf_core_fields_are_compat in libbpf.c: + * + * Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * [ NB: coreAreMembersCompatible doesn't recurse, this check is done + * by coreFindField. ] + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if the members are not compatible. 
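+ *
+ * For example, an `enum state` member is compatible with an
+ * `enum state___v2` member (the ___ flavor suffix is ignored), but never
+ * with an `int` member.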
+ */ +func coreAreMembersCompatible(localType Type, targetType Type) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + _, lok := localType.(composite) + _, tok := targetType.(composite) + if lok && tok { + return nil + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := localType.(type) { + case *Array, *Pointer, *Float, *Int: + return nil + + case *Enum: + tv := targetType.(*Enum) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + case *Fwd: + tv := targetType.(*Fwd) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + default: + return fmt.Errorf("type %s: %w", localType, ErrNotSupported) + } +} + +// coreEssentialNamesMatch compares two names while ignoring their flavour suffix. +// +// This should only be used on names which are in the global scope, like struct +// names, typedefs or enum values. +func coreEssentialNamesMatch(a, b string) bool { + if a == "" || b == "" { + // allow anonymous and named type to match + return true + } + + return newEssentialName(a) == newEssentialName(b) +} + +/* The comment below is from __bpf_core_types_match in relo_core.c: + * + * Check that two types "match". This function assumes that root types were + * already checked for name match. + * + * The matching relation is defined as follows: + * - modifiers and typedefs are stripped (and, hence, effectively ignored) + * - generally speaking types need to be of same kind (struct vs. struct, union + * vs. union, etc.) + * - exceptions are struct/union behind a pointer which could also match a + * forward declaration of a struct or union, respectively, and enum vs. 
+ * enum64 (see below)
+ * Then, depending on type:
+ * - integers:
+ * - match if size and signedness match
+ * - arrays & pointers:
+ * - target types are recursively matched
+ * - structs & unions:
+ * - local members need to exist in target with the same name
+ * - for each member we recursively check match unless it is already behind a
+ * pointer, in which case we only check matching names and compatible kind
+ * - enums:
+ * - local variants have to have a match in target by symbolic name (but not
+ * numeric value)
+ * - size has to match (but enum may match enum64 and vice versa)
+ * - function pointers:
+ * - number and position of arguments in local type has to match target
+ * - for each argument and the return value we recursively check match
+ */
+func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error {
+ localType = UnderlyingType(localType)
+ targetType = UnderlyingType(targetType)
+
+ if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) {
+ return fmt.Errorf("type names %q and %q don't match: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes)
+ }
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ if _, ok := visited[pair{localType, targetType}]; ok {
+ return nil
+ }
+ if visited == nil {
+ visited = make(map[pair]struct{})
+ }
+ visited[pair{localType, targetType}] = struct{}{}
+
+ switch lv := (localType).(type) {
+ case *Void:
+
+ case *Fwd:
+ if targetType.(*Fwd).Kind != lv.Kind {
+ return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ case *Enum:
+ return coreEnumsMatch(lv, targetType.(*Enum))
+
+ case composite:
+ tv := targetType.(composite)
+
+ if len(lv.members()) > len(tv.members()) {
+ return errIncompatibleTypes
+ }
+
+ localMembers := lv.members()
+ targetMembers := map[string]Member{}
+ for _, member := range tv.members() {
+ targetMembers[member.Name] = member
+ }
+
+ for _, localMember := range localMembers {
+ targetMember, found := targetMembers[localMember.Name]
+ if !found {
+ return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes)
+ }
+
+ err := coreTypesMatch(localMember.Type, targetMember.Type, visited)
+ if err != nil {
+ return err
+ }
+ }
+
+ case *Int:
+ if !coreEncodingMatches(lv, targetType.(*Int)) {
+ return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ case *Pointer:
+ tv := targetType.(*Pointer)
+
+ // Allow a pointer to a forward declaration to match a struct
+ // or union.
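+ // For example, a local `struct task_struct *` matches a target pointer
+ // to a `struct task_struct;` forward declaration, in either direction.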
+ if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) {
+ return nil
+ }
+
+ if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) {
+ return nil
+ }
+
+ return coreTypesMatch(lv.Target, tv.Target, visited)
+
+ case *Array:
+ tv := targetType.(*Array)
+
+ if lv.Nelems != tv.Nelems {
+ return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
+ }
+
+ return coreTypesMatch(lv.Type, tv.Type, visited)
+
+ case *FuncProto:
+ tv := targetType.(*FuncProto)
+
+ if len(lv.Params) != len(tv.Params) {
+ return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
+ }
+
+ for i, lparam := range lv.Params {
+ if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil {
+ return err
+ }
+ }
+
+ return coreTypesMatch(lv.Return, tv.Return, visited)
+
+ default:
+ return fmt.Errorf("unsupported type %T", localType)
+ }
+
+ return nil
+}
+
+// coreEncodingMatches returns true if both ints have the same size and signedness.
+// All encodings other than `Signed` are considered unsigned.
+func coreEncodingMatches(local, target *Int) bool {
+ return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed)
+}
+
+// coreEnumsMatch checks that two enums match, which is considered to be the case if the following is true:
+// - size has to match (but enum may match enum64 and vice versa)
+// - local variants have to have a match in target by symbolic name (but not numeric value)
+func coreEnumsMatch(local *Enum, target *Enum) error {
+ if local.Size != target.Size {
+ return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes)
+ }
+
+ // If there are more values in the local enum than in the target, there must be
+ // at least one value in the local that isn't in the target, and therefore the
+ // types are incompatible.
+ if len(local.Values) > len(target.Values) {
+ return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes)
+ }
+
+outer:
+ for _, lv := range local.Values {
+ for _, rv := range target.Values {
+ if coreEssentialNamesMatch(lv.Name, rv.Name) {
+ continue outer
+ }
+ }
+
+ return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go
new file mode 100644
index 000000000..b1f4b1fc3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/doc.go
@@ -0,0 +1,5 @@
+// Package btf handles data encoded according to the BPF Type Format.
+//
+// The canonical documentation lives in the Linux kernel repository and is
+// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
+package btf
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
new file mode 100644
index 000000000..eb9044bad
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -0,0 +1,835 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
+// ExtInfos contains ELF section metadata.
+type ExtInfos struct {
+ // The slices are sorted by offset in ascending order.
+ funcInfos map[string]FuncInfos
+ lineInfos map[string]LineInfos
+ relocationInfos map[string]CORERelocationInfos
+}
+
+// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
+//
+// Returns an error wrapping ErrNotFound if no ext infos are present.
+func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, spec) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. + headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string]FuncInfos, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncInfos(bfis, spec) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string]LineInfos, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type funcInfoMeta struct{} +type coreRelocationMeta struct{} + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos) +} + +// Assign per-instruction metadata to the instructions in insns. 
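+//
+// All three info lists must be sorted by offset and belong to the same
+// section as insns; each info is consumed once the instruction at its
+// offset is reached, e.g. a func info at offset 0 attaches to the first
+// instruction.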
+func AssignMetadataToInstructions( + insns asm.Instructions, + funcInfos FuncInfos, + lineInfos LineInfos, + reloInfos CORERelocationInfos, +) { + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos.infos) > 0 && funcInfos.infos[0].offset == iter.Offset { + *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos.infos[0].fn) + funcInfos.infos = funcInfos.infos[1:] + } + + if len(lineInfos.infos) > 0 && lineInfos.infos[0].offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos.infos[0].line) + lineInfos.infos = lineInfos.infos[1:] + } + + if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo) + reloInfos.infos = reloInfos.infos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +// +// If an instruction has an [asm.Comment], it will be synthesized into a mostly +// empty line info. +func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil { + goto marshal + } + } + + return nil, nil, nil + +marshal: + var fiBuf, liBuf bytes.Buffer + for { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &funcInfo{ + fn: fn, + offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, b); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if source := iter.Ins.Source(); source != nil { + var line *Line + if l, ok := source.(*Line); ok { + line = l + } else { + line = &Line{ + line: source.String(), + } + } + + li := &lineInfo{ + line: line, + offset: iter.Offset, + } + if err := li.marshal(&liBuf, b); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + + if !iter.Next() { + break + } + } + + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. +func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { + var header btfExtHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") + } + + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. +func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. 
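+//
+// Overall layout of .BTF.ext, offsets relative to the section start:
+//
+//	0:                    btfExtHeader (+ optional btfExtCOREHeader)
+//	HdrLen+FuncInfoOff:   func_info records
+//	HdrLen+LineInfoOff:   line_info records
+//	HdrLen+COREReloOff:   core_relo records (only with the CO-RE header)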
+func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. +func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 { + return int64(h.HdrLen + ch.COREReloOff) +} + +// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen +// field is larger than its size. +type btfExtCOREHeader struct { + COREReloOff uint32 + COREReloLen uint32 +} + +// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional +// header bytes are present, extHeader.HdrLen will be larger than the struct, +// indicating the presence of a CO-RE extension header. +func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) { + extHdrSize := int64(binary.Size(&extHeader)) + remainder := int64(extHeader.HdrLen) - extHdrSize + + if remainder == 0 { + return nil, nil + } + + var coreHeader btfExtCOREHeader + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + return &coreHeader, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, +// appearing within func_info and line_info sub-sections. +// These headers appear once for each program section in the ELF and are +// followed by one or more func/line_info records for the section. +func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) { + var infoHeader btfExtInfoSec + if err := binary.Read(r, bo, &infoHeader); err != nil { + return "", nil, fmt.Errorf("read ext info header: %w", err) + } + + secName, err := strings.Lookup(infoHeader.SecNameOff) + if err != nil { + return "", nil, fmt.Errorf("get section name: %w", err) + } + if secName == "" { + return "", nil, fmt.Errorf("extinfo header refers to empty section name") + } + + if infoHeader.NumInfo == 0 { + return "", nil, fmt.Errorf("section %s has zero records", secName) + } + + return secName, &infoHeader, nil +} + +// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos +// or line_infos segment that describes the length of all extInfoRecords in +// that segment. +func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { + const maxRecordSize = 256 + + var recordSize uint32 + if err := binary.Read(r, bo, &recordSize); err != nil { + return 0, fmt.Errorf("can't read record size: %v", err) + } + + if recordSize < 4 { + // Need at least InsnOff worth of bytes per record. + return 0, errors.New("record size too short") + } + if recordSize > maxRecordSize { + return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + } + + return recordSize, nil +} + +// FuncInfos contains a sorted list of func infos. +type FuncInfos struct { + infos []funcInfo +} + +// The size of a FuncInfo in BTF wire format. +var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) + +type funcInfo struct { + fn *Func + offset asm.RawInstructionOffset +} + +type bpfFuncInfo struct { + // Instruction offset of the function within an ELF section. 
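+ // In the ELF this is a byte offset; parseFuncInfoRecords converts it to
+ // an instruction offset (dividing by asm.InstructionSize) as early as
+ // possible.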
+ InsnOff uint32
+ TypeID TypeID
+}
+
+func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
+ typ, err := spec.TypeByID(fi.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ fn, ok := typ.(*Func)
+ if !ok {
+ return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+ }
+
+ // C doesn't have anonymous functions, but check just in case.
+ if fn.Name == "" {
+ return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+ }
+
+ return &funcInfo{
+ fn,
+ asm.RawInstructionOffset(fi.InsnOff),
+ }, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) (FuncInfos, error) {
+ fis := FuncInfos{
+ infos: make([]funcInfo, 0, len(bfis)),
+ }
+ for _, bfi := range bfis {
+ fi, err := newFuncInfo(bfi, spec)
+ if err != nil {
+ return FuncInfos{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+ }
+ fis.infos = append(fis.infos, *fi)
+ }
+ sort.Slice(fis.infos, func(i, j int) bool {
+ return fis.infos[i].offset <= fis.infos[j].offset
+ })
+ return fis, nil
+}
+
+// LoadFuncInfos parses BTF func info in kernel wire format.
+func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncInfos, error) {
+ fis, err := parseFuncInfoRecords(
+ reader,
+ bo,
+ FuncInfoSize,
+ recordNum,
+ false,
+ )
+ if err != nil {
+ return FuncInfos{}, fmt.Errorf("parsing BTF func info: %w", err)
+ }
+
+ return newFuncInfos(fis, spec)
+}
+
+// marshal into the BTF wire format.
+func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ id, err := b.Add(fi.fn)
+ if err != nil {
+ return err
+ }
+ bfi := bpfFuncInfo{
+ InsnOff: uint32(fi.offset),
+ TypeID: id,
+ }
+ buf := make([]byte, FuncInfoSize)
+ internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
+ _, err = w.Write(buf)
+ return err
+}
+
+// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
+// func infos indexed by section name.
+func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfFuncInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
+// These records appear after a btf_ext_info_sec header in the func_info
+// sub-section of .BTF.ext.
+func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) {
+ var out []bpfFuncInfo
+ var fi bpfFuncInfo
+
+ if exp, got := FuncInfoSize, recordSize; exp != got {
+ // BTF blob's record size is longer than we know how to parse.
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if offsetInBytes { + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + fi.InsnOff /= asm.InstructionSize + } + + out = append(out, fi) + } + + return out, nil +} + +var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) + +// Line represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type Line struct { + fileName string + line string + lineNumber uint32 + lineColumn uint32 +} + +func (li *Line) FileName() string { + return li.fileName +} + +func (li *Line) Line() string { + return li.line +} + +func (li *Line) LineNumber() uint32 { + return li.lineNumber +} + +func (li *Line) LineColumn() uint32 { + return li.lineColumn +} + +func (li *Line) String() string { + return li.line +} + +// LineInfos contains a sorted list of line infos. +type LineInfos struct { + infos []lineInfo +} + +type lineInfo struct { + line *Line + offset asm.RawInstructionOffset +} + +// Constants for the format of bpfLineInfo.LineCol. +const ( + bpfLineShift = 10 + bpfLineMax = (1 << (32 - bpfLineShift)) - 1 + bpfColumnMax = (1 << bpfLineShift) - 1 +) + +type bpfLineInfo struct { + // Instruction offset of the line within the whole instruction stream, in instructions. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +// LoadLineInfos parses BTF line info in kernel wire format. +func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineInfos, error) { + lis, err := parseLineInfoRecords( + reader, + bo, + LineInfoSize, + recordNum, + false, + ) + if err != nil { + return LineInfos{}, fmt.Errorf("parsing BTF line info: %w", err) + } + + return newLineInfos(lis, spec.strings) +} + +func newLineInfo(li bpfLineInfo, strings *stringTable) (lineInfo, error) { + line, err := strings.Lookup(li.LineOff) + if err != nil { + return lineInfo{}, fmt.Errorf("lookup of line: %w", err) + } + + fileName, err := strings.Lookup(li.FileNameOff) + if err != nil { + return lineInfo{}, fmt.Errorf("lookup of filename: %w", err) + } + + lineNumber := li.LineCol >> bpfLineShift + lineColumn := li.LineCol & bpfColumnMax + + return lineInfo{ + &Line{ + fileName, + line, + lineNumber, + lineColumn, + }, + asm.RawInstructionOffset(li.InsnOff), + }, nil +} + +func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineInfos, error) { + lis := LineInfos{ + infos: make([]lineInfo, 0, len(blis)), + } + for _, bli := range blis { + li, err := newLineInfo(bli, strings) + if err != nil { + return LineInfos{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) + } + lis.infos = append(lis.infos, li) + } + sort.Slice(lis.infos, func(i, j int) bool { + return lis.infos[i].offset <= lis.infos[j].offset + }) + return lis, nil +} + +// marshal writes the binary representation of the LineInfo to w. 
+func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ line := li.line
+ if line.lineNumber > bpfLineMax {
+ return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
+ }
+
+ if line.lineColumn > bpfColumnMax {
+ return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
+ }
+
+ fileNameOff, err := b.addString(line.fileName)
+ if err != nil {
+ return fmt.Errorf("file name %q: %w", line.fileName, err)
+ }
+
+ lineOff, err := b.addString(line.line)
+ if err != nil {
+ return fmt.Errorf("line %q: %w", line.line, err)
+ }
+
+ bli := bpfLineInfo{
+ uint32(li.offset),
+ fileNameOff,
+ lineOff,
+ (line.lineNumber << bpfLineShift) | line.lineColumn,
+ }
+
+ buf := make([]byte, LineInfoSize)
+ internal.NativeEndian.PutUint32(buf, bli.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
+ internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
+ internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
+ _, err = w.Write(buf)
+ return err
+}
+
+// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
+// line infos indexed by section name.
+func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfLineInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
+// These records appear after a btf_ext_info_sec header in the line_info
+// sub-section of .BTF.ext.
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) {
+ var li bpfLineInfo
+
+ if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
+ // BTF blob's record size is longer than we know how to parse.
+ return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ out := make([]bpfLineInfo, 0, recordNum)
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &li); err != nil {
+ return nil, fmt.Errorf("can't read line info: %v", err)
+ }
+
+ if offsetInBytes {
+ if li.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ li.InsnOff /= asm.InstructionSize
+ }
+
+ out = append(out, li)
+ }
+
+ return out, nil
+}
+
+// bpfCORERelo matches the kernel's struct bpf_core_relo.
+type bpfCORERelo struct {
+ InsnOff uint32
+ TypeID TypeID
+ AccessStrOff uint32
+ Kind coreKind
+}
+
+type CORERelocation struct {
+ // The local type of the relocation, stripped of typedefs and qualifiers.
+ typ Type
+ accessor coreAccessor
+ kind coreKind
+ // The ID of the local type in the source BTF.
+ id TypeID
+}
+
+func (cr *CORERelocation) String() string {
+ return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
+}
+
+func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
+ relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
+ return relo
+}
+
+// CORERelocationInfos contains a sorted list of CO-RE relocation infos.
+type CORERelocationInfos struct {
+ infos []coreRelocationInfo
+}
+
+type coreRelocationInfo struct {
+ relo *CORERelocation
+ offset asm.RawInstructionOffset
+}
+
+func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
+ typ, err := spec.TypeByID(relo.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ accessorStr, err := strings.Lookup(relo.AccessStrOff)
+ if err != nil {
+ return nil, err
+ }
+
+ accessor, err := parseCOREAccessor(accessorStr)
+ if err != nil {
+ return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+ }
+
+ return &coreRelocationInfo{
+ &CORERelocation{
+ typ,
+ accessor,
+ relo.Kind,
+ relo.TypeID,
+ },
+ asm.RawInstructionOffset(relo.InsnOff),
+ }, nil
+}
+
+func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) {
+ rs := CORERelocationInfos{
+ infos: make([]coreRelocationInfo, 0, len(brs)),
+ }
+ for _, br := range brs {
+ relo, err := newRelocationInfo(br, spec, strings)
+ if err != nil {
+ return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err)
+ }
+ rs.infos = append(rs.infos, *relo)
+ }
+ sort.Slice(rs.infos, func(i, j int) bool {
+ return rs.infos[i].offset < rs.infos[j].offset
+ })
+ return rs, nil
+}
+
+var extInfoReloSize = binary.Size(bpfCORERelo{})
+
+// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
+// CO-RE relocations indexed by section name.
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ if recordSize != uint32(extInfoReloSize) {
+ return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+ }
+
+ result := make(map[string][]bpfCORERelo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
+// coreRelos. These records appear after a btf_ext_info_sec header in the
+// core_relos sub-section of .BTF.ext.
+func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
+ var out []bpfCORERelo
+
+ var relo bpfCORERelo
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &relo); err != nil {
+ return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
+ }
+
+ if relo.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go new file mode 100644 index 000000000..6feb08dfb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/feature.go @@ -0,0 +1,123 @@ +package btf + +import ( + "errors" + "math" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// haveBTF attempts to load a BTF blob containing an Int. It should pass on any +// kernel that supports BPF_BTF_LOAD. +var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error { + // 0-length anonymous integer + err := probeBTF(&Int{}) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}) + +// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is +// used as a proxy for .bss, .data and .rodata map support, which generally +// come with a Var and Datasec. These were introduced in Linux 5.2. +var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error { + if err := haveBTF(); err != nil { + return err + } + + v := &Var{ + Name: "a", + Type: &Pointer{(*Void)(nil)}, + } + + err := probeBTF(v) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: creating the map may still + // succeed without Btf* attrs. + return internal.ErrNotSupported + } + return err +}) + +// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It +// is used as a proxy for ext_info (func_info) support, which depends on +// Func(Proto) by definition. +var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error { + if err := haveBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}) + +var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error { + if err := haveProgBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + Linkage: GlobalFunc, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}) + +var haveEnum64 = internal.NewFeatureTest("ENUM64", "6.0", func() error { + if err := haveBTF(); err != nil { + return err + } + + enum := &Enum{ + Size: 8, + Values: []EnumValue{ + {"TEST", math.MaxUint32 + 1}, + }, + } + + err := probeBTF(enum) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}) + +func probeBTF(typ Type) error { + b, err := NewBuilder([]Type{typ}) + if err != nil { + return err + } + + buf, err := b.Marshal(nil, nil) + if err != nil { + return err + } + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(buf), + BtfSize: uint32(len(buf)), + }) + + if err == nil { + fd.Close() + } + + return err +} diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go new file mode 100644 index 000000000..5e581b4a8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/format.go @@ -0,0 +1,350 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. 
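+//
+// A rough usage sketch (illustrative only): given the API declared below,
+// rendering a 4-byte signed BTF integer as a named Go type looks like
+//
+//	var gf GoFormatter
+//	decl, _ := gf.TypeDeclaration("myInt", &Int{Size: 4, Encoding: Signed})
+//	// decl == "type myInt int32"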
+// +// A zero GoFormatter is valid to use. +type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. +// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + typ = skipQualifiers(typ) + fmt.Fprintf(&gf.w, "type %s ", name) + if err := gf.writeTypeLit(typ, 0); err != nil { + return err + } + + e, ok := typ.(*Enum) + if !ok || len(e.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range e.Values { + id := gf.enumIdentifier(name, ev.Name) + var value any + if e.Signed { + value = int64(ev.Value) + } else { + value = ev.Value + } + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value) + } + gf.w.WriteString(")") + + return nil +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ = skipQualifiers(typ) + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. +// +// struct { bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + var err error + switch v := skipQualifiers(typ).(type) { + case *Int: + err = gf.writeIntLit(v) + + case *Enum: + if !v.Signed { + gf.w.WriteRune('u') + } + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + err = fmt.Errorf("invalid enum size %d", v.Size) + } + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. 
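+		// For example (illustrative): a C union { uint32_t a; uint64_t b; }
+		// of size 8 is rendered as struct { a uint32; _ [4]byte; }, since
+		// writeStructLit pads the single member out to the union's size.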
+ err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + default: + return fmt.Errorf("type %T: %w", v, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) error { + bits := i.Size * 8 + switch i.Encoding { + case Bool: + if i.Size != 1 { + return fmt.Errorf("bool with size %d", i.Size) + } + gf.w.WriteString("bool") + case Char: + if i.Size != 1 { + return fmt.Errorf("char with size %d", i.Size) + } + // BTF doesn't have a way to specify the signedness of a char. Assume + // we are dealing with unsigned, since this works nicely with []byte + // in Go code. + fallthrough + case Unsigned, Signed: + stem := "uint" + if i.Encoding == Signed { + stem = "int" + } + if i.Size > 8 { + fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8) + } else { + fmt.Fprintf(&gf.w, "%s%d", stem, bits) + } + default: + return fmt.Errorf("can't encode %s", i.Encoding) + } + return nil +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.Offset.Bytes() + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + fieldSize, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + + prevOffset = offset + uint32(fieldSize) + if prevOffset > size { + return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size) + } + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. + union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("can't format %s as part of data section", vsi.Type) + } + + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. 
+ continue + } + + if v.Name == "" { + return fmt.Errorf("variable %d: empty name", i) + } + + gf.writePadding(vsi.Offset - prevOffset) + prevOffset = vsi.Offset + vsi.Size + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) + + if err := gf.writeType(v.Type, depth); err != nil { + return fmt.Errorf("variable %d: %w", i, err) + } + + gf.w.WriteString("; ") + } + + gf.writePadding(ds.Size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writePadding(bytes uint32) { + if bytes > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) + } +} + +func skipQualifiers(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go new file mode 100644 index 000000000..adfa6fed4 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/handle.go @@ -0,0 +1,317 @@ +package btf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 + + needsKernelBase bool +} + +// NewHandle loads the contents of a [Builder] into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandle(b *Builder) (*Handle, error) { + small := getByteSlice() + defer putByteSlice(small) + + buf, err := b.Marshal(*small, KernelMarshalOptions()) + if err != nil { + return nil, fmt.Errorf("marshal BTF: %w", err) + } + + return NewHandleFromRawBTF(buf) +} + +// NewHandleFromRawBTF loads raw BTF into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandleFromRawBTF(btf []byte) (*Handle, error) { + const minLogSize = 64 * 1024 + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + } + + var ( + logBuf []byte + err error + ) + for { + var fd *sys.FD + fd, err = sys.BtfLoad(attr) + if err == nil { + return &Handle{fd, attr.BtfSize, false}, nil + } + + if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC + // if there are other verification errors. ENOSPC is only returned when + // the BTF blob is correct, a log was requested, and the provided buffer + // is too small. We're therefore not sure whether we got the full + // log or not. + break + } + + // Make an educated guess how large the buffer should be. Start + // at a reasonable minimum and then double the size. + logSize := uint32(max(len(logBuf)*2, minLogSize)) + if int(logSize) < len(logBuf) { + return nil, errors.New("overflow while probing log buffer size") + } + + if attr.BtfLogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. 
+			logSize = attr.BtfLogTrueSize
+		}
+
+		logBuf = make([]byte, logSize)
+		attr.BtfLogSize = logSize
+		attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+		attr.BtfLogLevel = 1
+	}
+
+	if err := haveBTF(); err != nil {
+		return nil, err
+	}
+
+	return nil, internal.ErrorWithLog("load btf", err, logBuf)
+}
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) {
+	fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
+		Id: uint32(id),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+	}
+
+	info, err := newHandleInfoFromFD(fd)
+	if err != nil {
+		_ = fd.Close()
+		return nil, err
+	}
+
+	return &Handle{fd, info.size, info.IsModule()}, nil
+}
+
+// Spec parses the kernel BTF into Go types.
+//
+// base must contain type information for vmlinux if the handle is for
+// a kernel module. It may be nil otherwise.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
+	var btfInfo sys.BtfInfo
+	btfBuffer := make([]byte, h.size)
+	btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
+
+	if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
+		return nil, err
+	}
+
+	if h.needsKernelBase && base == nil {
+		return nil, fmt.Errorf("missing base types")
+	}
+
+	return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
+}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+	if h == nil {
+		return nil
+	}
+
+	return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+	return h.fd.Int()
+}
+
+// Info returns metadata about the handle.
+func (h *Handle) Info() (*HandleInfo, error) {
+	return newHandleInfoFromFD(h.fd)
+}
+
+// HandleInfo describes a Handle.
+type HandleInfo struct {
+	// ID of this handle in the kernel. The ID is only valid as long as the
+	// associated handle is kept alive.
+	ID ID
+
+	// Name is an identifying name for the BTF, currently only used by the
+	// kernel.
+	Name string
+
+	// IsKernel is true if the BTF originated with the kernel and not
+	// userspace.
+	IsKernel bool
+
+	// Size of the raw BTF in bytes.
+	size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+	// We invoke the syscall once with empty BTF and name buffers to get size
+	// information to allocate buffers. Then we invoke it a second time with
+	// buffers to receive the data.
+	var btfInfo sys.BtfInfo
+	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+		return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+	}
+
+	if btfInfo.NameLen > 0 {
+		// NameLen doesn't account for the terminating NUL.
+		btfInfo.NameLen++
+	}
+
+	// Don't pull raw BTF by default, since it may be quite large.
+	btfSize := btfInfo.BtfSize
+	btfInfo.BtfSize = 0
+
+	nameBuffer := make([]byte, btfInfo.NameLen)
+	btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+	if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+		return nil, err
+	}
+
+	return &HandleInfo{
+		ID:       ID(btfInfo.Id),
+		Name:     unix.ByteSliceToString(nameBuffer),
+		IsKernel: btfInfo.KernelBtf != 0,
+		size:     btfSize,
+	}, nil
+}
+
+// IsVmlinux returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+	return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
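+//
+// For example (illustrative), kernel BTF exposed as /sys/kernel/btf/nf_tables
+// is named "nf_tables" and therefore reported as a module, while the BTF
+// named "vmlinux" is the kernel image itself.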
+func (i *HandleInfo) IsModule() bool {
+	return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+	// The ID of the current handle. Only valid after a call to Next.
+	ID ID
+	// The current Handle. Only valid until a call to Next.
+	// See Take if you want to retain the handle.
+	Handle *Handle
+	err    error
+}
+
+// Next retrieves a handle for the next BTF object.
+//
+// Returns true if another BTF object was found. Call [HandleIterator.Err] after
+// the function returns false.
+func (it *HandleIterator) Next() bool {
+	id := it.ID
+	for {
+		attr := &sys.BtfGetNextIdAttr{Id: id}
+		err := sys.BtfGetNextId(attr)
+		if errors.Is(err, os.ErrNotExist) {
+			// There are no more BTF objects.
+			break
+		} else if err != nil {
+			it.err = fmt.Errorf("get next BTF ID: %w", err)
+			break
+		}
+
+		id = attr.NextId
+		handle, err := NewHandleFromID(id)
+		if errors.Is(err, os.ErrNotExist) {
+			// Try again with the next ID.
+			continue
+		} else if err != nil {
+			it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
+			break
+		}
+
+		it.Handle.Close()
+		it.ID, it.Handle = id, handle
+		return true
+	}
+
+	// No more handles or we encountered an error.
+	it.Handle.Close()
+	it.Handle = nil
+	return false
+}
+
+// Take ownership of the current handle.
+//
+// It is the caller's responsibility to close the handle.
+func (it *HandleIterator) Take() *Handle {
+	handle := it.Handle
+	it.Handle = nil
+	return handle
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *HandleIterator) Err() error {
+	return it.err
+}
+
+// FindHandle returns the first handle for which predicate returns true.
+//
+// Requires CAP_SYS_ADMIN.
+//
+// Returns an error wrapping ErrNotFound if predicate never returns true or if
+// there is no BTF loaded into the kernel.
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) {
+	it := new(HandleIterator)
+	defer it.Handle.Close()
+
+	for it.Next() {
+		info, err := it.Handle.Info()
+		if err != nil {
+			return nil, fmt.Errorf("info for ID %d: %w", it.ID, err)
+		}
+
+		if predicate(info) {
+			return it.Take(), nil
+		}
+	}
+	if err := it.Err(); err != nil {
+		return nil, fmt.Errorf("iterate handles: %w", err)
+	}
+
+	return nil, fmt.Errorf("find handle: %w", ErrNotFound)
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/kernel.go b/vendor/github.com/cilium/ebpf/btf/kernel.go
new file mode 100644
index 000000000..8584ebcb9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/kernel.go
@@ -0,0 +1,159 @@
+package btf
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/kallsyms"
+)
+
+var kernelBTF = struct {
+	sync.RWMutex
+	kernel  *Spec
+	modules map[string]*Spec
+}{
+	modules: make(map[string]*Spec),
+}
+
+// FlushKernelSpec removes any cached kernel type information.
+func FlushKernelSpec() {
+	kallsyms.FlushKernelModuleCache()
+
+	kernelBTF.Lock()
+	defer kernelBTF.Unlock()
+
+	kernelBTF.kernel = nil
+	kernelBTF.modules = make(map[string]*Spec)
+}
+
+// LoadKernelSpec returns the current kernel's BTF information.
+//
+// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
+// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
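+//
+// A typical use (an illustrative sketch) is to load the spec once and then
+// resolve types from it, for example via Spec.TypeByID:
+//
+//	spec, err := LoadKernelSpec()
+//	if err != nil {
+//		// BTF is unavailable or disabled on this kernel.
+//	}
+//	firstType, err := spec.TypeByID(1) // type ID 0 is always Void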
+func LoadKernelSpec() (*Spec, error) { + kernelBTF.RLock() + spec := kernelBTF.kernel + kernelBTF.RUnlock() + + if spec == nil { + kernelBTF.Lock() + defer kernelBTF.Unlock() + + spec = kernelBTF.kernel + } + + if spec != nil { + return spec.Copy(), nil + } + + spec, _, err := loadKernelSpec() + if err != nil { + return nil, err + } + + kernelBTF.kernel = spec + return spec.Copy(), nil +} + +// LoadKernelModuleSpec returns the BTF information for the named kernel module. +// +// Defaults to /sys/kernel/btf/. +// Returns an error wrapping ErrNotSupported if BTF is not enabled. +// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist. +func LoadKernelModuleSpec(module string) (*Spec, error) { + kernelBTF.RLock() + spec := kernelBTF.modules[module] + kernelBTF.RUnlock() + + if spec != nil { + return spec.Copy(), nil + } + + base, err := LoadKernelSpec() + if err != nil { + return nil, fmt.Errorf("load kernel spec: %w", err) + } + + kernelBTF.Lock() + defer kernelBTF.Unlock() + + if spec = kernelBTF.modules[module]; spec != nil { + return spec.Copy(), nil + } + + spec, err = loadKernelModuleSpec(module, base) + if err != nil { + return nil, err + } + + kernelBTF.modules[module] = spec + return spec.Copy(), nil +} + +func loadKernelSpec() (_ *Spec, fallback bool, _ error) { + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + spec, err := loadRawSpec(fh, internal.NativeEndian, nil) + return spec, false, err + } + + file, err := findVMLinux() + if err != nil { + return nil, false, err + } + defer file.Close() + + spec, err := LoadSpecFromReader(file) + return spec, true, err +} + +func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { + dir, file := filepath.Split(module) + if dir != "" || filepath.Ext(file) != "" { + return nil, fmt.Errorf("invalid module name %q", module) + } + + fh, err := os.Open(filepath.Join("/sys/kernel/btf", module)) + if err != nil { + return nil, err + } + defer fh.Close() + + return loadRawSpec(fh, internal.NativeEndian, base) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. +func findVMLinux() (*os.File, error) { + release, err := internal.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := os.Open(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go new file mode 100644 index 000000000..f14cfa6e9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -0,0 +1,611 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "maps" + "math" + "slices" + "sync" + + "github.com/cilium/ebpf/internal" +) + +type MarshalOptions struct { + // Target byte order. Defaults to the system's native endianness. 
+ Order binary.ByteOrder + // Remove function linkage information for compatibility with <5.6 kernels. + StripFuncLinkage bool + // Replace Enum64 with a placeholder for compatibility with <6.0 kernels. + ReplaceEnum64 bool + // Prevent the "No type found" error when loading BTF without any types. + PreventNoTypeFound bool +} + +// KernelMarshalOptions will generate BTF suitable for the current kernel. +func KernelMarshalOptions() *MarshalOptions { + return &MarshalOptions{ + Order: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + ReplaceEnum64: haveEnum64() != nil, + PreventNoTypeFound: true, // All current kernels require this. + } +} + +// encoder turns Types into raw BTF. +type encoder struct { + MarshalOptions + + pending internal.Deque[Type] + buf *bytes.Buffer + strings *stringTableBuilder + ids map[Type]TypeID + visited map[Type]struct{} + lastID TypeID +} + +var bufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, btfHeaderLen+128) + return &buf + }, +} + +func getByteSlice() *[]byte { + return bufferPool.Get().(*[]byte) +} + +func putByteSlice(buf *[]byte) { + *buf = (*buf)[:0] + bufferPool.Put(buf) +} + +// Builder turns Types into raw BTF. +// +// The default value may be used and represents an empty BTF blob. Void is +// added implicitly if necessary. +type Builder struct { + // Explicitly added types. + types []Type + // IDs for all added types which the user knows about. + stableIDs map[Type]TypeID + // Explicitly added strings. + strings *stringTableBuilder +} + +// NewBuilder creates a Builder from a list of types. +// +// It is more efficient than calling [Add] individually. +// +// Returns an error if adding any of the types fails. +func NewBuilder(types []Type) (*Builder, error) { + b := &Builder{ + make([]Type, 0, len(types)), + make(map[Type]TypeID, len(types)), + nil, + } + + for _, typ := range types { + _, err := b.Add(typ) + if err != nil { + return nil, fmt.Errorf("add %s: %w", typ, err) + } + } + + return b, nil +} + +// Empty returns true if neither types nor strings have been added. +func (b *Builder) Empty() bool { + return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0) +} + +// Add a Type and allocate a stable ID for it. +// +// Adding the identical Type multiple times is valid and will return the same ID. +// +// See [Type] for details on identity. +func (b *Builder) Add(typ Type) (TypeID, error) { + if b.stableIDs == nil { + b.stableIDs = make(map[Type]TypeID) + } + + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + if ds, ok := typ.(*Datasec); ok { + if err := datasecResolveWorkaround(b, ds); err != nil { + return 0, err + } + } + + id, ok := b.stableIDs[typ] + if ok { + return id, nil + } + + b.types = append(b.types, typ) + + id = TypeID(len(b.types)) + if int(id) != len(b.types) { + return 0, fmt.Errorf("no more type IDs") + } + + b.stableIDs[typ] = id + return id, nil +} + +// Marshal encodes all types in the Marshaler into BTF wire format. +// +// opts may be nil. +func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { + stb := b.strings + if stb == nil { + // Assume that most types are named. This makes encoding large BTF like + // vmlinux a lot cheaper. + stb = newStringTableBuilder(len(b.types)) + } else { + // Avoid modifying the Builder's string table. + stb = b.strings.Copy() + } + + if opts == nil { + opts = &MarshalOptions{Order: internal.NativeEndian} + } + + // Reserve space for the BTF header. 
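+	// The finished blob is laid out as header | type section | string table:
+	// the types are marshaled into the buffer below, the string table is
+	// appended by AppendEncoded, and the header is filled in last.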
+ buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen] + + w := internal.NewBuffer(buf) + defer internal.PutBuffer(w) + + e := encoder{ + MarshalOptions: *opts, + buf: w, + strings: stb, + lastID: TypeID(len(b.types)), + visited: make(map[Type]struct{}, len(b.types)), + ids: maps.Clone(b.stableIDs), + } + + if e.ids == nil { + e.ids = make(map[Type]TypeID) + } + + types := b.types + if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound { + // We have strings that need to be written out, + // but no types (besides the implicit Void). + // Kernels as recent as v6.7 refuse to load such BTF + // with a "No type found" error in the log. + // Fix this by adding a dummy type. + types = []Type{&Int{Size: 0}} + } + + // Ensure that types are marshaled in the exact order they were Add()ed. + // Otherwise the ID returned from Add() won't match. + e.pending.Grow(len(types)) + for _, typ := range types { + e.pending.Push(typ) + } + + if err := e.deflatePending(); err != nil { + return nil, err + } + + length := e.buf.Len() + typeLen := uint32(length - btfHeaderLen) + + stringLen := e.strings.Length() + buf = e.strings.AppendEncoded(e.buf.Bytes()) + + // Fill out the header, and write it out. + header := &btfHeader{ + Magic: btfMagic, + Version: 1, + Flags: 0, + HdrLen: uint32(btfHeaderLen), + TypeOff: 0, + TypeLen: typeLen, + StringOff: typeLen, + StringLen: uint32(stringLen), + } + + err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header) + if err != nil { + return nil, fmt.Errorf("write header: %v", err) + } + + return buf, nil +} + +// addString adds a string to the resulting BTF. +// +// Adding the same string multiple times will return the same result. +// +// Returns an identifier into the string table or an error if the string +// contains invalid characters. +func (b *Builder) addString(str string) (uint32, error) { + if b.strings == nil { + b.strings = newStringTableBuilder(0) + } + + return b.strings.Add(str) +} + +func (e *encoder) allocateIDs(root Type) (err error) { + visitInPostorder(root, e.visited, func(typ Type) bool { + if _, ok := typ.(*Void); ok { + return true + } + + if _, ok := e.ids[typ]; ok { + return true + } + + id := e.lastID + 1 + if id < e.lastID { + err = errors.New("type ID overflow") + return false + } + + e.pending.Push(typ) + e.ids[typ] = id + e.lastID = id + return true + }) + return +} + +// id returns the ID for the given type or panics with an error. +func (e *encoder) id(typ Type) TypeID { + if _, ok := typ.(*Void); ok { + return 0 + } + + id, ok := e.ids[typ] + if !ok { + panic(fmt.Errorf("no ID for type %v", typ)) + } + + return id +} + +func (e *encoder) deflatePending() error { + // Declare root outside of the loop to avoid repeated heap allocations. + var root Type + + for !e.pending.Empty() { + root = e.pending.Shift() + + // Allocate IDs for all children of typ, including transitive dependencies. 
+ if err := e.allocateIDs(root); err != nil { + return err + } + + if err := e.deflateType(root); err != nil { + id := e.ids[root] + return fmt.Errorf("deflate %v with ID %d: %w", root, id, err) + } + } + + return nil +} + +func (e *encoder) deflateType(typ Type) (err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + panic(r) + } + } + }() + + var raw rawType + raw.NameOff, err = e.strings.Add(typ.TypeName()) + if err != nil { + return err + } + + switch v := typ.(type) { + case *Void: + return errors.New("Void is implicit in BTF wire format") + + case *Int: + raw.SetKind(kindInt) + raw.SetSize(v.Size) + + var bi btfInt + bi.SetEncoding(v.Encoding) + // We need to set bits in addition to size, since btf_type_int_is_regular + // otherwise flags this as a bitfield. + bi.SetBits(byte(v.Size) * 8) + raw.data = bi + + case *Pointer: + raw.SetKind(kindPointer) + raw.SetType(e.id(v.Target)) + + case *Array: + raw.SetKind(kindArray) + raw.data = &btfArray{ + e.id(v.Type), + e.id(v.Index), + v.Nelems, + } + + case *Struct: + raw.SetKind(kindStruct) + raw.SetSize(v.Size) + raw.data, err = e.convertMembers(&raw.btfType, v.Members) + + case *Union: + err = e.deflateUnion(&raw, v) + + case *Enum: + if v.Size == 8 { + err = e.deflateEnum64(&raw, v) + } else { + err = e.deflateEnum(&raw, v) + } + + case *Fwd: + raw.SetKind(kindForward) + raw.SetFwdKind(v.Kind) + + case *Typedef: + raw.SetKind(kindTypedef) + raw.SetType(e.id(v.Type)) + + case *Volatile: + raw.SetKind(kindVolatile) + raw.SetType(e.id(v.Type)) + + case *Const: + raw.SetKind(kindConst) + raw.SetType(e.id(v.Type)) + + case *Restrict: + raw.SetKind(kindRestrict) + raw.SetType(e.id(v.Type)) + + case *Func: + raw.SetKind(kindFunc) + raw.SetType(e.id(v.Type)) + if !e.StripFuncLinkage { + raw.SetLinkage(v.Linkage) + } + + case *FuncProto: + raw.SetKind(kindFuncProto) + raw.SetType(e.id(v.Return)) + raw.SetVlen(len(v.Params)) + raw.data, err = e.deflateFuncParams(v.Params) + + case *Var: + raw.SetKind(kindVar) + raw.SetType(e.id(v.Type)) + raw.data = btfVariable{uint32(v.Linkage)} + + case *Datasec: + raw.SetKind(kindDatasec) + raw.SetSize(v.Size) + raw.SetVlen(len(v.Vars)) + raw.data = e.deflateVarSecinfos(v.Vars) + + case *Float: + raw.SetKind(kindFloat) + raw.SetSize(v.Size) + + case *declTag: + raw.SetKind(kindDeclTag) + raw.SetType(e.id(v.Type)) + raw.data = &btfDeclTag{uint32(v.Index)} + raw.NameOff, err = e.strings.Add(v.Value) + + case *typeTag: + raw.SetKind(kindTypeTag) + raw.SetType(e.id(v.Type)) + raw.NameOff, err = e.strings.Add(v.Value) + + default: + return fmt.Errorf("don't know how to deflate %T", v) + } + + if err != nil { + return err + } + + return raw.Marshal(e.buf, e.Order) +} + +func (e *encoder) deflateUnion(raw *rawType, union *Union) (err error) { + raw.SetKind(kindUnion) + raw.SetSize(union.Size) + raw.data, err = e.convertMembers(&raw.btfType, union.Members) + return +} + +func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) { + bms := make([]btfMember, 0, len(members)) + isBitfield := false + for _, member := range members { + isBitfield = isBitfield || member.BitfieldSize > 0 + + offset := member.Offset + if isBitfield { + offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff) + } + + nameOff, err := e.strings.Add(member.Name) + if err != nil { + return nil, err + } + + bms = append(bms, btfMember{ + nameOff, + e.id(member.Type), + uint32(offset), + }) + } + + header.SetVlen(len(members)) + header.SetBitfield(isBitfield) 
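+	// Illustrative note: with the encoding above, a 3-bit bitfield at bit
+	// offset 5 is stored as 3<<24 | 5 = 0x03000005 once any member of the
+	// composite is a bitfield.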
+ return bms, nil +} + +func (e *encoder) deflateEnum(raw *rawType, enum *Enum) (err error) { + raw.SetKind(kindEnum) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + // Signedness appeared together with ENUM64 support. + raw.SetSigned(enum.Signed && !e.ReplaceEnum64) + raw.data, err = e.deflateEnumValues(enum) + return +} + +func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) { + bes := make([]btfEnum, 0, len(enum.Values)) + for _, value := range enum.Values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + if enum.Signed { + if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name) + } + } else { + if value.Value > math.MaxUint32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name) + } + } + + bes = append(bes, btfEnum{ + nameOff, + uint32(value.Value), + }) + } + + return bes, nil +} + +func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) { + if e.ReplaceEnum64 { + // Replace the ENUM64 with a union of fields with the correct size. + // This matches libbpf behaviour on purpose. + placeholder := &Int{ + "enum64_placeholder", + enum.Size, + Unsigned, + } + if enum.Signed { + placeholder.Encoding = Signed + } + if err := e.allocateIDs(placeholder); err != nil { + return fmt.Errorf("add enum64 placeholder: %w", err) + } + + members := make([]Member, 0, len(enum.Values)) + for _, v := range enum.Values { + members = append(members, Member{ + Name: v.Name, + Type: placeholder, + }) + } + + return e.deflateUnion(raw, &Union{enum.Name, enum.Size, members}) + } + + raw.SetKind(kindEnum64) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + raw.SetSigned(enum.Signed) + raw.data, err = e.deflateEnum64Values(enum.Values) + return +} + +func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) { + bes := make([]btfEnum64, 0, len(values)) + for _, value := range values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + bes = append(bes, btfEnum64{ + nameOff, + uint32(value.Value), + uint32(value.Value >> 32), + }) + } + + return bes, nil +} + +func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) { + bps := make([]btfParam, 0, len(params)) + for _, param := range params { + nameOff, err := e.strings.Add(param.Name) + if err != nil { + return nil, err + } + + bps = append(bps, btfParam{ + nameOff, + e.id(param.Type), + }) + } + return bps, nil +} + +func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo { + vsis := make([]btfVarSecinfo, 0, len(vars)) + for _, v := range vars { + vsis = append(vsis, btfVarSecinfo{ + e.id(v.Type), + v.Offset, + v.Size, + }) + } + return vsis +} + +// MarshalMapKV creates a BTF object containing a map key and value. +// +// The function is intended for the use of the ebpf package and may be removed +// at any point in time. 
+func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) { + var b Builder + + if key != nil { + keyID, err = b.Add(key) + if err != nil { + return nil, 0, 0, fmt.Errorf("add key type: %w", err) + } + } + + if value != nil { + valueID, err = b.Add(value) + if err != nil { + return nil, 0, 0, fmt.Errorf("add value type: %w", err) + } + } + + handle, err := NewHandle(&b) + if err != nil { + // Check for 'full' map BTF support, since kernels between 4.18 and 5.2 + // already support BTF blobs for maps without Var or Datasec just fine. + if err := haveMapBTF(); err != nil { + return nil, 0, 0, err + } + } + return handle, keyID, valueID, err +} diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 000000000..7c31461c3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,198 @@ +package btf + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "maps" + "slices" + "strings" +) + +type stringTable struct { + base *stringTable + offsets []uint32 + prevIdx int + strings []string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. +type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. + firstStringOffset := uint32(0) + if base != nil { + idx := len(base.offsets) - 1 + firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 + } + + // Derived from vmlinux BTF. + const averageStringLength = 16 + + n := int(r.Size() / averageStringLength) + offsets := make([]uint32, 0, n) + strings := make([]string, 0, n) + + offset := firstStringOffset + scanner := bufio.NewScanner(r) + scanner.Split(splitNull) + for scanner.Scan() { + str := scanner.Text() + offsets = append(offsets, offset) + strings = append(strings, str) + offset += uint32(len(str)) + 1 + } + if err := scanner.Err(); err != nil { + return nil, err + } + + if len(strings) == 0 { + return nil, errors.New("string table is empty") + } + + if firstStringOffset == 0 && strings[0] != "" { + return nil, errors.New("first item in string table is non-empty") + } + + return &stringTable{base, offsets, 0, strings}, nil +} + +func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { + i := bytes.IndexByte(data, 0) + if i == -1 { + if atEOF && len(data) > 0 { + return 0, nil, errors.New("string table isn't null terminated") + } + return 0, nil, nil + } + + return i + 1, data[:i], nil +} + +func (st *stringTable) Lookup(offset uint32) (string, error) { + if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] { + return st.base.lookup(offset) + } + return st.lookup(offset) +} + +func (st *stringTable) lookup(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 && st.base == nil { + return "", nil + } + + // Accesses tend to be globally increasing, so check if the next string is + // the one we want. This skips the binary search in about 50% of cases. 
+	if st.prevIdx+1 < len(st.offsets) && st.offsets[st.prevIdx+1] == offset {
+		st.prevIdx++
+		return st.strings[st.prevIdx], nil
+	}
+
+	i, found := slices.BinarySearch(st.offsets, offset)
+	if !found {
+		return "", fmt.Errorf("offset %d isn't start of a string", offset)
+	}
+
+	// Set the new increment index, but only if it's greater than the current.
+	if i > st.prevIdx+1 {
+		st.prevIdx = i
+	}
+
+	return st.strings[i], nil
+}
+
+// Num returns the number of strings in the table.
+func (st *stringTable) Num() int {
+	return len(st.strings)
+}
+
+// stringTableBuilder builds BTF string tables.
+type stringTableBuilder struct {
+	length  uint32
+	strings map[string]uint32
+}
+
+// newStringTableBuilder creates a builder with the given capacity.
+//
+// capacity may be zero.
+func newStringTableBuilder(capacity int) *stringTableBuilder {
+	var stb stringTableBuilder
+
+	if capacity == 0 {
+		// Use the runtime's small default size.
+		stb.strings = make(map[string]uint32)
+	} else {
+		stb.strings = make(map[string]uint32, capacity)
+	}
+
+	// Ensure that the empty string is at index 0.
+	stb.append("")
+	return &stb
+}
+
+// Add a string to the table.
+//
+// Adding the same string multiple times will only store it once.
+func (stb *stringTableBuilder) Add(str string) (uint32, error) {
+	if strings.IndexByte(str, 0) != -1 {
+		return 0, fmt.Errorf("string contains null: %q", str)
+	}
+
+	offset, ok := stb.strings[str]
+	if ok {
+		return offset, nil
+	}
+
+	return stb.append(str), nil
+}
+
+func (stb *stringTableBuilder) append(str string) uint32 {
+	offset := stb.length
+	stb.length += uint32(len(str)) + 1
+	stb.strings[str] = offset
+	return offset
+}
+
+// Lookup finds the offset of a string in the table.
+//
+// Returns an error if str hasn't been added yet.
+func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
+	offset, ok := stb.strings[str]
+	if !ok {
+		return 0, fmt.Errorf("string %q is not in table", str)
+	}
+
+	return offset, nil
+}
+
+// Length returns the length in bytes.
+func (stb *stringTableBuilder) Length() int {
+	return int(stb.length)
+}
+
+// AppendEncoded appends the string table to the end of the provided buffer.
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
+	n := len(buf)
+	buf = append(buf, make([]byte, stb.Length())...)
+	strings := buf[n:]
+	for str, offset := range stb.strings {
+		copy(strings[offset:], str)
+	}
+	return buf
+}
+
+// Copy the string table builder.
+func (stb *stringTableBuilder) Copy() *stringTableBuilder {
+	return &stringTableBuilder{
+		stb.length,
+		maps.Clone(stb.strings),
+	}
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
new file mode 100644
index 000000000..c39dc66e4
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -0,0 +1,123 @@
+package btf
+
+import (
+	"fmt"
+)
+
+// Functions to traverse a cyclic graph of types. The following post was very
+// useful:
+// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
+
+// Visit all types reachable from root in postorder.
+//
+// Traversal stops if yield returns false.
+//
+// Returns false if traversal was aborted.
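+//
+// For example (illustrative), visiting a Pointer to an Int yields the Int
+// first and the Pointer second, since children are visited before their
+// parent.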
+func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool { + if _, ok := visited[root]; ok { + return true + } + if visited == nil { + visited = make(map[Type]struct{}) + } + visited[root] = struct{}{} + + cont := children(root, func(child *Type) bool { + return visitInPostorder(*child, visited, yield) + }) + if !cont { + return false + } + + return yield(root) +} + +// children calls yield on each child of typ. +// +// Traversal stops if yield returns false. +// +// Returns false if traversal was aborted. +func children(typ Type, yield func(child *Type) bool) bool { + // Explicitly type switch on the most common types to allow the inliner to + // do its work. This avoids allocating intermediate slices from walk() on + // the heap. + switch v := typ.(type) { + case *Void, *Int, *Enum, *Fwd, *Float: + // No children to traverse. + case *Pointer: + if !yield(&v.Target) { + return false + } + case *Array: + if !yield(&v.Index) { + return false + } + if !yield(&v.Type) { + return false + } + case *Struct: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return false + } + } + case *Union: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return false + } + } + case *Typedef: + if !yield(&v.Type) { + return false + } + case *Volatile: + if !yield(&v.Type) { + return false + } + case *Const: + if !yield(&v.Type) { + return false + } + case *Restrict: + if !yield(&v.Type) { + return false + } + case *Func: + if !yield(&v.Type) { + return false + } + case *FuncProto: + if !yield(&v.Return) { + return false + } + for i := range v.Params { + if !yield(&v.Params[i].Type) { + return false + } + } + case *Var: + if !yield(&v.Type) { + return false + } + case *Datasec: + for i := range v.Vars { + if !yield(&v.Vars[i].Type) { + return false + } + } + case *declTag: + if !yield(&v.Type) { + return false + } + case *typeTag: + if !yield(&v.Type) { + return false + } + case *cycle: + // cycle has children, but we ignore them deliberately. + default: + panic(fmt.Sprintf("don't know how to walk Type %T", v)) + } + + return true +} diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 000000000..a3397460b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,1319 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// Mirrors MAX_RESOLVE_DEPTH in libbpf. +// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761 +const maxResolveDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID = sys.TypeID + +// Type represents a type described by BTF. +// +// Identity of Type follows the [Go specification]: two Types are considered +// equal if they have the same concrete type and the same dynamic value, aka +// they point at the same location in memory. This means that the following +// Types are considered distinct even though they have the same "shape". +// +// a := &Int{Size: 1} +// b := &Int{Size: 1} +// a != b +// +// [Go specification]: https://go.dev/ref/spec#Comparison_operators +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. 
+ // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. %2v would output details of the root type and its children + // as well as a short description of the grandchildren. + fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // New implementations must update walkType. +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) + _ Type = (*declTag)(nil) + _ Type = (*typeTag)(nil) + _ Type = (*cycle)(nil) +) + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } + +type IntEncoding byte + +// Valid IntEncodings. +// +// These may look like they are flags, but they aren't. +const ( + Unsigned IntEncoding = 0 + Signed IntEncoding = 1 + Char IntEncoding = 2 + Bool IntEncoding = 4 +) + +func (ie IntEncoding) String() string { + switch ie { + case Char: + // NB: There is no way to determine signedness for char. + return "char" + case Bool: + return "bool" + case Signed: + return "signed" + case Unsigned: + return "unsigned" + default: + return fmt.Sprintf("IntEncoding(%d)", byte(ie)) + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. +type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. 
+type Struct struct {
+	Name string
+	// The size of the struct including padding, in bytes.
+	Size    uint32
+	Members []Member
+}
+
+func (s *Struct) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, s, "fields=", len(s.Members))
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) copy() Type {
+	cpy := *s
+	cpy.Members = copyMembers(s.Members)
+	return &cpy
+}
+
+func (s *Struct) members() []Member {
+	return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+	Name string
+	// The size of the union including padding, in bytes.
+	Size    uint32
+	Members []Member
+}
+
+func (u *Union) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, u, "fields=", len(u.Members))
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) copy() Type {
+	cpy := *u
+	cpy.Members = copyMembers(u.Members)
+	return &cpy
+}
+
+func (u *Union) members() []Member {
+	return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+	cpy := make([]Member, len(orig))
+	copy(cpy, orig)
+	return cpy
+}
+
+type composite interface {
+	Type
+	members() []Member
+}
+
+var (
+	_ composite = (*Struct)(nil)
+	_ composite = (*Union)(nil)
+)
+
+// Bits is a value in bits.
+type Bits uint32
+
+// Bytes converts a bit value into bytes.
+func (b Bits) Bytes() uint32 {
+	return uint32(b / 8)
+}
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+	Name         string
+	Type         Type
+	Offset       Bits
+	BitfieldSize Bits
+}
+
+// Enum lists possible values.
+type Enum struct {
+	Name string
+	// Size of the enum value in bytes.
+	Size uint32
+	// True if the values should be interpreted as signed integers.
+	Signed bool
+	Values []EnumValue
+}
+
+func (e *Enum) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum.
+//
+// It is not a valid Type.
+type EnumValue struct {
+	Name  string
+	Value uint64
+}
+
+func (e *Enum) size() uint32 { return e.Size }
+func (e *Enum) copy() Type {
+	cpy := *e
+	cpy.Values = make([]EnumValue, len(e.Values))
+	copy(cpy.Values, e.Values)
+	return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+	FwdStruct FwdKind = iota
+	FwdUnion
+)
+
+func (fk FwdKind) String() string {
+	switch fk {
+	case FwdStruct:
+		return "struct"
+	case FwdUnion:
+		return "union"
+	default:
+		return fmt.Sprintf("%T(%d)", fk, int(fk))
+	}
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+	Name string
+	Kind FwdKind
+}
+
+func (f *Fwd) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, f, f.Kind)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) copy() Type {
+	cpy := *f
+	return &cpy
+}
+
+func (f *Fwd) matches(typ Type) bool {
+	if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct {
+		return true
+	}
+
+	if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion {
+		return true
+	}
+
+	return false
+}
+
+// Typedef is an alias of a Type.
+type Typedef struct {
+	Name string
+	Type Type
+}
+
+func (td *Typedef) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, td, td.Type)
+}
+
+func (td *Typedef) TypeName() string { return td.Name }
+
+func (td *Typedef) copy() Type {
+	cpy := *td
+	return &cpy
+}
+
+// Volatile is a qualifier.
+type Volatile struct {
+	Type Type
+}
+
+func (v *Volatile) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, v, v.Type)
+}
+
+func (v *Volatile) TypeName() string { return "" }
+
+func (v *Volatile) qualify() Type { return v.Type }
+func (v *Volatile) copy() Type {
+	cpy := *v
+	return &cpy
+}
+
+// Const is a qualifier.
+type Const struct {
+	Type Type
+}
+
+func (c *Const) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, c, c.Type)
+}
+
+func (c *Const) TypeName() string { return "" }
+
+func (c *Const) qualify() Type { return c.Type }
+func (c *Const) copy() Type {
+	cpy := *c
+	return &cpy
+}
+
+// Restrict is a qualifier.
+type Restrict struct {
+	Type Type
+}
+
+func (r *Restrict) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, r, r.Type)
+}
+
+func (r *Restrict) TypeName() string { return "" }
+
+func (r *Restrict) qualify() Type { return r.Type }
+func (r *Restrict) copy() Type {
+	cpy := *r
+	return &cpy
+}
+
+// Func is a function definition.
+type Func struct {
+	Name    string
+	Type    Type
+	Linkage FuncLinkage
+}
+
+// FuncMetadata returns the Func attached to an instruction's metadata, if any.
+func FuncMetadata(ins *asm.Instruction) *Func {
+	fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
+	return fn
+}
+
+// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
+func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
+	ins.Metadata.Set(funcInfoMeta{}, fn)
+	return ins
+}
+
+func (f *Func) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
+}
+
+func (f *Func) TypeName() string { return f.Name }
+
+func (f *Func) copy() Type {
+	cpy := *f
+	return &cpy
+}
+
+// FuncProto is a function declaration.
+type FuncProto struct {
+	Return Type
+	Params []FuncParam
+}
+
+func (fp *FuncProto) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
+}
+
+func (fp *FuncProto) TypeName() string { return "" }
+
+func (fp *FuncProto) copy() Type {
+	cpy := *fp
+	cpy.Params = make([]FuncParam, len(fp.Params))
+	copy(cpy.Params, fp.Params)
+	return &cpy
+}
+
+// FuncParam is a parameter of a FuncProto.
+type FuncParam struct {
+	Name string
+	Type Type
+}
+
+// Var is a global variable.
+type Var struct {
+	Name    string
+	Type    Type
+	Linkage VarLinkage
+}
+
+func (v *Var) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, v, v.Linkage)
+}
+
+func (v *Var) TypeName() string { return v.Name }
+
+func (v *Var) copy() Type {
+	cpy := *v
+	return &cpy
+}
+
+// Datasec is a global program section containing data.
+type Datasec struct {
+	Name string
+	Size uint32
+	Vars []VarSecinfo
+}
+
+func (ds *Datasec) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, ds)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) copy() Type {
+	cpy := *ds
+	cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+	copy(cpy.Vars, ds.Vars)
+	return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+	// Var or Func.
+	Type   Type
+	Offset uint32
+	Size   uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+	Name string
+
+	// The size of the float in bytes.
+	Size uint32
+}
+
+func (f *Float) Format(fs fmt.State, verb rune) {
+	formatType(fs, verb, f, "size=", f.Size*8)
+}
+
+func (f *Float) TypeName() string { return f.Name }
+func (f *Float) size() uint32 { return f.Size }
+func (f *Float) copy() Type {
+	cpy := *f
+	return &cpy
+}
+
+// declTag associates metadata with a declaration.
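+//
+// In C, declaration tags are written as __attribute__((btf_decl_tag("value")))
+// on the tagged declaration.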
+type declTag struct { + Type Type + Value string + // The index this tag refers to in the target type. For composite types, + // a value of -1 indicates that the tag refers to the whole type. Otherwise + // it indicates which member or argument the tag applies to. + Index int +} + +func (dt *declTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index) +} + +func (dt *declTag) TypeName() string { return "" } +func (dt *declTag) copy() Type { + cpy := *dt + return &cpy +} + +// typeTag associates metadata with a type. +type typeTag struct { + Type Type + Value string +} + +func (tt *typeTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value) +} + +func (tt *typeTag) TypeName() string { return "" } +func (tt *typeTag) qualify() Type { return tt.Type } +func (tt *typeTag) copy() Type { + cpy := *tt + return &cpy +} + +// cycle is a type which had to be elided since it exceeded maxTypeDepth. +type cycle struct { + root Type +} + +func (c *cycle) ID() TypeID { return math.MaxUint32 } +func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) } +func (c *cycle) TypeName() string { return "" } +func (c *cycle) copy() Type { + cpy := *c + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) + _ qualifier = (*typeTag)(nil) +) + +var errUnsizedType = errors.New("type is unsized") + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. +func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxResolveDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// alignof returns the alignment of a type. +// +// Returns an error if the Type can't be aligned, like an integer with an uneven +// size. Currently only supports the subset of types necessary for bitfield +// relocations. +func alignof(typ Type) (int, error) { + var n int + + switch t := UnderlyingType(typ).(type) { + case *Enum: + n = int(t.size()) + case *Int: + n = int(t.Size) + case *Array: + return alignof(t.Type) + default: + return 0, fmt.Errorf("can't calculate alignment of %T", t) + } + + if !internal.IsPow(n) { + return 0, fmt.Errorf("alignment value %d is not a power of two", n) + } + + return n, nil +} + +// Copy a Type recursively. +// +// typ may form a cycle. 
+func Copy(typ Type) Type {
+	return copyType(typ, nil, make(map[Type]Type), nil)
+}
+
+func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type {
+	if typ == nil {
+		return nil
+	}
+
+	cpy, ok := copies[typ]
+	if ok {
+		// This has been copied previously, no need to continue.
+		return cpy
+	}
+
+	cpy = typ.copy()
+	copies[typ] = cpy
+
+	if id, ok := ids[typ]; ok {
+		copiedIDs[cpy] = id
+	}
+
+	children(cpy, func(child *Type) bool {
+		*child = copyType(*child, ids, copies, copiedIDs)
+		return true
+	})
+
+	return cpy
+}
+
+type typeDeque = internal.Deque[*Type]
+
+// readAndInflateTypes reads the raw btf type info and turns it into a graph
+// of Types connected via pointers.
+//
+// If base is provided, then the types are considered to be of a split BTF
+// (e.g., a kernel module).
+//
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
+// units, multiple types may share the same name. A Type may form a cyclic graph
+// by pointing at itself.
+func readAndInflateTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32, rawStrings *stringTable, base *Spec) ([]Type, error) {
+	// Because of the interleaving between types and struct members it is
+	// difficult to precompute the number of raw types this will parse.
+	// This guess is a good first estimate.
+	sizeOfbtfType := uintptr(btfTypeLen)
+	tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
+	types := make([]Type, 0, tyMaxCount)
+
+	// Void is defined to always be type ID 0, and is thus omitted from BTF.
+	types = append(types, (*Void)(nil))
+
+	firstTypeID := TypeID(0)
+	if base != nil {
+		var err error
+		firstTypeID, err = base.nextTypeID()
+		if err != nil {
+			return nil, err
+		}
+
+		// Split BTF doesn't contain Void.
+		types = types[:0]
+	}
+
+	type fixupDef struct {
+		id  TypeID
+		typ *Type
+	}
+
+	var fixups []fixupDef
+	fixup := func(id TypeID, typ *Type) {
+		if id < firstTypeID {
+			if baseType, err := base.TypeByID(id); err == nil {
+				*typ = baseType
+				return
+			}
+		}
+
+		idx := int(id - firstTypeID)
+		if idx < len(types) {
+			// We've already inflated this type, fix it up immediately.
+			*typ = types[idx]
+			return
+		}
+
+		fixups = append(fixups, fixupDef{id, typ})
+	}
+
+	type bitfieldFixupDef struct {
+		id TypeID
+		m  *Member
+	}
+
+	var (
+		legacyBitfields = make(map[TypeID][2]Bits) // offset, size
+		bitfieldFixups  []bitfieldFixupDef
+	)
+	convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
+		// NB: The fixup below relies on pre-allocating this array to
+		// work, since otherwise append might re-allocate members.
+		members := make([]Member, 0, len(raw))
+		for i, btfMember := range raw {
+			name, err := rawStrings.Lookup(btfMember.NameOff)
+			if err != nil {
+				return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
+			}
+
+			members = append(members, Member{
+				Name:   name,
+				Offset: Bits(btfMember.Offset),
+			})
+
+			m := &members[i]
+			fixup(raw[i].Type, &m.Type)
+
+			if kindFlag {
+				m.BitfieldSize = Bits(btfMember.Offset >> 24)
+				m.Offset &= 0xffffff
+				// We ignore legacy bitfield definitions if the current composite
+				// is a new-style bitfield. This is kind of safe since offset and
+				// size on the type of the member must be zero if kindFlag is set
+				// according to spec.
+				continue
+			}
+
+			// This may be a legacy bitfield, try to fix it up.
+			data, ok := legacyBitfields[raw[i].Type]
+			if ok {
+				// Bingo!
+ m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + + if m.Type != nil { + // We couldn't find a legacy bitfield, but we know that the member's + // type has already been inflated. Hence we know that it can't be + // a legacy bitfield and there is nothing left to do. + continue + } + + // We don't have fixup data, and the type we're pointing + // at hasn't been inflated yet. No choice but to defer + // the fixup. + bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{ + raw[i].Type, + m, + }) + } + return members, nil + } + + var ( + buf = make([]byte, 1024) + header btfType + bInt btfInt + bArr btfArray + bMembers []btfMember + bEnums []btfEnum + bParams []btfParam + bVariable btfVariable + bSecInfos []btfVarSecinfo + bDeclTag btfDeclTag + bEnums64 []btfEnum64 + ) + + var declTags []*declTag + for { + var ( + id = firstTypeID + TypeID(len(types)) + typ Type + ) + + if _, err := io.ReadFull(r, buf[:btfTypeLen]); err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) + } + + if _, err := unmarshalBtfType(&header, buf[:btfTypeLen], bo); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } + + if id < firstTypeID { + return nil, fmt.Errorf("no more type IDs") + } + + name, err := rawStrings.Lookup(header.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch header.Kind() { + case kindInt: + size := header.Size() + buf = buf[:btfIntLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfInt, id: %d: %w", id, err) + } + if _, err := unmarshalBtfInt(&bInt, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} + } + typ = &Int{name, header.Size(), bInt.Encoding()} + + case kindPointer: + ptr := &Pointer{nil} + fixup(header.Type(), &ptr.Target) + typ = ptr + + case kindArray: + buf = buf[:btfArrayLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfArray, id: %d: %w", id, err) + } + if _, err := unmarshalBtfArray(&bArr, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + fixup(bArr.IndexType, &arr.Index) + fixup(bArr.Type, &arr.Type) + typ = arr + + case kindStruct: + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + typ = &Struct{name, header.Size(), members} + + case kindUnion: + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + 
+ members, err := convertMembers(bMembers, header.Bitfield()) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + typ = &Union{name, header.Size(), members} + + case kindEnum: + vlen := header.Vlen() + bEnums = slices.Grow(bEnums[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnumLen)[:vlen*btfEnumLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnums, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums(bEnums, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnums, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + signed := header.Signed() + for i, btfVal := range bEnums { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + value := uint64(btfVal.Val) + if signed { + // Sign extend values to 64 bit. + value = uint64(int32(btfVal.Val)) + } + vals = append(vals, EnumValue{name, value}) + } + typ = &Enum{name, header.Size(), signed, vals} + + case kindForward: + typ = &Fwd{name, header.FwdKind()} + + case kindTypedef: + typedef := &Typedef{name, nil} + fixup(header.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + fixup(header.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + fixup(header.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + fixup(header.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, header.Linkage()} + fixup(header.Type(), &fn.Type) + typ = fn + + case kindFuncProto: + vlen := header.Vlen() + bParams = slices.Grow(bParams[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfParamLen)[:vlen*btfParamLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfParams, id: %d: %w", id, err) + } + if _, err := unmarshalBtfParams(bParams, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfParams, id: %d: %w", id, err) + } + + params := make([]FuncParam, 0, vlen) + for i, param := range bParams { + name, err := rawStrings.Lookup(param.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + params = append(params, FuncParam{ + Name: name, + }) + } + for i := range params { + fixup(bParams[i].Type, ¶ms[i].Type) + } + + fp := &FuncProto{nil, params} + fixup(header.Type(), &fp.Return) + typ = fp + + case kindVar: + buf = buf[:btfVariableLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVariable(&bVariable, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, VarLinkage(bVariable.Linkage)} + fixup(header.Type(), &v.Type) + typ = v + + case kindDatasec: + vlen := header.Vlen() + bSecInfos = slices.Grow(bSecInfos[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfVarSecinfoLen)[:vlen*btfVarSecinfoLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVarSecInfos, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVarSecInfos(bSecInfos, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecInfos, id: %d: %w", id, err) + } + + vars := make([]VarSecinfo, 0, vlen) + for _, btfVar := range bSecInfos { + vars = append(vars, VarSecinfo{ + Offset: btfVar.Offset, + Size: btfVar.Size, + }) + } 
+ for i := range vars { + fixup(bSecInfos[i].Type, &vars[i].Type) + } + typ = &Datasec{name, header.Size(), vars} + + case kindFloat: + typ = &Float{name, header.Size()} + + case kindDeclTag: + buf = buf[:btfDeclTagLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + if _, err := unmarshalBtfDeclTag(&bDeclTag, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx + if uint64(btfIndex) > math.MaxInt { + return nil, fmt.Errorf("type id %d: index exceeds int", id) + } + + dt := &declTag{nil, name, int(int32(btfIndex))} + fixup(header.Type(), &dt.Type) + typ = dt + + declTags = append(declTags, dt) + + case kindTypeTag: + tt := &typeTag{nil, name} + fixup(header.Type(), &tt.Type) + typ = tt + + case kindEnum64: + vlen := header.Vlen() + bEnums64 = slices.Grow(bEnums64[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnum64Len)[:vlen*btfEnum64Len] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnum64s, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums64(bEnums64, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64s, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + for i, btfVal := range bEnums64 { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) + } + value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32) + vals = append(vals, EnumValue{name, value}) + } + typ = &Enum{name, header.Size(), header.Signed(), vals} + + default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) + } + + types = append(types, typ) + } + + for _, fixup := range fixups { + if fixup.id < firstTypeID { + return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id) + } + + idx := int(fixup.id - firstTypeID) + if idx >= len(types) { + return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) + } + + *fixup.typ = types[idx] + } + + for _, bitfieldFixup := range bitfieldFixups { + if bitfieldFixup.id < firstTypeID { + return nil, fmt.Errorf("bitfield fixup from split to base types is not expected") + } + + data, ok := legacyBitfields[bitfieldFixup.id] + if ok { + // This is indeed a legacy bitfield, fix it up. + bitfieldFixup.m.Offset += data[0] + bitfieldFixup.m.BitfieldSize = data[1] + } + } + + for _, dt := range declTags { + switch t := dt.Type.(type) { + case *Var, *Typedef: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index) + } + + case composite: + if dt.Index >= len(t.members()) { + return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t) + } + + case *Func: + fp, ok := t.Type.(*FuncProto) + if !ok { + return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) + } + + if dt.Index >= len(fp.Params) { + return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t) + } + + default: + return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) + } + } + + return types, nil +} + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. 
Anything after three underscores
+// in a type name is ignored for the purpose of finding a candidate type
+// in the kernel's BTF.
+func newEssentialName(name string) essentialName {
+	if name == "" {
+		return ""
+	}
+	lastIdx := strings.LastIndex(name, "___")
+	if lastIdx > 0 {
+		return essentialName(name[:lastIdx])
+	}
+	return essentialName(name)
+}
+
+// UnderlyingType skips qualifiers and Typedefs.
+func UnderlyingType(typ Type) Type {
+	result := typ
+	for depth := 0; depth <= maxResolveDepth; depth++ {
+		switch v := (result).(type) {
+		case qualifier:
+			result = v.qualify()
+		case *Typedef:
+			result = v.Type
+		default:
+			return result
+		}
+	}
+	return &cycle{typ}
+}
+
+// As returns typ if it is of type T. Otherwise it peels qualifiers and Typedefs
+// until it finds a T.
+//
+// Returns the zero value and false if there is no T or if the type is nested
+// too deeply.
+func As[T Type](typ Type) (T, bool) {
+	// NB: We can't make this function return (*T) since then
+	// we can't assert that a type matches an interface which
+	// embeds Type: as[composite](T).
+	for depth := 0; depth <= maxResolveDepth; depth++ {
+		switch v := (typ).(type) {
+		case T:
+			return v, true
+		case qualifier:
+			typ = v.qualify()
+		case *Typedef:
+			typ = v.Type
+		default:
+			goto notFound
+		}
+	}
+notFound:
+	var zero T
+	return zero, false
+}
+
+type formatState struct {
+	fmt.State
+	depth int
+}
+
+// formattableType is a subset of Type, to ease unit testing of formatType.
+type formattableType interface {
+	fmt.Formatter
+	TypeName() string
+}
+
+// formatType formats a type in a canonical form.
+//
+// Handles cyclical types by only printing cycles up to a certain depth. Elements
+// in extra are separated by spaces unless the preceding element is a string
+// ending in '='.
+func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
+	if verb != 'v' && verb != 's' {
+		fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
+		return
+	}
+
+	_, _ = io.WriteString(f, internal.GoTypeName(t))
+
+	if name := t.TypeName(); name != "" {
+		// Output BTF type name if present.
+		fmt.Fprintf(f, ":%q", name)
+	}
+
+	if f.Flag('+') {
+		// Output address if requested.
+		fmt.Fprintf(f, ":%#p", t)
+	}
+
+	if verb == 's' {
+		// %s omits details.
+		return
+	}
+
+	var depth int
+	if ps, ok := f.(*formatState); ok {
+		depth = ps.depth
+		f = ps.State
+	}
+
+	maxDepth, ok := f.Width()
+	if !ok {
+		maxDepth = 0
+	}
+
+	if depth > maxDepth {
+		// We've reached the maximum depth. This avoids infinite recursion even
+		// for cyclical types.
+		return
+	}
+
+	if len(extra) == 0 {
+		return
+	}
+
+	wantSpace := false
+	_, _ = io.WriteString(f, "[")
+	for _, arg := range extra {
+		if wantSpace {
+			_, _ = io.WriteString(f, " ")
+		}
+
+		switch v := arg.(type) {
+		case string:
+			_, _ = io.WriteString(f, v)
+			wantSpace = len(v) > 0 && v[len(v)-1] != '='
+			continue
+
+		case formattableType:
+			v.Format(&formatState{f, depth + 1}, verb)
+
+		default:
+			fmt.Fprint(f, arg)
+		}
+
+		wantSpace = true
+	}
+	_, _ = io.WriteString(f, "]")
+}
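UnderlyingType, As and Sizeof all walk the same qualifier and typedef chains. A minimal sketch tying them together, using an invented const-qualified typedef built only from the exported types shown in this file:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// A const-qualified typedef of an 8-byte integer, as a compiler
	// might emit it.
	u64 := &btf.Int{Name: "u64", Size: 8}
	typ := &btf.Const{Type: &btf.Typedef{Name: "__u64", Type: u64}}

	// UnderlyingType peels the qualifier and the typedef.
	fmt.Printf("%T\n", btf.UnderlyingType(typ)) // *btf.Int

	// As stops at the first *btf.Int in the chain.
	if i, ok := btf.As[*btf.Int](typ); ok {
		fmt.Println(i.Name, i.Size) // u64 8
	}

	// Sizeof resolves the same chain and multiplies through arrays:
	// 4 elements * 8 bytes.
	size, err := btf.Sizeof(&btf.Array{Type: typ, Nelems: 4})
	fmt.Println(size, err) // 32 <nil>
}
```

diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go new file mode 100644 index 000000000..12a89b87e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go @@ -0,0 +1,26 @@
+package btf
+
+// datasecResolveWorkaround ensures that certain vars in a Datasec are added
+// to a Spec before the Datasec. This avoids a bug in kernel BTF validation.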
+// +// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/ +func datasecResolveWorkaround(b *Builder, ds *Datasec) error { + for _, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + continue + } + + switch v.Type.(type) { + case *Typedef, *Volatile, *Const, *Restrict, *typeTag: + // NB: We must never call Add on a Datasec, otherwise we risk + // infinite recursion. + _, err := b.Add(v.Type) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go new file mode 100644 index 000000000..b2cb214ad --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -0,0 +1,925 @@ +package ebpf + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kconfig" + "github.com/cilium/ebpf/internal/sysenc" +) + +// CollectionOptions control loading a collection into the kernel. +// +// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions. +type CollectionOptions struct { + Maps MapOptions + Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map +} + +// CollectionSpec describes a collection. +type CollectionSpec struct { + Maps map[string]*MapSpec + Programs map[string]*ProgramSpec + + // Types holds type information about Maps and Programs. + // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. + ByteOrder binary.ByteOrder +} + +// Copy returns a recursive copy of the spec. +func (cs *CollectionSpec) Copy() *CollectionSpec { + if cs == nil { + return nil + } + + cpy := CollectionSpec{ + Maps: make(map[string]*MapSpec, len(cs.Maps)), + Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + ByteOrder: cs.ByteOrder, + Types: cs.Types.Copy(), + } + + for name, spec := range cs.Maps { + cpy.Maps[name] = spec.Copy() + } + + for name, spec := range cs.Programs { + cpy.Programs[name] = spec.Copy() + } + + return &cpy +} + +// RewriteMaps replaces all references to specific maps. +// +// Use this function to use pre-existing maps instead of creating new ones +// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. +// +// Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. 
+func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
+	for symbol, m := range maps {
+		// Have we seen a program that uses this symbol / map?
+		seen := false
+		for progName, progSpec := range cs.Programs {
+			err := progSpec.Instructions.AssociateMap(symbol, m)
+
+			switch {
+			case err == nil:
+				seen = true
+
+			case errors.Is(err, asm.ErrUnreferencedSymbol):
+				// Not all programs need to use the map
+
+			default:
+				return fmt.Errorf("program %s: %w", progName, err)
+			}
+		}
+
+		if !seen {
+			return fmt.Errorf("map %s not referenced by any programs", symbol)
+		}
+
+		// Prevent NewCollection from creating rewritten maps
+		delete(cs.Maps, symbol)
+	}
+
+	return nil
+}
+
+// MissingConstantsError is returned by [CollectionSpec.RewriteConstants].
+type MissingConstantsError struct {
+	// The constants missing from .rodata.
+	Constants []string
+}
+
+func (m *MissingConstantsError) Error() string {
+	return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", "))
+}
+
+// RewriteConstants replaces the value of multiple constants.
+//
+// The constant must be defined like so in the C program:
+//
+//	volatile const type foobar;
+//	volatile const type foobar = default;
+//
+// Replacement values must be of the same length as the C sizeof(type).
+// If necessary, they are marshalled according to the same rules as
+// map values.
+//
+// From Linux 5.5 the verifier will use constants to eliminate dead code.
+//
+// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist.
+func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
+	replaced := make(map[string]bool)
+
+	for name, spec := range cs.Maps {
+		if !strings.HasPrefix(name, ".rodata") {
+			continue
+		}
+
+		b, ds, err := spec.dataSection()
+		if errors.Is(err, errMapNoBTFValue) {
+			// Data sections without a BTF Datasec are valid, but don't support
+			// constant replacements.
+			continue
+		}
+		if err != nil {
+			return fmt.Errorf("map %s: %w", name, err)
+		}
+
+		// MapSpec.Copy() performs a shallow copy. Fully copy the byte slice
+		// to avoid any changes affecting other copies of the MapSpec.
+		cpy := make([]byte, len(b))
+		copy(cpy, b)
+
+		for _, v := range ds.Vars {
+			vname := v.Type.TypeName()
+			replacement, ok := consts[vname]
+			if !ok {
+				continue
+			}
+
+			if _, ok := v.Type.(*btf.Var); !ok {
+				return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname)
+			}
+
+			if replaced[vname] {
+				return fmt.Errorf("section %s: duplicate variable %s", name, vname)
+			}
+
+			if int(v.Offset+v.Size) > len(cpy) {
+				return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname)
+			}
+
+			b, err := sysenc.Marshal(replacement, int(v.Size))
+			if err != nil {
+				return fmt.Errorf("marshaling constant replacement %s: %w", vname, err)
+			}
+
+			b.CopyTo(cpy[v.Offset : v.Offset+v.Size])
+
+			replaced[vname] = true
+		}
+
+		spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy}
+	}
+
+	var missing []string
+	for c := range consts {
+		if !replaced[c] {
+			missing = append(missing, c)
+		}
+	}
+
+	if len(missing) != 0 {
+		return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing})
+	}
+
+	return nil
+}
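A hedged usage sketch for RewriteConstants; the object file name and the constant are hypothetical, and the C side would declare something like `volatile const __u32 sample_rate;`:

```go
package main

import (
	"errors"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	spec, err := ebpf.LoadCollectionSpec("filter.o") // hypothetical object file
	if err != nil {
		log.Fatal(err)
	}

	// Values are marshalled into .rodata; the size must match sizeof(__u32).
	err = spec.RewriteConstants(map[string]interface{}{
		"sample_rate": uint32(100),
	})

	// The missing-constant case is surfaced as a wrapped MissingConstantsError.
	var missing *ebpf.MissingConstantsError
	if errors.As(err, &missing) {
		log.Fatalf("constants not found in .rodata: %v", missing.Constants)
	} else if err != nil {
		log.Fatal(err)
	}
}
```

+
+// Assign the contents of a CollectionSpec to a struct.
+//
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
+//
+// 'to' must be a pointer to a struct.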
A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same MapSpec or ProgramSpec is assigned multiple times. +func (cs *CollectionSpec) Assign(to interface{}) error { + // Assign() only supports assigning ProgramSpecs and MapSpecs, + // so doesn't load any resources into the kernel. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*ProgramSpec)(nil)): + if p := cs.Programs[name]; p != nil { + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*MapSpec)(nil)): + if m := cs.Maps[name]; m != nil { + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + return assignValues(to, getValue) +} + +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. +// +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// opts may be nil. +// +// Returns an error if any of the fields can't be found, or +// if the same Map or Program is assigned multiple times. +func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err + } + defer loader.close() + + // Support assigning Programs and Maps, lazy-loading the required objects. + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + assignedProgs[name] = true + return loader.loadProgram(name) + + case reflect.TypeOf((*Map)(nil)): + assignedMaps[name] = true + return loader.loadMap(name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { + return err + } + + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateDeferredMaps(); err != nil { + return err + } + + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + switch m.typ { + case ProgramArray: + // Require all lazy-loaded ProgramArrays to be assigned to the given object. 
+ // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. + if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } + + return nil +} + +// Collection is a collection of Programs and Maps associated +// with their symbols +type Collection struct { + Programs map[string]*Program + Maps map[string]*Map +} + +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollection(spec *CollectionSpec) (*Collection, error) { + return NewCollectionWithOptions(spec, CollectionOptions{}) +} + +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() + + // Create maps first, as their fds need to be linked into programs. + for mapName := range spec.Maps { + if _, err := loader.loadMap(mapName); err != nil { + return nil, err + } + } + + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + + if _, err := loader.loadProgram(progName); err != nil { + return nil, err + } + } + + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. + if err := loader.populateDeferredMaps(); err != nil { + return nil, err + } + + // Prevent loader.cleanup from closing maps and programs. + maps, progs := loader.maps, loader.programs + loader.maps, loader.programs = nil, nil + + return &Collection{ + progs, + maps, + }, nil +} + +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program +} + +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { + if opts == nil { + opts = &CollectionOptions{} + } + + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. 
+ for name, m := range opts.MapReplacements { + spec, ok := coll.Maps[name] + if !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) + } + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) + } + } + + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + }, nil +} + +// close all resources left over in the collectionLoader. +func (cl *collectionLoader) close() { + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil + } + + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) + } + + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Clone the map to avoid closing user's map later on. + m, err := replaceMap.Clone() + if err != nil { + return nil, err + } + + cl.maps[mapName] = m + return m, nil + } + + m, err := newMapWithOptions(mapSpec, cl.opts.Maps) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) + } + + // Finalize 'scalar' maps that don't refer to any other eBPF resources + // potentially pending creation. This is needed for frozen maps like .rodata + // that need to be finalized before invoking the verifier. + if !mapSpec.Type.canStoreMapOrProgram() { + if err := m.finalize(mapSpec); err != nil { + return nil, fmt.Errorf("finalizing map %s: %w", mapName, err) + } + } + + cl.maps[mapName] = m + return m, nil +} + +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } + + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } + + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. + if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } + + progSpec = progSpec.Copy() + + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. + for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference() == "" { + continue + } + + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. + if int32(ins.Constant) > 0 { + continue + } + + m, err := cl.loadMap(ins.Reference()) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) + } + } + + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + cl.programs[progName] = prog + return prog, nil +} + +// populateDeferredMaps iterates maps holding programs or other maps and loads +// any dependencies. Populates all maps in cl and freezes them if specified. 
+func (cl *collectionLoader) populateDeferredMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } + + // Scalar maps without Map or Program references are finalized during + // creation. Don't finalize them again. + if !mapSpec.Type.canStoreMapOrProgram() { + continue + } + + mapSpec = mapSpec.Copy() + + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. + for i, kv := range mapSpec.Contents { + objName, ok := kv.Value.(string) + if !ok { + continue + } + + switch t := mapSpec.Type; { + case t.canStoreProgram(): + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case t.canStoreMap(): + // loadMap is idempotent and could return an existing Map. + innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} + } + } + + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) + } + } + + return nil +} + +// resolveKconfig resolves all variables declared in .kconfig and populates +// m.Contents. Does nothing if the given m.Contents is non-empty. +func resolveKconfig(m *MapSpec) error { + ds, ok := m.Value.(*btf.Datasec) + if !ok { + return errors.New("map value is not a Datasec") + } + + type configInfo struct { + offset uint32 + typ btf.Type + } + + configs := make(map[string]configInfo) + + data := make([]byte, ds.Size) + for _, vsi := range ds.Vars { + v := vsi.Type.(*btf.Var) + n := v.TypeName() + + switch n { + case "LINUX_KERNEL_VERSION": + if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 { + return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) + } + + kv, err := internal.KernelVersion() + if err != nil { + return fmt.Errorf("getting kernel version: %w", err) + } + internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel()) + + case "LINUX_HAS_SYSCALL_WRAPPER": + integer, ok := v.Type.(*btf.Int) + if !ok { + return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type) + } + var value uint64 = 1 + if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) { + value = 0 + } else if err != nil { + return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil { + return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + default: // Catch CONFIG_*. + configs[n] = configInfo{ + offset: vsi.Offset, + typ: v.Type, + } + } + } + + // We only parse kconfig file if a CONFIG_* variable was found. 
+	if len(configs) > 0 {
+		f, err := kconfig.Find()
+		if err != nil {
+			return fmt.Errorf("cannot find a kconfig file: %w", err)
+		}
+		defer f.Close()
+
+		filter := make(map[string]struct{}, len(configs))
+		for config := range configs {
+			filter[config] = struct{}{}
+		}
+
+		kernelConfig, err := kconfig.Parse(f, filter)
+		if err != nil {
+			return fmt.Errorf("cannot parse kconfig file: %w", err)
+		}
+
+		for n, info := range configs {
+			value, ok := kernelConfig[n]
+			if !ok {
+				return fmt.Errorf("config option %q does not exist for this kernel", n)
+			}
+
+			err := kconfig.PutValue(data[info.offset:], info.typ, value)
+			if err != nil {
+				return fmt.Errorf("problem adding value for %s: %w", n, err)
+			}
+		}
+	}
+
+	m.Contents = []MapKV{{uint32(0), data}}
+
+	return nil
+}
+
+// LoadCollection reads an object file and creates and loads its declared
+// resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func LoadCollection(file string) (*Collection, error) {
+	spec, err := LoadCollectionSpec(file)
+	if err != nil {
+		return nil, err
+	}
+	return NewCollection(spec)
+}
+
+// Assign the contents of a Collection to a struct.
+//
+// This function bridges functionality between bpf2go generated
+// code and any functionality better implemented in Collection.
+//
+// 'to' must be a pointer to a struct. A field of the
+// struct is updated with values from Programs or Maps if it
+// has an `ebpf` tag and its type is *Program or *Map.
+// The tag's value specifies the name of the program or map as
+// found in the CollectionSpec.
+//
+//	struct {
+//		Foo *ebpf.Program `ebpf:"xdp_foo"`
+//		Bar *ebpf.Map `ebpf:"bar_map"`
+//		Ignored int
+//	}
+//
+// Returns an error if any of the eBPF objects can't be found, or
+// if the same Map or Program is assigned multiple times.
+//
+// Ownership and Close()ing responsibility is transferred to `to`
+// for any successful assigns. On error `to` is left in an undefined state.
+func (coll *Collection) Assign(to interface{}) error {
+	assignedMaps := make(map[string]bool)
+	assignedProgs := make(map[string]bool)
+
+	// Assign() only transfers already-loaded Maps and Programs. No extra
+	// loading is done.
+	getValue := func(typ reflect.Type, name string) (interface{}, error) {
+		switch typ {
+
+		case reflect.TypeOf((*Program)(nil)):
+			if p := coll.Programs[name]; p != nil {
+				assignedProgs[name] = true
+				return p, nil
+			}
+			return nil, fmt.Errorf("missing program %q", name)
+
+		case reflect.TypeOf((*Map)(nil)):
+			if m := coll.Maps[name]; m != nil {
+				assignedMaps[name] = true
+				return m, nil
+			}
+			return nil, fmt.Errorf("missing map %q", name)
+
+		default:
+			return nil, fmt.Errorf("unsupported type %s", typ)
+		}
+	}
+
+	if err := assignValues(to, getValue); err != nil {
+		return err
+	}
+
+	// Finalize ownership transfer
+	for p := range assignedProgs {
+		delete(coll.Programs, p)
+	}
+	for m := range assignedMaps {
+		delete(coll.Maps, m)
+	}
+
+	return nil
+}
+
+// Close frees all maps and programs associated with the collection.
+//
+// The collection mustn't be used afterwards.
+func (coll *Collection) Close() {
+	for _, prog := range coll.Programs {
+		prog.Close()
+	}
+	for _, m := range coll.Maps {
+		m.Close()
+	}
+}
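For illustration, the tagged-struct pattern described in the Assign comment might be driven as follows; the object file, program and map names are hypothetical:

```go
package main

import (
	"log"

	"github.com/cilium/ebpf"
)

type objects struct {
	Prog *ebpf.Program `ebpf:"xdp_foo"`
	Map  *ebpf.Map     `ebpf:"bar_map"`
}

func main() {
	coll, err := ebpf.LoadCollection("program.o") // hypothetical object file
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close() // closes whatever is not assigned below

	var objs objects
	if err := coll.Assign(&objs); err != nil {
		log.Fatal(err)
	}

	// Ownership of the assigned objects moved to objs.
	defer objs.Prog.Close()
	defer objs.Map.Close()
}
```

+
+// DetachMap removes the named map from the Collection.
+//
+// This means that a later call to Close() will not affect this map.
+//
+// Returns nil if no map of that name exists.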
+func (coll *Collection) DetachMap(name string) *Map { + m := coll.Maps[name] + delete(coll.Maps, name) + return m +} + +// DetachProgram removes the named program from the Collection. +// +// This means that a later call to Close() will not affect this program. +// +// Returns nil if no program of that name exists. +func (coll *Collection) DetachProgram(name string) *Program { + p := coll.Programs[name] + delete(coll.Programs, name) + return p +} + +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} + +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. +func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) + } + + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) + } + + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) + } + + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} + + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue + } + + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. + var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { + continue + } + + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } + + // Obtain the destination type of the pointer. + v = field.value.Elem() + + case reflect.Struct: + // Reference the value's type directly. + v = field.value + + default: + continue + } + + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + + fields = append(fields, inner...) + } + + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. +// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. +func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + + toValue := reflect.ValueOf(to) + if toValue.Type().Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer to struct", to) + } + + if toValue.IsNil() { + return fmt.Errorf("nil pointer to %T", to) + } + + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { + return err + } + + type elem struct { + // Either *Map or *Program + typ reflect.Type + name string + } + + assigned := make(map[elem]string) + for _, field := range fields { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { + return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) + } + + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) + } + + // Get the eBPF object referred to by the tag. 
+		value, err := getValue(field.Type, tag)
+		if err != nil {
+			return fmt.Errorf("field %s: %w", field.Name, err)
+		}
+
+		if !field.value.CanSet() {
+			return fmt.Errorf("field %s: can't set value", field.Name)
+		}
+		field.value.Set(reflect.ValueOf(value))
+
+		assigned[e] = field.Name
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/cpu.go b/vendor/github.com/cilium/ebpf/cpu.go new file mode 100644 index 000000000..07e959efd --- /dev/null +++ b/vendor/github.com/cilium/ebpf/cpu.go @@ -0,0 +1,66 @@
+package ebpf
+
+import (
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+)
+
+var possibleCPU = sync.OnceValues(func() (int, error) {
+	return parseCPUsFromFile("/sys/devices/system/cpu/possible")
+})
+
+// PossibleCPU returns the max number of CPUs a system may possibly have.
+// Logical CPU numbers must be of the form 0-n.
+func PossibleCPU() (int, error) {
+	return possibleCPU()
+}
+
+// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if
+// the error is non-nil.
+func MustPossibleCPU() int {
+	cpus, err := PossibleCPU()
+	if err != nil {
+		panic(err)
+	}
+	return cpus
+}
+
+func parseCPUsFromFile(path string) (int, error) {
+	spec, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+
+	n, err := parseCPUs(string(spec))
+	if err != nil {
+		return 0, fmt.Errorf("can't parse %s: %v", path, err)
+	}
+
+	return n, nil
+}
+
+// parseCPUs parses the number of cpus from a string produced
+// by bitmap_list_string() in the Linux kernel.
+// Multiple ranges are rejected, since they can't be unified
+// into a single number.
+// This is the format of /sys/devices/system/cpu/possible, it
+// is not suitable for /sys/devices/system/cpu/online, etc.
+func parseCPUs(spec string) (int, error) {
+	if strings.Trim(spec, "\n") == "0" {
+		return 1, nil
+	}
+
+	var low, high int
+	n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
+	if n != 2 || err != nil {
+		return 0, fmt.Errorf("invalid format: %s", spec)
+	}
+	if low != 0 {
+		return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
+	}
+
+	// cpus is 0 indexed
+	return high + 1, nil
+}
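One common reason to query the possible CPU count is sizing value buffers for per-CPU maps, which yield one element per possible CPU. A minimal sketch of that pattern (the per-CPU lookup itself is not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	// Per-CPU maps return one value slot per possible CPU, so buffers
	// are commonly sized with PossibleCPU before a Lookup.
	n, err := ebpf.PossibleCPU()
	if err != nil {
		panic(err)
	}
	values := make([]uint64, n)
	fmt.Println("slots for per-CPU values:", len(values))
}
```

diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go new file mode 100644 index 000000000..396b3394d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/doc.go @@ -0,0 +1,25 @@
+// Package ebpf is a toolkit for working with eBPF programs.
+//
+// eBPF programs are small snippets of code which are executed directly
+// in a VM in the Linux kernel, which makes them very fast and flexible.
+// Many Linux subsystems now accept eBPF programs. This makes it possible
+// to implement highly application specific logic inside the kernel,
+// without having to modify the actual kernel itself.
+//
+// This package is designed for long-running processes which
+// want to use eBPF to implement part of their application logic. It has no
+// run-time dependencies outside of the library and the Linux kernel itself.
+// eBPF code should be compiled ahead of time using clang, and shipped with
+// your application as any other resource.
+//
+// Use the link subpackage to attach a loaded program to a hook in the kernel.
+//
+// Note that losing all references to Map and Program resources will cause
+// their underlying file descriptors to be closed, potentially removing those
+// objects from the kernel. Always retain a reference by e.g. deferring a
+// Close() of a Collection or LoadAndAssign object until application exit.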
+// +// Special care needs to be taken when handling maps of type ProgramArray, +// as the kernel erases its contents when the last userspace or bpffs +// reference disappears, regardless of the map being in active use. +package ebpf diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go new file mode 100644 index 000000000..620037d80 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -0,0 +1,1337 @@ +package ebpf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +type kconfigMetaKey struct{} + +type kconfigMeta struct { + Map *MapSpec + Offset uint32 +} + +type kfuncMetaKey struct{} + +type kfuncMeta struct { + Binding elf.SymBind + Func *btf.Func +} + +// elfCode is a convenience to reduce the amount of arguments that have to +// be passed around explicitly. You should treat its contents as immutable. +type elfCode struct { + *internal.SafeELFFile + sections map[elf.SectionIndex]*elfSection + license string + version uint32 + btf *btf.Spec + extInfo *btf.ExtInfos + maps map[string]*MapSpec + kfuncs map[string]*btf.Func + kconfig *MapSpec +} + +// LoadCollectionSpec parses an ELF file into a CollectionSpec. +func LoadCollectionSpec(file string) (*CollectionSpec, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + spec, err := LoadCollectionSpecFromReader(f) + if err != nil { + return nil, fmt.Errorf("file %s: %w", file, err) + } + return spec, nil +} + +// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. +func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { + f, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, err + } + + // Checks if the ELF file is for BPF data. + // Old LLVM versions set e_machine to EM_NONE. + if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF { + return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine) + } + + var ( + licenseSection *elf.Section + versionSection *elf.Section + sections = make(map[elf.SectionIndex]*elfSection) + relSections = make(map[elf.SectionIndex]*elf.Section) + ) + + // This is the target of relocations generated by inline assembly. + sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection) + + // Collect all the sections we're interested in. This includes relocations + // which we parse later. + // + // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date. 
+ for i, sec := range f.Sections { + idx := elf.SectionIndex(i) + + switch { + case strings.HasPrefix(sec.Name, "license"): + licenseSection = sec + case strings.HasPrefix(sec.Name, "version"): + versionSection = sec + case strings.HasPrefix(sec.Name, "maps"): + sections[idx] = newElfSection(sec, mapSection) + case sec.Name == ".maps": + sections[idx] = newElfSection(sec, btfMapSection) + case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"): + sections[idx] = newElfSection(sec, dataSection) + case sec.Type == elf.SHT_REL: + // Store relocations under the section index of the target + relSections[elf.SectionIndex(sec.Info)] = sec + case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: + sections[idx] = newElfSection(sec, programSection) + } + } + + license, err := loadLicense(licenseSection) + if err != nil { + return nil, fmt.Errorf("load license: %w", err) + } + + version, err := loadVersion(versionSection, f.ByteOrder) + if err != nil { + return nil, fmt.Errorf("load version: %w", err) + } + + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) + if err != nil && !errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + ec := &elfCode{ + SafeELFFile: f, + sections: sections, + license: license, + version: version, + btf: btfSpec, + extInfo: btfExtInfo, + maps: make(map[string]*MapSpec), + kfuncs: make(map[string]*btf.Func), + } + + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } + + ec.assignSymbols(symbols) + + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) + } + + if err := ec.loadMaps(); err != nil { + return nil, fmt.Errorf("load maps: %w", err) + } + + if err := ec.loadBTFMaps(); err != nil { + return nil, fmt.Errorf("load BTF maps: %w", err) + } + + if err := ec.loadDataSections(); err != nil { + return nil, fmt.Errorf("load data sections: %w", err) + } + + if err := ec.loadKconfigSection(); err != nil { + return nil, fmt.Errorf("load virtual .kconfig section: %w", err) + } + + if err := ec.loadKsymsSection(); err != nil { + return nil, fmt.Errorf("load virtual .ksyms section: %w", err) + } + + // Finally, collect programs and link them. + progs, err := ec.loadProgramSections() + if err != nil { + return nil, fmt.Errorf("load programs: %w", err) + } + + return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil +} + +func loadLicense(sec *elf.Section) (string, error) { + if sec == nil { + return "", nil + } + + data, err := sec.Data() + if err != nil { + return "", fmt.Errorf("section %s: %v", sec.Name, err) + } + return string(bytes.TrimRight(data, "\000")), nil +} + +func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { + if sec == nil { + return 0, nil + } + + var version uint32 + if err := binary.Read(sec.Open(), bo, &version); err != nil { + return 0, fmt.Errorf("section %s: %v", sec.Name, err) + } + return version, nil +} + +type elfSectionKind int + +const ( + undefSection elfSectionKind = iota + mapSection + btfMapSection + programSection + dataSection +) + +type elfSection struct { + *elf.Section + kind elfSectionKind + // Offset from the start of the section to a symbol + symbols map[uint64]elf.Symbol + // Offset from the start of the section to a relocation, which points at + // a symbol in another section. + relocations map[uint64]elf.Symbol + // The number of relocations pointing at this section. 
+ references int +} + +func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { + return &elfSection{ + section, + kind, + make(map[uint64]elf.Symbol), + make(map[uint64]elf.Symbol), + 0, + } +} + +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. + if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + + symSection.symbols[symbol.Value] = symbol + } +} + +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. +func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + + // Generate a ProgramSpec for each function found in each program section. + var export []string + for _, sec := range ec.sections { + if sec.kind != programSection { + continue + } + + if len(sec.symbols) == 0 { + return nil, fmt.Errorf("section %v: missing symbols", sec.Name) + } + + funcs, err := ec.loadFunctions(sec) + if err != nil { + return nil, fmt.Errorf("section %v: %w", sec.Name, err) + } + + progType, attachType, progFlags, attachTo := getProgType(sec.Name) + + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + } + + // Function names must be unique within a single ELF blob. 
+ if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) + } + progs[name] = spec + + if spec.SectionName != ".text" { + export = append(export, name) + } + } + } + + flattenPrograms(progs, export) + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. + for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) + } + } + + return progs, nil +} + +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { + r := bufio.NewReader(section.Open()) + + // Decode the section's instruction stream. + insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) + if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() + + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) + } + + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. + if rel, ok := section.relocations[offset]; ok { + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) + } + } else { + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) + } + } + } + + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) + } + + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. +func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) + } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. 
+ dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. + dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) +} + +var errUnsupportedBinding = errors.New("unsupported binding") + +func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { + var ( + typ = elf.ST_TYPE(rel.Info) + bind = elf.ST_BIND(rel.Info) + name = rel.Name + ) + + target := ec.sections[rel.Section] + + switch target.kind { + case mapSection, btfMapSection: + if bind == elf.STB_LOCAL { + return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) + } + + if bind != elf.STB_GLOBAL { + return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { + // STT_NOTYPE is generated on clang < 8 which doesn't tag + // relocations appropriately. + return fmt.Errorf("map load: incorrect relocation type %v", typ) + } + + ins.Src = asm.PseudoMapFD + + case dataSection: + var offset uint32 + switch typ { + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // This is really a reference to a static symbol, which clang doesn't + // emit a symbol table entry for. Instead it encodes the offset in + // the instruction itself. + offset = uint32(uint64(ins.Constant)) + + case elf.STT_OBJECT: + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. + if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + default: + return fmt.Errorf("incorrect relocation type %v for direct map load", typ) + } + + // We rely on using the name of the data section as the reference. It + // would be nicer to keep the real name in case of an STT_OBJECT, but + // it's not clear how to encode that into Instruction. + name = target.Name + + // The kernel expects the offset in the second basic BPF instruction. + ins.Constant = int64(uint64(offset) << 32) + ins.Src = asm.PseudoMapValue + + case programSection: + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } + + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. 
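Stepping back to the jumpTarget helper above: its arithmetic is easy to misread, so here is a standalone restatement with one worked case (a sketch; the only fact assumed from the real package is that asm.InstructionSize is 8 bytes):

package main

import "fmt"

const instructionSize = 8 // bytes per raw BPF instruction, as in asm.InstructionSize

// jumpTarget mirrors the vendored helper: the base of a relative jump is
// the end of the current instruction, plus Constant raw instructions.
func jumpTarget(offset uint64, constant int64) uint64 {
	dest := constant*instructionSize + int64(offset+instructionSize)
	if dest < 0 {
		return 0
	}
	return uint64(dest)
}

func main() {
	// An instruction at byte offset 16 with Constant 2 skips two raw
	// instructions: 2*8 + (16+8) = 40.
	fmt.Println(jumpTarget(16, 2)) // 40
}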
+ offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
+ sym, ok := target.symbols[uint64(offset)]
+ if !ok {
+ return fmt.Errorf("call: no symbol at offset %d", offset)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+
+ default:
+ return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
+ }
+ case opCode.IsDWordLoad():
+ switch typ {
+ case elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ // ins.Constant already contains the offset in bytes from the
+ // start of the section. This is different than a call to a
+ // static function.
+
+ default:
+ return fmt.Errorf("load: %s: invalid symbol type %s", name, typ)
+ }
+
+ sym, ok := target.symbols[uint64(ins.Constant)]
+ if !ok {
+ return fmt.Errorf("load: no symbol at offset %d", ins.Constant)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+ ins.Src = asm.PseudoFunc
+
+ default:
+ return fmt.Errorf("neither a call nor a load instruction: %v", ins)
+ }
+
+ // The Undefined section is used for 'virtual' symbols that aren't backed by
+ // an ELF section. This includes symbol references from inline asm, forward
+ // function declarations, as well as extern kfunc declarations using __ksym
+ // and extern kconfig variables declared using __kconfig.
+ case undefSection:
+ if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK {
+ return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind)
+ }
+
+ if typ != elf.STT_NOTYPE {
+ return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
+ }
+
+ kf := ec.kfuncs[name]
+ switch {
+ // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name
+ // that matches the symbol name, we mark the instruction as referencing a kfunc.
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call:
+ ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{
+ Func: kf,
+ Binding: bind,
+ })
+
+ ins.Src = asm.PseudoKfuncCall
+ ins.Constant = -1
+
+ case kf != nil && ins.OpCode.IsDWordLoad():
+ ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{
+ Func: kf,
+ Binding: bind,
+ })
+
+ ins.Constant = 0
+
+ // If no kconfig map is found, this must be a symbol reference from inline
+ // asm (see testdata/loader.c:asm_relocation()) or a call to a forward
+ // function declaration (see testdata/fwd_decl.c). Don't interfere; these
+ // remain standard symbol references.
+ // extern __kconfig reads are represented as dword loads that need to be
+ // rewritten to pseudo map loads from .kconfig. If the map is present,
+ // require it to contain the symbol to disambiguate between inline asm
+ // relos and kconfigs.
+ case ec.kconfig != nil && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars { + if vsi.Type.(*btf.Var).Name != rel.Name { + continue + } + + ins.Src = asm.PseudoMapValue + ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset}) + return nil + } + + return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name) + } + + default: + return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) + } + + *ins = ins.WithReference(name) + return nil +} + +func (ec *elfCode) loadMaps() error { + for _, sec := range ec.sections { + if sec.kind != mapSection { + continue + } + + nSym := len(sec.symbols) + if nSym == 0 { + return fmt.Errorf("section %v: no symbols", sec.Name) + } + + if sec.Size%uint64(nSym) != 0 { + return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) + } + + var ( + r = bufio.NewReader(sec.Open()) + size = sec.Size / uint64(nSym) + ) + for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { + mapSym, ok := sec.symbols[offset] + if !ok { + return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) + } + + mapName := mapSym.Name + if ec.maps[mapName] != nil { + return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) + } + + lr := io.LimitReader(r, int64(size)) + + spec := MapSpec{ + Name: SanitizeName(mapName, -1), + } + switch { + case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: + return fmt.Errorf("map %s: missing type", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: + return fmt.Errorf("map %s: missing key size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: + return fmt.Errorf("map %s: missing value size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil: + return fmt.Errorf("map %s: missing max entries", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: + return fmt.Errorf("map %s: missing flags", mapName) + } + + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = bytes.NewReader(extra) + } + + ec.maps[mapName] = &spec + } + } + + return nil +} + +// loadBTFMaps iterates over all ELF sections marked as BTF map sections +// (like .maps) and parses them into MapSpecs. Dump the .maps section and +// any relocations with `readelf -x .maps -r `. +func (ec *elfCode) loadBTFMaps() error { + for _, sec := range ec.sections { + if sec.kind != btfMapSection { + continue + } + + if ec.btf == nil { + return fmt.Errorf("missing BTF") + } + + // Each section must appear as a DataSec in the ELF's BTF blob. + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { + return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) + } + + // Open a Reader to the ELF's raw section bytes so we can assert that all + // of them are zero on a per-map (per-Var) basis. For now, the section's + // sole purpose is to receive relocations, so all must be zero. + rs := sec.Open() + + for _, vs := range ds.Vars { + // BPF maps are declared as and assigned to global variables, + // so iterate over each Var in the DataSec and validate their types. 
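Before the per-Var validation below, it helps to know what this BTF parsing ultimately produces: an ordinary MapSpec. A rough Go illustration of the result for a BTF-style hash map (the name, sizes and entry count are illustrative, not taken from this diff; the Key/Value BTF types are omitted for brevity):

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	// Roughly what mapSpecFromBTF yields for a .maps declaration built with
	// __uint(type, BPF_MAP_TYPE_HASH), __uint(max_entries, 1024),
	// __type(key, __u32) and __type(value, __u64).
	spec := &ebpf.MapSpec{
		Name:       "example_map", // illustrative
		Type:       ebpf.Hash,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1024,
	}
	fmt.Printf("%+v\n", spec)
}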
+ v, ok := vs.Type.(*btf.Var) + if !ok { + return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) + } + name := string(v.Name) + + // The BTF metadata for each Var contains the full length of the map + // declaration, so read the corresponding amount of bytes from the ELF. + // This way, we can pinpoint which map declaration contains unexpected + // (and therefore unsupported) data. + _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) + if err != nil { + return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) + } + + if ec.maps[name] != nil { + return fmt.Errorf("section %v: map %s already exists", sec.Name, name) + } + + // Each Var representing a BTF map definition contains a Struct. + mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct) + if !ok { + return fmt.Errorf("expected struct, got %s", v.Type) + } + + mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) + if err != nil { + return fmt.Errorf("map %v: %w", name, err) + } + + ec.maps[name] = mapSpec + } + + // Drain the ELF section reader to make sure all bytes are accounted for + // with BTF metadata. + i, err := io.Copy(io.Discard, rs) + if err != nil { + return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) + } + if i > 0 { + return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + } + } + + return nil +} + +// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing +// a BTF map definition. The name and spec arguments will be copied to the +// resulting MapSpec, and inner must be true on any recursive invocations. +func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { + var ( + key, value btf.Type + keySize, valueSize uint32 + mapType MapType + flags, maxEntries uint32 + pinType PinType + innerMapSpec *MapSpec + contents []MapKV + err error + ) + + for i, member := range def.Members { + switch member.Name { + case "type": + mt, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get type: %w", err) + } + mapType = MapType(mt) + + case "map_flags": + flags, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map flags: %w", err) + } + + case "max_entries": + maxEntries, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map max entries: %w", err) + } + + case "key": + if keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + pk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("key type is not a pointer: %T", member.Type) + } + + key = pk.Target + + size, err := btf.Sizeof(pk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF key: %w", err) + } + + keySize = uint32(size) + + case "value": + if valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + vk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("value type is not a pointer: %T", member.Type) + } + + value = vk.Target + + size, err := btf.Sizeof(vk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF value: %w", err) + } + + valueSize = uint32(size) + + case "key_size": + // Key needs to be nil and keySize needs to be 0 for key_size to be + // considered a valid member. 
+ if key != nil || keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + keySize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF key size: %w", err) + } + + case "value_size": + // Value needs to be nil and valueSize needs to be 0 for value_size to be + // considered a valid member. + if value != nil || valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + valueSize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF value size: %w", err) + } + + case "pinning": + if inner { + return nil, errors.New("inner maps can't be pinned") + } + + pinning, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get pinning: %w", err) + } + + pinType = PinType(pinning) + + case "values": + // The 'values' field in BTF map definitions is used for declaring map + // value types that are references to other BPF objects, like other maps + // or programs. It is always expected to be an array of pointers. + if i != len(def.Members)-1 { + return nil, errors.New("'values' must be the last member in a BTF map definition") + } + + if valueSize != 0 && valueSize != 4 { + return nil, errors.New("value_size must be 0 or 4") + } + valueSize = 4 + + valueType, err := resolveBTFArrayMacro(member.Type) + if err != nil { + return nil, fmt.Errorf("can't resolve type of member 'values': %w", err) + } + + switch t := valueType.(type) { + case *btf.Struct: + // The values member pointing to an array of structs means we're expecting + // a map-in-map declaration. + if mapType != ArrayOfMaps && mapType != HashOfMaps { + return nil, errors.New("outer map needs to be an array or a hash of maps") + } + if inner { + return nil, fmt.Errorf("nested inner maps are not supported") + } + + // This inner map spec is used as a map template, but it needs to be + // created as a traditional map before it can be used to do so. + // libbpf names the inner map template '.inner', but we + // opted for _inner to simplify validation logic. (dots only supported + // on kernels 5.2 and up) + // Pass the BTF spec from the parent object, since both parent and + // child must be created from the same BTF blob (on kernels that support BTF). + innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) + if err != nil { + return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) + } + + case *btf.FuncProto: + // The values member contains an array of function pointers, meaning an + // autopopulated PROG_ARRAY. + if mapType != ProgramArray { + return nil, errors.New("map needs to be a program array") + } + + default: + return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) + } + + contents, err = resolveBTFValuesContents(es, vs, member) + if err != nil { + return nil, fmt.Errorf("resolving values contents: %w", err) + } + + case "map_extra": + return nil, fmt.Errorf("BTF map definition: field %s: %w", member.Name, ErrNotSupported) + + default: + return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) + } + } + + return &MapSpec{ + Name: SanitizeName(name, -1), + Type: MapType(mapType), + KeySize: keySize, + ValueSize: valueSize, + MaxEntries: maxEntries, + Flags: flags, + Key: key, + Value: value, + Pinning: pinType, + InnerMap: innerMapSpec, + Contents: contents, + }, nil +} + +// uintFromBTF resolves the __uint macro, which is a pointer to a sized +// array, e.g. for int (*foo)[10], this function will return 10. 
+func uintFromBTF(typ btf.Type) (uint32, error) { + ptr, ok := typ.(*btf.Pointer) + if !ok { + return 0, fmt.Errorf("not a pointer: %v", typ) + } + + arr, ok := ptr.Target.(*btf.Array) + if !ok { + return 0, fmt.Errorf("not a pointer to array: %v", typ) + } + + return arr.Nelems, nil +} + +// resolveBTFArrayMacro resolves the __array macro, which declares an array +// of pointers to a given type. This function returns the target Type of +// the pointers in the array. +func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { + arr, ok := typ.(*btf.Array) + if !ok { + return nil, fmt.Errorf("not an array: %v", typ) + } + + ptr, ok := arr.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("not an array of pointers: %v", typ) + } + + return ptr.Target, nil +} + +// resolveBTFValuesContents resolves relocations into ELF sections belonging +// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map +// definitions to extract static declarations of map contents. +func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { + // The elements of a .values pointer array are not encoded in BTF. + // Instead, relocations are generated into each array index. + // However, it's possible to leave certain array indices empty, so all + // indices' offsets need to be checked for emitted relocations. + + // The offset of the 'values' member within the _struct_ (in bits) + // is the starting point of the array. Convert to bytes. Add VarSecinfo + // offset to get the absolute position in the ELF blob. + start := member.Offset.Bytes() + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. + end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.SectionHeader.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. + for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. + switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), r.Name}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), r.Name}) + default: + return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name) + } + } + + return contents, nil +} + +func (ec *elfCode) loadDataSections() error { + for _, sec := range ec.sections { + if sec.kind != dataSection { + continue + } + + if sec.references == 0 { + // Prune data sections which are not referenced by any + // instructions. 
+ continue
+ }
+
+ mapSpec := &MapSpec{
+ Name: SanitizeName(sec.Name, -1),
+ Type: Array,
+ KeySize: 4,
+ ValueSize: uint32(sec.Size),
+ MaxEntries: 1,
+ }
+
+ switch sec.Type {
+ // Only open the section if we know there's actual data to be read.
+ case elf.SHT_PROGBITS:
+ data, err := sec.Data()
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
+ }
+
+ if uint64(len(data)) > math.MaxUint32 {
+ return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
+ }
+ mapSpec.Contents = []MapKV{{uint32(0), data}}
+
+ case elf.SHT_NOBITS:
+ // NOBITS sections like .bss contain only zeroes, and since data sections
+ // are Arrays, the kernel already preallocates them. Skip reading zeroes
+ // from the ELF.
+ default:
+ return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type)
+ }
+
+ // It is possible for a data section to exist without a corresponding BTF Datasec
+ // if it only contains anonymous values like macro-defined arrays.
+ if ec.btf != nil {
+ var ds *btf.Datasec
+ if ec.btf.TypeByName(sec.Name, &ds) == nil {
+ // Assign the spec's key and BTF only if the Datasec lookup was successful.
+ mapSpec.Key = &btf.Void{}
+ mapSpec.Value = ds
+ }
+ }
+
+ if strings.HasPrefix(sec.Name, ".rodata") {
+ mapSpec.Flags = unix.BPF_F_RDONLY_PROG
+ mapSpec.Freeze = true
+ }
+
+ ec.maps[sec.Name] = mapSpec
+ }
+
+ return nil
+}
+
+// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKconfigSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".kconfig", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if ds.Size == 0 {
+ return errors.New("zero-length .kconfig")
+ }
+
+ ec.kconfig = &MapSpec{
+ Name: ".kconfig",
+ Type: Array,
+ KeySize: uint32(4),
+ ValueSize: ds.Size,
+ MaxEntries: 1,
+ Flags: unix.BPF_F_RDONLY_PROG,
+ Freeze: true,
+ Key: &btf.Int{Size: 4},
+ Value: ds,
+ }
+
+ return nil
+}
+
+// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKsymsSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".ksyms", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ for _, v := range ds.Vars {
+ // We have already checked the .ksyms Datasec to only contain Func Vars.
+ ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func)
+ }
+
+ return nil
+}
+
+type libbpfElfSectionDef struct {
+ pattern string
+ programType sys.ProgType
+ attachType sys.AttachType
+ flags libbpfElfSectionFlag
+}
+
+type libbpfElfSectionFlag uint32
+
+// The values correspond to enum sec_def_flags in libbpf.
+const (
+ _SEC_NONE libbpfElfSectionFlag = 0
+
+ _SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1)
+ _SEC_ATTACHABLE
+ _SEC_ATTACH_BTF
+ _SEC_SLEEPABLE
+ _SEC_XDP_FRAGS
+ _SEC_USDT
+
+ // Ignore any present extra in order to preserve backwards compatibility
+ // with earlier versions of the library.
+ ignoreExtra
+
+ _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT
+)
+
+func init() {
+ // Compatibility with older versions of the library.
+ // We prepend libbpf definitions since they contain a prefix match
+ // for "xdp".
+ elfSectionDefs = append([]libbpfElfSectionDef{
+ {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra},
+ {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS},
+ {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0},
+ {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS},
+ {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0},
+ // This has been in the library since the beginning of time. Not sure
+ // where it came from.
+ {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE},
+ }, elfSectionDefs...)
+}
+
+func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
+ // Skip optional program marking for now.
+ sectionName = strings.TrimPrefix(sectionName, "?")
+
+ for _, t := range elfSectionDefs {
+ extra, ok := matchSectionName(sectionName, t.pattern)
+ if !ok {
+ continue
+ }
+
+ programType := ProgramType(t.programType)
+ attachType := AttachType(t.attachType)
+
+ var flags uint32
+ if t.flags&_SEC_SLEEPABLE > 0 {
+ flags |= unix.BPF_F_SLEEPABLE
+ }
+ if t.flags&_SEC_XDP_FRAGS > 0 {
+ flags |= unix.BPF_F_XDP_HAS_FRAGS
+ }
+ if t.flags&_SEC_EXP_ATTACH_OPT > 0 {
+ if programType == XDP {
+ // The library doesn't yet have code to fall back to not specifying
+ // attach type. Only do this for XDP since we've enforced correct
+ // attach type for all other program types.
+ attachType = AttachNone
+ }
+ }
+ if t.flags&ignoreExtra > 0 {
+ extra = ""
+ }
+
+ return programType, attachType, flags, extra
+ }
+
+ return UnspecifiedProgram, AttachNone, 0, ""
+}
+
+// matchSectionName checks a section name against a pattern.
+//
+// Its behaviour mirrors that of libbpf's sec_def_matches.
+func matchSectionName(sectionName, pattern string) (extra string, found bool) {
+ have, extra, found := strings.Cut(sectionName, "/")
+ want := strings.TrimRight(pattern, "+/")
+
+ if strings.HasSuffix(pattern, "/") {
+ // Section name must have a slash and extra may be empty.
+ return extra, have == want && found
+ } else if strings.HasSuffix(pattern, "+") {
+ // Section name may have a slash and extra may be empty.
+ return extra, have == want
+ }
+
+ // Section name must have a prefix. extra is ignored.
+ return "", strings.HasPrefix(sectionName, pattern)
+}
+
+func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
+ rels := make(map[uint64]elf.Symbol)
+
+ if sec.Entsize < 16 {
+ return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
+ }
+
+ r := bufio.NewReader(sec.Open())
+ for off := uint64(0); off < sec.Size; off += sec.Entsize {
+ ent := io.LimitReader(r, int64(sec.Entsize))
+
+ var rel elf.Rel64
+ if binary.Read(ent, ec.ByteOrder, &rel) != nil {
+ return nil, fmt.Errorf("can't parse relocation at offset %v", off)
+ }
+
+ symNo := int(elf.R_SYM64(rel.Info) - 1)
+ if symNo >= len(symbols) {
+ return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo)
+ }
+
+ symbol := symbols[symNo]
+ rels[rel.Off] = symbol
+ }
+
+ return rels, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go
new file mode 100644
index 000000000..4b58251d9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/elf_sections.go
@@ -0,0 +1,109 @@
+// Code generated by internal/cmd/gensections.awk; DO NOT EDIT.
+
+package ebpf
+
+// Code in this file is derived from libbpf, available under BSD-2-Clause.
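To make the three pattern forms of matchSectionName concrete before the generated table that uses them, here is a standalone sketch of the same matching rules with example section names (it re-implements the unexported helper purely for illustration):

package main

import (
	"fmt"
	"strings"
)

// matchSectionName mirrors the vendored helper: a pattern ending in "/"
// requires a slash in the section name, a pattern ending in "+" allows
// one, and any other pattern is a plain prefix match.
func matchSectionName(sectionName, pattern string) (extra string, found bool) {
	have, extra, found := strings.Cut(sectionName, "/")
	want := strings.TrimRight(pattern, "+/")

	if strings.HasSuffix(pattern, "/") {
		return extra, have == want && found
	} else if strings.HasSuffix(pattern, "+") {
		return extra, have == want
	}
	return "", strings.HasPrefix(sectionName, pattern)
}

func main() {
	fmt.Println(matchSectionName("kprobe/sys_clone", "kprobe+"))    // sys_clone true
	fmt.Println(matchSectionName("xdp.frags/devmap", "xdp.frags/")) // devmap true
	fmt.Println(matchSectionName("cgroup/dev", "cgroup/dev"))       //  true (prefix)
}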
+ +import "github.com/cilium/ebpf/internal/sys" + +var elfSectionDefs = []libbpfElfSectionDef{ + {"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + {"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE}, + {"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE}, + {"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT}, + {"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE}, + {"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE}, + {"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE}, + {"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE}, + {"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF}, + {"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF}, + {"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF}, + {"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF}, + {"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF}, + {"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF}, + {"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF}, + {"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, 
_SEC_ATTACH_BTF}, + {"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE}, + {"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE}, + {"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE}, + {"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS}, + {"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT}, + {"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE}, + {"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE}, + {"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE}, + {"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE}, + {"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE}, + {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE}, + {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT}, + {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT}, + {"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE}, + {"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE}, + {"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE}, + {"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT}, + {"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE}, + {"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/getpeername4", 
sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE}, + {"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT}, + {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE}, + {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE}, + {"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE}, + {"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE}, +} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go new file mode 100644 index 000000000..04c60c64b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/info.go @@ -0,0 +1,499 @@ +package ebpf + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" + "time" + "unsafe" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// The *Info structs expose metadata about a program or map. Most +// fields are exposed via a getter: +// +// func (*MapInfo) ID() (MapID, bool) +// +// This is because the metadata available changes based on kernel version. +// The second boolean return value indicates whether a particular field is +// available on the current kernel. +// +// Always add new metadata as such a getter, unless you can somehow get the +// value of the field on all supported kernels. Also document which version +// a particular field first appeared in. +// +// Some metadata is a buffer which needs additional parsing. In this case, +// store the undecoded data in the Info struct and provide a getter which +// decodes it when necessary. See ProgramInfo.Instructions for an example. + +// MapInfo describes a map. +type MapInfo struct { + Type MapType + id MapID + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + Flags uint32 + // Name as supplied by user space at load time. Available from 4.15. 
+ Name string
+
+ btf btf.ID
+}
+
+func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
+ var info sys.MapInfo
+ err := sys.ObjInfo(fd, &info)
+ if errors.Is(err, syscall.EINVAL) {
+ return newMapInfoFromProc(fd)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &MapInfo{
+ MapType(info.Type),
+ MapID(info.Id),
+ info.KeySize,
+ info.ValueSize,
+ info.MaxEntries,
+ uint32(info.MapFlags),
+ unix.ByteSliceToString(info.Name[:]),
+ btf.ID(info.BtfId),
+ }, nil
+}
+
+func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) {
+ var mi MapInfo
+ err := scanFdInfo(fd, map[string]interface{}{
+ "map_type": &mi.Type,
+ "key_size": &mi.KeySize,
+ "value_size": &mi.ValueSize,
+ "max_entries": &mi.MaxEntries,
+ "map_flags": &mi.Flags,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &mi, nil
+}
+
+// ID returns the map ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (mi *MapInfo) ID() (MapID, bool) {
+ return mi.id, mi.id > 0
+}
+
+// BTFID returns the BTF ID associated with the Map.
+//
+// The ID is only valid as long as the associated Map is kept alive.
+// Available from 4.18.
+//
+// The bool return value indicates whether this optional field is available and
+// populated. (The field may be available but not populated if the kernel
+// supports the field but the Map was loaded without BTF information.)
+func (mi *MapInfo) BTFID() (btf.ID, bool) {
+ return mi.btf, mi.btf > 0
+}
+
+// programStats holds statistics of a program.
+type programStats struct {
+ // Total accumulated runtime of the program in ns.
+ runtime time.Duration
+ // Total number of times the program was called.
+ runCount uint64
+ // Total number of times the program was NOT called.
+ // Added in commit 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented").
+ recursionMisses uint64
+}
+
+// ProgramInfo describes a program.
+type ProgramInfo struct {
+ Type ProgramType
+ id ProgramID
+ // Truncated hash of the BPF bytecode. Available from 4.13.
+ Tag string
+ // Name as supplied by user space at load time. Available from 4.15.
+ Name string
+
+ createdByUID uint32
+ haveCreatedByUID bool
+ btf btf.ID
+ stats *programStats
+
+ maps []MapID
+ insns []byte
+
+ lineInfos []byte
+ numLineInfos uint32
+ funcInfos []byte
+ numFuncInfos uint32
+}
+
+func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
+ var info sys.ProgInfo
+ err := sys.ObjInfo(fd, &info)
+ if errors.Is(err, syscall.EINVAL) {
+ return newProgramInfoFromProc(fd)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ pi := ProgramInfo{
+ Type: ProgramType(info.Type),
+ id: ProgramID(info.Id),
+ Tag: hex.EncodeToString(info.Tag[:]),
+ Name: unix.ByteSliceToString(info.Name[:]),
+ btf: btf.ID(info.BtfId),
+ stats: &programStats{
+ runtime: time.Duration(info.RunTimeNs),
+ runCount: info.RunCnt,
+ recursionMisses: info.RecursionMisses,
+ },
+ }
+
+ // Start with a clean struct for the second call, otherwise we may get EFAULT.
+ var info2 sys.ProgInfo
+
+ makeSecondCall := false
+
+ if info.NrMapIds > 0 {
+ pi.maps = make([]MapID, info.NrMapIds)
+ info2.NrMapIds = info.NrMapIds
+ info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0]))
+ makeSecondCall = true
+ } else if haveProgramInfoMapIDs() == nil {
+ // This program really has no associated maps.
+ pi.maps = make([]MapID, 0)
+ } else {
+ // The kernel doesn't report associated maps.
+ pi.maps = nil
+ }
+
+ // createdByUID and NrMapIds were introduced in the same kernel version.
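Map metadata follows the same ok-bool getter convention described at the top of this file; a usage sketch against the public API (the pin path is hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	m, err := ebpf.LoadPinnedMap("/sys/fs/bpf/my_map", nil) // hypothetical pin path
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	info, err := m.Info()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("name:", info.Name, "max entries:", info.MaxEntries)

	// The ID is optional: it is only reported by kernels 4.13 and later.
	if id, ok := info.ID(); ok {
		fmt.Println("id:", id)
	}
}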
+ if pi.maps != nil { + pi.createdByUID = info.CreatedByUid + pi.haveCreatedByUID = true + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + makeSecondCall = true + } + + if info.NrLineInfo > 0 { + pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo) + info2.LineInfo = sys.NewSlicePointer(pi.lineInfos) + info2.LineInfoRecSize = btf.LineInfoSize + info2.NrLineInfo = info.NrLineInfo + pi.numLineInfos = info.NrLineInfo + makeSecondCall = true + } + + if info.NrFuncInfo > 0 { + pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo) + info2.FuncInfo = sys.NewSlicePointer(pi.funcInfos) + info2.FuncInfoRecSize = btf.FuncInfoSize + info2.NrFuncInfo = info.NrFuncInfo + pi.numFuncInfos = info.NrFuncInfo + makeSecondCall = true + } + + if makeSecondCall { + if err := sys.ObjInfo(fd, &info2); err != nil { + return nil, err + } + } + + return &pi, nil +} + +func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { + var info ProgramInfo + err := scanFdInfo(fd, map[string]interface{}{ + "prog_type": &info.Type, + "prog_tag": &info.Tag, + }) + if errors.Is(err, errMissingFields) { + return nil, &internal.UnsupportedFeatureError{ + Name: "reading program info from /proc/self/fdinfo", + MinimumVersion: internal.Version{4, 10, 0}, + } + } + if err != nil { + return nil, err + } + + return &info, nil +} + +// ID returns the program ID. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) ID() (ProgramID, bool) { + return pi.id, pi.id > 0 +} + +// CreatedByUID returns the Uid that created the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) CreatedByUID() (uint32, bool) { + return pi.createdByUID, pi.haveCreatedByUID +} + +// BTFID returns the BTF ID associated with the program. +// +// The ID is only valid as long as the associated program is kept alive. +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the program was loaded without BTF information.) +func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + +// RunCount returns the total number of times the program was called. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) RunCount() (uint64, bool) { + if pi.stats != nil { + return pi.stats.runCount, true + } + return 0, false +} + +// Runtime returns the total accumulated runtime of the program. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Runtime() (time.Duration, bool) { + if pi.stats != nil { + return pi.stats.runtime, true + } + return time.Duration(0), false +} + +// RecursionMisses returns the total number of times the program was NOT called. +// This can happen when another bpf program is already running on the cpu, which +// is likely to happen for example when you interrupt bpf program execution. 
+func (pi *ProgramInfo) RecursionMisses() (uint64, bool) { + if pi.stats != nil { + return pi.stats.recursionMisses, true + } + return 0, false +} + +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. +// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. +// +// The first instruction is marked as a symbol using the Program's name. +// +// If available, the instructions will be annotated with metadata from the +// BTF. This includes line information and function information. Reading +// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is +// unavailable, the instructions will be returned without metadata. +// +// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions. +// Requires CAP_SYS_ADMIN for instructions with metadata. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + var insns asm.Instructions + if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) + } + + if pi.btf != 0 { + btfh, err := btf.NewHandleFromID(pi.btf) + if err != nil { + // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM. + // Ignore it and fall back to instructions without metadata. + if !errors.Is(err, unix.EPERM) { + return nil, fmt.Errorf("unable to get BTF handle: %w", err) + } + } + + // If we have a BTF handle, we can use it to assign metadata to the instructions. + if btfh != nil { + defer btfh.Close() + + spec, err := btfh.Spec(nil) + if err != nil { + return nil, fmt.Errorf("unable to get BTF spec: %w", err) + } + + lineInfos, err := btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse line info: %w", err) + } + + funcInfos, err := btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse func info: %w", err) + } + + btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{}) + } + } + + fn := btf.FuncMetadata(&insns[0]) + name := pi.Name + if fn != nil { + name = fn.Name + } + insns[0] = insns[0].WithSymbol(name) + + return insns, nil +} + +// MapIDs returns the maps related to the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. 
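The same pattern applies to program metadata, including the statistics gated behind EnableStats further down in this file; a sketch (the pin path is hypothetical, and statistics need kernel 5.8+):

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// 0 selects run time/run count accounting (BPF_STATS_RUN_TIME).
	stats, err := ebpf.EnableStats(0)
	if err != nil {
		log.Printf("statistics not enabled: %v", err)
	} else {
		defer stats.Close()
	}

	prog, err := ebpf.LoadPinnedProgram("/sys/fs/bpf/my_prog", nil) // hypothetical pin path
	if err != nil {
		log.Fatal(err)
	}
	defer prog.Close()

	info, err := prog.Info()
	if err != nil {
		log.Fatal(err)
	}

	// Optional fields carry an ok-bool because their availability depends
	// on the running kernel.
	if id, ok := info.ID(); ok {
		fmt.Println("id:", id)
	}
	if count, ok := info.RunCount(); ok {
		fmt.Println("run count:", count)
	}
}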
+func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.maps, pi.maps != nil +} + +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) + if err != nil { + return err + } + defer fh.Close() + + if err := scanFdInfoReader(fh, fields); err != nil { + return fmt.Errorf("%s: %w", fh.Name(), err) + } + return nil +} + +var errMissingFields = errors.New("missing fields") + +func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { + var ( + scanner = bufio.NewScanner(r) + scanned int + ) + + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), "\t", 2) + if len(parts) != 2 { + continue + } + + name := strings.TrimSuffix(parts[0], ":") + field, ok := fields[string(name)] + if !ok { + continue + } + + if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", name, err) + } + + scanned++ + } + + if err := scanner.Err(); err != nil { + return err + } + + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + + if scanned != len(fields) { + return errMissingFields + } + + return nil +} + +// EnableStats starts the measuring of the runtime +// and run counts of eBPF programs. +// +// Collecting statistics can have an impact on the performance. +// +// Requires at least 5.8. +func EnableStats(which uint32) (io.Closer, error) { + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) + if err != nil { + return nil, err + } + return fd, nil +} + +var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error { + prog, err := progLoad(asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, SocketFilter, "MIT") + if err != nil { + return err + } + defer prog.Close() + + err = sys.ObjInfo(prog, &sys.ProgInfo{ + // NB: Don't need to allocate MapIds since the program isn't using + // any maps. + NrMapIds: 1, + }) + if errors.Is(err, unix.EINVAL) { + // Most likely the syscall doesn't exist. + return internal.ErrNotSupported + } + if errors.Is(err, unix.E2BIG) { + // We've hit check_uarg_tail_zero on older kernels. 
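The /proc/self/fdinfo fallback used by scanFdInfo parses tab-separated key/value lines; here is a self-contained sketch of the same scanning approach over sample text (the sample mimics what the kernel prints for a BPF map fd, trimmed for brevity):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := "pos:\t0\nmap_type:\t1\nkey_size:\t4\nvalue_size:\t8\nmax_entries:\t1024\n"

	var keySize, valueSize uint32
	fields := map[string]interface{}{
		"key_size":   &keySize,
		"value_size": &valueSize,
	}

	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), "\t", 2)
		if len(parts) != 2 {
			continue
		}
		name := strings.TrimSuffix(parts[0], ":")
		field, ok := fields[name]
		if !ok {
			continue // unknown keys are skipped, as in scanFdInfoReader
		}
		if _, err := fmt.Sscanln(parts[1], field); err != nil {
			panic(err)
		}
	}

	fmt.Println(keySize, valueSize) // 4 8
}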
+ return internal.ErrNotSupported
+ }
+
+ return err
+})
diff --git a/vendor/github.com/cilium/ebpf/internal/auxv.go b/vendor/github.com/cilium/ebpf/internal/auxv.go
new file mode 100644
index 000000000..45fd0d37f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/auxv.go
@@ -0,0 +1,60 @@
+package internal
+
+import (
+ "errors"
+ "io"
+ _ "unsafe"
+)
+
+type auxvPairReader interface {
+ Close() error
+ ReadAuxvPair() (uint64, uint64, error)
+}
+
+// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h
+const (
+ _AT_NULL = 0 // End of vector
+ _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+type auxvRuntimeReader struct {
+ data []uintptr
+ index int
+}
+
+func (r *auxvRuntimeReader) Close() error {
+ return nil
+}
+
+func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) {
+ if r.index >= len(r.data)+2 {
+ return 0, 0, io.EOF
+ }
+
+ // We manually add the (_AT_NULL, _AT_NULL) pair at the end
+ // that is not provided by the Go runtime.
+ var tag, value uintptr
+ if r.index+1 < len(r.data) {
+ tag, value = r.data[r.index], r.data[r.index+1]
+ } else {
+ tag, value = _AT_NULL, _AT_NULL
+ }
+ r.index += 2
+ return uint64(tag), uint64(value), nil
+}
+
+func newAuxvRuntimeReader() (auxvPairReader, error) {
+ data := runtime_getAuxv()
+
+ if len(data)%2 != 0 {
+ return nil, errors.New("malformed auxv passed from runtime")
+ }
+
+ return &auxvRuntimeReader{
+ data: data,
+ index: 0,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go
new file mode 100644
index 000000000..81c654433
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/buffer.go
@@ -0,0 +1,31 @@
+package internal
+
+import (
+ "bytes"
+ "sync"
+)
+
+var bytesBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+// NewBuffer retrieves a [bytes.Buffer] from a pool and re-initialises it.
+//
+// The returned buffer should be passed to [PutBuffer].
+func NewBuffer(buf []byte) *bytes.Buffer {
+ wr := bytesBufferPool.Get().(*bytes.Buffer)
+ // Reinitialize the Buffer with a new backing slice since it is returned to
+ // the caller by wr.Bytes() below. Pooling is faster despite calling
+ // NewBuffer. The pooled alloc is still reused; it only needs to be zeroed.
+ *wr = *bytes.NewBuffer(buf)
+ return wr
+}
+
+// PutBuffer releases a buffer to the pool.
+func PutBuffer(buf *bytes.Buffer) {
+ // Release reference to the backing buffer.
+ *buf = *bytes.NewBuffer(nil)
+ bytesBufferPool.Put(buf)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go
new file mode 100644
index 000000000..e3a305021
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/deque.go
@@ -0,0 +1,91 @@
+package internal
+
+import "math/bits"
+
+// Deque implements a double-ended queue.
+type Deque[T any] struct {
+ elems []T
+ read, write uint64
+ mask uint64
+}
+
+// Reset clears the contents of the deque while retaining the backing buffer.
+func (dq *Deque[T]) Reset() {
+ var zero T
+
+ for i := dq.read; i < dq.write; i++ {
+ dq.elems[i&dq.mask] = zero
+ }
+
+ dq.read, dq.write = 0, 0
+}
+
+func (dq *Deque[T]) Empty() bool {
+ return dq.read == dq.write
+}
+
+// Push adds an element to the end.
+func (dq *Deque[T]) Push(e T) {
+ dq.Grow(1)
+ dq.elems[dq.write&dq.mask] = e
+ dq.write++
+}
+
+// Shift returns the first element or the zero value.
+func (dq *Deque[T]) Shift() T { + var zero T + + if dq.Empty() { + return zero + } + + index := dq.read & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + dq.read++ + return t +} + +// Pop returns the last element or the zero value. +func (dq *Deque[T]) Pop() T { + var zero T + + if dq.Empty() { + return zero + } + + dq.write-- + index := dq.write & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + return t +} + +// Grow the deque's capacity, if necessary, to guarantee space for another n +// elements. +func (dq *Deque[T]) Grow(n int) { + have := dq.write - dq.read + need := have + uint64(n) + if need < have { + panic("overflow") + } + if uint64(len(dq.elems)) >= need { + return + } + + // Round up to the new power of two which is at least 8. + // See https://jameshfisher.com/2018/03/30/round-up-power-2/ + capacity := 1 << (64 - bits.LeadingZeros64(need-1)) + if capacity < 8 { + capacity = 8 + } + + elems := make([]T, have, capacity) + pivot := dq.read & dq.mask + copied := copy(elems, dq.elems[pivot:]) + copy(elems[copied:], dq.elems[:pivot]) + + dq.elems = elems[:capacity] + dq.mask = uint64(capacity) - 1 + dq.read, dq.write = 0, have +} diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go new file mode 100644 index 000000000..011581938 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/elf.go @@ -0,0 +1,102 @@ +package internal + +import ( + "debug/elf" + "fmt" + "io" +) + +type SafeELFFile struct { + *elf.File +} + +// NewSafeELFFile reads an ELF safely. +// +// Any panic during parsing is turned into an error. This is necessary since +// there are a bunch of unfixed bugs in debug/elf. +// +// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle +func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.NewFile(r) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// OpenSafeELFFile reads an ELF from a file. +// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. +func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// Symbols is the safe version of elf.File.Symbols. +func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF symbols panicked: %s", r) + }() + + syms, err = se.File.Symbols() + return +} + +// DynamicSymbols is the safe version of elf.File.DynamicSymbols. +func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) + }() + + syms, err = se.File.DynamicSymbols() + return +} + +// SectionsByType returns all sections in the file with the specified section type. 
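+//
+// For example, se.SectionsByType(elf.SHT_SYMTAB) returns every symbol table
+// section in the file.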
+func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 000000000..a37777f21 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,9 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian = binary.BigEndian diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 000000000..6dcd916d5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,9 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian = binary.LittleEndian diff --git a/vendor/github.com/cilium/ebpf/internal/epoll/poller.go b/vendor/github.com/cilium/ebpf/internal/epoll/poller.go new file mode 100644 index 000000000..ed1c3a3c8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/epoll/poller.go @@ -0,0 +1,278 @@ +package epoll + +import ( + "errors" + "fmt" + "math" + "os" + "runtime" + "slices" + "sync" + "time" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ErrFlushed = errors.New("data was flushed") + +// Poller waits for readiness notifications from multiple file descriptors. +// +// The wait can be interrupted by calling Close. +type Poller struct { + // mutexes protect the fields declared below them. If you need to + // acquire both at once you must lock epollMu before eventMu. + epollMu sync.Mutex + epollFd int + + eventMu sync.Mutex + closeEvent *eventFd + flushEvent *eventFd +} + +func New() (_ *Poller, err error) { + closeFDOnError := func(fd int) { + if err != nil { + unix.Close(fd) + } + } + closeEventFDOnError := func(e *eventFd) { + if err != nil { + e.close() + } + } + + epollFd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, fmt.Errorf("create epoll fd: %v", err) + } + defer closeFDOnError(epollFd) + + p := &Poller{epollFd: epollFd} + p.closeEvent, err = newEventFd() + if err != nil { + return nil, err + } + defer closeEventFDOnError(p.closeEvent) + + p.flushEvent, err = newEventFd() + if err != nil { + return nil, err + } + defer closeEventFDOnError(p.flushEvent) + + if err := p.Add(p.closeEvent.raw, 0); err != nil { + return nil, fmt.Errorf("add close eventfd: %w", err) + } + + if err := p.Add(p.flushEvent.raw, 0); err != nil { + return nil, fmt.Errorf("add flush eventfd: %w", err) + } + + runtime.SetFinalizer(p, (*Poller).Close) + return p, nil +} + +// Close the poller. +// +// Interrupts any calls to Wait. Multiple calls to Close are valid, but subsequent +// calls will return os.ErrClosed. +func (p *Poller) Close() error { + runtime.SetFinalizer(p, nil) + + // Interrupt Wait() via the closeEvent fd if it's currently blocked. 
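+ // (The wakeup works by incrementing the eventfd counter, which epoll
+ // reports as the fd becoming readable.)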
+ if err := p.wakeWaitForClose(); err != nil {
+ return err
+ }
+
+ // Acquire the lock. This ensures that Wait isn't running.
+ p.epollMu.Lock()
+ defer p.epollMu.Unlock()
+
+ // Prevent other calls to Close().
+ p.eventMu.Lock()
+ defer p.eventMu.Unlock()
+
+ if p.epollFd != -1 {
+ unix.Close(p.epollFd)
+ p.epollFd = -1
+ }
+
+ if p.closeEvent != nil {
+ p.closeEvent.close()
+ p.closeEvent = nil
+ }
+
+ if p.flushEvent != nil {
+ p.flushEvent.close()
+ p.flushEvent = nil
+ }
+
+ return nil
+}
+
+// Add an fd to the poller.
+//
+// id is returned by Wait in the unix.EpollEvent.Pad field and may be zero. It
+// must not exceed math.MaxInt32.
+//
+// Add is blocked by Wait.
+func (p *Poller) Add(fd int, id int) error {
+ if int64(id) > math.MaxInt32 {
+ return fmt.Errorf("unsupported id: %d", id)
+ }
+
+ p.epollMu.Lock()
+ defer p.epollMu.Unlock()
+
+ if p.epollFd == -1 {
+ return fmt.Errorf("epoll add: %w", os.ErrClosed)
+ }
+
+ // The representation of EpollEvent isn't entirely accurate.
+ // Pad is fully usable, not just padding. Hence we stuff the
+ // id in there, which allows us to identify the event later (e.g.,
+ // in case of perf events, which CPU sent it).
+ event := unix.EpollEvent{
+ Events: unix.EPOLLIN,
+ Fd: int32(fd),
+ Pad: int32(id),
+ }
+
+ if err := unix.EpollCtl(p.epollFd, unix.EPOLL_CTL_ADD, fd, &event); err != nil {
+ return fmt.Errorf("add fd to epoll: %v", err)
+ }
+
+ return nil
+}
+
+// Wait for events.
+//
+// Returns the number of pending events, or one of the following errors:
+//
+// - [os.ErrClosed] if interrupted by [Close].
+// - [ErrFlushed] if interrupted by [Flush].
+// - [os.ErrDeadlineExceeded] if deadline is reached.
+func (p *Poller) Wait(events []unix.EpollEvent, deadline time.Time) (int, error) {
+ p.epollMu.Lock()
+ defer p.epollMu.Unlock()
+
+ if p.epollFd == -1 {
+ return 0, fmt.Errorf("epoll wait: %w", os.ErrClosed)
+ }
+
+ for {
+ timeout := int(-1)
+ if !deadline.IsZero() {
+ msec := time.Until(deadline).Milliseconds()
+ // Deadline is in the past, don't block.
+ msec = max(msec, 0)
+ // Deadline is too far in the future.
+ msec = min(msec, math.MaxInt)
+
+ timeout = int(msec)
+ }
+
+ n, err := unix.EpollWait(p.epollFd, events, timeout)
+ if temp, ok := err.(temporaryError); ok && temp.Temporary() {
+ // Retry the syscall if we were interrupted, see https://github.com/golang/go/issues/20400
+ continue
+ }
+
+ if err != nil {
+ return 0, err
+ }
+
+ if n == 0 {
+ return 0, fmt.Errorf("epoll wait: %w", os.ErrDeadlineExceeded)
+ }
+
+ for i := 0; i < n; {
+ event := events[i]
+ if int(event.Fd) == p.closeEvent.raw {
+ // Since we don't read p.closeEvent the event is never cleared and
+ // we'll keep getting this wakeup until Close() acquires the
+ // lock and sets p.epollFd = -1.
+ return 0, fmt.Errorf("epoll wait: %w", os.ErrClosed)
+ }
+ if int(event.Fd) == p.flushEvent.raw {
+ // Read the event to prevent it from continuing to wake us.
+ p.flushEvent.read()
+ err = ErrFlushed
+ events = slices.Delete(events, i, i+1)
+ n -= 1
+ continue
+ }
+ i++
+ }
+
+ return n, err
+ }
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+// wakeWaitForClose unblocks Wait if it is blocked in epoll_wait.
+func (p *Poller) wakeWaitForClose() error { + p.eventMu.Lock() + defer p.eventMu.Unlock() + + if p.closeEvent == nil { + return fmt.Errorf("epoll wake: %w", os.ErrClosed) + } + + return p.closeEvent.add(1) +} + +// Flush unblocks Wait if it's epoll_wait, for purposes of reading pending samples +func (p *Poller) Flush() error { + p.eventMu.Lock() + defer p.eventMu.Unlock() + + if p.flushEvent == nil { + return fmt.Errorf("epoll wake: %w", os.ErrClosed) + } + + return p.flushEvent.add(1) +} + +// eventFd wraps a Linux eventfd. +// +// An eventfd acts like a counter: writes add to the counter, reads retrieve +// the counter and reset it to zero. Reads also block if the counter is zero. +// +// See man 2 eventfd. +type eventFd struct { + file *os.File + // prefer raw over file.Fd(), since the latter puts the file into blocking + // mode. + raw int +} + +func newEventFd() (*eventFd, error) { + fd, err := unix.Eventfd(0, unix.O_CLOEXEC|unix.O_NONBLOCK) + if err != nil { + return nil, err + } + file := os.NewFile(uintptr(fd), "event") + return &eventFd{file, fd}, nil +} + +func (efd *eventFd) close() error { + return efd.file.Close() +} + +func (efd *eventFd) add(n uint64) error { + var buf [8]byte + internal.NativeEndian.PutUint64(buf[:], n) + _, err := efd.file.Write(buf[:]) + return err +} + +func (efd *eventFd) read() (uint64, error) { + var buf [8]byte + _, err := efd.file.Read(buf[:]) + return internal.NativeEndian.Uint64(buf[:]), err +} diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go new file mode 100644 index 000000000..83a371ad3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/errors.go @@ -0,0 +1,181 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier +// log buffer. +// +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. +func ErrorWithLog(source string, err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. + if i := bytes.IndexByte(log, 0); i != -1 { + log = log[:i] + } + + log = bytes.Trim(log, whitespace) + if len(log) == 0 { + return &VerifierError{source, err, nil, false} + } + + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. + lines = append(lines, string(bytes.TrimRight(line, whitespace))) + } + + return &VerifierError{source, err, lines, false} +} + +// VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. +type VerifierError struct { + source string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string + // Deprecated: the log is never truncated anymore. + Truncated bool +} + +func (le *VerifierError) Unwrap() error { + return le.Cause +} + +func (le *VerifierError) Error() string { + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. 
+ log = log[:n-1] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error()) + + n := len(log) + if n == 0 { + return b.String() + } + + lines := log[n-1:] + if n >= 2 && includePreviousLine(log[n-1]) { + // Add one more line of context if it aids understanding the error. + lines = log[n-2:] + } + + for _, line := range lines { + b.WriteString(": ") + b.WriteString(strings.TrimSpace(line)) + } + + omitted := len(le.Log) - len(lines) + if omitted > 0 { + fmt.Fprintf(&b, " (%d line(s) omitted)", omitted) + } + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. + + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true + } + + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true + } + + return false +} + +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). %v +// allows outputting additional information using the following flags: +// +// %+v: Output the first lines, or all lines if no width is given. +// %-v: Output the last lines, or all lines if no width is given. +// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. + lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go new file mode 100644 index 000000000..2b856c735 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/feature.go @@ -0,0 +1,184 @@ +package internal + +import ( + "errors" + "fmt" + "sync" +) + +// ErrNotSupported indicates that a feature is not supported by the current kernel. +var ErrNotSupported = errors.New("not supported") + +// UnsupportedFeatureError is returned by FeatureTest() functions. +type UnsupportedFeatureError struct { + // The minimum Linux mainline version required for this feature. 
+ // Used for the error string, and for sanity checking during testing. + MinimumVersion Version + + // The name of the feature that isn't supported. + Name string +} + +func (ufe *UnsupportedFeatureError) Error() string { + if ufe.MinimumVersion.Unspecified() { + return fmt.Sprintf("%s not supported", ufe.Name) + } + return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion) +} + +// Is indicates that UnsupportedFeatureError is ErrNotSupported. +func (ufe *UnsupportedFeatureError) Is(target error) bool { + return target == ErrNotSupported +} + +// FeatureTest caches the result of a [FeatureTestFn]. +// +// Fields should not be modified after creation. +type FeatureTest struct { + // The name of the feature being detected. + Name string + // Version in the form Major.Minor[.Patch]. + Version string + // The feature test itself. + Fn FeatureTestFn + + mu sync.RWMutex + done bool + result error +} + +// FeatureTestFn is used to determine whether the kernel supports +// a certain feature. +// +// The return values have the following semantics: +// +// err == ErrNotSupported: the feature is not available +// err == nil: the feature is available +// err != nil: the test couldn't be executed +type FeatureTestFn func() error + +// NewFeatureTest is a convenient way to create a single [FeatureTest]. +func NewFeatureTest(name, version string, fn FeatureTestFn) func() error { + ft := &FeatureTest{ + Name: name, + Version: version, + Fn: fn, + } + + return ft.execute +} + +// execute the feature test. +// +// The result is cached if the test is conclusive. +// +// See [FeatureTestFn] for the meaning of the returned error. +func (ft *FeatureTest) execute() error { + ft.mu.RLock() + result, done := ft.result, ft.done + ft.mu.RUnlock() + + if done { + return result + } + + ft.mu.Lock() + defer ft.mu.Unlock() + + // The test may have been executed by another caller while we were + // waiting to acquire ft.mu. + if ft.done { + return ft.result + } + + err := ft.Fn() + if err == nil { + ft.done = true + return nil + } + + if errors.Is(err, ErrNotSupported) { + var v Version + if ft.Version != "" { + v, err = NewVersion(ft.Version) + if err != nil { + return fmt.Errorf("feature %s: %w", ft.Name, err) + } + } + + ft.done = true + ft.result = &UnsupportedFeatureError{ + MinimumVersion: v, + Name: ft.Name, + } + + return ft.result + } + + // We couldn't execute the feature test to a point + // where it could make a determination. + // Don't cache the result, just return it. + return fmt.Errorf("detect support for %s: %w", ft.Name, err) +} + +// FeatureMatrix groups multiple related feature tests into a map. +// +// Useful when there is a small number of discrete features which are known +// at compile time. +// +// It must not be modified concurrently with calling [FeatureMatrix.Result]. +type FeatureMatrix[K comparable] map[K]*FeatureTest + +// Result returns the outcome of the feature test for the given key. +// +// It's safe to call this function concurrently. +func (fm FeatureMatrix[K]) Result(key K) error { + ft, ok := fm[key] + if !ok { + return fmt.Errorf("no feature probe for %v", key) + } + + return ft.execute() +} + +// FeatureCache caches a potentially unlimited number of feature probes. +// +// Useful when there is a high cardinality for a feature test. 
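+//
+// A usage sketch, with a hypothetical probe that always succeeds:
+//
+//	cache := NewFeatureCache(func(name string) *FeatureTest {
+//		return &FeatureTest{Name: name, Fn: func() error { return nil }}
+//	})
+//	err := cache.Result("some-feature")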
+type FeatureCache[K comparable] struct { + mu sync.RWMutex + newTest func(K) *FeatureTest + features map[K]*FeatureTest +} + +func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] { + return &FeatureCache[K]{ + newTest: newTest, + features: make(map[K]*FeatureTest), + } +} + +func (fc *FeatureCache[K]) Result(key K) error { + // NB: Executing the feature test happens without fc.mu taken. + return fc.retrieve(key).execute() +} + +func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest { + fc.mu.RLock() + ft := fc.features[key] + fc.mu.RUnlock() + + if ft != nil { + return ft + } + + fc.mu.Lock() + defer fc.mu.Unlock() + + if ft := fc.features[key]; ft != nil { + return ft + } + + ft = fc.newTest(key) + fc.features[key] = ft + return ft +} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go new file mode 100644 index 000000000..1eaf4775a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/io.go @@ -0,0 +1,128 @@ +package internal + +import ( + "bufio" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. +// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} + +// DiscardZeroes makes sure that all written bytes are zero +// before discarding them. +type DiscardZeroes struct{} + +func (DiscardZeroes) Write(p []byte) (int, error) { + for _, b := range p { + if b != 0 { + return 0, errors.New("encountered non-zero byte") + } + } + return len(p), nil +} + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} + +// ReadUint64FromFile reads a uint64 from a file. +// +// format specifies the contents of the file in fmt.Scanf syntax. +func ReadUint64FromFile(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) 
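+ // The variadic path is joined above, so a call like
+ // ReadUint64FromFile("%d", "/proc/sys/kernel", "pid_max") reads a single
+ // decimal value from /proc/sys/kernel/pid_max (illustrative).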
+ data, err := os.ReadFile(filename) + if err != nil { + return 0, fmt.Errorf("reading file %q: %w", filename, err) + } + + var value uint64 + n, err := fmt.Fscanf(bytes.NewReader(data), format, &value) + if err != nil { + return 0, fmt.Errorf("parsing file %q: %w", filename, err) + } + if n != 1 { + return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n) + } + + return value, nil +} + +type uint64FromFileKey struct { + format, path string +} + +var uint64FromFileCache = struct { + sync.RWMutex + values map[uint64FromFileKey]uint64 +}{ + values: map[uint64FromFileKey]uint64{}, +} + +// ReadUint64FromFileOnce is like readUint64FromFile but memoizes the result. +func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) + key := uint64FromFileKey{format, filename} + + uint64FromFileCache.RLock() + if value, ok := uint64FromFileCache.values[key]; ok { + uint64FromFileCache.RUnlock() + return value, nil + } + uint64FromFileCache.RUnlock() + + value, err := ReadUint64FromFile(format, filename) + if err != nil { + return 0, err + } + + uint64FromFileCache.Lock() + defer uint64FromFileCache.Unlock() + + if value, ok := uint64FromFileCache.values[key]; ok { + // Someone else got here before us, use what is cached. + return value, nil + } + + uint64FromFileCache.values[key] = value + return value, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go new file mode 100644 index 000000000..776c7a10a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go @@ -0,0 +1,74 @@ +package kallsyms + +import ( + "bufio" + "bytes" + "io" + "os" + "sync" +) + +var kernelModules struct { + sync.RWMutex + // function to kernel module mapping + kmods map[string]string +} + +// KernelModule returns the kernel module, if any, a probe-able function is contained in. +func KernelModule(fn string) (string, error) { + kernelModules.RLock() + kmods := kernelModules.kmods + kernelModules.RUnlock() + + if kmods == nil { + kernelModules.Lock() + defer kernelModules.Unlock() + kmods = kernelModules.kmods + } + + if kmods != nil { + return kmods[fn], nil + } + + f, err := os.Open("/proc/kallsyms") + if err != nil { + return "", err + } + defer f.Close() + kmods, err = loadKernelModuleMapping(f) + if err != nil { + return "", err + } + + kernelModules.kmods = kmods + return kmods[fn], nil +} + +// FlushKernelModuleCache removes any cached information about function to kernel module mapping. 
+func FlushKernelModuleCache() {
+ kernelModules.Lock()
+ defer kernelModules.Unlock()
+
+ kernelModules.kmods = nil
+}
+
+func loadKernelModuleMapping(f io.Reader) (map[string]string, error) {
+ mods := make(map[string]string)
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ fields := bytes.Fields(scanner.Bytes())
+ if len(fields) < 4 {
+ continue
+ }
+ switch string(fields[1]) {
+ case "t", "T":
+ mods[string(fields[2])] = string(bytes.Trim(fields[3], "[]"))
+ default:
+ continue
+ }
+ }
+ if scanner.Err() != nil {
+ return nil, scanner.Err()
+ }
+ return mods, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
new file mode 100644
index 000000000..1921e4f15
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
@@ -0,0 +1,293 @@
+package kconfig
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+)
+
+// Find locates a kconfig file on the host.
+// It first reads /boot/config-<release> for the currently running kernel and
+// falls back to /proc/config.gz if nothing was found in /boot.
+// If neither file provides a kconfig, it returns an error.
+func Find() (*os.File, error) {
+ kernelRelease, err := internal.KernelRelease()
+ if err != nil {
+ return nil, fmt.Errorf("cannot get kernel release: %w", err)
+ }
+
+ path := "/boot/config-" + kernelRelease
+ f, err := os.Open(path)
+ if err == nil {
+ return f, nil
+ }
+
+ f, err = os.Open("/proc/config.gz")
+ if err == nil {
+ return f, nil
+ }
+
+ return nil, fmt.Errorf("neither %s nor /proc/config.gz provides a kconfig", path)
+}
+
+// Parse parses the kconfig file for which a reader is given.
+// All the CONFIG_* options that are in filter and that are set will be
+// put in the returned map as keys, with their corresponding values as map values.
+// If filter is nil, no filtering occurs.
+// If the kconfig file is not valid, an error is returned.
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) {
+ var r io.Reader
+ zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64))
+ if err != nil {
+ r = io.NewSectionReader(source, 0, math.MaxInt64)
+ } else {
+ // Source is gzip compressed, transparently decompress.
+ r = zr
+ }
+
+ ret := make(map[string]string, len(filter))
+
+ s := bufio.NewScanner(r)
+
+ for s.Scan() {
+ line := s.Bytes()
+ err = processKconfigLine(line, ret, filter)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse line: %w", err)
+ }
+
+ if filter != nil && len(ret) == len(filter) {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("cannot parse: %w", err)
+ }
+
+ if zr != nil {
+ return ret, zr.Close()
+ }
+
+ return ret, nil
+}
+
+// Golang translation of libbpf bpf_object__process_kconfig_line():
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874
+// It does the same checks but does not put the data inside the BPF map.
+func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error {
+ // Ignore empty lines and "# CONFIG_* is not set".
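+ // A line that survives the prefix check looks like, for example:
+ //
+ //	CONFIG_BPF=y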
+ if !bytes.HasPrefix(line, []byte("CONFIG_")) {
+ return nil
+ }
+
+ key, value, found := bytes.Cut(line, []byte{'='})
+ if !found {
+ return fmt.Errorf("line %q does not contain separator '='", line)
+ }
+
+ if len(value) == 0 {
+ return fmt.Errorf("line %q has no value", line)
+ }
+
+ if filter != nil {
+ // NB: map[string(key)] gets special optimisation help from the compiler
+ // and doesn't allocate. Don't turn this into a variable.
+ _, ok := filter[string(key)]
+ if !ok {
+ return nil
+ }
+ }
+
+ // This can seem odd, but libbpf only sets the value the first time the key
+ // is encountered:
+ // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908
+ _, ok := m[string(key)]
+ if !ok {
+ m[string(key)] = string(value)
+ }
+
+ return nil
+}
+
+// PutValue translates the given value according to the BTF type and writes
+// the translated value into the byte array.
+func PutValue(data []byte, typ btf.Type, value string) error {
+ typ = btf.UnderlyingType(typ)
+
+ switch value {
+ case "y", "n", "m":
+ return putValueTri(data, typ, value)
+ default:
+ if strings.HasPrefix(value, `"`) {
+ return putValueString(data, typ, value)
+ }
+ return putValueNumber(data, typ, value)
+ }
+}
+
+// Golang translation of libbpf_tristate enum:
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169
+type triState int
+
+const (
+ TriNo triState = 0
+ TriYes triState = 1
+ TriModule triState = 2
+)
+
+func putValueTri(data []byte, typ btf.Type, value string) error {
+ switch v := typ.(type) {
+ case *btf.Int:
+ if v.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding)
+ }
+
+ if v.Size != 1 {
+ return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size)
+ }
+
+ switch value {
+ case "y":
+ data[0] = 1
+ case "n":
+ data[0] = 0
+ default:
+ return fmt.Errorf("cannot use %q for btf.Bool", value)
+ }
+ case *btf.Enum:
+ if v.Name != "libbpf_tristate" {
+ return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name)
+ }
+
+ var tri triState
+ switch value {
+ case "y":
+ tri = TriYes
+ case "m":
+ tri = TriModule
+ case "n":
+ tri = TriNo
+ default:
+ return fmt.Errorf("value %q is not supported for libbpf_tristate", value)
+ }
+
+ internal.NativeEndian.PutUint64(data, uint64(tri))
+ default:
+ return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v)
+ }
+
+ return nil
+}
+
+func putValueString(data []byte, typ btf.Type, value string) error {
+ array, ok := typ.(*btf.Array)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array)
+ }
+
+ contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType)
+ }
+
+ // Any one-byte Int that is not a bool can be used to store a char:
+ // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638
+ if contentType.Size != 1 && contentType.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size)
+ }
+
+ if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) {
+ return fmt.Errorf(`value %q must start and finish with '"'`, value)
+ }
+
+ str := strings.Trim(value, `"`)
+
+ // We need to trim the string if the BPF array is smaller.
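+ // For example, "hello" stored into a char[4] array becomes "hell"
+ // (the excess is dropped; the copy below never overruns data).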
+ if uint32(len(str)) >= array.Nelems { + str = str[:array.Nelems] + } + + // Write the string content to .kconfig. + copy(data, str) + + return nil +} + +func putValueNumber(data []byte, typ btf.Type, value string) error { + integer, ok := typ.(*btf.Int) + if !ok { + return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer) + } + + size := integer.Size + sizeInBits := size * 8 + + var n uint64 + var err error + if integer.Encoding == btf.Signed { + parsed, e := strconv.ParseInt(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } else { + parsed, e := strconv.ParseUint(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } + + if err != nil { + return fmt.Errorf("cannot parse value: %w", err) + } + + return PutInteger(data, integer, n) +} + +// PutInteger writes n into data. +// +// integer determines how much is written into data and what the valid values +// are. +func PutInteger(data []byte, integer *btf.Int, n uint64) error { + // This function should match set_kcfg_value_num in libbpf. + if integer.Encoding == btf.Bool && n > 1 { + return fmt.Errorf("invalid boolean value: %d", n) + } + + if len(data) < int(integer.Size) { + return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data)) + } + + switch integer.Size { + case 1: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + data[0] = byte(n) + case 2: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint16(data, uint16(n)) + case 4: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint32(data, uint32(n)) + case 8: + internal.NativeEndian.PutUint64(data, uint64(n)) + default: + return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/math.go b/vendor/github.com/cilium/ebpf/internal/math.go new file mode 100644 index 000000000..e95c8efde --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/math.go @@ -0,0 +1,13 @@ +package internal + +import "golang.org/x/exp/constraints" + +// Align returns 'n' updated to 'alignment' boundary. +func Align[I constraints.Integer](n, alignment I) I { + return (n + alignment - 1) / alignment * alignment +} + +// IsPow returns true if n is a power of two. +func IsPow[I constraints.Integer](n I) bool { + return n != 0 && (n&(n-1)) == 0 +} diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 000000000..dd6e6cbaf --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,97 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "reflect" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. 
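+//
+// For example, Identifier("__u64") yields "U64" and Identifier("packet_len")
+// yields "PacketLen".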
+func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. + case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. + return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. +func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} + +// GoTypeName is like %T, but elides the package name. +// +// Pointers to a type are peeled off. +func GoTypeName(t any) string { + rT := reflect.TypeOf(t) + for rT.Kind() == reflect.Pointer { + rT = rT.Elem() + } + // Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924 + return rT.Name() +} diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go new file mode 100644 index 000000000..01d892f93 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/pinning.go @@ -0,0 +1,65 @@ +package internal + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +func Pin(currentPath, newPath string, fd *sys.FD) error { + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + fsType, err := FSType(filepath.Dir(newPath)) + if err != nil { + return err + } + if fsType != unix.BPF_FS_MAGIC { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + + if currentPath == "" { + return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + // Renameat2 is used instead of os.Rename to disallow the new path replacing + // an existing path. + err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { + // Object is now moved to the new pinning path. + return nil + } + if !os.IsNotExist(err) { + return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) + } + // Internal state not in sync with the file system so let's fix it. 
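+ // currentPath no longer exists on disk, so pin the object directly at
+ // newPath instead of moving it.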
+ return sys.ObjPin(&sys.ObjPinAttr{ + Pathname: sys.NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + err := os.Remove(pinnedPath) + if err == nil || os.IsNotExist(err) { + return nil + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/platform.go b/vendor/github.com/cilium/ebpf/internal/platform.go new file mode 100644 index 000000000..6e90f2ef7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/platform.go @@ -0,0 +1,43 @@ +package internal + +import ( + "runtime" +) + +// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by +// the linux kernel. +// +// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go +// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047 +func PlatformPrefix() string { + switch runtime.GOARCH { + case "386": + return "__ia32_" + case "amd64", "amd64p32": + return "__x64_" + + case "arm", "armbe": + return "__arm_" + case "arm64", "arm64be": + return "__arm64_" + + case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le": + return "__mips_" + + case "s390": + return "__s390_" + case "s390x": + return "__s390x_" + + case "riscv", "riscv64": + return "__riscv_" + + case "ppc": + return "__powerpc_" + case "ppc64", "ppc64le": + return "__powerpc64_" + + default: + return "" + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go new file mode 100644 index 000000000..d629145b6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/prog.go @@ -0,0 +1,11 @@ +package internal + +// EmptyBPFContext is the smallest-possible BPF input context to be used for +// invoking `Program.{Run,Benchmark,Test}`. +// +// Programs require a context input buffer of at least 15 bytes. Looking in +// net/bpf/test_run.c, bpf_test_init() requires that the input is at least +// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets +// with invalid pkt_len"), it also requires the skb to be non-empty after +// removing the Layer 2 header. +var EmptyBPFContext = make([]byte, 15) diff --git a/vendor/github.com/cilium/ebpf/internal/statfs.go b/vendor/github.com/cilium/ebpf/internal/statfs.go new file mode 100644 index 000000000..44c02d676 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/statfs.go @@ -0,0 +1,23 @@ +package internal + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +func FSType(path string) (int64, error) { + var statfs unix.Statfs_t + if err := unix.Statfs(path, &statfs); err != nil { + return 0, err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. + fsType = int64(uint32(statfs.Type)) + } + return fsType, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 000000000..dfe174448 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,6 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +// Regenerate types.go by invoking go generate in the current directory. 
+ +//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 000000000..941a56fb9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,133 @@ +package sys + +import ( + "fmt" + "math" + "os" + "runtime" + "strconv" + + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +type FD struct { + raw int +} + +func newFD(value int) *FD { + if onLeakFD != nil { + // Attempt to store the caller's stack for the given fd value. + // Panic if fds contains an existing stack for the fd. + old, exist := fds.LoadOrStore(value, callersFrames()) + if exist { + f := old.(*runtime.Frames) + panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f))) + } + } + + fd := &FD{value} + runtime.SetFinalizer(fd, (*FD).finalize) + return fd +} + +// finalize is set as the FD's runtime finalizer and +// sends a leak trace before calling FD.Close(). +func (fd *FD) finalize() { + if fd.raw < 0 { + return + } + + // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback + // is invoked at most once for one sys.FD allocation, runtime.Frames can only + // be unwound once. + f, ok := fds.LoadAndDelete(fd.Int()) + if ok && onLeakFD != nil { + onLeakFD(f.(*runtime.Frames)) + } + + _ = fd.Close() +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return fd.raw +} + +func (fd *FD) Uint() uint32 { + if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + // Best effort: this is the number most likely to be an invalid file + // descriptor. It is equal to -1 (on two's complement arches). + return math.MaxUint32 + } + return uint32(fd.raw) +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + return unix.Close(fd.disown()) +} + +func (fd *FD) disown() int { + value := int(fd.raw) + fds.Delete(int(value)) + fd.raw = -1 + + runtime.SetFinalizer(fd, nil) + return value +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +// File takes ownership of FD and turns it into an [*os.File]. +// +// You must not use the FD after the call returns. +// +// Returns nil if the FD is not valid. 
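+//
+// A typical use, with a hypothetical name:
+//
+//	if f := fd.File("bpf-obj"); f != nil {
+//		defer f.Close()
+//	}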
+func (fd *FD) File(name string) *os.File { + if fd.raw < 0 { + return nil + } + + return os.NewFile(uintptr(fd.disown()), name) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go new file mode 100644 index 000000000..cd50dd1f6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go @@ -0,0 +1,93 @@ +package sys + +import ( + "bytes" + "fmt" + "runtime" + "sync" +) + +// OnLeakFD controls tracing [FD] lifetime to detect resources that are not +// closed by Close(). +// +// If fn is not nil, tracing is enabled for all FDs created going forward. fn is +// invoked for all FDs that are closed by the garbage collector instead of an +// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn +// (without disabling tracing in the meantime) will cause a panic. +// +// If fn is nil, tracing will be disabled. Any FDs that have not been closed are +// considered to be leaked, fn will be invoked for them, and the process will be +// terminated. +// +// fn will be invoked at most once for every unique sys.FD allocation since a +// runtime.Frames can only be unwound once. +func OnLeakFD(fn func(*runtime.Frames)) { + // Enable leak tracing if new fn is provided. + if fn != nil { + if onLeakFD != nil { + panic("OnLeakFD called twice with non-nil fn") + } + + onLeakFD = fn + return + } + + // fn is nil past this point. + + if onLeakFD == nil { + return + } + + // Call onLeakFD for all open fds. + if fs := flushFrames(); len(fs) != 0 { + for _, f := range fs { + onLeakFD(f) + } + } + + onLeakFD = nil +} + +var onLeakFD func(*runtime.Frames) + +// fds is a registry of all file descriptors wrapped into sys.fds that were +// created while an fd tracer was active. +var fds sync.Map // map[int]*runtime.Frames + +// flushFrames removes all elements from fds and returns them as a slice. This +// deals with the fact that a runtime.Frames can only be unwound once using +// Next(). +func flushFrames() []*runtime.Frames { + var frames []*runtime.Frames + fds.Range(func(key, value any) bool { + frames = append(frames, value.(*runtime.Frames)) + fds.Delete(key) + return true + }) + return frames +} + +func callersFrames() *runtime.Frames { + c := make([]uintptr, 32) + + // Skip runtime.Callers and this function. + i := runtime.Callers(2, c) + if i == 0 { + return nil + } + + return runtime.CallersFrames(c) +} + +// FormatFrames formats a runtime.Frames as a human-readable string. +func FormatFrames(fs *runtime.Frames) string { + var b bytes.Buffer + for { + f, more := fs.Next() + b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) + if !more { + break + } + } + return b.String() +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go new file mode 100644 index 000000000..d9fe21722 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -type MapFlags"; DO NOT EDIT. + +package sys + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[BPF_F_NO_PREALLOC-1] + _ = x[BPF_F_NO_COMMON_LRU-2] + _ = x[BPF_F_NUMA_NODE-4] + _ = x[BPF_F_RDONLY-8] + _ = x[BPF_F_WRONLY-16] + _ = x[BPF_F_STACK_BUILD_ID-32] + _ = x[BPF_F_ZERO_SEED-64] + _ = x[BPF_F_RDONLY_PROG-128] + _ = x[BPF_F_WRONLY_PROG-256] + _ = x[BPF_F_CLONE-512] + _ = x[BPF_F_MMAPABLE-1024] + _ = x[BPF_F_PRESERVE_ELEMS-2048] + _ = x[BPF_F_INNER_MAP-4096] + _ = x[BPF_F_LINK-8192] + _ = x[BPF_F_PATH_FD-16384] +} + +const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAPBPF_F_LINKBPF_F_PATH_FD" + +var _MapFlags_map = map[MapFlags]string{ + 1: _MapFlags_name[0:17], + 2: _MapFlags_name[17:36], + 4: _MapFlags_name[36:51], + 8: _MapFlags_name[51:63], + 16: _MapFlags_name[63:75], + 32: _MapFlags_name[75:95], + 64: _MapFlags_name[95:110], + 128: _MapFlags_name[110:127], + 256: _MapFlags_name[127:144], + 512: _MapFlags_name[144:155], + 1024: _MapFlags_name[155:169], + 2048: _MapFlags_name[169:189], + 4096: _MapFlags_name[189:204], + 8192: _MapFlags_name[204:214], + 16384: _MapFlags_name[214:227], +} + +func (i MapFlags) String() string { + if str, ok := _MapFlags_map[i]; ok { + return str + } + return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go new file mode 100644 index 000000000..e9bb59059 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -0,0 +1,52 @@ +package sys + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// NewPointer creates a 64-bit pointer from an unsafe Pointer. +func NewPointer(ptr unsafe.Pointer) Pointer { + return Pointer{ptr: ptr} +} + +// NewSlicePointer creates a 64-bit pointer from a byte slice. +func NewSlicePointer(buf []byte) Pointer { + if len(buf) == 0 { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(&buf[0])} +} + +// NewSlicePointerLen creates a 64-bit pointer from a byte slice. +// +// Useful to assign both the pointer and the length in one go. +func NewSlicePointerLen(buf []byte) (Pointer, uint32) { + return NewSlicePointer(buf), uint32(len(buf)) +} + +// NewStringPointer creates a 64-bit pointer from a string. +func NewStringPointer(str string) Pointer { + p, err := unix.BytePtrFromString(str) + if err != nil { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(p)} +} + +// NewStringSlicePointer allocates an array of Pointers to each string in the +// given slice of strings and returns a 64-bit pointer to the start of the +// resulting array. +// +// Use this function to pass arrays of strings as syscall arguments. +func NewStringSlicePointer(strings []string) Pointer { + sp := make([]Pointer, 0, len(strings)) + for _, s := range strings { + sp = append(sp, NewStringPointer(s)) + } + + return Pointer{ptr: unsafe.Pointer(&sp[0])} +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go new file mode 100644 index 000000000..6278c79c9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -0,0 +1,14 @@ +//go:build armbe || mips || mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. 
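+//
+// On big-endian targets the most significant word sits first in memory, so
+// the padding precedes the pointer to keep the pointer in the low half of
+// the 64-bit value.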
+type Pointer struct { + pad uint32 + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go new file mode 100644 index 000000000..c27b537e8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -0,0 +1,14 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer + pad uint32 +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go new file mode 100644 index 000000000..2d7828230 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -0,0 +1,13 @@ +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go new file mode 100644 index 000000000..e5337191d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go @@ -0,0 +1,83 @@ +package sys + +import ( + "fmt" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// A sigset containing only SIGPROF. +var profSet unix.Sigset_t + +func init() { + // See sigsetAdd for details on the implementation. Open coded here so + // that the compiler will check the constant calculations for us. + profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits) +} + +// maskProfilerSignal locks the calling goroutine to its underlying OS thread +// and adds SIGPROF to the thread's signal mask. This prevents pprof from +// interrupting expensive syscalls like e.g. BPF_PROG_LOAD. +// +// The caller must defer unmaskProfilerSignal() to reverse the operation. +func maskProfilerSignal() { + runtime.LockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil { + runtime.UnlockOSThread() + panic(fmt.Errorf("masking profiler signal: %w", err)) + } +} + +// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal +// mask, allowing it to be interrupted for profiling once again. +// +// It also unlocks the current goroutine from its underlying OS thread. +func unmaskProfilerSignal() { + defer runtime.UnlockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil { + panic(fmt.Errorf("unmasking profiler signal: %w", err)) + } +} + +const ( + // Signal is the nth bit in the bitfield. + sigprofBit = int(unix.SIGPROF - 1) + // The number of bits in one Sigset_t word. + wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8 +) + +// sigsetAdd adds signal to set. +// +// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch. +// This function must be able to deal with both and so must avoid any direct +// references to u32 or u64 types. 
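+//
+// For example, sigsetAdd(&set, unix.SIGPROF) sets bit SIGPROF-1 in the first
+// word of the set.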
+func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error { + if signal < 1 { + return fmt.Errorf("signal %d must be larger than 0", signal) + } + + // For amd64, runtime.sigaddset() performs the following operation: + // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31) + // + // This trick depends on sigset being two u32's, causing a signal in the + // bottom 31 bits to be written to the low word if bit 32 is low, or the high + // word if bit 32 is high. + + // Signal is the nth bit in the bitfield. + bit := int(signal - 1) + // Word within the sigset the bit needs to be written to. + word := bit / wordBits + + if word >= len(set.Val) { + return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal) + } + + // Write the signal bit into its corresponding word at the corrected offset. + set.Val[word] |= 1 << (bit % wordBits) + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go new file mode 100644 index 000000000..f6b6e9345 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -0,0 +1,229 @@ +package sys + +import ( + "runtime" + "syscall" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// ENOTSUPP is a Linux internal error code that has leaked into UAPI. +// +// It is not the same as ENOTSUP or EOPNOTSUPP. +const ENOTSUPP = syscall.Errno(524) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // Prevent the Go profiler from repeatedly interrupting the verifier, + // which could otherwise lead to a livelock due to receiving EAGAIN. + if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { + maskProfilerSignal() + defer unmaskProfilerSignal() + } + + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// Info is implemented by all structs that can be passed to the ObjInfo syscall. 
+// +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo +type Info interface { + info() (unsafe.Pointer, uint32) +} + +var _ Info = (*MapInfo)(nil) + +func (i *MapInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*ProgInfo)(nil) + +func (i *ProgInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*LinkInfo)(nil) + +func (i *LinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*BtfInfo)(nil) + +func (i *BtfInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +// ObjInfo retrieves information about a BPF Fd. +// +// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. +func ObjInfo(fd *FD, info Info) error { + ptr, len := info.info() + err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ + BpfFd: fd.Uint(), + InfoLen: len, + Info: NewPointer(ptr), + }) + runtime.KeepAlive(fd) + return err +} + +// ObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type ObjName [unix.BPF_OBJ_NAME_LEN]byte + +// NewObjName truncates the result if it is too long. +func NewObjName(name string) ObjName { + var result ObjName + copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + return result +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +type LogLevel uint32 + +const ( + BPF_LOG_LEVEL1 LogLevel = 1 << iota + BPF_LOG_LEVEL2 + BPF_LOG_STATS +) + +// LinkID uniquely identifies a bpf_link. +type LinkID uint32 + +// BTFID uniquely identifies a BTF blob loaded into the kernel. +type BTFID uint32 + +// TypeID identifies a type in a BTF blob. +type TypeID uint32 + +// MapFlags control map behaviour. +type MapFlags uint32 + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type MapFlags + +const ( + BPF_F_NO_PREALLOC MapFlags = 1 << iota + BPF_F_NO_COMMON_LRU + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_STACK_BUILD_ID + BPF_F_ZERO_SEED + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_CLONE + BPF_F_MMAPABLE + BPF_F_PRESERVE_ELEMS + BPF_F_INNER_MAP + BPF_F_LINK + BPF_F_PATH_FD +) + +// Flags used by bpf_mprog.
+const ( + BPF_F_REPLACE = 1 << (iota + 2) + BPF_F_BEFORE + BPF_F_AFTER + BPF_F_ID + BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK +) + +// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. +type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +func (we wrappedErrno) Error() string { + if we.Errno == ENOTSUPP { + return "operation not supported" + } + return we.Errno.Error() +} + +type syscallError struct { + error + errno syscall.Errno +} + +func Error(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 000000000..70e754de7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,1383 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. + +package sys + +import ( + "unsafe" +) + +type AdjRoomMode uint32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType uint32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + BPF_LSM_CGROUP AttachType = 43 + BPF_STRUCT_OPS AttachType = 44 + BPF_NETFILTER AttachType = 45 + BPF_TCX_INGRESS AttachType = 46 + BPF_TCX_EGRESS AttachType = 47 + BPF_TRACE_UPROBE_MULTI AttachType = 48 + BPF_CGROUP_UNIX_CONNECT AttachType = 49 + BPF_CGROUP_UNIX_SENDMSG AttachType = 50 + BPF_CGROUP_UNIX_RECVMSG AttachType = 51 + BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52 + BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53 + BPF_NETKIT_PRIMARY AttachType = 54 + BPF_NETKIT_PEER AttachType = 55 + 
__MAX_BPF_ATTACH_TYPE AttachType = 56 +) + +type Cmd uint32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 + BPF_MAP_LOOKUP_BATCH Cmd = 24 + BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 +) + +type FunctionId uint32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + BPF_FUNC_l3_csum_replace FunctionId = 10 + BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId = 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + BPF_FUNC_probe_write_user FunctionId = 36 + BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + 
BPF_FUNC_perf_event_read_value FunctionId = 55 + BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + 
BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + BPF_FUNC_kptr_xchg FunctionId = 194 + BPF_FUNC_map_lookup_percpu_elem FunctionId = 195 + BPF_FUNC_skc_to_mptcp_sock FunctionId = 196 + BPF_FUNC_dynptr_from_mem FunctionId = 197 + BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198 + BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199 + BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200 + BPF_FUNC_dynptr_read FunctionId = 201 + BPF_FUNC_dynptr_write FunctionId = 202 + BPF_FUNC_dynptr_data FunctionId = 203 + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204 + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205 + BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206 + BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207 + BPF_FUNC_ktime_get_tai_ns FunctionId = 208 + BPF_FUNC_user_ringbuf_drain FunctionId = 209 + BPF_FUNC_cgrp_storage_get FunctionId = 210 + BPF_FUNC_cgrp_storage_delete FunctionId = 211 + __BPF_FUNC_MAX_ID FunctionId = 212 +) + +type HdrStartOff uint32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType uint32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + 
BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + BPF_LINK_TYPE_STRUCT_OPS LinkType = 9 + BPF_LINK_TYPE_NETFILTER LinkType = 10 + BPF_LINK_TYPE_TCX LinkType = 11 + BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 + BPF_LINK_TYPE_NETKIT LinkType = 13 + __MAX_BPF_LINK_TYPE LinkType = 14 +) + +type MapType uint32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 + BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 +) + +type PerfEventType uint32 + +const ( + BPF_PERF_EVENT_UNSPEC PerfEventType = 0 + BPF_PERF_EVENT_UPROBE PerfEventType = 1 + BPF_PERF_EVENT_URETPROBE PerfEventType = 2 + BPF_PERF_EVENT_KPROBE PerfEventType = 3 + BPF_PERF_EVENT_KRETPROBE PerfEventType = 4 + BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5 + BPF_PERF_EVENT_EVENT PerfEventType = 6 +) + +type ProgType uint32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM 
ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 + BPF_PROG_TYPE_NETFILTER ProgType = 32 +) + +type RetCode uint32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 + BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129 +) + +type SkAction uint32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus uint32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType uint32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type TcxActionBase int32 + +const ( + TCX_NEXT TcxActionBase = -1 + TCX_PASS TcxActionBase = 0 + TCX_DROP TcxActionBase = 2 + TCX_REDIRECT TcxActionBase = 7 +) + +type XdpAction uint32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + Btf Pointer + BtfSize uint32 + Id BTFID + Name Pointer + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra [48]uint8 +} + +type MapInfo struct { + Type uint32 + Id uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags MapFlags + Name ObjName + Ifindex uint32 + BtfVmlinuxValueTypeId TypeID + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + _ [4]byte + MapExtra uint64 +} + +type ProgInfo struct { + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns uint64 + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms uint64 + JitedFuncLens uint64 + BtfId BTFID + FuncInfoRecSize uint32 + FuncInfo Pointer + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo Pointer + JitedLineInfo uint64 + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 + VerifiedInsns uint32 + AttachBtfObjId BTFID + AttachBtfId TypeID + _ [4]byte +} + +type SkLookup struct { + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 [16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type BtfGetFdByIdAttr struct{ Id uint32 } + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfGetNextIdAttr struct { + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BtfLoadAttr struct { + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + BtfLogTrueSize uint32 +} 
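+ +// Every wrapper in this generated file follows the same shape: fill in the +// generated *Attr struct, hand it to BPF(), and wrap any returned file +// descriptor. A minimal usage sketch reusing BtfGetFdById from above (the +// ID value is illustrative, not part of this patch): +// +// fd, err := BtfGetFdById(&BtfGetFdByIdAttr{Id: 42}) +// if err != nil { +// return err // e.g. ENOENT when no BTF object has this ID +// } +// defer fd.Close()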
+ +func BtfLoad(attr *BtfLoadAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type EnableStatsAttr struct{ Type uint32 } + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId TypeID + _ [44]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [36]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateKprobeMultiAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + KprobeMultiFlags uint32 + Count uint32 + Syms Pointer + Addrs Pointer + Cookies Pointer + _ [16]byte +} + +func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetfilterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Pf uint32 + Hooknum uint32 + Priority int32 + NetfilterFlags uint32 + _ [32]byte +} + +func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetkitAttr struct { + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [40]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTcxAttr struct { + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTracingAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType 
AttachType + Flags uint32 + TargetBtfId BTFID + _ [4]byte + Cookie uint64 + _ [32]byte +} + +func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateUprobeMultiAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Path Pointer + Offsets Pointer + RefCtrOffsets Pointer + Cookies Pointer + Count uint32 + UprobeMultiFlags uint32 + Pid uint32 + _ [4]byte +} + +func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetFdByIdAttr struct{ Id LinkID } + +func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetNextIdAttr struct { + Id LinkID + NextId LinkID +} + +func LinkGetNextId(attr *LinkGetNextIdAttr) error { + _, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type LinkUpdateAttr struct { + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags MapFlags + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxValueTypeId TypeID + MapExtra uint64 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct{ MapFd uint32 } + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct{ Id uint32 } + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr 
struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct { + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgDetachAttr struct { + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + _ [4]byte + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct{ Id uint32 } + +func 
ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + ProgType ProgType + InsnCnt uint32 + Insns Pointer + License Pointer + LogLevel LogLevel + LogSize uint32 + LogBuf Pointer + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBtfId TypeID + AttachBtfObjFd uint32 + CoreReloCnt uint32 + FdArray Pointer + CoreRelos Pointer + CoreReloRecSize uint32 + LogTrueSize uint32 +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgQueryAttr struct { + TargetFdOrIfindex uint32 + AttachType AttachType + QueryFlags uint32 + AttachFlags uint32 + ProgIds Pointer + Count uint32 + _ [4]byte + ProgAttachFlags Pointer + LinkIds Pointer + LinkAttachFlags Pointer + Revision uint64 +} + +func ProgQuery(attr *ProgQueryAttr) error { + _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgRunAttr struct { + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn Pointer + DataOut Pointer + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn Pointer + CtxOut Pointer + Flags uint32 + Cpu uint32 + BatchSize uint32 + _ [4]byte +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type RawTracepointOpenAttr struct { + Name Pointer + ProgFd uint32 + _ [4]byte +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + CgroupId uint64 + AttachType AttachType + _ [36]byte +} + +type IterLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TargetName Pointer + TargetNameLen uint32 +} + +type KprobeLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + FuncName Pointer + NameLen uint32 + Offset uint32 + Addr uint64 + Missed uint64 + _ [8]byte +} + +type KprobeMultiLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Addrs Pointer + Count uint32 + Flags uint32 + Missed uint64 + _ [24]byte +} + +type NetNsLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + NetnsIno uint32 + AttachType AttachType + _ [40]byte +} + +type NetfilterLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 + _ [32]byte +} + +type NetkitLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type PerfEventLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ 
[4]byte + PerfEventType PerfEventType +} + +type RawTracepointLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TpName Pointer + TpNameLen uint32 + _ [36]byte +} + +type TcxLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type TracingLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + AttachType AttachType + TargetObjId uint32 + TargetBtfId TypeID + _ [36]byte +} + +type XDPLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + _ [44]byte +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go new file mode 100644 index 000000000..d184ea196 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -0,0 +1,83 @@ +package sysenc + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/sys" +) + +type Buffer struct { + ptr unsafe.Pointer + // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using + // zero-copy unmarshaling. + size int +} + +const syscallPointerOnly = -1 + +func newBuffer(buf []byte) Buffer { + if len(buf) == 0 { + return Buffer{} + } + return Buffer{unsafe.Pointer(&buf[0]), len(buf)} +} + +// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling. +// +// [Pointer] is the only valid method to call on such a Buffer. +// Use [SyscallOutput] instead if possible. +func UnsafeBuffer(ptr unsafe.Pointer) Buffer { + return Buffer{ptr, syscallPointerOnly} +} + +// SyscallOutput prepares a Buffer for a syscall to write into. +// +// size is the length of the desired buffer in bytes. +// The buffer may point at the underlying memory of dst, in which case [Unmarshal] +// becomes a no-op. +// +// The contents of the buffer are undefined and may be non-zero. +func SyscallOutput(dst any, size int) Buffer { + if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size { + buf := newBuffer(dstBuf) + buf.size = syscallPointerOnly + return buf + } + + return newBuffer(make([]byte, size)) +} + +// CopyTo copies the buffer into dst. +// +// Returns the number of copied bytes. +func (b Buffer) CopyTo(dst []byte) int { + return copy(dst, b.unsafeBytes()) +} + +// AppendTo appends the buffer onto dst. +func (b Buffer) AppendTo(dst []byte) []byte { + return append(dst, b.unsafeBytes()...) +} + +// Pointer returns the location where a syscall should write. +func (b Buffer) Pointer() sys.Pointer { + // NB: This deliberately ignores b.size to support zero-copy + // marshaling / unmarshaling using unsafe.Pointer. + return sys.NewPointer(b.ptr) +} + +// Unmarshal the buffer into the provided value. +func (b Buffer) Unmarshal(data any) error { + if b.size == syscallPointerOnly { + return nil + } + + return Unmarshal(data, b.unsafeBytes()) +} + +func (b Buffer) unsafeBytes() []byte { + if b.size == syscallPointerOnly { + return nil + } + return unsafe.Slice((*byte)(b.ptr), b.size) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go new file mode 100644 index 000000000..676ad98ba --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go @@ -0,0 +1,3 @@ +// Package sysenc provides efficient conversion of Go values to system +// call interfaces.
+package sysenc diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go new file mode 100644 index 000000000..52d111e7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go @@ -0,0 +1,41 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://go.dev/LICENSE. + +package sysenc + +import ( + "reflect" + "sync" +) + +var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool + +func hasUnexportedFields(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Slice, reflect.Array, reflect.Pointer: + return hasUnexportedFields(typ.Elem()) + + case reflect.Struct: + if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok { + return unexported.(bool) + } + + unexported := false + for i, n := 0, typ.NumField(); i < n; i++ { + field := typ.Field(i) + // Package binary allows _ fields but always writes zeroes into them. + if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) { + unexported = true + break + } + } + + hasUnexportedFieldsCache.Store(typ, unexported) + return unexported + + default: + // NB: It's not clear what this means for Chan and so on. + return false + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go new file mode 100644 index 000000000..0026af8f2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -0,0 +1,177 @@ +package sysenc + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "reflect" + "slices" + "sync" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +// Marshal turns data into a byte slice using the system's native endianness. +// +// If possible, avoids allocations by directly using the backing memory +// of data. This means that the variable must not be modified for the lifetime +// of the returned [Buffer]. +// +// Returns an error if the data can't be turned into a byte slice according to +// the behaviour of [binary.Write]. 
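+// +// A minimal sketch (the struct here is illustrative): a fixed-size value +// with exported fields and no padding is used as backing memory directly, +// +// v := struct{ A, B uint32 }{1, 2} +// buf, err := Marshal(&v, 8) +// +// while values containing padding or unexported fields fall back to a copy +// through binary.Write.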
+func Marshal(data any, size int) (Buffer, error) { + if data == nil { + return Buffer{}, errors.New("can't marshal a nil value") + } + + var buf []byte + var err error + switch value := data.(type) { + case encoding.BinaryMarshaler: + buf, err = value.MarshalBinary() + case string: + buf = unsafe.Slice(unsafe.StringData(value), len(value)) + case []byte: + buf = value + case int16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value)) + case uint16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value) + case int32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value)) + case uint32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value) + case int64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value)) + case uint64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value) + default: + if buf := unsafeBackingMemory(data); len(buf) == size { + return newBuffer(buf), nil + } + + wr := internal.NewBuffer(make([]byte, 0, size)) + defer internal.PutBuffer(wr) + + err = binary.Write(wr, internal.NativeEndian, value) + buf = wr.Bytes() + } + if err != nil { + return Buffer{}, err + } + + if len(buf) != size { + return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size) + } + + return newBuffer(buf), nil +} + +var bytesReaderPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Reader) + }, +} + +// Unmarshal a byte slice in the system's native endianness into data. +// +// Returns an error if buf can't be unmarshalled according to the behaviour +// of [binary.Read]. +func Unmarshal(data interface{}, buf []byte) error { + switch value := data.(type) { + case encoding.BinaryUnmarshaler: + return value.UnmarshalBinary(buf) + + case *string: + *value = string(buf) + return nil + + case *[]byte: + // Backwards compat: unmarshaling into a slice replaces the whole slice. + *value = slices.Clone(buf) + return nil + + default: + if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) { + copy(dataBuf, buf) + return nil + } + + rd := bytesReaderPool.Get().(*bytes.Reader) + defer bytesReaderPool.Put(rd) + + rd.Reset(buf) + + if err := binary.Read(rd, internal.NativeEndian, value); err != nil { + return err + } + + if rd.Len() != 0 { + return fmt.Errorf("unmarshaling %T doesn't consume all data", data) + } + + return nil + } +} + +// unsafeBackingMemory returns the backing memory of data if it can be used +// instead of calling into package binary. +// +// Returns nil if the value is not a pointer or a slice, or if it contains +// padding or unexported fields. +func unsafeBackingMemory(data any) []byte { + if data == nil { + return nil + } + + value := reflect.ValueOf(data) + var valueSize int + switch value.Kind() { + case reflect.Pointer: + if value.IsNil() { + return nil + } + + if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice { + valueSize = int(elemType.Size()) + break + } + + // We're dealing with a pointer to a slice. Dereference and + // handle it like a regular slice. + value = value.Elem() + fallthrough + + case reflect.Slice: + valueSize = int(value.Type().Elem().Size()) * value.Len() + + default: + // Prevent Value.UnsafePointer from panicking. + return nil + } + + // Some nil pointer types currently crash binary.Size. Call it after our own + // code so that the panic isn't reachable. 
+ // See https://github.com/golang/go/issues/60892 + if size := binary.Size(data); size == -1 || size != valueSize { + // The type contains padding or unsupported types. + return nil + } + + if hasUnexportedFields(reflect.TypeOf(data)) { + return nil + } + + // Reinterpret the pointer as a byte slice. This violates the unsafe.Pointer + // rules because it's very unlikely that the source data has "an equivalent + // memory layout". However, we can make it safe-ish because of the + // following reasons: + // - There is no alignment mismatch since we cast to a type with an + // alignment of 1. + // - There are no pointers in the source type so we don't upset the GC. + // - The length is verified at runtime. + return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize) +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go new file mode 100644 index 000000000..897740fec --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -0,0 +1,360 @@ +package tracefs + +import ( + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + ErrInvalidInput = errors.New("invalid input") + + ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes") +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type=ProbeType -linecomment + +type ProbeType uint8 + +const ( + Kprobe ProbeType = iota // kprobe + Uprobe // uprobe +) + +func (pt ProbeType) eventsFile() (*os.File, error) { + path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String())) + if err != nil { + return nil, err + } + + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666) +} + +type ProbeArgs struct { + Type ProbeType + Symbol, Group, Path string + Offset, RefCtrOffset, Cookie uint64 + Pid, RetprobeMaxActive int + Ret bool +} + +// RandomGroup generates a pseudorandom string for use as a tracefs group name. +// Returns an error when the output string would exceed 63 characters (kernel +// limitation), when rand.Read() fails, or when prefix contains characters not +// allowed by validIdentifier. +func RandomGroup(prefix string) (string, error) { + if !validIdentifier(prefix) { + return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput) + } + + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("reading random bytes: %w", err) + } + + group := fmt.Sprintf("%s_%x", prefix, b) + if len(group) > 63 { + return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput) + } + + return group, nil +} + +// validIdentifier implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set +// of characters. Non-empty, first character must not be a number, all +// characters must be alphanumeric or underscore. +func validIdentifier(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + default: + return false + } + } + + return true +} + +func sanitizeTracefsPath(path ...string) (string, error) { + base, err := getTracefsPath() + if err != nil { + return "", err + } + l := filepath.Join(path...)
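+ // filepath.Join cleans ".." elements, so a traversal attempt such as + // sanitizeTracefsPath("events", "..", "..") collapses to a path outside + // base and is rejected by the prefix check below.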
+ p := filepath.Join(base, l) + if !strings.HasPrefix(p, base) { + return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput) + } + return p, nil +} + +// getTracefsPath returns the correct path to the tracefs mount point. +// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing, +// but may also be available at /sys/kernel/debug/tracing if debugfs is mounted. +// The available tracefs paths depend on distribution choices. +var getTracefsPath = sync.OnceValues(func() (string, error) { + for _, p := range []struct { + path string + fsType int64 + }{ + {"/sys/kernel/tracing", unix.TRACEFS_MAGIC}, + {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC}, + // RHEL/CentOS + {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC}, + } { + if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType { + return p.path, nil + } + } + + return "", errors.New("neither debugfs nor tracefs are mounted") +}) + +// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore. +// +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_"). +func sanitizeIdentifier(s string) string { + var skip bool + return strings.Map(func(c rune) rune { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + return c + + case skip: + return -1 + + default: + skip = true + return '_' + } + }, s) +} + +// EventID reads a trace event's ID from tracefs given its group and name. +// The kernel requires group and name to be alphanumeric or underscore. +func EventID(group, name string) (uint64, error) { + if !validIdentifier(group) { + return 0, fmt.Errorf("invalid tracefs group: %q", group) + } + + if !validIdentifier(name) { + return 0, fmt.Errorf("invalid tracefs name: %q", name) + } + + path, err := sanitizeTracefsPath("events", group, name, "id") + if err != nil { + return 0, err + } + tid, err := internal.ReadUint64FromFile("%d\n", path) + if errors.Is(err, os.ErrNotExist) { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + } + + return tid, nil +} + +func probePrefix(ret bool, maxActive int) string { + if ret { + if maxActive > 0 { + return fmt.Sprintf("r%d", maxActive) + } + return "r" + } + return "p" +} + +// Event represents an entry in a tracefs probe events file. +type Event struct { + typ ProbeType + group, name string + // event id allocated by the kernel. 0 if the event has already been removed. + id uint64 +} + +// NewEvent creates a new ephemeral trace event. +// +// Returns os.ErrNotExist if symbol is not a valid +// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist +// if a probe with the same group and symbol already exists. Returns an error if +// args.RetprobeMaxActive is used on non-kprobe types. Returns ErrNotSupported if +// the kernel is too old to support kretprobe maxactive. +func NewEvent(args ProbeArgs) (*Event, error) { + // Before attempting to create a trace event through tracefs, + // check if an event with the same group and name already exists. + // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate + // entry, so we need to rely on reads for detecting uniqueness.
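+ // For example, with args.Group "ebpf_123" and args.Symbol "sys_execve", + // the EventID call below reads <tracefs>/events/ebpf_123/sys_execve/id: + // os.ErrNotExist means the name is free, while a successful read means a + // duplicate already exists.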
+ eventName := sanitizeIdentifier(args.Symbol) + _, err := EventID(args.Group, eventName) + if err == nil { + return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist) + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err) + } + + // Open the kprobe_events file in tracefs. + f, err := args.Type.eventsFile() + if err != nil { + return nil, err + } + defer f.Close() + + var pe, token string + switch args.Type { + case Kprobe: + // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): + // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe + // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy + // p:ebpf_5678/p_my_kprobe __x64_sys_execve + // + // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the + // kernel default to NR_CPUS. This is desired in most eBPF cases since + // subsampling or rate limiting logic can be more accurately implemented in + // the eBPF program itself. + // See Documentation/kprobes.txt for more details. + if args.RetprobeMaxActive != 0 && !args.Ret { + return nil, ErrInvalidMaxActive + } + token = KprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token) + case Uprobe: + // The uprobe_events syntax is as follows: + // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe + // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/readline /bin/bash:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) + // + // See Documentation/trace/uprobetracer.txt for more details. + if args.RetprobeMaxActive != 0 { + return nil, ErrInvalidMaxActive + } + token = UprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token) + } + _, err = f.WriteString(pe) + + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) + } + + if err != nil { + return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err) + } + + // Get the newly-created trace event's id. + tid, err := EventID(args.Group, eventName) + if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) { + // Kernels < 4.12 don't support maxactive and therefore auto generate + // group and event names from the symbol and offset. The symbol is used + // without any sanitization. 
+ // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712 + event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset) + if err := removeEvent(args.Type, event); err != nil { + return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err) + } + return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("get trace event id: %w", err) + } + + evt := &Event{args.Type, args.Group, eventName, tid} + runtime.SetFinalizer(evt, (*Event).Close) + return evt, nil +} + +// Close removes the event from tracefs. +// +// Returns os.ErrClosed if the event has already been closed before. +func (evt *Event) Close() error { + if evt.id == 0 { + return os.ErrClosed + } + + evt.id = 0 + runtime.SetFinalizer(evt, nil) + pe := fmt.Sprintf("%s/%s", evt.group, evt.name) + return removeEvent(evt.typ, pe) +} + +func removeEvent(typ ProbeType, pe string) error { + f, err := typ.eventsFile() + if err != nil { + return err + } + defer f.Close() + + // See [k,u]probe_events syntax above. The probe type does not need to be specified + // for removals. + if _, err = f.WriteString("-:" + pe); err != nil { + return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err) + } + + return nil +} + +// ID returns the tracefs ID associated with the event. +func (evt *Event) ID() uint64 { + return evt.id +} + +// Group returns the tracefs group used by the event. +func (evt *Event) Group() string { + return evt.group +} + +// KprobeToken creates the SYM[+offs] token for the tracefs api. +func KprobeToken(args ProbeArgs) string { + po := args.Symbol + + if args.Offset != 0 { + po += fmt.Sprintf("+%#x", args.Offset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go new file mode 100644 index 000000000..87cb0a059 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT. + +package tracefs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Kprobe-0] + _ = x[Uprobe-1] +} + +const _ProbeType_name = "kprobeuprobe" + +var _ProbeType_index = [...]uint8{0, 6, 12} + +func (i ProbeType) String() string { + if i >= ProbeType(len(_ProbeType_index)-1) { + return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go new file mode 100644 index 000000000..994f31260 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go @@ -0,0 +1,16 @@ +package tracefs + +import "fmt" + +// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func UprobeToken(args ProbeArgs) string { + po := fmt.Sprintf("%s:%#x", args.Path, args.Offset) + + if args.RefCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. 
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.RefCtrOffset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go new file mode 100644 index 000000000..d168d36f1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go @@ -0,0 +1,11 @@ +// Package unix re-exports Linux specific parts of golang.org/x/sys/unix. +// +// It avoids breaking compilation on other OS by providing stubs as follows: +// - Invoking a function always returns an error. +// - Errnos have distinct, non-zero values. +// - Constants have distinct but meaningless values. +// - Types use the same names for members, but may or may not follow the +// Linux layout. +package unix + +// Note: please don't add any custom API to this package. Use internal/sys instead. diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go new file mode 100644 index 000000000..d725cfaa3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -0,0 +1,216 @@ +//go:build linux + +package unix + +import ( + "syscall" + + linux "golang.org/x/sys/unix" +) + +const ( + ENOENT = linux.ENOENT + EEXIST = linux.EEXIST + EAGAIN = linux.EAGAIN + ENOSPC = linux.ENOSPC + EINVAL = linux.EINVAL + EPOLLIN = linux.EPOLLIN + EINTR = linux.EINTR + EPERM = linux.EPERM + ESRCH = linux.ESRCH + ENODEV = linux.ENODEV + EBADF = linux.EBADF + E2BIG = linux.E2BIG + EFAULT = linux.EFAULT + EACCES = linux.EACCES + EILSEQ = linux.EILSEQ + EOPNOTSUPP = linux.EOPNOTSUPP + ESTALE = linux.ESTALE +) + +const ( + BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE + BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN + BPF_F_LOCK = linux.BPF_F_LOCK + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF + F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_NONE = linux.PROT_NONE + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_ANON = linux.MAP_ANON + MAP_SHARED = linux.MAP_SHARED + MAP_PRIVATE = linux.MAP_PRIVATE + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + PerfBitWriteBackward = linux.PerfBitWriteBackward + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = 
linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET + SIGPROF = linux.SIGPROF + SIG_BLOCK = linux.SIG_BLOCK + SIG_UNBLOCK = linux.SIG_UNBLOCK + EM_NONE = linux.EM_NONE + EM_BPF = linux.EM_BPF + BPF_FS_MAGIC = linux.BPF_FS_MAGIC + TRACEFS_MAGIC = linux.TRACEFS_MAGIC + DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP +) + +type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t +type Rlimit = linux.Rlimit +type Signal = linux.Signal +type Sigset_t = linux.Sigset_t +type PerfEventMmapPage = linux.PerfEventMmapPage +type EpollEvent = linux.EpollEvent +type PerfEventAttr = linux.PerfEventAttr +type Utsname = linux.Utsname +type CPUSet = linux.CPUSet + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return linux.Syscall(trap, a1, a2, a3) +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return linux.PthreadSigmask(how, set, oldset) +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return linux.FcntlInt(fd, cmd, arg) +} + +func IoctlSetInt(fd int, req uint, value int) error { + return linux.IoctlSetInt(fd, req, value) +} + +func Statfs(path string, buf *Statfs_t) (err error) { + return linux.Statfs(path, buf) +} + +func Close(fd int) (err error) { + return linux.Close(fd) +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return linux.EpollWait(epfd, events, msec) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return linux.EpollCtl(epfd, op, fd, event) +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return linux.Eventfd(initval, flags) +} + +func Write(fd int, p []byte) (n int, err error) { + return linux.Write(fd, p) +} + +func EpollCreate1(flag int) (fd int, err error) { + return linux.EpollCreate1(flag) +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return linux.SetNonblock(fd, nonblocking) +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return linux.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return linux.Munmap(b) +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) +} + +func Uname(buf *Utsname) (err error) { + return linux.Uname(buf) +} + +func Getpid() int { + return linux.Getpid() +} + +func Gettid() int { + return linux.Gettid() +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return linux.Tgkill(tgid, tid, sig) +} + +func BytePtrFromString(s string) (*byte, error) { + return linux.BytePtrFromString(s) +} + +func ByteSliceToString(s []byte) string { + return linux.ByteSliceToString(s) +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} + +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) +} + +func Fstat(fd int, stat *Stat_t) error { + return 
linux.Fstat(fd, stat) +} + +func SetsockoptInt(fd, level, opt, value int) error { + return linux.SetsockoptInt(fd, level, opt, value) +} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return linux.SchedSetaffinity(pid, set) +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return linux.SchedGetaffinity(pid, set) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go new file mode 100644 index 000000000..3ff896271 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -0,0 +1,311 @@ +//go:build !linux + +package unix + +import ( + "fmt" + "runtime" + "syscall" +) + +var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + +// Errnos are distinct and non-zero. +const ( + ENOENT syscall.Errno = iota + 1 + EEXIST + EAGAIN + ENOSPC + EINVAL + EINTR + EPERM + ESRCH + ENODEV + EBADF + E2BIG + EFAULT + EACCES + EILSEQ + EOPNOTSUPP + ESTALE +) + +// Constants are distinct to avoid breaking switch statements. +const ( + BPF_F_NO_PREALLOC = iota + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE + BPF_F_MMAPABLE + BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN + BPF_F_XDP_HAS_FRAGS + BPF_OBJ_NAME_LEN + BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ + SYS_BPF + F_DUPFD_CLOEXEC + EPOLLIN + EPOLL_CTL_ADD + EPOLL_CLOEXEC + O_CLOEXEC + O_NONBLOCK + PROT_NONE + PROT_READ + PROT_WRITE + MAP_ANON + MAP_SHARED + MAP_PRIVATE + PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF + PerfBitWatermark + PerfBitWriteBackward + PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY + RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME + PERF_RECORD_LOST + PERF_RECORD_SAMPLE + AT_FDCWD + RENAME_NOREPLACE + SO_ATTACH_BPF + SO_DETACH_BPF + SOL_SOCKET + SIGPROF + SIG_BLOCK + SIG_UNBLOCK + EM_NONE + EM_BPF + BPF_FS_MAGIC + TRACEFS_MAGIC + DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP + BPF_F_LOCK +) + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid [2]int32 + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type Signal int + +type Sigset_t struct { + Val [4]uint64 +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return 0, 0, syscall.ENOTSUP +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return errNonLinux +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return -1, errNonLinux +} + +func IoctlSetInt(fd int, req uint, value int) error { + return errNonLinux +} + +func Statfs(path string, buf *Statfs_t) error { + return errNonLinux +} + +func Close(fd int) (err error) { + return errNonLinux +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return 0, errNonLinux +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return errNonLinux +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return 0, errNonLinux 
+} + +func Write(fd int, p []byte) (n int, err error) { + return 0, errNonLinux +} + +func EpollCreate1(flag int) (fd int, err error) { + return 0, errNonLinux +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return errNonLinux +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return []byte{}, errNonLinux +} + +func Munmap(b []byte) (err error) { + return errNonLinux +} + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return 0, errNonLinux +} + +type Utsname struct { + Release [65]byte + Version [65]byte +} + +func Uname(buf *Utsname) (err error) { + return errNonLinux +} + +func Getpid() int { + return -1 +} + +func Gettid() int { + return -1 +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return errNonLinux +} + +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux +} + +func ByteSliceToString(s []byte) string { + return "" +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return errNonLinux +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux +} + +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux +} + +func Fstat(fd int, stat *Stat_t) error { + return errNonLinux +} + +func SetsockoptInt(fd, level, opt, value int) error { + return errNonLinux +} + +type CPUSet struct{} + +func (*CPUSet) Set(int) {} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return errNonLinux +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return errNonLinux +} diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go new file mode 100644 index 000000000..104927855 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/vdso.go @@ -0,0 +1,143 @@ +package internal + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + av, err := newAuxvRuntimeReader() + if err != nil { + return 0, err + } + + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. 
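+ // The auxv entry holds a raw address inside our own address space; going
+ // through /proc/self/mem lets us read it as a file offset instead of
+ // converting the integer into a Go pointer.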
+ mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. +func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) { + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + for { + tag, value, err := r.ReadAuxvPair() + if err != nil { + return 0, err + } + + switch tag { + case _AT_SYSINFO_EHDR: + if value != 0 { + return uintptr(value), nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, Align(n.NameSize, 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. + name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. 
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go new file mode 100644 index 000000000..acd4650af --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -0,0 +1,107 @@ +package internal + +import ( + "fmt" + "sync" + + "github.com/cilium/ebpf/internal/unix" +) + +const ( + // Version constant used in ELF binaries indicating that the loader needs to + // substitute the eBPF program's version with the value of the kernel's + // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf + // and RedSift. + MagicKernelVersion = 0xFFFFFFFE +) + +// A Version in the form Major.Minor.Patch. +type Version [3]uint16 + +// NewVersion creates a version from a string like "Major.Minor.Patch". +// +// Patch is optional. +func NewVersion(ver string) (Version, error) { + var major, minor, patch uint16 + n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) + if n < 2 { + return Version{}, fmt.Errorf("invalid version: %s", ver) + } + return Version{major, minor, patch}, nil +} + +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + +func (v Version) String() string { + if v[2] == 0 { + return fmt.Sprintf("v%d.%d", v[0], v[1]) + } + return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) +} + +// Less returns true if the version is less than another version. +func (v Version) Less(other Version) bool { + for i, a := range v { + if a == other[i] { + continue + } + return a < other[i] + } + return false +} + +// Unspecified returns true if the version is all zero. +func (v Version) Unspecified() bool { + return v[0] == 0 && v[1] == 0 && v[2] == 0 +} + +// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. +// It represents the kernel version and patch level as a single value. +func (v Version) Kernel() uint32 { + + // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid + // overflowing into PATCHLEVEL. + // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). + s := v[2] + if s > 255 { + s = 255 + } + + // Truncate members to uint8 to prevent them from spilling over into + // each other when overflowing 8 bits. + return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) +} + +// KernelVersion returns the version of the currently running kernel. +var KernelVersion = sync.OnceValues(func() (Version, error) { + return detectKernelVersion() +}) + +// detectKernelVersion returns the version of the running kernel. +func detectKernelVersion() (Version, error) { + vc, err := vdsoVersion() + if err != nil { + return Version{}, err + } + return NewVersionFromCode(vc), nil +} + +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. 
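+//
+// A minimal usage sketch (illustrative only, from code inside this module):
+//
+//	release, err := internal.KernelRelease()
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Printf("kernel release: %s\n", release) // e.g. "5.15.17-1-lts"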
+func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) + } + + return unix.ByteSliceToString(uname.Release[:]), nil +} diff --git a/vendor/github.com/cilium/ebpf/link/anchor.go b/vendor/github.com/cilium/ebpf/link/anchor.go new file mode 100644 index 000000000..1a3b5f768 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/anchor.go @@ -0,0 +1,137 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const anchorFlags = sys.BPF_F_REPLACE | + sys.BPF_F_BEFORE | + sys.BPF_F_AFTER | + sys.BPF_F_ID | + sys.BPF_F_LINK_MPROG + +// Anchor is a reference to a link or program. +// +// It is used to describe where an attachment or detachment should take place +// for link types which support multiple attachment. +type Anchor interface { + // anchor returns an fd or ID and a set of flags. + // + // By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG + // changes this to refer to a link instead. + // + // BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program + // is attached. The default behaviour if none of these flags is specified + // matches BPF_F_AFTER. + anchor() (fdOrID, flags uint32, _ error) +} + +type firstAnchor struct{} + +func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_BEFORE, nil +} + +// Head is the position before all other programs or links. +func Head() Anchor { + return firstAnchor{} +} + +type lastAnchor struct{} + +func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_AFTER, nil +} + +// Tail is the position after all other programs or links. +func Tail() Anchor { + return lastAnchor{} +} + +// Before is the position just in front of target. +func BeforeLink(target Link) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLink(target Link) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. +func ReplaceProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +// Before is the position just in front of target. +func BeforeProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. 
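+//
+// Illustrative sketch (assuming an attach options struct with an Anchor
+// field, such as RawAttachProgramOptions elsewhere in this change):
+//
+//	opts.Anchor = ReplaceProgramByID(id) // atomically replace the program with ID id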
+func ReplaceProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +type anchor struct { + target any + position uint32 +} + +func (ap anchor) anchor() (fdOrID, flags uint32, _ error) { + var typeFlag uint32 + switch target := ap.target.(type) { + case *ebpf.Program: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = 0 + case ebpf.ProgramID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_ID + case interface{ FD() int }: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = sys.BPF_F_LINK_MPROG + case ID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID + default: + return 0, 0, fmt.Errorf("invalid target %T", ap.target) + } + + return fdOrID, ap.position | typeFlag, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go new file mode 100644 index 000000000..f17d34f03 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -0,0 +1,208 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type cgroupAttachFlags uint32 + +const ( + // Allow programs attached to sub-cgroups to override the verdict of this + // program. + flagAllowOverride cgroupAttachFlags = 1 << iota + // Allow attaching multiple programs to the cgroup. Only works if the cgroup + // has zero or more programs attached using the Multi flag. Implies override. + flagAllowMulti + // Set automatically by progAttachCgroup.Update(). Used for updating a + // specific given program attached in multi-mode. + flagReplace +) + +type CgroupOptions struct { + // Path to a cgroupv2 folder. + Path string + // One of the AttachCgroup* constants + Attach ebpf.AttachType + // Program must be of type CGroup*, and the attach type must match Attach. + Program *ebpf.Program +} + +// AttachCgroup links a BPF program to a cgroup. +// +// If the running kernel doesn't support bpf_link, attempts to emulate its +// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not +// available, the returned [Link] will not support pinning to bpffs. +// +// If you need more control over attachment flags or the attachment mechanism +// used, look at [RawAttachProgram] and [AttachRawLink] instead. +func AttachCgroup(opts CgroupOptions) (cg Link, err error) { + cgroup, err := os.Open(opts.Path) + if err != nil { + return nil, fmt.Errorf("can't open cgroup: %s", err) + } + defer func() { + if _, ok := cg.(*progAttachCgroup); ok { + // Skip closing the cgroup handle if we return a valid progAttachCgroup, + // where the handle is retained to implement Update(). + return + } + cgroup.Close() + }() + + cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program) + if err == nil { + return cg, nil + } + + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti) + } + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride) + } + if err != nil { + return nil, err + } + + return cg, nil +} + +type progAttachCgroup struct { + cgroup *os.File + current *ebpf.Program + attachType ebpf.AttachType + flags cgroupAttachFlags +} + +var _ Link = (*progAttachCgroup)(nil) + +func (cg *progAttachCgroup) isLink() {} + +// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH. +// cgroup and prog are retained by [progAttachCgroup]. 
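+// The program is cloned first, so the caller remains free to close prog;
+// Close detaches and releases the clone again.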
+func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) { + if flags&flagAllowMulti > 0 { + if err := haveProgAttachReplace(); err != nil { + return nil, fmt.Errorf("can't support multiple programs: %w", err) + } + } + + // Use a program handle that cannot be closed by the caller. + clone, err := prog.Clone() + if err != nil { + return nil, err + } + + err = RawAttachProgram(RawAttachProgramOptions{ + Target: int(cgroup.Fd()), + Program: clone, + Flags: uint32(flags), + Attach: attach, + }) + if err != nil { + clone.Close() + return nil, fmt.Errorf("cgroup: %w", err) + } + + return &progAttachCgroup{cgroup, clone, attach, flags}, nil +} + +func (cg *progAttachCgroup) Close() error { + defer cg.cgroup.Close() + defer cg.current.Close() + + err := RawDetachProgram(RawDetachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: cg.current, + Attach: cg.attachType, + }) + if err != nil { + return fmt.Errorf("close cgroup: %s", err) + } + return nil +} + +func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { + new, err := prog.Clone() + if err != nil { + return err + } + + args := RawAttachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: prog, + Attach: cg.attachType, + Flags: uint32(cg.flags), + } + + if cg.flags&flagAllowMulti > 0 { + // Atomically replacing multiple programs requires at least + // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf + // program in MULTI mode") + args.Anchor = ReplaceProgram(cg.current) + } + + if err := RawAttachProgram(args); err != nil { + new.Close() + return fmt.Errorf("can't update cgroup: %s", err) + } + + cg.current.Close() + cg.current = new + return nil +} + +func (cg *progAttachCgroup) Pin(string) error { + return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Unpin() error { + return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Info() (*Info, error) { + return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) +} + +type linkCgroup struct { + RawLink +} + +var _ Link = (*linkCgroup)(nil) + +// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE. +func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { + link, err := AttachRawLink(RawLinkOptions{ + Target: int(cgroup.Fd()), + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &linkCgroup{*link}, err +} + +func (cg *linkCgroup) Info() (*Info, error) { + var info sys.CgroupLinkInfo + if err := sys.ObjInfo(cg.fd, &info); err != nil { + return nil, fmt.Errorf("cgroup link info: %s", err) + } + extra := &CgroupInfo{ + CgroupId: info.CgroupId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go new file mode 100644 index 000000000..2bde35ed7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/doc.go @@ -0,0 +1,2 @@ +// Package link allows attaching eBPF programs to various kernel hooks. 
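+//
+// An illustrative sketch (not part of this change; prog is assumed to be a
+// loaded *ebpf.Program of type Kprobe):
+//
+//	lnk, err := link.Kprobe("vfs_read", prog, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer lnk.Close() // detaches the program again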
+package link
diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go
new file mode 100644
index 000000000..0a39faef8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/iter.go
@@ -0,0 +1,84 @@
+package link
+
+import (
+ "fmt"
+ "io"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type IterOptions struct {
+ // Program must be of type Tracing with attach type
+ // AttachTraceIter. The kind of iterator to attach to is
+ // determined at load time via the AttachTo field.
+ //
+ // AttachTo requires the kernel to include BTF of itself,
+ // and to have been compiled with a recent pahole (>= 1.16).
+ Program *ebpf.Program
+
+ // Map specifies the target map for bpf_map_elem and sockmap iterators.
+ // It may be nil.
+ Map *ebpf.Map
+}
+
+// AttachIter attaches a BPF seq_file iterator.
+func AttachIter(opts IterOptions) (*Iter, error) {
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ var info bpfIterLinkInfoMap
+ if opts.Map != nil {
+ mapFd := opts.Map.FD()
+ if mapFd < 0 {
+ return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd)
+ }
+ info.map_fd = uint32(mapFd)
+ }
+
+ attr := sys.LinkCreateIterAttr{
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(ebpf.AttachTraceIter),
+ IterInfo: sys.NewPointer(unsafe.Pointer(&info)),
+ IterInfoLen: uint32(unsafe.Sizeof(info)),
+ }
+
+ fd, err := sys.LinkCreateIter(&attr)
+ if err != nil {
+ if haveFeatErr := haveBPFLink(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, fmt.Errorf("can't link iterator: %w", err)
+ }
+
+ return &Iter{RawLink{fd, ""}}, err
+}
+
+// Iter represents an attached bpf_iter.
+type Iter struct {
+ RawLink
+}
+
+// Open creates a new instance of the iterator.
+//
+// Reading from the returned reader triggers the BPF program.
+func (it *Iter) Open() (io.ReadCloser, error) {
+ attr := &sys.IterCreateAttr{
+ LinkFd: it.fd.Uint(),
+ }
+
+ fd, err := sys.IterCreate(attr)
+ if err != nil {
+ return nil, fmt.Errorf("can't create iterator: %w", err)
+ }
+
+ return fd.File("bpf_iter"), nil
+}
+
+// union bpf_iter_link_info.map
+type bpfIterLinkInfoMap struct {
+ map_fd uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
new file mode 100644
index 000000000..fe3f17c37
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -0,0 +1,365 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeOptions defines additional parameters that will be used
+// when loading Kprobes.
+type KprobeOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+ // Offset of the kprobe relative to the traced symbol.
+ // Can be used to insert kprobes at arbitrary offsets in kernel functions,
+ // e.g. in places where functions have been inlined.
+ Offset uint64
+ // Increase the maximum number of concurrent invocations of a kretprobe.
+ // Required when tracing some long-running functions in the kernel.
+ //
+ // Deprecated: this setting forces the use of an outdated kernel API and is not portable
+ // across kernel versions.
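+ //
+ // For illustration: a value of 10 yields a tracefs token prefixed with
+ // "r10" (see probePrefix in the vendored tracefs package).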
+ RetprobeMaxActive int
+ // Prefix used for the event name if the kprobe must be attached using tracefs.
+ // The group name is formatted as `<prefix>_<random string>`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
+}
+
+func (ko *KprobeOptions) cookie() uint64 {
+ if ko == nil {
+ return 0
+ }
+ return ko.Cookie
+}
+
+// Kprobe attaches the given eBPF program to a perf event that fires when the
+// given kernel symbol starts executing. See /proc/kallsyms for available
+// symbols. For example, printk():
+//
+// kp, err := Kprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to the symbol fails, it automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// The returned Link may implement [PerfEvent].
+func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Kretprobe attaches the given eBPF program to a perf event that fires right
+// before the given kernel symbol exits, with the function stack left intact.
+// See /proc/kallsyms for available symbols. For example, printk():
+//
+// kp, err := Kretprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kretprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to the symbol fails, it automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
+// incorrectly returns unix.EINVAL instead of os.ErrNotExist.
+//
+// The returned Link may implement [PerfEvent].
+func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// isValidKprobeSymbol implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_.]*$".
+func isValidKprobeSymbol(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ // Allow `.` in symbol name. GCC-compiled kernel may change symbol name
+ // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`.
+ // See: https://gcc.gnu.org/gcc-10/changes.html
+ case i > 0 && c == '.':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+// kprobe opens a perf event on the given symbol and attaches prog to it.
+// If ret is true, a kretprobe is created.
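+// It first tries the perf_kprobe PMU and falls back to a tracefs trace event,
+// retrying each path with the platform syscall prefix (e.g. __x64_) when the
+// plain symbol cannot be found.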
+func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { + if symbol == "" { + return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if !isValidKprobeSymbol(symbol) { + return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) + } + if prog.Type() != ebpf.Kprobe { + return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Pid: perfAllThreads, + Symbol: symbol, + Ret: ret, + } + + if opts != nil { + args.RetprobeMaxActive = opts.RetprobeMaxActive + args.Cookie = opts.Cookie + args.Offset = opts.Offset + args.Group = opts.TraceFSPrefix + } + + // Use kprobe PMU if the kernel has it available. + tp, err := pmuProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := internal.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = pmuProbe(args) + } + } + if err == nil { + return tp, nil + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err) + } + + // Use tracefs if kprobe PMU is missing. + args.Symbol = symbol + tp, err = tracefsProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := internal.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = tracefsProbe(args) + } + } + if err != nil { + return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err) + } + + return tp, nil +} + +// pmuProbe opens a perf event based on a Performance Monitoring Unit. +// +// Requires at least a 4.17 kernel. +// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU" +// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" +// +// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU +func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + // Getting the PMU type will fail if the kernel doesn't support + // the perf_[k,u]probe PMU. + eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type") + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported) + } + if err != nil { + return nil, err + } + + // Use tracefs if we want to set kretprobe's retprobeMaxActive. + if args.RetprobeMaxActive != 0 { + return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported) + } + + var config uint64 + if args.Ret { + bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe") + if err != nil { + return nil, err + } + config |= 1 << bit + } + + var ( + attr unix.PerfEventAttr + sp unsafe.Pointer + token string + ) + switch args.Type { + case tracefs.Kprobe: + // Create a pointer to a NUL-terminated string for the kernel. + sp, err = unsafeStringPtr(args.Symbol) + if err != nil { + return nil, err + } + + token = tracefs.KprobeToken(args) + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. Use Ext2 as probe_offset. 
+ Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Ext2: args.Offset, // Kernel symbol offset + Config: config, // Retprobe flag + } + case tracefs.Uprobe: + sp, err = unsafeStringPtr(args.Path) + if err != nil { + return nil, err + } + + if args.RefCtrOffset != 0 { + config |= args.RefCtrOffset << uprobeRefCtrOffsetShift + } + + token = tracefs.UprobeToken(args) + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. The Size field controls the + // size of the internal buffer the kernel allocates for reading the + // perf_event_attr argument from userspace. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Uprobe path + Ext2: args.Offset, // Uprobe offset + Config: config, // RefCtrOffset, Retprobe flag + } + } + + cpu := 0 + if args.Pid != perfAllThreads { + cpu = -1 + } + rawFd, err := unix.PerfEventOpen(&attr, args.Pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + + // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and + // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs. + // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 + if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") { + return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported) + } + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, unix.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned + // when attempting to set a uprobe on a trap instruction. + if errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err) + } + + if err != nil { + return nil, fmt.Errorf("token %s: opening perf event: %w", token, err) + } + + // Ensure the string pointer is not collected before PerfEventOpen returns. + runtime.KeepAlive(sp) + + fd, err := sys.NewFD(rawFd) + if err != nil { + return nil, err + } + + // Kernel has perf_[k,u]probe PMU available, initialize perf event. + return newPerfEvent(fd, nil), nil +} + +// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. +// A new trace event group name is generated on every call to support creating +// multiple trace events for the same kernel or userspace symbol. +// Path and offset are only set in the case of uprobe(s) and are used to set +// the executable/library path on the filesystem and the offset where the probe is inserted. +// A perf event is then opened on the newly-created trace event and returned to the caller. +func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + groupPrefix := "ebpf" + if args.Group != "" { + groupPrefix = args.Group + } + + // Generate a random string for each trace event we attempt to create. 
+ // This value is used as the 'group' token in tracefs to allow creating + // multiple kprobe trace events with the same name. + group, err := tracefs.RandomGroup(groupPrefix) + if err != nil { + return nil, fmt.Errorf("randomizing group name: %w", err) + } + args.Group = group + + // Create the [k,u]probe trace event using tracefs. + evt, err := tracefs.NewEvent(args) + if err != nil { + return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) + } + + // Kprobes are ephemeral tracepoints and share the same perf event type. + fd, err := openTracepointPerfEvent(evt.ID(), args.Pid) + if err != nil { + // Make sure we clean up the created tracefs event when we return error. + // If a livepatch handler is already active on the symbol, the write to + // tracefs will succeed, a trace event will show up, but creating the + // perf event will fail with EBUSY. + _ = evt.Close() + return nil, err + } + + return newPerfEvent(fd, evt), nil +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go new file mode 100644 index 000000000..f7a8291f9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go @@ -0,0 +1,191 @@ +package link + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// KprobeMultiOptions defines additional parameters that will be used +// when opening a KprobeMulti Link. +type KprobeMultiOptions struct { + // Symbols takes a list of kernel symbol names to attach an ebpf program to. + // + // Mutually exclusive with Addresses. + Symbols []string + + // Addresses takes a list of kernel symbol addresses in case they can not + // be referred to by name. + // + // Note that only start addresses can be specified, since the fprobe API + // limits the attach point to the function entry or return. + // + // Mutually exclusive with Symbols. + Addresses []uintptr + + // Cookies specifies arbitrary values that can be fetched from an eBPF + // program via `bpf_get_attach_cookie()`. + // + // If set, its length should be equal to the length of Symbols or Addresses. + // Each Cookie is assigned to the Symbol or Address specified at the + // corresponding slice index. + Cookies []uint64 +} + +// KprobeMulti attaches the given eBPF program to the entry point of a given set +// of kernel symbols. +// +// The difference with Kprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. +func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, 0) +} + +// KretprobeMulti attaches the given eBPF program to the return point of a given +// set of kernel symbols. +// +// The difference with Kretprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. 
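+//
+// Illustrative sketch (the symbol names are assumptions):
+//
+//	lnk, err := link.KretprobeMulti(prog, link.KprobeMultiOptions{
+//		Symbols: []string{"vfs_read", "vfs_write"},
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer lnk.Close()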
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN) +} + +func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + syms := uint32(len(opts.Symbols)) + addrs := uint32(len(opts.Addresses)) + cookies := uint32(len(opts.Cookies)) + + if syms == 0 && addrs == 0 { + return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput) + } + if syms != 0 && addrs != 0 { + return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput) + } + if cookies > 0 && cookies != syms && cookies != addrs { + return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + KprobeMultiFlags: flags, + } + + switch { + case syms != 0: + attr.Count = syms + attr.Syms = sys.NewStringSlicePointer(opts.Symbols) + + case addrs != 0: + attr.Count = addrs + attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0])) + } + + if cookies != 0 { + attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + } + + fd, err := sys.LinkCreateKprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &kprobeMultiLink{RawLink{fd, ""}}, nil +} + +type kprobeMultiLink struct { + RawLink +} + +var _ Link = (*kprobeMultiLink)(nil) + +func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported) +} + +func (kml *kprobeMultiLink) Info() (*Info, error) { + var info sys.KprobeMultiLinkInfo + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("kprobe multi link info: %s", err) + } + extra := &KprobeMultiInfo{ + count: info.Count, + flags: info.Flags, + missed: info.Missed, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kpm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + // If CONFIG_FPROBE isn't set. 
+ case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}) diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go new file mode 100644 index 000000000..9c34616c9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -0,0 +1,530 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +var ErrNotSupported = internal.ErrNotSupported + +// Link represents a Program attached to a BPF hook. +type Link interface { + // Replace the current program with a new program. + // + // Passing a nil program is an error. May return an error wrapping ErrNotSupported. + Update(*ebpf.Program) error + + // Persist a link by pinning it into a bpffs. + // + // May return an error wrapping ErrNotSupported. + Pin(string) error + + // Undo a previous call to Pin. + // + // May return an error wrapping ErrNotSupported. + Unpin() error + + // Close frees resources. + // + // The link will be broken unless it has been successfully pinned. + // A link may continue past the lifetime of the process if Close is + // not called. + Close() error + + // Info returns metadata on a link. + // + // May return an error wrapping ErrNotSupported. + Info() (*Info, error) + + // Prevent external users from implementing this interface. + isLink() +} + +// NewLinkFromFD creates a link from a raw fd. +// +// Deprecated: use [NewFromFD] instead. +func NewLinkFromFD(fd int) (Link, error) { + return NewFromFD(fd) +} + +// NewFromFD creates a link from a raw fd. +// +// You should not use fd after calling this function. +func NewFromFD(fd int) (Link, error) { + sysFD, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return wrapRawLink(&RawLink{fd: sysFD}) +} + +// NewFromID returns the link associated with the given id. +// +// Returns ErrNotExist if there is no link with the given id. +func NewFromID(id ID) (Link, error) { + getFdAttr := &sys.LinkGetFdByIdAttr{Id: id} + fd, err := sys.LinkGetFdById(getFdAttr) + if err != nil { + return nil, fmt.Errorf("get link fd from ID %d: %w", id, err) + } + + return wrapRawLink(&RawLink{fd, ""}) +} + +// LoadPinnedLink loads a link that was persisted into a bpffs. +func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err + } + + return wrapRawLink(raw) +} + +// wrap a RawLink in a more specific type if possible. +// +// The function takes ownership of raw and closes it on error. 
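+// For example, a link whose info reports KprobeMultiType is returned to the
+// caller as a *kprobeMultiLink rather than a plain *RawLink.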
+func wrapRawLink(raw *RawLink) (_ Link, err error) { + defer func() { + if err != nil { + raw.Close() + } + }() + + info, err := raw.Info() + if err != nil { + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + case KprobeMultiType: + return &kprobeMultiLink{*raw}, nil + case UprobeMultiType: + return &uprobeMultiLink{*raw}, nil + case PerfEventType: + return &perfEventLink{*raw, nil}, nil + case TCXType: + return &tcxLink{*raw}, nil + case NetfilterType: + return &netfilterLink{*raw}, nil + case NetkitType: + return &netkitLink{*raw}, nil + case XDPType: + return &xdpLink{*raw}, nil + default: + return raw, nil + } +} + +// ID uniquely identifies a BPF link. +type ID = sys.LinkID + +// RawLinkOptions control the creation of a raw link. +type RawLinkOptions struct { + // File descriptor to attach to. This differs for each attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // BTF is the BTF of the attachment target. + BTF btf.TypeID + // Flags control the attach behaviour. + Flags uint32 +} + +// Info contains metadata on a link. +type Info struct { + Type Type + ID ID + Program ebpf.ProgramID + extra interface{} +} + +type TracingInfo struct { + AttachType sys.AttachType + TargetObjId uint32 + TargetBtfId sys.TypeID +} + +type CgroupInfo struct { + CgroupId uint64 + AttachType sys.AttachType + _ [4]byte +} + +type NetNsInfo struct { + NetnsIno uint32 + AttachType sys.AttachType +} + +type TCXInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type XDPInfo struct { + Ifindex uint32 +} + +type NetfilterInfo struct { + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 +} + +type NetkitInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type KprobeMultiInfo struct { + count uint32 + flags uint32 + missed uint64 +} + +// AddressCount is the number of addresses hooked by the kprobe. +func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { + return kpm.count, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { + return kpm.flags, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { + return kpm.missed, kpm.count > 0 +} + +type PerfEventInfo struct { + Type sys.PerfEventType + extra interface{} +} + +func (r *PerfEventInfo) Kprobe() *KprobeInfo { + e, _ := r.extra.(*KprobeInfo) + return e +} + +type KprobeInfo struct { + address uint64 + missed uint64 +} + +func (kp *KprobeInfo) Address() (uint64, bool) { + return kp.address, kp.address > 0 +} + +func (kp *KprobeInfo) Missed() (uint64, bool) { + return kp.missed, kp.address > 0 +} + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. 
+func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// XDP returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e +} + +// TCX returns TCX type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) TCX() *TCXInfo { + e, _ := r.extra.(*TCXInfo) + return e +} + +// Netfilter returns netfilter type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netfilter() *NetfilterInfo { + e, _ := r.extra.(*NetfilterInfo) + return e +} + +// Netkit returns netkit type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netkit() *NetkitInfo { + e, _ := r.extra.(*NetkitInfo) + return e +} + +// KprobeMulti returns kprobe-multi type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) KprobeMulti() *KprobeMultiInfo { + e, _ := r.extra.(*KprobeMultiInfo) + return e +} + +// PerfEvent returns perf-event type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) PerfEvent() *PerfEventInfo { + e, _ := r.extra.(*PerfEventInfo) + return e +} + +// RawLink is the low-level API to bpf_link. +// +// You should consider using the higher level interfaces in this +// package instead. +type RawLink struct { + fd *sys.FD + pinnedPath string +} + +// AttachRawLink creates a raw link. +func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + if opts.Target < 0 { + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(opts.Attach), + TargetBtfId: opts.BTF, + Flags: opts.Flags, + } + fd, err := sys.LinkCreate(&attr) + if err != nil { + return nil, fmt.Errorf("create link: %w", err) + } + + return &RawLink{fd, ""}, nil +} + +func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, fmt.Errorf("load pinned link: %w", err) + } + + return &RawLink{fd, fileName}, nil +} + +func (l *RawLink) isLink() {} + +// FD returns the raw file descriptor. +func (l *RawLink) FD() int { + return l.fd.Int() +} + +// Close breaks the link. +// +// Use Pin if you want to make the link persistent. +func (l *RawLink) Close() error { + return l.fd.Close() +} + +// Pin persists a link past the lifetime of the process. +// +// Calling Close on a pinned Link will not break the link +// until the pin is removed. +func (l *RawLink) Pin(fileName string) error { + if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { + return err + } + l.pinnedPath = fileName + return nil +} + +// Unpin implements the Link interface. +func (l *RawLink) Unpin() error { + if err := internal.Unpin(l.pinnedPath); err != nil { + return err + } + l.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Link has a non-empty pinned path. +func (l *RawLink) IsPinned() bool { + return l.pinnedPath != "" +} + +// Update implements the Link interface. 
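+//
+// For example (a sketch; `rawLink` and `prog` are assumptions, with `prog`
+// a loaded *ebpf.Program of a compatible type):
+//
+//	if err := rawLink.Update(prog); err != nil {
+//		// handle the error
+//	}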
+func (l *RawLink) Update(new *ebpf.Program) error {
+	return l.UpdateArgs(RawLinkUpdateOptions{
+		New: new,
+	})
+}
+
+// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
+type RawLinkUpdateOptions struct {
+	New   *ebpf.Program
+	Old   *ebpf.Program
+	Flags uint32
+}
+
+// UpdateArgs updates a link based on args.
+func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
+	newFd := opts.New.FD()
+	if newFd < 0 {
+		return fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+	}
+
+	var oldFd int
+	if opts.Old != nil {
+		oldFd = opts.Old.FD()
+		if oldFd < 0 {
+			return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd)
+		}
+	}
+
+	attr := sys.LinkUpdateAttr{
+		LinkFd:    l.fd.Uint(),
+		NewProgFd: uint32(newFd),
+		OldProgFd: uint32(oldFd),
+		Flags:     opts.Flags,
+	}
+	return sys.LinkUpdate(&attr)
+}
+
+// Info returns metadata about the link.
+//
+// Link-type-specific metadata is not included and can be retrieved
+// via the link-type-specific Info() method.
+func (l *RawLink) Info() (*Info, error) {
+	var info sys.LinkInfo
+
+	if err := sys.ObjInfo(l.fd, &info); err != nil {
+		return nil, fmt.Errorf("link info: %s", err)
+	}
+
+	return &Info{
+		info.Type,
+		info.Id,
+		ebpf.ProgramID(info.ProgId),
+		nil,
+	}, nil
+}
+
+// Iterator allows iterating over links attached into the kernel.
+type Iterator struct {
+	// The ID of the current link. Only valid after a call to Next.
+	ID ID
+	// The current link. Only valid until a call to Next.
+	// See Take if you want to retain the link.
+	Link Link
+	err  error
+}
+
+// Next retrieves the next link.
+//
+// Returns true if another link was found. Call [Iterator.Err] after the function returns false.
+func (it *Iterator) Next() bool {
+	id := it.ID
+	for {
+		getIdAttr := &sys.LinkGetNextIdAttr{Id: id}
+		err := sys.LinkGetNextId(getIdAttr)
+		if errors.Is(err, os.ErrNotExist) {
+			// There are no more links.
+			break
+		} else if err != nil {
+			it.err = fmt.Errorf("get next link ID: %w", err)
+			break
+		}
+
+		id = getIdAttr.NextId
+		l, err := NewFromID(id)
+		if errors.Is(err, os.ErrNotExist) {
+			// Couldn't load the link fast enough. Try next ID.
+			continue
+		} else if err != nil {
+			it.err = fmt.Errorf("get link for ID %d: %w", id, err)
+			break
+		}
+
+		if it.Link != nil {
+			it.Link.Close()
+		}
+		it.ID, it.Link = id, l
+		return true
+	}
+
+	// No more links or we encountered an error.
+	if it.Link != nil {
+		it.Link.Close()
+	}
+	it.Link = nil
+	return false
+}
+
+// Take the ownership of the current link.
+//
+// It's the caller's responsibility to close the link.
+func (it *Iterator) Take() Link {
+	l := it.Link
+	it.Link = nil
+	return l
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *Iterator) Err() error {
+	return it.err
+}
+
+func (it *Iterator) Close() {
+	if it.Link != nil {
+		it.Link.Close()
+	}
+}
diff --git a/vendor/github.com/cilium/ebpf/link/netfilter.go b/vendor/github.com/cilium/ebpf/link/netfilter.go
new file mode 100644
index 000000000..34be39085
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/netfilter.go
@@ -0,0 +1,90 @@
+package link
+
+import (
+	"fmt"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmentation
+
+type NetfilterAttachFlags uint32
+
+type NetfilterOptions struct {
+	// Program must be a netfilter BPF program.
+	Program *ebpf.Program
+	// The protocol family.
+	ProtocolFamily uint32
+	// The number of the hook you are interested in.
+ HookNumber uint32 + // Priority within hook + Priority int32 + // Extra link flags + Flags uint32 + // Netfilter flags + NetfilterFlags NetfilterAttachFlags +} + +type netfilterLink struct { + RawLink +} + +// AttachNetfilter links a netfilter BPF program to a netfilter hook. +func AttachNetfilter(opts NetfilterOptions) (Link, error) { + if opts.Program == nil { + return nil, fmt.Errorf("netfilter program is nil") + } + + if t := opts.Program.Type(); t != ebpf.Netfilter { + return nil, fmt.Errorf("invalid program type %s, expected netfilter", t) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateNetfilterAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.BPF_NETFILTER, + Flags: opts.Flags, + Pf: uint32(opts.ProtocolFamily), + Hooknum: uint32(opts.HookNumber), + Priority: opts.Priority, + NetfilterFlags: uint32(opts.NetfilterFlags), + } + + fd, err := sys.LinkCreateNetfilter(&attr) + if err != nil { + return nil, fmt.Errorf("attach netfilter link: %w", err) + } + + return &netfilterLink{RawLink{fd, ""}}, nil +} + +func (*netfilterLink) Update(new *ebpf.Program) error { + return fmt.Errorf("netfilter update: %w", ErrNotSupported) +} + +func (nf *netfilterLink) Info() (*Info, error) { + var info sys.NetfilterLinkInfo + if err := sys.ObjInfo(nf.fd, &info); err != nil { + return nil, fmt.Errorf("netfilter link info: %s", err) + } + extra := &NetfilterInfo{ + Pf: info.Pf, + Hooknum: info.Hooknum, + Priority: info.Priority, + Flags: info.Flags, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var _ Link = (*netfilterLink)(nil) diff --git a/vendor/github.com/cilium/ebpf/link/netkit.go b/vendor/github.com/cilium/ebpf/link/netkit.go new file mode 100644 index 000000000..5eee3b023 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netkit.go @@ -0,0 +1,89 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type NetkitOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachNetkit* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachNetkit(opts NetkitOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateNetkitAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach netkit link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateNetkit(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveNetkit(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach netkit link: %w", err) + } + + return &netkitLink{RawLink{fd, ""}}, nil +} + +type netkitLink struct { + RawLink +} + +var _ Link = (*netkitLink)(nil) + +func (netkit *netkitLink) Info() (*Info, error) { + var info sys.NetkitLinkInfo + if err := sys.ObjInfo(netkit.fd, &info); err != nil { + return nil, fmt.Errorf("netkit link info: %s", err) + } + extra := &NetkitInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go new file mode 100644 index 000000000..b1edd340a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netns.go @@ -0,0 +1,55 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// NetNsLink is a program attached to a network namespace. +type NetNsLink struct { + RawLink +} + +// AttachNetNs attaches a program to a network namespace. +func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { + var attach ebpf.AttachType + switch t := prog.Type(); t { + case ebpf.FlowDissector: + attach = ebpf.AttachFlowDissector + case ebpf.SkLookup: + attach = ebpf.AttachSkLookup + default: + return nil, fmt.Errorf("can't attach %v to network namespace", t) + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: ns, + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &NetNsLink{*link}, nil +} + +func (ns *NetNsLink) Info() (*Info, error) { + var info sys.NetNsLinkInfo + if err := sys.ObjInfo(ns.fd, &info); err != nil { + return nil, fmt.Errorf("netns link info: %s", err) + } + extra := &NetNsInfo{ + NetnsIno: info.NetnsIno, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go new file mode 100644 index 000000000..1d8feb58c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -0,0 +1,332 @@ +package link + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +// Getting the terminology right is usually the hardest part. 
For posterity and
+// for staying sane during implementation:
+//
+// - trace event: Representation of a kernel runtime hook. Filesystem entries
+//   under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes.
+//   Can be instantiated into perf events (see below).
+// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+//   events in (sub)directories under <tracefs>/events. Cannot be closed or
+//   removed, they are static.
+// - k(ret)probe: Ephemeral trace events based on entry or exit points of
+//   exported kernel symbols. kprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the <tracefs>/kprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - u(ret)probe: Ephemeral trace events based on user-provided ELF binaries
+//   and offsets. uprobe-based (tracefs) trace events can be
+//   created system-wide by writing to the <tracefs>/uprobe_events file, or
+//   they can be scoped to the current process by creating PMU perf events.
+// - perf event: An object instantiated based on an existing trace event or
+//   kernel symbol. Referred to by fd in userspace.
+//   Exactly one eBPF program can be attached to a perf event. Multiple perf
+//   events can be created from a single trace event. Closing a perf event
+//   stops any further invocations of the attached eBPF program.
+
+var (
+	errInvalidInput = tracefs.ErrInvalidInput
+)
+
+const (
+	perfAllThreads = -1
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+	// Trace event backing this perfEvent. May be nil.
+	tracefsEvent *tracefs.Event
+
+	// This is the perf event FD.
+	fd *sys.FD
+}
+
+func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent {
+	pe := &perfEvent{event, fd}
+	// Both event and fd have their own finalizer, but we want to
+	// guarantee that they are closed in a certain order.
+	runtime.SetFinalizer(pe, (*perfEvent).Close)
+	return pe
+}
+
+func (pe *perfEvent) Close() error {
+	runtime.SetFinalizer(pe, nil)
+
+	if err := pe.fd.Close(); err != nil {
+		return fmt.Errorf("closing perf event fd: %w", err)
+	}
+
+	if pe.tracefsEvent != nil {
+		return pe.tracefsEvent.Close()
+	}
+
+	return nil
+}
+
+// PerfEvent is implemented by some Link types which use a perf event under
+// the hood.
+type PerfEvent interface {
+	// PerfEvent returns a file for the underlying perf event.
+	//
+	// It is the caller's responsibility to close the returned file.
+	//
+	// Making changes to the associated perf event leads to
+	// undefined behaviour.
+	PerfEvent() (*os.File, error)
+}
+
+// perfEventLink represents a bpf perf link.
+type perfEventLink struct { + RawLink + pe *perfEvent +} + +func (pl *perfEventLink) isLink() {} + +func (pl *perfEventLink) Close() error { + if err := pl.fd.Close(); err != nil { + return fmt.Errorf("perf link close: %w", err) + } + + // when created from pinned link + if pl.pe == nil { + return nil + } + + if err := pl.pe.Close(); err != nil { + return fmt.Errorf("perf event close: %w", err) + } + return nil +} + +func (pl *perfEventLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event link update: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventLink)(nil) + +func (pl *perfEventLink) PerfEvent() (*os.File, error) { + // when created from pinned link + if pl.pe == nil { + return nil, ErrNotSupported + } + + fd, err := pl.pe.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event"), nil +} + +func (pl *perfEventLink) Info() (*Info, error) { + var info sys.PerfEventLinkInfo + if err := sys.ObjInfo(pl.fd, &info); err != nil { + return nil, fmt.Errorf("perf event link info: %s", err) + } + + var extra2 interface{} + switch info.PerfEventType { + case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE: + var kprobeInfo sys.KprobeLinkInfo + if err := sys.ObjInfo(pl.fd, &kprobeInfo); err != nil { + return nil, fmt.Errorf("kprobe link info: %s", err) + } + extra2 = &KprobeInfo{ + address: kprobeInfo.Addr, + missed: kprobeInfo.Missed, + } + } + + extra := &PerfEventInfo{ + Type: info.PerfEventType, + extra: extra2, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +// perfEventIoctl implements Link and handles the perf event lifecycle +// via ioctl(). +type perfEventIoctl struct { + *perfEvent +} + +func (pi *perfEventIoctl) isLink() {} + +// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), +// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array +// owned by the perf event, which means multiple programs can be attached +// simultaneously. +// +// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event +// returns EEXIST. +// +// Detaching a program from a perf event is currently not possible, so a +// program replacement mechanism cannot be implemented for perf events. +func (pi *perfEventIoctl) Update(prog *ebpf.Program) error { + return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Pin(string) error { + return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Unpin() error { + return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Info() (*Info, error) { + return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventIoctl)(nil) + +func (pi *perfEventIoctl) PerfEvent() (*os.File, error) { + fd, err := pi.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event"), nil +} + +// attach the given eBPF prog to the perf event stored in pe. +// pe must contain a valid perf event fd. +// prog's type must match the program type stored in pe. 
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + if prog.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + } + + if err := haveBPFLinkPerfEvent(); err == nil { + return attachPerfEventLink(pe, prog, cookie) + } + + if cookie != 0 { + return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) + } + + return attachPerfEventIoctl(pe, prog) +} + +func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { + // Assign the eBPF program to the perf event. + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + if err != nil { + return nil, fmt.Errorf("setting perf event bpf program: %w", err) + } + + // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. + if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return nil, fmt.Errorf("enable perf event: %s", err) + } + + return &perfEventIoctl{pe}, nil +} + +// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). +// +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) { + fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + TargetFd: pe.fd.Uint(), + AttachType: sys.BPF_PERF_EVENT, + BpfCookie: cookie, + }) + if err != nil { + return nil, fmt.Errorf("cannot create bpf perf link: %v", err) + } + + return &perfEventLink{RawLink{fd: fd}, pe}, nil +} + +// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. +func unsafeStringPtr(str string) (unsafe.Pointer, error) { + p, err := unix.BytePtrFromString(str) + if err != nil { + return nil, err + } + return unsafe.Pointer(p), nil +} + +// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide +// [k,u]probes created by writing to /[k,u]probe_events are tracepoints +// behind the scenes, and can be attached to using these perf events. +func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { + attr := unix.PerfEventAttr{ + Type: unix.PERF_TYPE_TRACEPOINT, + Config: tid, + Sample_type: unix.PERF_SAMPLE_RAW, + Sample: 1, + Wakeup: 1, + } + + cpu := 0 + if pid != perfAllThreads { + cpu = -1 + } + fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + if err != nil { + return nil, fmt.Errorf("opening tracepoint perf event: %w", err) + } + + return sys.NewFD(fd) +} + +// Probe BPF perf link. 
+// +// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_bpf_perf_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + return err + } + defer prog.Close() + + _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_PERF_EVENT, + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go new file mode 100644 index 000000000..d8a2a15f9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/program.go @@ -0,0 +1,107 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type RawAttachProgramOptions struct { + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional. + Flags uint32 + // Only attach if the internal revision matches the given value. + ExpectedRevision uint64 +} + +// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. +func RawAttachProgram(opts RawAttachProgramOptions) error { + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.ProgAttachAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + AttachFlags: uint32(opts.Flags), + ExpectedRevision: opts.ExpectedRevision, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("attach program: %w", err) + } + + if flags == sys.BPF_F_REPLACE { + // Ensure that replacing a program works on old kernels. + attr.ReplaceBpfFd = fdOrID + } else { + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + } + + if err := sys.ProgAttach(&attr); err != nil { + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } + return fmt.Errorf("attach program: %w", err) + } + runtime.KeepAlive(opts.Program) + + return nil +} + +type RawDetachProgramOptions RawAttachProgramOptions + +// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. 
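+//
+// A hedged sketch of detaching a program that was attached with
+// RawAttachProgram (`cgroupFd` and `prog` are assumptions):
+//
+//	err := RawDetachProgram(RawDetachProgramOptions{
+//		Target:  cgroupFd,
+//		Program: prog,
+//		Attach:  ebpf.AttachCGroupInetIngress,
+//	})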
+func RawDetachProgram(opts RawDetachProgramOptions) error {
+	if opts.Flags&anchorFlags != 0 {
+		return fmt.Errorf("disallowed flags: use Anchor to specify attach target")
+	}
+
+	attr := sys.ProgDetachAttr{
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachBpfFd:       uint32(opts.Program.FD()),
+		AttachType:        uint32(opts.Attach),
+		ExpectedRevision:  opts.ExpectedRevision,
+	}
+
+	if opts.Anchor != nil {
+		fdOrID, flags, err := opts.Anchor.anchor()
+		if err != nil {
+			return fmt.Errorf("detach program: %w", err)
+		}
+
+		attr.RelativeFdOrId = fdOrID
+		attr.AttachFlags |= flags
+	}
+
+	if err := sys.ProgDetach(&attr); err != nil {
+		if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
+			return haveFeatErr
+		}
+		return fmt.Errorf("can't detach program: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
new file mode 100644
index 000000000..fe534f8ef
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -0,0 +1,111 @@
+package link
+
+import (
+	"fmt"
+	"unsafe"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal/sys"
+)
+
+// QueryOptions defines additional parameters when querying for programs.
+type QueryOptions struct {
+	// Target to query. This is usually a file descriptor but may refer to
+	// something else based on the attach type.
+	Target int
+	// Attach specifies the AttachType of the programs queried for.
+	Attach ebpf.AttachType
+	// QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE.
+	QueryFlags uint32
+}
+
+// QueryResult describes which programs and links are active.
+type QueryResult struct {
+	// List of attached programs.
+	Programs []AttachedProgram
+
+	// Incremented by one every time the set of attached programs changes.
+	// May be zero if not supported by the [ebpf.AttachType].
+	Revision uint64
+}
+
+// HaveLinkInfo returns true if the kernel supports querying link information
+// for a particular [ebpf.AttachType].
+func (qr *QueryResult) HaveLinkInfo() bool {
+	return qr.Revision > 0
+}
+
+type AttachedProgram struct {
+	ID     ebpf.ProgramID
+	linkID ID
+}
+
+// LinkID returns the ID associated with the program.
+//
+// Returns 0, false if the kernel doesn't support retrieving the ID or if the
+// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you
+// need to tell the two apart.
+func (ap *AttachedProgram) LinkID() (ID, bool) {
+	return ap.linkID, ap.linkID != 0
+}
+
+// QueryPrograms retrieves a list of programs for the given AttachType.
+//
+// Returns a slice of attached programs, which may be empty.
+// The returned Revision counts how many times the set of attached programs
+// has changed and may be zero if not supported by the [ebpf.AttachType].
+// Returns ErrNotSupported on a kernel without BPF_PROG_QUERY.
+func QueryPrograms(opts QueryOptions) (*QueryResult, error) {
+	// query the number of programs to allocate correct slice size
+	attr := sys.ProgQueryAttr{
+		TargetFdOrIfindex: uint32(opts.Target),
+		AttachType:        sys.AttachType(opts.Attach),
+		QueryFlags:        opts.QueryFlags,
+	}
+	err := sys.ProgQuery(&attr)
+	if err != nil {
+		if haveFeatErr := haveProgQuery(); haveFeatErr != nil {
+			return nil, fmt.Errorf("query programs: %w", haveFeatErr)
+		}
+		return nil, fmt.Errorf("query programs: %w", err)
+	}
+	if attr.Count == 0 {
+		return &QueryResult{Revision: attr.Revision}, nil
+	}
+
+	// The minimum bpf_mprog revision is 1, so we can use the field to detect
+	// whether the attach type supports link ids.
+ haveLinkIDs := attr.Revision != 0 + + count := attr.Count + progIds := make([]ebpf.ProgramID, count) + attr = sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + Count: count, + ProgIds: sys.NewPointer(unsafe.Pointer(&progIds[0])), + } + + var linkIds []ID + if haveLinkIDs { + linkIds = make([]ID, count) + attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0])) + } + + if err := sys.ProgQuery(&attr); err != nil { + return nil, fmt.Errorf("query programs: %w", err) + } + + // NB: attr.Count might have changed between the two syscalls. + var programs []AttachedProgram + for i, id := range progIds[:attr.Count] { + ap := AttachedProgram{ID: id} + if haveLinkIDs { + ap.linkID = linkIds[i] + } + programs = append(programs, ap) + } + + return &QueryResult{programs, attr.Revision}, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go new file mode 100644 index 000000000..925e621cb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -0,0 +1,87 @@ +package link + +import ( + "errors" + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type RawTracepointOptions struct { + // Tracepoint name. + Name string + // Program must be of type RawTracepoint* + Program *ebpf.Program +} + +// AttachRawTracepoint links a BPF program to a raw_tracepoint. +// +// Requires at least Linux 4.17. +func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable { + return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) + } + if opts.Program.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + } + + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + Name: sys.NewStringPointer(opts.Name), + ProgFd: uint32(opts.Program.FD()), + }) + if err != nil { + return nil, err + } + + err = haveBPFLink() + if errors.Is(err, ErrNotSupported) { + // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") + // raw_tracepoints are just a plain fd. 
+ return &simpleRawTracepoint{fd}, nil + } + + if err != nil { + return nil, err + } + + return &rawTracepoint{RawLink{fd: fd}}, nil +} + +type simpleRawTracepoint struct { + fd *sys.FD +} + +var _ Link = (*simpleRawTracepoint)(nil) + +func (frt *simpleRawTracepoint) isLink() {} + +func (frt *simpleRawTracepoint) Close() error { + return frt.fd.Close() +} + +func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Pin(string) error { + return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Unpin() error { + return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Info() (*Info, error) { + return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) +} + +type rawTracepoint struct { + RawLink +} + +var _ Link = (*rawTracepoint)(nil) + +func (rt *rawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go new file mode 100644 index 000000000..84f0b656f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -0,0 +1,40 @@ +package link + +import ( + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/unix" +) + +// AttachSocketFilter attaches a SocketFilter BPF program to a socket. +func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + }) + if ssoErr != nil { + return ssoErr + } + return err +} + +// DetachSocketFilter detaches a SocketFilter BPF program from a socket. +func DetachSocketFilter(conn syscall.Conn) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + }) + if ssoErr != nil { + return ssoErr + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go new file mode 100644 index 000000000..d09b5acb0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -0,0 +1,200 @@ +package link + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Type is the kind of link. +type Type = sys.LinkType + +// Valid link types. 
+const (
+	UnspecifiedType   = sys.BPF_LINK_TYPE_UNSPEC
+	RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT
+	TracingType       = sys.BPF_LINK_TYPE_TRACING
+	CgroupType        = sys.BPF_LINK_TYPE_CGROUP
+	IterType          = sys.BPF_LINK_TYPE_ITER
+	NetNsType         = sys.BPF_LINK_TYPE_NETNS
+	XDPType           = sys.BPF_LINK_TYPE_XDP
+	PerfEventType     = sys.BPF_LINK_TYPE_PERF_EVENT
+	KprobeMultiType   = sys.BPF_LINK_TYPE_KPROBE_MULTI
+	TCXType           = sys.BPF_LINK_TYPE_TCX
+	UprobeMultiType   = sys.BPF_LINK_TYPE_UPROBE_MULTI
+	NetfilterType     = sys.BPF_LINK_TYPE_NETFILTER
+	NetkitType        = sys.BPF_LINK_TYPE_NETKIT
+)
+
+var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Type:    ebpf.CGroupSKB,
+		License: "MIT",
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+	})
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+
+	// BPF_PROG_ATTACH was introduced at the same time as CGroupSKB,
+	// so being able to load the program is enough to infer that we
+	// have the syscall.
+	prog.Close()
+	return nil
+})
+
+var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error {
+	if err := haveProgAttach(); err != nil {
+		return err
+	}
+
+	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+		Type:       ebpf.CGroupSKB,
+		AttachType: ebpf.AttachCGroupInetIngress,
+		License:    "MIT",
+		Instructions: asm.Instructions{
+			asm.Mov.Imm(asm.R0, 0),
+			asm.Return(),
+		},
+	})
+
+	if err != nil {
+		return internal.ErrNotSupported
+	}
+
+	defer prog.Close()
+
+	// We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
+	// If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
+	// present.
+	attr := sys.ProgAttachAttr{
+		// We rely on this being checked after attachFlags.
+		TargetFdOrIfindex: ^uint32(0),
+		AttachBpfFd:       uint32(prog.FD()),
+		AttachType:        uint32(ebpf.AttachCGroupInetIngress),
+		AttachFlags:       uint32(flagReplace),
+	}
+
+	err = sys.ProgAttach(&attr)
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
+
+var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error {
+	attr := sys.LinkCreateAttr{
+		// This is a hopefully invalid file descriptor, which triggers EBADF.
+		TargetFd:   ^uint32(0),
+		ProgFd:     ^uint32(0),
+		AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+	}
+	_, err := sys.LinkCreate(&attr)
+	if errors.Is(err, unix.EINVAL) {
+		return internal.ErrNotSupported
+	}
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+	return err
+})
+
+var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error {
+	attr := sys.ProgQueryAttr{
+		// We rely on this being checked during the syscall.
+		// With an otherwise correct payload we expect EBADF here
+		// as an indication that the feature is present.
+ TargetFdOrIfindex: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + } + + err := sys.ProgQuery(&attr) + + if errors.Is(err, unix.EBADF) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) + +var haveTCX = internal.NewFeatureTest("tcx", "6.6", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateTcxAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachTCXIngress), + } + + _, err = sys.LinkCreateTcx(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) + +var haveNetkit = internal.NewFeatureTest("netkit", "6.7", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateNetkitAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachNetkitPrimary), + } + + _, err = sys.LinkCreateNetkit(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}) diff --git a/vendor/github.com/cilium/ebpf/link/tcx.go b/vendor/github.com/cilium/ebpf/link/tcx.go new file mode 100644 index 000000000..ac045b71d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tcx.go @@ -0,0 +1,89 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type TCXOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachTCX* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachTCX(opts TCXOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateTcxAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateTcx(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveTCX(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + return &tcxLink{RawLink{fd, ""}}, nil +} + +type tcxLink struct { + RawLink +} + +var _ Link = (*tcxLink)(nil) + +func (tcx *tcxLink) Info() (*Info, error) { + var info sys.TcxLinkInfo + if err := sys.ObjInfo(tcx.fd, &info); err != nil { + return nil, fmt.Errorf("tcx link info: %s", err) + } + extra := &TCXInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go new file mode 100644 index 000000000..6fc78b982 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -0,0 +1,70 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/tracefs" +) + +// TracepointOptions defines additional parameters that will be used +// when loading Tracepoints. +type TracepointOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 +} + +// Tracepoint attaches the given eBPF program to the tracepoint with the given +// group and name. See /sys/kernel/tracing/events to find available +// tracepoints. The top-level directory is the group, the event's subdirectory +// is the name. Example: +// +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) +// +// Losing the reference to the resulting Link (tp) will close the Tracepoint +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is +// only possible as of kernel 4.14 (commit cf5f5ce). +// +// The returned Link may implement [PerfEvent]. 
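+//
+// A sketch of passing a BPF cookie, which the program can read back via
+// bpf_get_attach_cookie() (needs kernel 5.15+; `prog` as above, cookie value
+// chosen arbitrarily):
+//
+//	tp, err := Tracepoint("syscalls", "sys_enter_fork", prog,
+//		&TracepointOptions{Cookie: 23})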
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { + if group == "" || name == "" { + return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.TracePoint { + return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) + } + + tid, err := tracefs.EventID(group, name) + if err != nil { + return nil, err + } + + fd, err := openTracepointPerfEvent(tid, perfAllThreads) + if err != nil { + return nil, err + } + + var cookie uint64 + if opts != nil { + cookie = opts.Cookie + } + + pe := newPerfEvent(fd, nil) + + lnk, err := attachPerfEvent(pe, prog, cookie) + if err != nil { + pe.Close() + return nil, err + } + + return lnk, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go new file mode 100644 index 000000000..9e570afc9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracing.go @@ -0,0 +1,218 @@ +package link + +import ( + "errors" + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +type tracing struct { + RawLink +} + +func (f *tracing) Update(new *ebpf.Program) error { + return fmt.Errorf("tracing update: %w", ErrNotSupported) +} + +func (f *tracing) Info() (*Info, error) { + var info sys.TracingLinkInfo + if err := sys.ObjInfo(f.fd, &info); err != nil { + return nil, fmt.Errorf("tracing link info: %s", err) + } + extra := &TracingInfo{ + TargetObjId: info.TargetObjId, + TargetBtfId: info.TargetBtfId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. +// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + btfHandle, err := targetProg.Handle() + if err != nil { + return nil, err + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return nil, err + } + + var function *btf.Func + if err := spec.TypeByName(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID, err = spec.TypeID(function) + if err != nil { + return nil, err + } + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. 
+		return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &tracing{*link}, nil
+}
+
+type TracingOptions struct {
+	// Program must be of type Tracing with attach type
+	// AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
+	// AttachTraceRawTp.
+	Program *ebpf.Program
+	// Program attach type. Can be one of:
+	// - AttachTraceFEntry
+	// - AttachTraceFExit
+	// - AttachModifyReturn
+	// - AttachTraceRawTp
+	// This field is optional.
+	AttachType ebpf.AttachType
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	Cookie uint64
+}
+
+type LSMOptions struct {
+	// Program must be of type LSM with attach type
+	// AttachLSMMac.
+	Program *ebpf.Program
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	Cookie uint64
+}
+
+// attachBTFID links BPF programs of the Tracing and LSM types to a btf_id.
+func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) {
+	if program.FD() < 0 {
+		return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
+	}
+
+	var (
+		fd  *sys.FD
+		err error
+	)
+	switch at {
+	case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp,
+		ebpf.AttachModifyReturn, ebpf.AttachLSMMac:
+		// Attach via BPF link
+		fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{
+			ProgFd:     uint32(program.FD()),
+			AttachType: sys.AttachType(at),
+			Cookie:     cookie,
+		})
+		if err == nil {
+			break
+		}
+		if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) {
+			return nil, fmt.Errorf("create tracing link: %w", err)
+		}
+		fallthrough
+	case ebpf.AttachNone:
+		// Attach via RawTracepointOpen
+		if cookie > 0 {
+			return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported)
+		}
+
+		fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+			ProgFd: uint32(program.FD()),
+		})
+		if errors.Is(err, sys.ENOTSUPP) {
+			// This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+			return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("create raw tracepoint: %w", err)
+		}
+	default:
+		return nil, fmt.Errorf("invalid attach type: %s", at.String())
+	}
+
+	raw := RawLink{fd: fd}
+	info, err := raw.Info()
+	if err != nil {
+		raw.Close()
+		return nil, err
+	}
+
+	if info.Type == RawTracepointType {
+		// Sadness upon sadness: a Tracing program with AttachRawTp returns
+		// a raw_tracepoint link. Other types return a tracing link.
+		return &rawTracepoint{raw}, nil
+	}
+	return &tracing{raw}, nil
+}
+
+// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
+// a BTF-powered raw tracepoint (tp_btf) BPF program to a BPF hook defined
+// in kernel modules.
+func AttachTracing(opts TracingOptions) (Link, error) {
+	if t := opts.Program.Type(); t != ebpf.Tracing {
+		return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
+	}
+
+	switch opts.AttachType {
+	case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn,
+		ebpf.AttachTraceRawTp, ebpf.AttachNone:
+	default:
+		return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String())
+	}
+
+	return attachBTFID(opts.Program, opts.AttachType, opts.Cookie)
+}
+
+// AttachLSM links a Linux security module (LSM) BPF program to a BPF
+// hook defined in kernel modules.
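+//
+// For example (a sketch; `prog` is assumed to be an ebpf.LSM program loaded
+// with attach type AttachLSMMac):
+//
+//	lnk, err := AttachLSM(LSMOptions{Program: prog})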
+func AttachLSM(opts LSMOptions) (Link, error) {
+	if t := opts.Program.Type(); t != ebpf.LSM {
+		return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
+	}
+
+	return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
new file mode 100644
index 000000000..194d1d319
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -0,0 +1,335 @@
+package link
+
+import (
+	"debug/elf"
+	"errors"
+	"fmt"
+	"os"
+	"sync"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/tracefs"
+)
+
+var (
+	uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
+	// elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
+	uprobeRefCtrOffsetShift = 32
+	haveRefCtrOffsetPMU     = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error {
+		_, err := os.Stat(uprobeRefCtrOffsetPMUPath)
+		if errors.Is(err, os.ErrNotExist) {
+			return internal.ErrNotSupported
+		}
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	// ErrNoSymbol indicates that the given symbol was not found
+	// in the ELF symbols table.
+	ErrNoSymbol = errors.New("not found")
+)
+
+// Executable defines an executable program on the filesystem.
+type Executable struct {
+	// Path of the executable on the filesystem.
+	path string
+	// Parsed ELF and dynamic symbols' cachedAddresses.
+	cachedAddresses map[string]uint64
+	// Keep track of symbol table lazy load.
+	cacheAddressesOnce sync.Once
+}
+
+// UprobeOptions defines additional parameters that will be used
+// when loading Uprobes.
+type UprobeOptions struct {
+	// Symbol address. Must be provided in case of external symbols (shared libs).
+	// If set, overrides the address eventually parsed from the executable.
+	Address uint64
+	// The offset relative to given symbol. Useful when tracing an arbitrary point
+	// inside the frame of given symbol.
+	//
+	// Note: this field changed from being an absolute offset to being relative
+	// to Address.
+	Offset uint64
+	// Only set the uprobe on the given process ID. Useful when tracing
+	// shared library calls or programs that have many running instances.
+	PID int
+	// Automatically manage SDT reference counts (semaphores).
+	//
+	// If this field is set, the kernel will increment/decrement the
+	// semaphore located in the process memory at the provided address on
+	// probe attach/detach.
+	//
+	// See also:
+	// sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling)
+	// github.com/torvalds/linux/commit/1cc33161a83d
+	// github.com/torvalds/linux/commit/a6ca88b241d5
+	RefCtrOffset uint64
+	// Arbitrary value that can be fetched from an eBPF program
+	// via `bpf_get_attach_cookie()`.
+	//
+	// Needs kernel 5.15+.
+	Cookie uint64
+	// Prefix used for the event name if the uprobe must be attached using tracefs.
+	// The group name will be formatted as `<prefix>_<randomstring>`.
+	// The default empty string is equivalent to "ebpf" as the prefix.
+	TraceFSPrefix string
+}
+
+func (uo *UprobeOptions) cookie() uint64 {
+	if uo == nil {
+		return 0
+	}
+	return uo.Cookie
+}
+
+// To open a new Executable, use:
+//
+//	OpenExecutable("/bin/bash")
+//
+// The returned value can then be used to open Uprobe(s).
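+//
+// Shared objects can be opened the same way, since both ET_EXEC and ET_DYN
+// ELF files are accepted (the library path below is an assumption):
+//
+//	lib, err := OpenExecutable("/usr/lib/libc.so.6")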
+func OpenExecutable(path string) (*Executable, error) {
+	if path == "" {
+		return nil, fmt.Errorf("path cannot be empty")
+	}
+
+	f, err := internal.OpenSafeELFFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("parse ELF file: %w", err)
+	}
+	defer f.Close()
+
+	if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN {
+		// ELF is not an executable or a shared object.
+		return nil, errors.New("the given file is not an executable or a shared object")
+	}
+
+	return &Executable{
+		path:            path,
+		cachedAddresses: make(map[string]uint64),
+	}, nil
+}
+
+func (ex *Executable) load(f *internal.SafeELFFile) error {
+	syms, err := f.Symbols()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+
+	dynsyms, err := f.DynamicSymbols()
+	if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+		return err
+	}
+
+	syms = append(syms, dynsyms...)
+
+	for _, s := range syms {
+		if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
+			// Symbol not associated with a function or other executable code.
+			continue
+		}
+
+		address := s.Value
+
+		// Loop over ELF segments.
+		for _, prog := range f.Progs {
+			// Skip uninteresting segments.
+			if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 {
+				continue
+			}
+
+			if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) {
+				// If the symbol value is contained in the segment, calculate
+				// the symbol offset.
+				//
+				// fn symbol offset = fn symbol VA - .text VA + .text offset
+				//
+				// stackoverflow.com/a/40249502
+				address = s.Value - prog.Vaddr + prog.Off
+				break
+			}
+		}
+
+		ex.cachedAddresses[s.Name] = address
+	}
+
+	return nil
+}
+
+// address calculates the address of a symbol in the executable.
+//
+// opts must not be nil.
+func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) {
+	if address > 0 {
+		return address + offset, nil
+	}
+
+	var err error
+	ex.cacheAddressesOnce.Do(func() {
+		var f *internal.SafeELFFile
+		f, err = internal.OpenSafeELFFile(ex.path)
+		if err != nil {
+			err = fmt.Errorf("parse ELF file: %w", err)
+			return
+		}
+		defer f.Close()
+
+		err = ex.load(f)
+	})
+	if err != nil {
+		return 0, fmt.Errorf("lazy load symbols: %w", err)
+	}
+
+	address, ok := ex.cachedAddresses[symbol]
+	if !ok {
+		return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
+	}
+
+	// Symbols with location 0 from section undef are shared library calls and
+	// are relocated before the binary is executed. Dynamic linking is not
+	// implemented by the library, so mark this as unsupported for now.
+	//
+	// Since only offset values are stored and not elf.Symbol, if the value is 0,
+	// assume it's an external symbol.
+	if address == 0 {
+		return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+
+			"(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported)
+	}
+
+	return address + offset, nil
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+//	up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+//
+// The returned Link may implement [PerfEvent].
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, opts, false)
+	if err != nil {
+		return nil, err
+	}
+
+	lnk, err := attachPerfEvent(u, prog, opts.cookie())
+	if err != nil {
+		u.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+//	ex, _ = OpenExecutable("/bin/bash")
+//	ex.Uretprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+//	up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+//
+// The returned Link may implement [PerfEvent].
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+	u, err := ex.uprobe(symbol, prog, opts, true)
+	if err != nil {
+		return nil, err
+	}
+
+	lnk, err := attachPerfEvent(u, prog, opts.cookie())
+	if err != nil {
+		u.Close()
+		return nil, err
+	}
+
+	return lnk, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
+	if prog == nil {
+		return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+	}
+	if prog.Type() != ebpf.Kprobe {
+		return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+	}
+	if opts == nil {
+		opts = &UprobeOptions{}
+	}
+
+	offset, err := ex.address(symbol, opts.Address, opts.Offset)
+	if err != nil {
+		return nil, err
+	}
+
+	pid := opts.PID
+	if pid == 0 {
+		pid = perfAllThreads
+	}
+
+	if opts.RefCtrOffset != 0 {
+		if err := haveRefCtrOffsetPMU(); err != nil {
+			return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err)
+		}
+	}
+
+	args := tracefs.ProbeArgs{
+		Type:         tracefs.Uprobe,
+		Symbol:       symbol,
+		Path:         ex.path,
+		Offset:       offset,
+		Pid:          pid,
+		RefCtrOffset: opts.RefCtrOffset,
+		Ret:          ret,
+		Cookie:       opts.Cookie,
+		Group:        opts.TraceFSPrefix,
+	}
+
+	// Use uprobe PMU if the kernel has it available.
+	tp, err := pmuProbe(args)
+	if err == nil {
+		return tp, nil
+	}
+	if err != nil && !errors.Is(err, ErrNotSupported) {
+		return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+	}
+
+	// Use tracefs if uprobe PMU is missing.
+ tp, err = tracefsProbe(args) + if err != nil { + return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) + } + + return tp, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go new file mode 100644 index 000000000..aea807b32 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go @@ -0,0 +1,216 @@ +package link + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// UprobeMultiOptions defines additional parameters that will be used +// when opening a UprobeMulti Link. +type UprobeMultiOptions struct { + // Symbol addresses. If set, overrides the addresses eventually parsed from + // the executable. Mutually exclusive with UprobeMulti's symbols argument. + Addresses []uint64 + + // Offsets into functions provided by UprobeMulti's symbols argument. + // For example: to set uprobes to main+5 and _start+10, call UprobeMulti + // with: + // symbols: "main", "_start" + // opt.Offsets: 5, 10 + Offsets []uint64 + + // Optional list of associated ref counter offsets. + RefCtrOffsets []uint64 + + // Optional list of associated BPF cookies. + Cookies []uint64 + + // Only set the uprobe_multi link on the given process ID, zero PID means + // system-wide. + PID uint32 +} + +func (ex *Executable) UprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + return ex.uprobeMulti(symbols, prog, opts, 0) +} + +func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + + // The return probe is not limited for symbols entry, so there's no special + // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets + // and opts.Addresses arrays follow the same logic as for entry uprobes. 
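+	// For example (symbol names are illustrative):
+	//
+	//	lnk, err := ex.UretprobeMulti([]string{"main", "readline"}, prog, nil)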
+ return ex.uprobeMulti(symbols, prog, opts, unix.BPF_F_UPROBE_MULTI_RETURN) +} + +func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + if opts == nil { + opts = &UprobeMultiOptions{} + } + + addresses, err := ex.addresses(symbols, opts.Addresses, opts.Offsets) + if err != nil { + return nil, err + } + + addrs := len(addresses) + cookies := len(opts.Cookies) + refCtrOffsets := len(opts.RefCtrOffsets) + + if addrs == 0 { + return nil, fmt.Errorf("Addresses are required: %w", errInvalidInput) + } + if refCtrOffsets > 0 && refCtrOffsets != addrs { + return nil, fmt.Errorf("RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput) + } + if cookies > 0 && cookies != addrs { + return nil, fmt.Errorf("Cookies must be exactly Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateUprobeMultiAttr{ + Path: sys.NewStringPointer(ex.path), + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + UprobeMultiFlags: flags, + Count: uint32(addrs), + Offsets: sys.NewPointer(unsafe.Pointer(&addresses[0])), + Pid: opts.PID, + } + + if refCtrOffsets != 0 { + attr.RefCtrOffsets = sys.NewPointer(unsafe.Pointer(&opts.RefCtrOffsets[0])) + } + if cookies != 0 { + attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + } + + fd, err := sys.LinkCreateUprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &uprobeMultiLink{RawLink{fd, ""}}, nil +} + +func (ex *Executable) addresses(symbols []string, addresses, offsets []uint64) ([]uint64, error) { + n := len(symbols) + if n == 0 { + n = len(addresses) + } + + if n == 0 { + return nil, fmt.Errorf("%w: neither symbols nor addresses given", errInvalidInput) + } + + if symbols != nil && len(symbols) != n { + return nil, fmt.Errorf("%w: have %d symbols but want %d", errInvalidInput, len(symbols), n) + } + + if addresses != nil && len(addresses) != n { + return nil, fmt.Errorf("%w: have %d addresses but want %d", errInvalidInput, len(addresses), n) + } + + if offsets != nil && len(offsets) != n { + return nil, fmt.Errorf("%w: have %d offsets but want %d", errInvalidInput, len(offsets), n) + } + + results := make([]uint64, 0, n) + for i := 0; i < n; i++ { + var sym string + if symbols != nil { + sym = symbols[i] + } + + var addr, off uint64 + if addresses != nil { + addr = addresses[i] + } + + if offsets != nil { + off = offsets[i] + } + + result, err := ex.address(sym, addr, off) + if err != nil { + return nil, err + } + + results = append(results, result) + } + + return results, nil +} + +type uprobeMultiLink struct { + RawLink +} + +var _ Link = (*uprobeMultiLink)(nil) + +func (kml *uprobeMultiLink) Update(prog *ebpf.Program) error { + return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported) +} + +var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", "6.6", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_upm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: 
ebpf.AttachTraceUprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + // We try to create uprobe multi link on '/' path which results in + // error with -EBADF in case uprobe multi link is supported. + fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + Path: sys.NewStringPointer("/"), + Offsets: sys.NewPointer(unsafe.Pointer(&[]uint64{0})), + Count: 1, + }) + switch { + case errors.Is(err, unix.EBADF): + return nil + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + case err != nil: + return err + } + + // should not happen + fd.Close() + return errors.New("successfully attached uprobe_multi to /, kernel bug?") +}) diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go new file mode 100644 index 000000000..2ec441229 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/xdp.go @@ -0,0 +1,80 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// XDPAttachFlags represents how XDP program will be attached to interface. +type XDPAttachFlags uint32 + +const ( + // XDPGenericMode (SKB) links XDP BPF program for drivers which do + // not yet support native XDP. + XDPGenericMode XDPAttachFlags = 1 << (iota + 1) + // XDPDriverMode links XDP BPF program into the driver’s receive path. + XDPDriverMode + // XDPOffloadMode offloads the entire XDP BPF program into hardware. + XDPOffloadMode +) + +type XDPOptions struct { + // Program must be an XDP BPF program. + Program *ebpf.Program + + // Interface is the interface index to attach program to. + Interface int + + // Flags is one of XDPAttachFlags (optional). + // + // Only one XDP mode should be set, without flag defaults + // to driver/generic mode (best effort). + Flags XDPAttachFlags +} + +// AttachXDP links an XDP BPF program to an XDP hook. 
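+//
+// A minimal usage sketch (interface name and prog are illustrative):
+//
+//	iface, _ := net.InterfaceByName("eth0")
+//	l, err := AttachXDP(XDPOptions{
+//		Program:   prog,
+//		Interface: iface.Index,
+//	})
+//	if err == nil {
+//		defer l.Close()
+//	}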
+func AttachXDP(opts XDPOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.XDP { + return nil, fmt.Errorf("invalid program type %s, expected XDP", t) + } + + if opts.Interface < 1 { + return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) + } + + rawLink, err := AttachRawLink(RawLinkOptions{ + Program: opts.Program, + Attach: ebpf.AttachXDP, + Target: opts.Interface, + Flags: uint32(opts.Flags), + }) + + if err != nil { + return nil, fmt.Errorf("failed to attach link: %w", err) + } + + return &xdpLink{*rawLink}, nil +} + +type xdpLink struct { + RawLink +} + +func (xdp *xdpLink) Info() (*Info, error) { + var info sys.XDPLinkInfo + if err := sys.ObjInfo(xdp.fd, &info); err != nil { + return nil, fmt.Errorf("xdp link info: %s", err) + } + extra := &XDPInfo{ + Ifindex: info.Ifindex, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go new file mode 100644 index 000000000..788f21b7b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -0,0 +1,459 @@ +package ebpf + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "io/fs" + "math" + "slices" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +// handles stores handle objects to avoid gc cleanup +type handles []*btf.Handle + +func (hs *handles) add(h *btf.Handle) (int, error) { + if h == nil { + return 0, nil + } + + if len(*hs) == math.MaxInt16 { + return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16) + } + + *hs = append(*hs, h) + + // return length of slice so that indexes start at 1 + return len(*hs), nil +} + +func (hs handles) fdArray() []int32 { + // first element of fda is reserved as no module can be indexed with 0 + fda := []int32{0} + for _, h := range hs { + fda = append(fda, int32(h.FD())) + } + + return fda +} + +func (hs *handles) Close() error { + var errs []error + for _, h := range *hs { + errs = append(errs, h.Close()) + } + return errors.Join(errs...) +} + +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. +// +// The resulting map is indexed by Symbol name. +func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } + + currentSym := insns[0].Symbol() + if currentSym == "" { + return nil, errors.New("insns must start with a Symbol") + } + + start := 0 + progs := make(map[string]asm.Instructions) + for i, ins := range insns[1:] { + i := i + 1 + + sym := ins.Symbol() + if sym == "" { + continue + } + + // New symbol, flush the old one out. + progs[currentSym] = slices.Clone(insns[start:i]) + + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) + } + currentSym = sym + start = i + } + + if tail := insns[start:]; len(tail) > 0 { + progs[currentSym] = slices.Clone(tail) + } + + return progs, nil +} + +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. 
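+//
+// For illustration: a program A that calls a function B from another section
+// is emitted as a single stream
+//
+//	insns of A ... call B ... insns of B
+//
+// with the call later fixed up into a relative jump to the appended copy of B.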
+// +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. +// +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. +func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true + } + } + return false +} + +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// Passing a nil target will relocate against the running kernel. insns are +// modified in place. +func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName string, bo binary.ByteOrder, b *btf.Builder) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } + + if len(relos) == 0 { + return nil + } + + if bo == nil { + bo = internal.NativeEndian + } + + if len(targets) == 0 { + kernelTarget, err := btf.LoadKernelSpec() + if err != nil { + return fmt.Errorf("load kernel spec: %w", err) + } + targets = append(targets, kernelTarget) + + if kmodName != "" { + kmodTarget, err := btf.LoadKernelModuleSpec(kmodName) + // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES disabled. + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("load kernel module spec: %w", err) + } + if err == nil { + targets = append(targets, kmodTarget) + } + } + } + + fixups, err := btf.CORERelocate(relos, targets, bo, b.Add) + if err != nil { + return err + } + + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("fixup for %s: %w", relos[i], err) + } + } + + return nil +} + +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. + refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() + } + + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) + } + + // Finally, assign the flattened instructions. + for i, name := range names { + progs[name].Instructions = flattened[i] + } +} + +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. +// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] + + insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. 
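+	// For example, if prog references B and B references C, pending starts as
+	// [B]; linking B appends its instructions and queues C, so every
+	// transitive dependency is appended exactly once.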
+	pending := make([]string, len(refs[prog]))
+	copy(pending, refs[prog])
+
+	// All references for which we've appended instructions.
+	linked := make(map[string]bool)
+
+	// Iterate all pending references. We can't use a range since pending is
+	// modified in the body below.
+	for len(pending) > 0 {
+		var ref string
+		ref, pending = pending[0], pending[1:]
+
+		if linked[ref] {
+			// We've already linked this ref, don't append instructions again.
+			continue
+		}
+
+		progRef := progs[ref]
+		if progRef == nil {
+			// We don't have instructions that go with this reference. This
+			// happens when calling extern functions.
+			continue
+		}
+
+		insns = append(insns, progRef.Instructions...)
+		linked[ref] = true
+
+		// Make sure we link indirect references.
+		pending = append(pending, refs[progRef]...)
+	}
+
+	return insns
+}
+
+// fixupAndValidate is called by the ELF reader right before marshaling the
+// instruction stream. It performs last-minute adjustments to the program and
+// runs some sanity checks before sending it off to the kernel.
+func fixupAndValidate(insns asm.Instructions) error {
+	iter := insns.Iterate()
+	for iter.Next() {
+		ins := iter.Ins
+
+		// Map load was tagged with a Reference, but does not contain a Map pointer.
+		needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil
+		if ins.IsLoadFromMap() && needsMap && ins.Map() == nil {
+			return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference)
+		}
+
+		fixupProbeReadKernel(ins)
+	}
+
+	return nil
+}
+
+// POISON_CALL_KFUNC_BASE in libbpf.
+// https://github.com/libbpf/libbpf/blob/2778cbce609aa1e2747a69349f7f46a2f94f0522/src/libbpf.c#L5767
+const kfuncCallPoisonBase = 2002000000
+
+// fixupKfuncs loops over all instructions in search of kfunc calls.
+// If at least one is found, the current kernel's BTF and module BTFs are searched to set Instruction.Constant
+// and Instruction.Offset to the correct values.
+func fixupKfuncs(insns asm.Instructions) (_ handles, err error) {
+	closeOnError := func(c io.Closer) {
+		if err != nil {
+			c.Close()
+		}
+	}
+
+	iter := insns.Iterate()
+	for iter.Next() {
+		ins := iter.Ins
+		if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil {
+			goto fixups
+		}
+	}
+
+	return nil, nil
+
+fixups:
+	// only load the kernel spec if we found at least one kfunc call
+	kernelSpec, err := btf.LoadKernelSpec()
+	if err != nil {
+		return nil, err
+	}
+
+	fdArray := make(handles, 0)
+	defer closeOnError(&fdArray)
+
+	for {
+		ins := iter.Ins
+
+		metadata := ins.Metadata.Get(kfuncMetaKey{})
+		if metadata == nil {
+			if !iter.Next() {
+				// break loop if this was the last instruction in the stream.
+				break
+			}
+			continue
+		}
+
+		// Check the metadata; return an error if it doesn't contain a kfuncMeta.
+		kfm, _ := metadata.(*kfuncMeta)
+		if kfm == nil {
+			return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta")
+		}
+
+		target := btf.Type((*btf.Func)(nil))
+		spec, module, err := findTargetInKernel(kernelSpec, kfm.Func.Name, &target)
+		if kfm.Binding == elf.STB_WEAK && errors.Is(err, btf.ErrNotFound) {
+			if ins.IsKfuncCall() {
+				// If the kfunc call is weak and not found, poison the call. Use a recognizable constant
+				// to make it easier to debug. And set src to zero so the verifier doesn't complain
+				// about the invalid imm/offset values before dead-code elimination.
+				ins.Constant = kfuncCallPoisonBase
+				ins.Src = 0
+			} else if ins.OpCode.IsDWordLoad() {
+				// If the kfunc DWordLoad is weak and not found, set its address to 0.
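+				// A zero address lets the BPF program null-check the weak
+				// ksym at runtime instead of failing the load (mirroring
+				// libbpf's handling of weak ksyms).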
+ ins.Constant = 0 + ins.Src = 0 + } else { + return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + } + + iter.Next() + continue + } + // Error on non-weak kfunc not found. + if errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported) + } + if err != nil { + return nil, err + } + + idx, err := fdArray.add(module) + if err != nil { + return nil, err + } + + if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil { + return nil, &incompatibleKfuncError{kfm.Func.Name, err} + } + + id, err := spec.TypeID(target) + if err != nil { + return nil, err + } + + ins.Constant = int64(id) + ins.Offset = int16(idx) + + if !iter.Next() { + break + } + } + + return fdArray, nil +} + +type incompatibleKfuncError struct { + name string + err error +} + +func (ike *incompatibleKfuncError) Error() string { + return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err) +} + +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. +func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } + + // Kernel supports bpf_probe_read_kernel, nothing to do. + if haveProbeReadKernel() == nil { + return + } + + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) + } +} + +// resolveKconfigReferences creates and populates a .kconfig map if necessary. +// +// Returns a nil Map and no error if no references exist. +func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + var spec *MapSpec + iter := insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta != nil { + spec = meta.Map + break + } + } + + if spec == nil { + return nil, nil + } + + cpy := spec.Copy() + if err := resolveKconfig(cpy); err != nil { + return nil, err + } + + kconfig, err := NewMap(cpy) + if err != nil { + return nil, err + } + defer closeOnError(kconfig) + + // Resolve all instructions which load from .kconfig map with actual map + // and offset inside it. + iter = insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta == nil { + continue + } + + if meta.Map != spec { + return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index) + } + + if err := iter.Ins.AssociateMap(kconfig); err != nil { + return nil, fmt.Errorf("instruction %d: %w", iter.Index, err) + } + + // Encode a map read at the offset of the var in the datasec. 
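+		// The upper 32 bits of the dword-load immediate hold the offset into
+		// the map value; the lower 32 bits are filled with the map reference
+		// when the instruction is marshaled (BPF_PSEUDO_MAP_VALUE encoding).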
+ iter.Ins.Constant = int64(uint64(meta.Offset) << 32) + iter.Ins.Metadata.Set(kconfigMetaKey{}, nil) + } + + return kconfig, nil +} diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go new file mode 100644 index 000000000..0b62101c3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/map.go @@ -0,0 +1,1669 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "reflect" + "slices" + "strings" + "sync" + "time" + "unsafe" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// Errors returned by Map and MapIterator methods. +var ( + ErrKeyNotExist = errors.New("key does not exist") + ErrKeyExist = errors.New("key already exists") + ErrIterationAborted = errors.New("iteration aborted") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") + + // pre-allocating these errors here since they may get called in hot code paths + // and cause unnecessary memory allocations + errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist) +) + +// MapOptions control loading a map into the kernel. +type MapOptions struct { + // The base path to pin maps in if requested via PinByName. + // Existing maps will be re-used if they are compatible, otherwise an + // error is returned. + PinPath string + LoadPinOptions LoadPinOptions +} + +// MapID represents the unique ID of an eBPF map +type MapID uint32 + +// MapSpec defines a Map. +type MapSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + + // Flags is passed to the kernel and specifies additional map + // creation attributes. + Flags uint32 + + // Automatically pin and load a map from MapOptions.PinPath. + // Generates an error if an existing pinned map is incompatible with the MapSpec. + Pinning PinType + + // Specify numa node during map creation + // (effective only if unix.BPF_F_NUMA_NODE flag is set, + // which can be imported from golang.org/x/sys/unix) + NumaNode uint32 + + // The initial contents of the map. May be nil. + Contents []MapKV + + // Whether to freeze a map after setting its initial contents. + Freeze bool + + // InnerMap is used as a template for ArrayOfMaps and HashOfMaps + InnerMap *MapSpec + + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader + + // The key and value type of this map. May be nil. + Key, Value btf.Type +} + +func (ms *MapSpec) String() string { + return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) +} + +// Copy returns a copy of the spec. +// +// MapSpec.Contents is a shallow copy. 
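+// "Shallow" here means that the slice itself is duplicated, but the key and
+// value interface values inside it still reference the same underlying data.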
+func (ms *MapSpec) Copy() *MapSpec { + if ms == nil { + return nil + } + + cpy := *ms + cpy.Contents = slices.Clone(cpy.Contents) + cpy.Key = btf.Copy(cpy.Key) + cpy.Value = btf.Copy(cpy.Value) + + if cpy.InnerMap == ms { + cpy.InnerMap = &cpy + } else { + cpy.InnerMap = ms.InnerMap.Copy() + } + + if cpy.Extra != nil { + extra := *cpy.Extra + cpy.Extra = &extra + } + + return &cpy +} + +// fixupMagicFields fills fields of MapSpec which are usually +// left empty in ELF or which depend on runtime information. +// +// The method doesn't modify Spec, instead returning a copy. +// The copy is only performed if fixups are necessary, so callers mustn't mutate +// the returned spec. +func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { + switch spec.Type { + case ArrayOfMaps, HashOfMaps: + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for map of map") + } + + spec = spec.Copy() + spec.ValueSize = 4 + + case PerfEventArray: + if spec.KeySize != 0 && spec.KeySize != 4 { + return nil, errors.New("KeySize must be zero or four for perf event array") + } + + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for perf event array") + } + + spec = spec.Copy() + spec.KeySize = 4 + spec.ValueSize = 4 + + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup perf event array: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // MaxEntries should be zero most of the time, but there is code + // out there which hardcodes large constants. Clamp the number + // of entries to the number of CPUs at most. Allow creating maps with + // less than n items since some kernel selftests relied on this + // behaviour in the past. + spec.MaxEntries = n + } + } + + return spec, nil +} + +// dataSection returns the contents and BTF Datasec descriptor of the spec. +func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + if ms.Value == nil { + return nil, nil, errMapNoBTFValue + } + + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + } + + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + +// MapKV is used to initialize the contents of a Map. +type MapKV struct { + Key interface{} + Value interface{} +} + +// Compatible returns nil if an existing map may be used instead of creating +// one from the spec. +// +// Returns an error wrapping [ErrMapIncompatible] otherwise. +func (ms *MapSpec) Compatible(m *Map) error { + ms, err := ms.fixupMagicFields() + if err != nil { + return err + } + + diffs := []string{} + if m.typ != ms.Type { + diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type)) + } + if m.keySize != ms.KeySize { + diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize)) + } + if m.valueSize != ms.ValueSize { + diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize)) + } + if m.maxEntries != ms.MaxEntries { + diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) + } + + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow this + // mismatch. 
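+	// (m.flags ^ ms.Flags == unix.BPF_F_RDONLY_PROG holds only when that
+	// single bit is the sole difference between the two flag values.)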
+ if !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) && + m.flags != ms.Flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, ms.Flags)) + } + + if len(diffs) == 0 { + return nil + } + + return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible) +} + +// Map represents a Map file descriptor. +// +// It is not safe to close a map which is used by other goroutines. +// +// Methods which take interface{} arguments by default encode +// them using binary.Read/Write in the machine's native endianness. +// +// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler +// if you require custom encoding. +type Map struct { + name string + fd *sys.FD + typ MapType + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + pinnedPath string + // Per CPU maps return values larger than the size in the spec + fullValueSize int +} + +// NewMapFromFD creates a map from a raw fd. +// +// You should not use fd after calling this function. +func NewMapFromFD(fd int) (*Map, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newMapFromFD(f) +} + +func newMapFromFD(fd *sys.FD) (*Map, error) { + info, err := newMapInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("get map info: %w", err) + } + + return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) +} + +// NewMap creates a new Map. +// +// It's equivalent to calling NewMapWithOptions with default options. +func NewMap(spec *MapSpec) (*Map, error) { + return NewMapWithOptions(spec, MapOptions{}) +} + +// NewMapWithOptions creates a new Map. +// +// Creating a map for the first time will perform feature detection +// by creating small, temporary maps. +// +// The caller is responsible for ensuring the process' rlimit is set +// sufficiently high for locking memory during map creation. This can be done +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. +// +// May return an error wrapping ErrMapIncompatible. 
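+//
+// A minimal sketch (field values are illustrative):
+//
+//	m, err := NewMapWithOptions(&MapSpec{
+//		Name:       "example",
+//		Type:       Hash,
+//		KeySize:    4,
+//		ValueSize:  8,
+//		MaxEntries: 64,
+//	}, MapOptions{})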
+func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { + m, err := newMapWithOptions(spec, opts) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + if err := m.finalize(spec); err != nil { + m.Close() + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil +} + +func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + switch spec.Pinning { + case PinByName: + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") + } + + path := filepath.Join(opts.PinPath, spec.Name) + m, err := LoadPinnedMap(path, &opts.LoadPinOptions) + if errors.Is(err, unix.ENOENT) { + break + } + if err != nil { + return nil, fmt.Errorf("load pinned map: %w", err) + } + defer closeOnError(m) + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) + } + + return m, nil + + case PinNone: + // Nothing to do here + + default: + return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) + } + + var innerFd *sys.FD + if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { + if spec.InnerMap == nil { + return nil, fmt.Errorf("%s requires InnerMap", spec.Type) + } + + if spec.InnerMap.Pinning != PinNone { + return nil, errors.New("inner maps cannot be pinned") + } + + template, err := spec.InnerMap.createMap(nil, opts) + if err != nil { + return nil, fmt.Errorf("inner map: %w", err) + } + defer template.Close() + + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. + + innerFd = template.fd + } + + m, err := spec.createMap(innerFd, opts) + if err != nil { + return nil, err + } + defer closeOnError(m) + + if spec.Pinning == PinByName { + path := filepath.Join(opts.PinPath, spec.Name) + if err := m.Pin(path); err != nil { + return nil, fmt.Errorf("pin map to %s: %w", path, err) + } + } + + return m, nil +} + +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) { + closeOnError := func(closer io.Closer) { + if err != nil { + closer.Close() + } + } + + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. 
+ if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } + } + + spec, err = spec.fixupMagicFields() + if err != nil { + return nil, err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(spec.Type), + KeySize: spec.KeySize, + ValueSize: spec.ValueSize, + MaxEntries: spec.MaxEntries, + MapFlags: sys.MapFlags(spec.Flags), + NumaNode: spec.NumaNode, + } + + if inner != nil { + attr.InnerMapFd = inner.Uint() + } + + if haveObjName() == nil { + attr.MapName = sys.NewObjName(spec.Name) + } + + if spec.Key != nil || spec.Value != nil { + handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value) + if err != nil && !errors.Is(err, btf.ErrNotSupported) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + if handle != nil { + defer handle.Close() + + // Use BTF k/v during map creation. + attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = keyTypeID + attr.BtfValueTypeId = valueTypeID + } + } + + fd, err := sys.MapCreate(&attr) + + // Some map types don't support BTF k/v in earlier kernel versions. + // Remove BTF metadata and retry map creation. + if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 { + attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0 + fd, err = sys.MapCreate(&attr) + } + if err != nil { + return nil, handleMapCreateError(attr, spec, err) + } + + defer closeOnError(fd) + m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) + if err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + return m, nil +} + +func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error { + if errors.Is(err, unix.EPERM) { + return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + if errors.Is(err, unix.EINVAL) && spec.MaxEntries == 0 { + return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } + if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap { + return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) + } + if errors.Is(err, unix.EINVAL) && spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + return fmt.Errorf("map create: %w (noPrealloc flag may be incompatible with map type %s)", err, spec.Type) + } + + switch spec.Type { + case ArrayOfMaps, HashOfMaps: + if haveFeatErr := haveNestedMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&unix.BPF_F_MMAPABLE > 0 { + if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&unix.BPF_F_INNER_MAP > 0 { + if haveFeatErr := haveInnerMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { + if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + // BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size. 
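+	// For example, with 4 KiB pages, 4096, 8192 and 16384 are valid
+	// max_entries values, while 12288 (a multiple of the page size, but not a
+	// power-of-2 multiple) is rejected by the kernel.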
+ if errors.Is(err, unix.EINVAL) && + (attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) { + pageSize := uint32(os.Getpagesize()) + maxEntries := attr.MaxEntries + if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) { + return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize) + } + } + + return fmt.Errorf("map create: %w", err) +} + +// newMap allocates and returns a new Map structure. +// Sets the fullValueSize on per-CPU maps. +func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { + m := &Map{ + name, + fd, + typ, + keySize, + valueSize, + maxEntries, + flags, + "", + int(valueSize), + } + + if !typ.hasPerCPUValue() { + return m, nil + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs + return m, nil +} + +func (m *Map) String() string { + if m.name != "" { + return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd) + } + return fmt.Sprintf("%s#%v", m.typ, m.fd) +} + +// Type returns the underlying type of the map. +func (m *Map) Type() MapType { + return m.typ +} + +// KeySize returns the size of the map key in bytes. +func (m *Map) KeySize() uint32 { + return m.keySize +} + +// ValueSize returns the size of the map value in bytes. +func (m *Map) ValueSize() uint32 { + return m.valueSize +} + +// MaxEntries returns the maximum number of elements the map can hold. +func (m *Map) MaxEntries() uint32 { + return m.maxEntries +} + +// Flags returns the flags of the map. +func (m *Map) Flags() uint32 { + return m.flags +} + +// Info returns metadata about the map. +func (m *Map) Info() (*MapInfo, error) { + return newMapInfoFromFd(m.fd) +} + +// Handle returns a reference to the Map's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the Map. +func (m *Map) Handle() (*btf.Handle, error) { + info, err := m.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock look up the value of a spin-locked map. +const LookupLock MapLookupFlags = unix.BPF_F_LOCK + +// Lookup retrieves a value from a Map. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) Lookup(key, valueOut interface{}) error { + return m.LookupWithFlags(key, valueOut, 0) +} + +// LookupWithFlags retrieves a value from a Map with flags. +// +// Passing LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. 
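+//
+// Sketch (key and value types are illustrative):
+//
+//	var value uint64
+//	err := m.LookupWithFlags(uint32(42), &value, LookupLock)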
+func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupPerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil { + return err + } + + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.LookupAndDeleteWithFlags(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupAndDeletePerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil { + return err + } + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupBytes gets a value from Map. +// +// Returns a nil value if a key doesn't exist. +func (m *Map) LookupBytes(key interface{}) ([]byte, error) { + valueBytes := make([]byte, m.fullValueSize) + valuePtr := sys.NewSlicePointer(valueBytes) + + err := m.lookup(key, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return valueBytes, err +} + +func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + if errors.Is(err, unix.ENOENT) { + return errMapLookupKeyNotExist + } + return fmt.Errorf("lookup: %w", wrapMapError(err)) + } + return nil +} + +func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary. +func ensurePerCPUSlice(sliceOrPtr any) (any, error) { + sliceOrPtrType := reflect.TypeOf(sliceOrPtr) + if sliceOrPtrType.Kind() == reflect.Slice { + // The target is a slice, the caller is responsible for ensuring that + // size is correct. 
+ return sliceOrPtr, nil + } + + slicePtrType := sliceOrPtrType + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + reflect.ValueOf(sliceOrPtr).Elem().Set(slice) + if !sliceElemIsPointer { + return slice.Interface(), nil + } + sliceElemType = sliceElemType.Elem() + + for i := 0; i < possibleCPUs; i++ { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + } + + return slice.Interface(), nil +} + +func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return nil +} + +// MapUpdateFlags controls the behaviour of the Map.Update call. +// +// The exact semantics depend on the specific MapType. +type MapUpdateFlags uint64 + +const ( + // UpdateAny creates a new element or update an existing one. + UpdateAny MapUpdateFlags = iota + // UpdateNoExist creates a new element. + UpdateNoExist MapUpdateFlags = 1 << (iota - 1) + // UpdateExist updates an existing element. + UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock +) + +// Put replaces or creates a value in map. +// +// It is equivalent to calling Update with UpdateAny. +func (m *Map) Put(key, value interface{}) error { + return m.Update(key, value, UpdateAny) +} + +// Update changes the value of a key. +func (m *Map) Update(key, value any, flags MapUpdateFlags) error { + if m.typ.hasPerCPUValue() { + return m.updatePerCPU(key, value, flags) + } + + valuePtr, err := m.marshalValue(value) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error { + valuePtr, err := marshalPerCPUValue(value, int(m.valueSize)) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("marshal key: %w", err) + } + + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) + } + + return nil +} + +// Delete removes a value. +// +// Returns ErrKeyNotExist if the key does not exist. +func (m *Map) Delete(key interface{}) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) + } + return nil +} + +// NextKey finds the key following an initial key. +// +// See NextKeyBytes for details. 
+// +// Returns ErrKeyNotExist if there is no next key. +func (m *Map) NextKey(key, nextKeyOut interface{}) error { + nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) + + if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil { + return err + } + + if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil { + return fmt.Errorf("can't unmarshal next key: %w", err) + } + return nil +} + +// NextKeyBytes returns the key following an initial key as a byte slice. +// +// Passing nil will return the first key. +// +// Use Iterate if you want to traverse all entries in the map. +// +// Returns nil if there are no more keys. +func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { + nextKey := make([]byte, m.keySize) + nextKeyPtr := sys.NewSlicePointer(nextKey) + + err := m.nextKey(key, nextKeyPtr) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return nextKey, err +} + +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { + var ( + keyPtr sys.Pointer + err error + ) + + if key != nil { + keyPtr, err = m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + } + + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, + } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. + if key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte + guessKey, err = m.guessNonExistentKey() + if err != nil { + return err + } + + // Retry the syscall with a valid non-existing key. + attr.Key = sys.NewSlicePointer(guessKey) + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) + } + + return nil +} + +var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) { + return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED) +}) + +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Map a protected page and use that as the value pointer. This saves some + // work copying out the value, which we're not interested in. + page, err := mmapProtectedPage() + if err != nil { + return nil, err + } + valuePtr := sys.NewSlicePointer(page) + + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. 
+		case 3:
+			rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey)
+		}
+
+		err := m.lookup(randKey, valuePtr, 0)
+		if errors.Is(err, ErrKeyNotExist) {
+			return randKey, nil
+		}
+	}
+
+	return nil, errors.New("couldn't find non-existing key")
+}
+
+// BatchLookup looks up many elements in a map at once.
+//
+// "keysOut" and "valuesOut" must be of type slice; a pointer
+// to a slice or buffer will not work.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
+//
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details with regard to
+// different map type implementations.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts)
+	if err != nil {
+		return n, fmt.Errorf("map batch lookup: %w", err)
+	}
+	return n, nil
+}
+
+// BatchLookupAndDelete looks up many elements in a map at once.
+//
+// It then deletes all those elements.
+// "keysOut" and "valuesOut" must be of type slice; a pointer
+// to a slice or buffer will not work.
+// "cursor" is a pointer to an opaque handle. It must be non-nil. Pass
+// "cursor" to subsequent calls of this function to continue the batching
+// operation in the case of chunking.
+//
+// Warning: This API is not very safe to use as the kernel implementation for
+// batching relies on the user to be aware of subtle details with regard to
+// different map type implementations.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts)
+	if err != nil {
+		return n, fmt.Errorf("map batch lookup and delete: %w", err)
+	}
+	return n, nil
+}
+
+// MapBatchCursor represents a starting point for a batch operation.
+type MapBatchCursor struct {
+	m      *Map
+	opaque []byte
+}
+
+func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	if m.typ.hasPerCPUValue() {
+		return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts)
+	}
+
+	count, err := batchCount(keysOut, valuesOut)
+	if err != nil {
+		return 0, err
+	}
+
+	valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize))
+
+	n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts)
+	if errors.Is(err, unix.ENOSPC) {
+		// Hash tables return ENOSPC when the size of the batch is smaller than
+		// any bucket.
+		return n, fmt.Errorf("%w (batch size too small?)", err)
+	} else if err != nil {
+		return n, err
+	}
+
+	err = valueBuf.Unmarshal(valuesOut)
+	if err != nil {
+		return 0, err
+	}
+
+	return n, nil
+}
+
+func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+	count, err := sliceLen(keysOut)
+	if err != nil {
+		return 0, fmt.Errorf("keys: %w", err)
+	}
+
+	valueBuf := make([]byte, count*int(m.fullValueSize))
+	valuePtr := sys.NewSlicePointer(valueBuf)
+
+	n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts)
+	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+		// Return the syscall error, not the (nil) sliceLen error.
+		return 0, sysErr
+	}
+
+	err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf)
+	if err != nil {
+		return 0, err
+	}
+
+	return n, sysErr
+}
+
+func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) {
+	cursorLen := int(m.keySize)
+	if cursorLen < 4 {
+		// * generic_map_lookup_batch requires that batch_out is key_size bytes.
+		//   This is used by array and LPM maps.
+		//
+		// * __htab_map_lookup_and_delete_batch requires u32. This is used by the
+		//   various hash maps.
+		//
+		// Use a minimum of 4 bytes to avoid having to distinguish between the two.
+		cursorLen = 4
+	}
+
+	inBatch := cursor.opaque
+	if inBatch == nil {
+		// This is the first lookup, allocate a buffer to hold the cursor.
+		cursor.opaque = make([]byte, cursorLen)
+		cursor.m = m
+	} else if cursor.m != m {
+		// Prevent reuse of a cursor across maps. First, it's unlikely to work.
+		// Second, the maps may require different cursorLen and cursor.opaque
+		// may therefore be too short. This could lead to the kernel clobbering
+		// user space memory.
+		return 0, errors.New("a cursor may not be reused across maps")
+	}
+
+	if err := haveBatchAPI(); err != nil {
+		return 0, err
+	}
+
+	keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize))
+
+	attr := sys.MapLookupBatchAttr{
+		MapFd:    m.fd.Uint(),
+		Keys:     keyBuf.Pointer(),
+		Values:   valuePtr,
+		Count:    uint32(count),
+		InBatch:  sys.NewSlicePointer(inBatch),
+		OutBatch: sys.NewSlicePointer(cursor.opaque),
+	}
+
+	if opts != nil {
+		attr.ElemFlags = opts.ElemFlags
+		attr.Flags = opts.Flags
+	}
+
+	_, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+	sysErr = wrapMapError(sysErr)
+	if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+		return 0, sysErr
+	}
+
+	if err := keyBuf.Unmarshal(keysOut); err != nil {
+		return 0, err
+	}
+
+	return int(attr.Count), sysErr
+}
+
+// BatchUpdate updates the map with multiple keys and values
+// simultaneously.
+// "keys" and "values" must be of type slice; a pointer
+// to a slice or buffer will not work.
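+//
+// Sketch (slices are illustrative):
+//
+//	keys := []uint32{1, 2, 3}
+//	values := []uint64{10, 20, 30}
+//	n, err := m.BatchUpdate(keys, values, nil)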
+func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchUpdatePerCPU(keys, values, opts) + } + + count, err := batchCount(keys, values) + if err != nil { + return 0, err + } + + valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, valuePtr, opts) +} + +func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, err + } + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts) +} + +// BatchDelete batch deletes entries in the map by keys. +// "keys" must be of type slice, a pointer to a slice or buffer will not work. +func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, fmt.Errorf("cannot marshal keys: %v", err) + } + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func batchCount(keys, values any) (int, error) { + keysLen, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valuesLen, err := sliceLen(values) + if err != nil { + return 0, fmt.Errorf("values: %w", err) + } + + if keysLen != valuesLen { + return 0, fmt.Errorf("keys and values must have the same length") + } + + return keysLen, nil +} + +// Iterate traverses a map. +// +// It's safe to create multiple iterators at the same time. +// +// It's not possible to guarantee that all keys in a map will be +// returned if there are concurrent modifications to the map. +func (m *Map) Iterate() *MapIterator { + return newMapIterator(m) +} + +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. +func (m *Map) Close() error { + if m == nil { + // This makes it easier to clean up when iterating maps + // of maps / programs. + return nil + } + + return m.fd.Close() +} + +// FD gets the file descriptor of the Map. +// +// Calling this function is invalid after Close has been called. 
+func (m *Map) FD() int {
+	return m.fd.Int()
+}
+
+// Clone creates a duplicate of the Map.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+// Changes made to the map are, however, reflected by both instances.
+// If the original map was pinned, the cloned map will not be pinned by default.
+//
+// Cloning a nil Map returns nil.
+func (m *Map) Clone() (*Map, error) {
+	if m == nil {
+		return nil, nil
+	}
+
+	dup, err := m.fd.Dup()
+	if err != nil {
+		return nil, fmt.Errorf("can't clone map: %w", err)
+	}
+
+	return &Map{
+		m.name,
+		dup,
+		m.typ,
+		m.keySize,
+		m.valueSize,
+		m.maxEntries,
+		m.flags,
+		"",
+		m.fullValueSize,
+	}, nil
+}
+
+// Pin persists the map on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned map will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+// You can Clone a map to pin it to a different path.
+//
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
+func (m *Map) Pin(fileName string) error {
+	if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
+		return err
+	}
+	m.pinnedPath = fileName
+	return nil
+}
+
+// Unpin removes the persisted state for the map from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Map returns nil.
+func (m *Map) Unpin() error {
+	if err := internal.Unpin(m.pinnedPath); err != nil {
+		return err
+	}
+	m.pinnedPath = ""
+	return nil
+}
+
+// IsPinned returns true if the map has a non-empty pinned path.
+func (m *Map) IsPinned() bool {
+	return m.pinnedPath != ""
+}
+
+// Freeze prevents a map from being modified from user space.
+//
+// It makes no changes to kernel-side restrictions.
+func (m *Map) Freeze() error {
+	attr := sys.MapFreezeAttr{
+		MapFd: m.fd.Uint(),
+	}
+
+	if err := sys.MapFreeze(&attr); err != nil {
+		if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil {
+			return fmt.Errorf("can't freeze map: %w", haveFeatErr)
+		}
+		return fmt.Errorf("can't freeze map: %w", err)
+	}
+	return nil
+}
+
+// finalize populates the Map according to the Contents specified
+// in spec and freezes the Map if requested by spec.
+func (m *Map) finalize(spec *MapSpec) error {
+	for _, kv := range spec.Contents {
+		if err := m.Put(kv.Key, kv.Value); err != nil {
+			return fmt.Errorf("putting value: key %v: %w", kv.Key, err)
+		}
+	}
+
+	if spec.Freeze {
+		if err := m.Freeze(); err != nil {
+			return fmt.Errorf("freezing map: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) {
+	if data == nil {
+		if m.keySize == 0 {
+			// Queues have a key length of zero, so passing nil here is valid.
+ return sys.NewPointer(nil), nil + } + return sys.Pointer{}, errors.New("can't use nil as key of map") + } + + return marshalMapSyscallInput(data, int(m.keySize)) +} + +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { + var ( + buf []byte + err error + ) + + switch value := data.(type) { + case *Map: + if !m.typ.canStoreMap() { + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + } + buf, err = marshalMap(value, int(m.valueSize)) + + case *Program: + if !m.typ.canStoreProgram() { + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + } + buf, err = marshalProgram(value, int(m.valueSize)) + + default: + return marshalMapSyscallInput(data, int(m.valueSize)) + } + + if err != nil { + return sys.Pointer{}, err + } + + return sys.NewSlicePointer(buf), nil +} + +func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { + switch value := value.(type) { + case **Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + + other, err := unmarshalMap(buf) + if err != nil { + return err + } + + // The caller might close the map externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + return errors.New("require pointer to *Map") + + case **Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + + other, err := unmarshalProgram(buf) + if err != nil { + return err + } + + // The caller might close the program externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + return errors.New("require pointer to *Program") + } + + return buf.Unmarshal(value) +} + +// LoadPinnedMap loads a Map from a BPF file. +func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { + fd, err := sys.ObjGet(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + m, err := newMapFromFD(fd) + if err == nil { + m.pinnedPath = fileName + } + + return m, err +} + +// unmarshalMap creates a map from a map ID encoded in host endianness. +func unmarshalMap(buf sysenc.Buffer) (*Map, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + return NewMapFromID(MapID(id)) +} + +// marshalMap marshals the fd of a map into a buffer in host endianness. +func marshalMap(m *Map, length int) ([]byte, error) { + if length != 4 { + return nil, fmt.Errorf("can't marshal map to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) + return buf, nil +} + +// MapIterator iterates a Map. +// +// See Map.Iterate. +type MapIterator struct { + target *Map + // Temporary storage to avoid allocations in Next(). This is any instead + // of []byte to avoid allocations. + cursor any + count, maxEntries uint32 + done bool + err error +} + +func newMapIterator(target *Map) *MapIterator { + return &MapIterator{ + target: target, + maxEntries: target.maxEntries, + } +} + +// Next decodes the next key and value. +// +// Iterating a hash map from which keys are being deleted is not +// safe. You may see the same key multiple times. Iteration may +// also abort with an error, see IsIterationAborted. +// +// Returns false if there are no more entries. 
You must check
+// the result of Err afterwards.
+//
+// See Map.Get for further caveats around valueOut.
+func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
+	if mi.err != nil || mi.done {
+		return false
+	}
+
+	// For array-like maps NextKey returns nil only after maxEntries
+	// iterations.
+	for mi.count <= mi.maxEntries {
+		if mi.cursor == nil {
+			// Pass nil interface to NextKey to make sure the Map's first key
+			// is returned. If we pass an uninitialized []byte instead, it'll see a
+			// non-nil interface and try to marshal it.
+			mi.cursor = make([]byte, mi.target.keySize)
+			mi.err = mi.target.NextKey(nil, mi.cursor)
+		} else {
+			mi.err = mi.target.NextKey(mi.cursor, mi.cursor)
+		}
+
+		if errors.Is(mi.err, ErrKeyNotExist) {
+			mi.done = true
+			mi.err = nil
+			return false
+		} else if mi.err != nil {
+			mi.err = fmt.Errorf("get next key: %w", mi.err)
+			return false
+		}
+
+		mi.count++
+		mi.err = mi.target.Lookup(mi.cursor, valueOut)
+		if errors.Is(mi.err, ErrKeyNotExist) {
+			// Even though the key should be valid, we couldn't look up
+			// its value. If we're iterating a hash map this is probably
+			// because a concurrent delete removed the value before we
+			// could get it. This means that the next call to NextKeyBytes
+			// is very likely to restart iteration.
+			// If we're iterating one of the fd maps like
+			// ProgramArray it means that a given slot doesn't have
+			// a valid fd associated. It's OK to continue to the next slot.
+			continue
+		}
+		if mi.err != nil {
+			mi.err = fmt.Errorf("look up next key: %w", mi.err)
+			return false
+		}
+
+		buf := mi.cursor.([]byte)
+		if ptr, ok := keyOut.(unsafe.Pointer); ok {
+			copy(unsafe.Slice((*byte)(ptr), len(buf)), buf)
+		} else {
+			mi.err = sysenc.Unmarshal(keyOut, buf)
+		}
+
+		return mi.err == nil
+	}
+
+	mi.err = fmt.Errorf("%w", ErrIterationAborted)
+	return false
+}
+
+// Err returns any encountered error.
+//
+// The method must be called after Next returns false.
+//
+// Returns ErrIterationAborted if it wasn't possible to do a full iteration.
+func (mi *MapIterator) Err() error {
+	return mi.err
+}
+
+// MapGetNextID returns the ID of the next eBPF map.
+//
+// Returns ErrNotExist, if there is no next eBPF map.
+func MapGetNextID(startID MapID) (MapID, error) {
+	attr := &sys.MapGetNextIdAttr{Id: uint32(startID)}
+	// Perform the syscall before reading NextId: the evaluation order of
+	// the two return expressions is not otherwise guaranteed.
+	err := sys.MapGetNextId(attr)
+	return MapID(attr.NextId), err
+}
+
+// NewMapFromID returns the map for a given id.
+//
+// Returns ErrNotExist, if there is no eBPF map with the given id.
+func NewMapFromID(id MapID) (*Map, error) {
+	fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{
+		Id: uint32(id),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return newMapFromFD(fd)
+}
+
+// sliceLen returns the length if the value is a slice or an error otherwise.
+func sliceLen(slice any) (int, error) {
+	sliceValue := reflect.ValueOf(slice)
+	if sliceValue.Kind() != reflect.Slice {
+		return 0, fmt.Errorf("%T is not a slice", slice)
+	}
+	return sliceValue.Len(), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
new file mode 100644
index 000000000..57a0a8e88
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -0,0 +1,210 @@
+package ebpf
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"slices"
+	"unsafe"
+
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/sysenc"
+)
+
+// marshalMapSyscallInput converts an arbitrary value into a pointer suitable
+// to be passed to the kernel.
+// +// As an optimization, it returns the original value if it is an +// unsafe.Pointer. +func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) { + if ptr, ok := data.(unsafe.Pointer); ok { + return sys.NewPointer(ptr), nil + } + + buf, err := sysenc.Marshal(data, length) + if err != nil { + return sys.Pointer{}, err + } + + return buf.Pointer(), nil +} + +func makeMapSyscallOutput(dst any, length int) sysenc.Buffer { + if ptr, ok := dst.(unsafe.Pointer); ok { + return sysenc.UnsafeBuffer(ptr) + } + + _, ok := dst.(encoding.BinaryUnmarshaler) + if ok { + return sysenc.SyscallOutput(nil, length) + } + + return sysenc.SyscallOutput(dst, length) +} + +// appendPerCPUSlice encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, errors.New("per-CPU value requires slice") + } + + sliceValue := reflect.ValueOf(slice) + sliceLen := sliceValue.Len() + if sliceLen > possibleCPUs { + return nil, fmt.Errorf("per-CPU value greater than number of CPUs") + } + + // Grow increases the slice's capacity, _if_necessary_ + buf = slices.Grow(buf, alignedElemLength*possibleCPUs) + for i := 0; i < sliceLen; i++ { + elem := sliceValue.Index(i).Interface() + elemBytes, err := sysenc.Marshal(elem, elemLength) + if err != nil { + return nil, err + } + + buf = elemBytes.AppendTo(buf) + buf = append(buf, make([]byte, alignedElemLength-elemLength)...) + } + + // Ensure buf is zero-padded full size. + buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...) + + return buf, nil +} + +// marshalPerCPUValue encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { + possibleCPUs, err := PossibleCPU() + if err != nil { + return sys.Pointer{}, err + } + + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, alignedElemLength*possibleCPUs) + buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return sys.Pointer{}, err + } + + return sys.NewSlicePointer(buf), nil +} + +// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing +// one value per possible CPU into a buffer of bytes. 
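+//
+// A sketch of the resulting layout (illustration only): with batchLen = 2,
+// two possible CPUs and 4-byte elements, the buffer holds
+//
+//	[v0 cpu0][pad][v0 cpu1][pad][v1 cpu0][pad][v1 cpu1][pad]
+//
+// where each element is padded out to internal.Align(elemLength, 8) bytes.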
+func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, fmt.Errorf("batch value requires a slice") + } + sliceValue := reflect.ValueOf(slice) + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + batchLen*possibleCPUs, sliceValue.Len()) + } + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs) + for i := 0; i < batchLen; i++ { + batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return nil, fmt.Errorf("batch %d: %w", i, err) + } + } + return buf, nil +} + +// unmarshalPerCPUValue decodes a buffer into a slice containing one value per +// possible CPU. +// +// slice must be a literal slice and not a pointer. +func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("per-CPU value requires a slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + + sliceValue := reflect.ValueOf(slice) + if sliceValue.Len() != possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + possibleCPUs, sliceValue.Len()) + } + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + stride := internal.Align(elemLength, 8) + for i := 0; i < possibleCPUs; i++ { + var elem any + v := sliceValue.Index(i) + if sliceElemIsPointer { + if !v.Elem().CanAddr() { + return fmt.Errorf("per-CPU slice elements cannot be nil") + } + elem = v.Elem().Addr().Interface() + } else { + elem = v.Addr().Interface() + } + err := sysenc.Unmarshal(elem, buf[:elemLength]) + if err != nil { + return fmt.Errorf("cpu %d: %w", i, err) + } + + buf = buf[stride:] + } + return nil +} + +// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice +// containing one value per possible CPU. +// +// slice must have length batchLen * PossibleCPUs(). 
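+//
+// For example (a sketch; the per-CPU hash map m and the uint32 value layout
+// are assumptions of this example), a caller of the batch API sizes the
+// flattened values slice accordingly:
+//
+//	cpus, err := ebpf.PossibleCPU()
+//	if err != nil {
+//		return err
+//	}
+//	keys := make([]uint32, 8)
+//	vals := make([]uint32, 8*cpus) // batchLen * PossibleCPU()
+//	var cursor ebpf.MapBatchCursor
+//	n, err := m.BatchLookup(&cursor, keys, vals, nil)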
+func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error {
+	sliceType := reflect.TypeOf(slice)
+	if sliceType.Kind() != reflect.Slice {
+		return fmt.Errorf("batch requires a slice")
+	}
+
+	sliceValue := reflect.ValueOf(slice)
+	possibleCPUs, err := PossibleCPU()
+	if err != nil {
+		return err
+	}
+	if sliceValue.Len() != batchLen*possibleCPUs {
+		return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d",
+			batchLen*possibleCPUs, sliceValue.Len())
+	}
+
+	fullValueSize := possibleCPUs * internal.Align(elemLength, 8)
+	if len(buf) != batchLen*fullValueSize {
+		return fmt.Errorf("input buffer has incorrect length, expected %d, got %d",
+			batchLen*fullValueSize, len(buf))
+	}
+
+	for i := 0; i < batchLen; i++ {
+		elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface()
+		if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil {
+			return fmt.Errorf("batch %d: %w", i, err)
+		}
+		buf = buf[fullValueSize:]
+	}
+	return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/netlify.toml b/vendor/github.com/cilium/ebpf/netlify.toml
new file mode 100644
index 000000000..67c83f3b3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/netlify.toml
@@ -0,0 +1,4 @@
+[build]
+  base = "docs/"
+  publish = "site/"
+  command = "mkdocs build"
diff --git a/vendor/github.com/cilium/ebpf/perf/doc.go b/vendor/github.com/cilium/ebpf/perf/doc.go
new file mode 100644
index 000000000..b92bc56af
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/perf/doc.go
@@ -0,0 +1,5 @@
+// Package perf allows reading from BPF perf event arrays.
+//
+// A perf event array contains multiple perf event ringbuffers which can be used
+// to exchange sample-like data with user space.
+package perf
diff --git a/vendor/github.com/cilium/ebpf/perf/reader.go b/vendor/github.com/cilium/ebpf/perf/reader.go
new file mode 100644
index 000000000..3c3d56942
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/perf/reader.go
@@ -0,0 +1,491 @@
+package perf
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+
+	"github.com/cilium/ebpf"
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/epoll"
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+	ErrClosed  = os.ErrClosed
+	ErrFlushed = epoll.ErrFlushed
+	errEOR     = errors.New("end of ring")
+)
+
+var perfEventHeaderSize = binary.Size(perfEventHeader{})
+
+// perfEventHeader must match 'struct perf_event_header' in <linux/perf_event.h>.
+type perfEventHeader struct {
+	Type uint32
+	Misc uint16
+	Size uint16
+}
+
+// Record contains either a sample or a counter of the
+// number of lost samples.
+type Record struct {
+	// The CPU this record was generated on.
+	CPU int
+
+	// The data submitted via bpf_perf_event_output.
+	// Due to a kernel bug, this can contain between 0 and 7 bytes of trailing
+	// garbage from the ring depending on the input sample's length.
+	RawSample []byte
+
+	// The number of samples which could not be output, since
+	// the ring buffer was full.
+	LostSamples uint64
+
+	// The minimum number of bytes remaining in the per-CPU buffer after this Record has been read.
+	// Negative for overwritable buffers.
+	Remaining int
+}
+
+// Read a record from a reader and tag it as being from the given CPU.
+//
+// buf must be at least perfEventHeaderSize bytes long.
+func readRecord(rd io.Reader, rec *Record, buf []byte, overwritable bool) error {
+	// Assert that the buffer is large enough.
+ buf = buf[:perfEventHeaderSize] + _, err := io.ReadFull(rd, buf) + if errors.Is(err, io.EOF) { + return errEOR + } else if err != nil { + return fmt.Errorf("read perf event header: %v", err) + } + + header := perfEventHeader{ + internal.NativeEndian.Uint32(buf[0:4]), + internal.NativeEndian.Uint16(buf[4:6]), + internal.NativeEndian.Uint16(buf[6:8]), + } + + switch header.Type { + case unix.PERF_RECORD_LOST: + rec.RawSample = rec.RawSample[:0] + rec.LostSamples, err = readLostRecords(rd) + return err + + case unix.PERF_RECORD_SAMPLE: + rec.LostSamples = 0 + // We can reuse buf here because perfEventHeaderSize > perfEventSampleSize. + rec.RawSample, err = readRawSample(rd, buf, rec.RawSample) + return err + + default: + return &unknownEventError{header.Type} + } +} + +func readLostRecords(rd io.Reader) (uint64, error) { + // lostHeader must match 'struct perf_event_lost in kernel sources. + var lostHeader struct { + ID uint64 + Lost uint64 + } + + err := binary.Read(rd, internal.NativeEndian, &lostHeader) + if err != nil { + return 0, fmt.Errorf("can't read lost records header: %v", err) + } + + return lostHeader.Lost, nil +} + +var perfEventSampleSize = binary.Size(uint32(0)) + +// This must match 'struct perf_event_sample in kernel sources. +type perfEventSample struct { + Size uint32 +} + +func readRawSample(rd io.Reader, buf, sampleBuf []byte) ([]byte, error) { + buf = buf[:perfEventSampleSize] + if _, err := io.ReadFull(rd, buf); err != nil { + return nil, fmt.Errorf("read sample size: %w", err) + } + + sample := perfEventSample{ + internal.NativeEndian.Uint32(buf), + } + + var data []byte + if size := int(sample.Size); cap(sampleBuf) < size { + data = make([]byte, size) + } else { + data = sampleBuf[:size] + } + + if _, err := io.ReadFull(rd, data); err != nil { + return nil, fmt.Errorf("read sample: %w", err) + } + return data, nil +} + +// Reader allows reading bpf_perf_event_output +// from user space. +type Reader struct { + poller *epoll.Poller + + // mu protects read/write access to the Reader structure with the + // exception fields protected by 'pauseMu'. + // If locking both 'mu' and 'pauseMu', 'mu' must be locked first. + mu sync.Mutex + array *ebpf.Map + rings []*perfEventRing + epollEvents []unix.EpollEvent + epollRings []*perfEventRing + eventHeader []byte + deadline time.Time + overwritable bool + bufferSize int + pendingErr error + + // pauseMu protects eventFds so that Pause / Resume can be invoked while + // Read is blocked. + pauseMu sync.Mutex + eventFds []*sys.FD + paused bool +} + +// ReaderOptions control the behaviour of the user +// space reader. +type ReaderOptions struct { + // The number of events required in any per CPU buffer before + // Read will process data. This is mutually exclusive with Watermark. + // The default is zero, which means Watermark will take precedence. + WakeupEvents int + // The number of written bytes required in any per CPU buffer before + // Read will process data. Must be smaller than PerCPUBuffer. + // The default is to start processing as soon as data is available. + Watermark int + // This perf ring buffer is overwritable, once full the oldest event will be + // overwritten by newest. + Overwritable bool +} + +// NewReader creates a new reader with default options. +// +// array must be a PerfEventArray. perCPUBuffer gives the size of the +// per CPU buffer in bytes. It is rounded up to the nearest multiple +// of the current page size. 
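+//
+// A minimal usage sketch (the PerfEventArray map events and the process
+// helper are assumptions of this example):
+//
+//	rd, err := perf.NewReader(events, os.Getpagesize())
+//	if err != nil {
+//		return err
+//	}
+//	defer rd.Close()
+//	for {
+//		record, err := rd.Read()
+//		if errors.Is(err, perf.ErrClosed) {
+//			return nil
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		process(record.RawSample)
+//	}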
+func NewReader(array *ebpf.Map, perCPUBuffer int) (*Reader, error) { + return NewReaderWithOptions(array, perCPUBuffer, ReaderOptions{}) +} + +// NewReaderWithOptions creates a new reader with the given options. +func NewReaderWithOptions(array *ebpf.Map, perCPUBuffer int, opts ReaderOptions) (pr *Reader, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + if perCPUBuffer < 1 { + return nil, errors.New("perCPUBuffer must be larger than 0") + } + if opts.WakeupEvents > 0 && opts.Watermark > 0 { + return nil, errors.New("WakeupEvents and Watermark cannot both be non-zero") + } + + var ( + nCPU = int(array.MaxEntries()) + rings = make([]*perfEventRing, 0, nCPU) + eventFds = make([]*sys.FD, 0, nCPU) + ) + + poller, err := epoll.New() + if err != nil { + return nil, err + } + defer closeOnError(poller) + + // bpf_perf_event_output checks which CPU an event is enabled on, + // but doesn't allow using a wildcard like -1 to specify "all CPUs". + // Hence we have to create a ring for each CPU. + bufferSize := 0 + for i := 0; i < nCPU; i++ { + event, ring, err := newPerfEventRing(i, perCPUBuffer, opts) + if errors.Is(err, unix.ENODEV) { + // The requested CPU is currently offline, skip it. + continue + } + + if err != nil { + return nil, fmt.Errorf("failed to create perf ring for CPU %d: %v", i, err) + } + defer closeOnError(event) + defer closeOnError(ring) + + bufferSize = ring.size() + rings = append(rings, ring) + eventFds = append(eventFds, event) + + if err := poller.Add(event.Int(), 0); err != nil { + return nil, err + } + } + + // Closing a PERF_EVENT_ARRAY removes all event fds + // stored in it, so we keep a reference alive. + array, err = array.Clone() + if err != nil { + return nil, err + } + + pr = &Reader{ + array: array, + rings: rings, + poller: poller, + deadline: time.Time{}, + epollEvents: make([]unix.EpollEvent, len(rings)), + epollRings: make([]*perfEventRing, 0, len(rings)), + eventHeader: make([]byte, perfEventHeaderSize), + eventFds: eventFds, + overwritable: opts.Overwritable, + bufferSize: bufferSize, + } + if err = pr.Resume(); err != nil { + return nil, err + } + runtime.SetFinalizer(pr, (*Reader).Close) + return pr, nil +} + +// Close frees resources used by the reader. +// +// It interrupts calls to Read. +// +// Calls to perf_event_output from eBPF programs will return +// ENOENT after calling this method. +func (pr *Reader) Close() error { + if err := pr.poller.Close(); err != nil { + if errors.Is(err, os.ErrClosed) { + return nil + } + return fmt.Errorf("close poller: %w", err) + } + + // Trying to poll will now fail, so Read() can't block anymore. Acquire the + // locks so that we can clean up. + pr.mu.Lock() + defer pr.mu.Unlock() + + pr.pauseMu.Lock() + defer pr.pauseMu.Unlock() + + for _, ring := range pr.rings { + ring.Close() + } + for _, event := range pr.eventFds { + event.Close() + } + pr.rings = nil + pr.eventFds = nil + pr.array.Close() + + return nil +} + +// SetDeadline controls how long Read and ReadInto will block waiting for samples. +// +// Passing a zero time.Time will remove the deadline. Passing a deadline in the +// past will prevent the reader from blocking if there are no records to be read. +func (pr *Reader) SetDeadline(t time.Time) { + pr.mu.Lock() + defer pr.mu.Unlock() + + pr.deadline = t +} + +// Read the next record from the perf ring buffer. +// +// The method blocks until there are at least Watermark bytes in one +// of the per CPU buffers. 
Records from buffers below the Watermark
+// are not returned.
+//
+// Records can contain between 0 and 7 bytes of trailing garbage from the ring
+// depending on the input sample's length.
+//
+// Calling [Close] interrupts the method with [os.ErrClosed]. Calling [Flush]
+// makes it return all records currently in the ring buffer, followed by [ErrFlushed].
+//
+// Returns [os.ErrDeadlineExceeded] if a deadline was set and after all records
+// have been read from the ring.
+//
+// See [Reader.ReadInto] for a more efficient version of this method.
+func (pr *Reader) Read() (Record, error) {
+	var r Record
+
+	return r, pr.ReadInto(&r)
+}
+
+var errMustBePaused = fmt.Errorf("perf ringbuffer: must have been paused before reading overwritable buffer")
+
+// ReadInto is like [Reader.Read] except that it allows reusing Record and associated buffers.
+func (pr *Reader) ReadInto(rec *Record) error {
+	pr.mu.Lock()
+	defer pr.mu.Unlock()
+
+	pr.pauseMu.Lock()
+	defer pr.pauseMu.Unlock()
+
+	if pr.overwritable && !pr.paused {
+		return errMustBePaused
+	}
+
+	if pr.rings == nil {
+		return fmt.Errorf("perf ringbuffer: %w", ErrClosed)
+	}
+
+	for {
+		if len(pr.epollRings) == 0 {
+			if pe := pr.pendingErr; pe != nil {
+				// All rings have been emptied since the error occurred, return
+				// appropriate error.
+				pr.pendingErr = nil
+				return pe
+			}
+
+			// NB: The deferred pauseMu.Unlock will panic if Wait panics, which
+			// might obscure the original panic.
+			pr.pauseMu.Unlock()
+			_, err := pr.poller.Wait(pr.epollEvents, pr.deadline)
+			pr.pauseMu.Lock()
+
+			if errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, ErrFlushed) {
+				// We've hit the deadline, check whether there is any data in
+				// the rings that we've not been woken up for.
+				pr.pendingErr = err
+			} else if err != nil {
+				return err
+			}
+
+			// Re-validate pr.paused since we dropped pauseMu.
+			if pr.overwritable && !pr.paused {
+				return errMustBePaused
+			}
+
+			// Waking up userspace is expensive, make the most of it by checking
+			// all rings.
+			for _, ring := range pr.rings {
+				ring.loadHead()
+				pr.epollRings = append(pr.epollRings, ring)
+			}
+		}
+
+		// Start at the last available event. The order in which we
+		// process them doesn't matter, and starting at the back allows
+		// resizing epollRings to keep track of processed rings.
+		err := pr.readRecordFromRing(rec, pr.epollRings[len(pr.epollRings)-1])
+		if err == errEOR {
+			// We've emptied the current ring buffer, process
+			// the next one.
+			pr.epollRings = pr.epollRings[:len(pr.epollRings)-1]
+			continue
+		}
+
+		return err
+	}
+}
+
+// Pause stops all notifications from this Reader.
+//
+// While the Reader is paused, any attempts to write to the event buffer from
+// BPF programs will return -ENOENT.
+//
+// Subsequent calls to Read will block until a call to Resume.
+func (pr *Reader) Pause() error {
+	pr.pauseMu.Lock()
+	defer pr.pauseMu.Unlock()
+
+	if pr.eventFds == nil {
+		return fmt.Errorf("%w", ErrClosed)
+	}
+
+	for i := range pr.eventFds {
+		if err := pr.array.Delete(uint32(i)); err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
+			return fmt.Errorf("couldn't delete event fd for CPU %d: %w", i, err)
+		}
+	}
+
+	pr.paused = true
+
+	return nil
+}
+
+// Resume allows this perf reader to emit notifications.
+//
+// Subsequent calls to Read will block until the next event notification.
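+//
+// One possible pattern for overwritable buffers (a sketch; the process helper
+// and error handling are assumptions of this example) is to snapshot the rings
+// while paused and drain them with a deadline in the past:
+//
+//	rd.SetDeadline(time.Now())
+//	rd.Pause()
+//	for {
+//		record, err := rd.Read()
+//		if errors.Is(err, os.ErrDeadlineExceeded) {
+//			break // ring drained
+//		}
+//		process(record.RawSample)
+//	}
+//	rd.Resume()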
+func (pr *Reader) Resume() error { + pr.pauseMu.Lock() + defer pr.pauseMu.Unlock() + + if pr.eventFds == nil { + return fmt.Errorf("%w", ErrClosed) + } + + for i, fd := range pr.eventFds { + if fd == nil { + continue + } + + if err := pr.array.Put(uint32(i), fd.Uint()); err != nil { + return fmt.Errorf("couldn't put event fd %d for CPU %d: %w", fd, i, err) + } + } + + pr.paused = false + + return nil +} + +// BufferSize is the size in bytes of each per-CPU buffer +func (pr *Reader) BufferSize() int { + return pr.bufferSize +} + +// Flush unblocks Read/ReadInto and successive Read/ReadInto calls will return pending samples at this point, +// until you receive a [ErrFlushed] error. +func (pr *Reader) Flush() error { + return pr.poller.Flush() +} + +// NB: Has to be preceded by a call to ring.loadHead. +func (pr *Reader) readRecordFromRing(rec *Record, ring *perfEventRing) error { + defer ring.writeTail() + + rec.CPU = ring.cpu + err := readRecord(ring, rec, pr.eventHeader, pr.overwritable) + if pr.overwritable && (errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) { + return errEOR + } + rec.Remaining = ring.remaining() + return err +} + +type unknownEventError struct { + eventType uint32 +} + +func (uev *unknownEventError) Error() string { + return fmt.Sprintf("unknown event type: %d", uev.eventType) +} + +// IsUnknownEvent returns true if the error occurred +// because an unknown event was submitted to the perf event ring. +func IsUnknownEvent(err error) bool { + var uee *unknownEventError + return errors.As(err, &uee) +} diff --git a/vendor/github.com/cilium/ebpf/perf/ring.go b/vendor/github.com/cilium/ebpf/perf/ring.go new file mode 100644 index 000000000..63555f323 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/perf/ring.go @@ -0,0 +1,293 @@ +package perf + +import ( + "errors" + "fmt" + "io" + "math" + "os" + "runtime" + "sync/atomic" + "unsafe" + + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// perfEventRing is a page of metadata followed by +// a variable number of pages which form a ring buffer. +type perfEventRing struct { + cpu int + mmap []byte + ringReader +} + +func newPerfEventRing(cpu, perCPUBuffer int, opts ReaderOptions) (_ *sys.FD, _ *perfEventRing, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + if opts.Watermark >= perCPUBuffer { + return nil, nil, errors.New("watermark must be smaller than perCPUBuffer") + } + + fd, err := createPerfEvent(cpu, opts) + if err != nil { + return nil, nil, err + } + defer closeOnError(fd) + + if err := unix.SetNonblock(fd.Int(), true); err != nil { + return nil, nil, err + } + + protections := unix.PROT_READ + if !opts.Overwritable { + protections |= unix.PROT_WRITE + } + + mmap, err := unix.Mmap(fd.Int(), 0, perfBufferSize(perCPUBuffer), protections, unix.MAP_SHARED) + if err != nil { + return nil, nil, fmt.Errorf("can't mmap: %v", err) + } + + // This relies on the fact that we allocate an extra metadata page, + // and that the struct is smaller than an OS page. + // This use of unsafe.Pointer isn't explicitly sanctioned by the + // documentation, since a byte is smaller than sampledPerfEvent. 
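+	// Note on the mapping layout: the first page is the metadata page
+	// (struct perf_event_mmap_page); the 2^n data pages follow at
+	// meta.Data_offset and are sliced out below for the ring readers.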
+ meta := (*unix.PerfEventMmapPage)(unsafe.Pointer(&mmap[0])) + + var reader ringReader + if opts.Overwritable { + reader = newReverseReader(meta, mmap[meta.Data_offset:meta.Data_offset+meta.Data_size]) + } else { + reader = newForwardReader(meta, mmap[meta.Data_offset:meta.Data_offset+meta.Data_size]) + } + + ring := &perfEventRing{ + cpu: cpu, + mmap: mmap, + ringReader: reader, + } + runtime.SetFinalizer(ring, (*perfEventRing).Close) + + return fd, ring, nil +} + +// perfBufferSize returns a valid mmap buffer size for use with perf_event_open (1+2^n pages) +func perfBufferSize(perCPUBuffer int) int { + pageSize := os.Getpagesize() + + // Smallest whole number of pages + nPages := (perCPUBuffer + pageSize - 1) / pageSize + + // Round up to nearest power of two number of pages + nPages = int(math.Pow(2, math.Ceil(math.Log2(float64(nPages))))) + + // Add one for metadata + nPages += 1 + + return nPages * pageSize +} + +func (ring *perfEventRing) Close() error { + runtime.SetFinalizer(ring, nil) + mmap := ring.mmap + ring.mmap = nil + return unix.Munmap(mmap) +} + +func createPerfEvent(cpu int, opts ReaderOptions) (*sys.FD, error) { + wakeup := 0 + bits := 0 + if opts.WakeupEvents > 0 { + wakeup = opts.WakeupEvents + } else { + wakeup = opts.Watermark + if wakeup == 0 { + wakeup = 1 + } + bits |= unix.PerfBitWatermark + } + + if opts.Overwritable { + bits |= unix.PerfBitWriteBackward + } + + attr := unix.PerfEventAttr{ + Type: unix.PERF_TYPE_SOFTWARE, + Config: unix.PERF_COUNT_SW_BPF_OUTPUT, + Bits: uint64(bits), + Sample_type: unix.PERF_SAMPLE_RAW, + Wakeup: uint32(wakeup), + } + + attr.Size = uint32(unsafe.Sizeof(attr)) + fd, err := unix.PerfEventOpen(&attr, -1, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + if err != nil { + return nil, fmt.Errorf("can't create perf event: %w", err) + } + return sys.NewFD(fd) +} + +type ringReader interface { + loadHead() + size() int + remaining() int + writeTail() + Read(p []byte) (int, error) +} + +type forwardReader struct { + meta *unix.PerfEventMmapPage + head, tail uint64 + mask uint64 + ring []byte +} + +func newForwardReader(meta *unix.PerfEventMmapPage, ring []byte) *forwardReader { + return &forwardReader{ + meta: meta, + head: atomic.LoadUint64(&meta.Data_head), + tail: atomic.LoadUint64(&meta.Data_tail), + // cap is always a power of two + mask: uint64(cap(ring) - 1), + ring: ring, + } +} + +func (rr *forwardReader) loadHead() { + rr.head = atomic.LoadUint64(&rr.meta.Data_head) +} + +func (rr *forwardReader) size() int { + return len(rr.ring) +} + +func (rr *forwardReader) remaining() int { + return int((rr.head - rr.tail) & rr.mask) +} + +func (rr *forwardReader) writeTail() { + // Commit the new tail. This lets the kernel know that + // the ring buffer has been consumed. + atomic.StoreUint64(&rr.meta.Data_tail, rr.tail) +} + +func (rr *forwardReader) Read(p []byte) (int, error) { + start := int(rr.tail & rr.mask) + + n := len(p) + // Truncate if the read wraps in the ring buffer + if remainder := cap(rr.ring) - start; n > remainder { + n = remainder + } + + // Truncate if there isn't enough data + if remainder := int(rr.head - rr.tail); n > remainder { + n = remainder + } + + copy(p, rr.ring[start:start+n]) + rr.tail += uint64(n) + + if rr.tail == rr.head { + return n, io.EOF + } + + return n, nil +} + +type reverseReader struct { + meta *unix.PerfEventMmapPage + // head is the position where the kernel last wrote data. + head uint64 + // read is the position we read the next data from. Updated as reads are made. 
+ read uint64 + // tail is the end of the ring buffer. No reads must be made past it. + tail uint64 + mask uint64 + ring []byte +} + +func newReverseReader(meta *unix.PerfEventMmapPage, ring []byte) *reverseReader { + rr := &reverseReader{ + meta: meta, + mask: uint64(cap(ring) - 1), + ring: ring, + } + rr.loadHead() + return rr +} + +func (rr *reverseReader) loadHead() { + // The diagram below represents an overwritable perf ring buffer: + // + // head read tail + // | | | + // V V V + // +---+--------+------------+---------+--------+ + // | |H-D....D|H-C........C|H-B.....B|H-A....A| + // +---+--------+------------+---------+--------+ + // <--Write from right to left + // Read from left to right--> + // (H means header) + // + // The buffer is read left to right beginning from head to tail. + // [head, read) is the read portion of the buffer, [read, tail) the unread one. + // read is adjusted as we progress through the buffer. + + // Avoid reading sample D multiple times by discarding unread samples C, B, A. + rr.tail = rr.head + + // Get the new head and starting reading from it. + rr.head = atomic.LoadUint64(&rr.meta.Data_head) + rr.read = rr.head + + if rr.tail-rr.head > uint64(cap(rr.ring)) { + // ring has been fully written, only permit at most cap(rr.ring) + // bytes to be read. + rr.tail = rr.head + uint64(cap(rr.ring)) + } +} + +func (rr *reverseReader) size() int { + return len(rr.ring) +} + +func (rr *reverseReader) remaining() int { + // remaining data is inaccurate for overwritable buffers + // once an overwrite happens, so return -1 here. + return -1 +} + +func (rr *reverseReader) writeTail() { + // We do not care about tail for over writable perf buffer. + // So, this function is noop. +} + +func (rr *reverseReader) Read(p []byte) (int, error) { + start := int(rr.read & rr.mask) + + n := len(p) + // Truncate if the read wraps in the ring buffer + if remainder := cap(rr.ring) - start; n > remainder { + n = remainder + } + + // Truncate if there isn't enough data + if remainder := int(rr.tail - rr.read); n > remainder { + n = remainder + } + + copy(p, rr.ring[start:start+n]) + rr.read += uint64(n) + + if rr.read == rr.tail { + return n, io.EOF + } + + return n, nil +} diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go new file mode 100644 index 000000000..9bc6325f8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -0,0 +1,1141 @@ +package ebpf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "path/filepath" + "runtime" + "strings" + "time" + "unsafe" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// ErrNotSupported is returned whenever the kernel doesn't support a feature. +var ErrNotSupported = internal.ErrNotSupported + +// errBadRelocation is returned when the verifier rejects a program due to a +// bad CO-RE relocation. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errBadRelocation = errors.New("bad CO-RE relocation") + +// errUnknownKfunc is returned when the verifier rejects a program due to an +// unknown kfunc. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errUnknownKfunc = errors.New("unknown kfunc") + +// ProgramID represents the unique ID of an eBPF program. 
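+//
+// IDs can be used to enumerate the programs loaded on a system (a sketch;
+// assumes sufficient privileges):
+//
+//	var id ebpf.ProgramID
+//	for {
+//		next, err := ebpf.ProgramGetNextID(id)
+//		if errors.Is(err, os.ErrNotExist) {
+//			break // no more programs
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		id = next
+//		// inspect id, e.g. via ebpf.NewProgramFromID(id)
+//	}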
+type ProgramID uint32 + +const ( + // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. + // This is currently the maximum of spare space allocated for SKB + // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN. + outputPad = 256 + 2 +) + +// Deprecated: the correct log size is now detected automatically and this +// constant is unused. +const DefaultVerifierLogSize = 64 * 1024 + +// minVerifierLogSize is the default number of bytes allocated for the +// verifier log. +const minVerifierLogSize = 64 * 1024 + +// ProgramOptions control loading a program into the kernel. +type ProgramOptions struct { + // Bitmap controlling the detail emitted by the kernel's eBPF verifier log. + // LogLevel-type values can be ORed together to request specific kinds of + // verifier output. See the documentation on [ebpf.LogLevel] for details. + // + // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats) + // + // If left to its default value, the program will first be loaded without + // verifier output enabled. Upon error, the program load will be repeated + // with LogLevelBranch and the given (or default) LogSize value. + // + // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier + // log, populating the [ebpf.Program.VerifierLog] field on successful loads + // and including detailed verifier errors if the program is rejected. This + // will always allocate an output buffer, but will result in only a single + // attempt at loading the program. + LogLevel LogLevel + + // Deprecated: the correct log buffer size is determined automatically + // and this field is ignored. + LogSize int + + // Disables the verifier log completely, regardless of other options. + LogDisabled bool + + // Type information used for CO-RE relocations. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec + + // Type information used for CO-RE relocations of kernel modules, + // indexed by module name. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel module BTF from a well-known location if nil. + KernelModuleTypes map[string]*btf.Spec +} + +// ProgramSpec defines a Program. +type ProgramSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + + // Type determines at which hook in the kernel a program will run. + Type ProgramType + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. + AttachType AttachType + + // Name of a kernel data structure or function to attach to. Its + // interpretation depends on Type and AttachType. + AttachTo string + + // The program to attach to. Must be provided manually. + AttachTarget *Program + + // The name of the ELF section this program originated from. + SectionName string + + Instructions asm.Instructions + + // Flags is passed to the kernel and specifies additional program + // load attributes. + Flags uint32 + + // License of the program. Some helpers are only available if + // the license is deemed compatible with the GPL. 
+ // + // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1 + License string + + // Version used by Kprobe programs. + // + // Deprecated on kernels 5.0 and later. Leave empty to let the library + // detect this value automatically. + KernelVersion uint32 + + // The byte order this program was compiled for, may be nil. + ByteOrder binary.ByteOrder +} + +// Copy returns a copy of the spec. +func (ps *ProgramSpec) Copy() *ProgramSpec { + if ps == nil { + return nil + } + + cpy := *ps + cpy.Instructions = make(asm.Instructions, len(ps.Instructions)) + copy(cpy.Instructions, ps.Instructions) + return &cpy +} + +// Tag calculates the kernel tag for a series of instructions. +// +// Use asm.Instructions.Tag if you need to calculate for non-native endianness. +func (ps *ProgramSpec) Tag() (string, error) { + return ps.Instructions.Tag(internal.NativeEndian) +} + +// KernelModule returns the kernel module, if any, the AttachTo function is contained in. +func (ps *ProgramSpec) KernelModule() (string, error) { + if ps.AttachTo == "" { + return "", nil + } + + switch ps.Type { + default: + return "", nil + case Tracing: + switch ps.AttachType { + default: + return "", nil + case AttachTraceFEntry: + case AttachTraceFExit: + } + fallthrough + case Kprobe: + return kallsyms.KernelModule(ps.AttachTo) + } +} + +// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a +// program is rejected by the verifier. +// +// Use [errors.As] to access the error. +type VerifierError = internal.VerifierError + +// Program represents BPF program loaded into the kernel. +// +// It is not safe to close a Program which is used by other goroutines. +type Program struct { + // Contains the output of the kernel verifier if enabled, + // otherwise it is empty. + VerifierLog string + + fd *sys.FD + name string + pinnedPath string + typ ProgramType +} + +// NewProgram creates a new Program. +// +// See [NewProgramWithOptions] for details. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. +func NewProgram(spec *ProgramSpec) (*Program, error) { + return NewProgramWithOptions(spec, ProgramOptions{}) +} + +// NewProgramWithOptions creates a new Program. +// +// Loading a program for the first time will perform +// feature detection by loading small, temporary programs. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. +func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if spec == nil { + return nil, errors.New("can't load a program from a nil spec") + } + + prog, err := newProgramWithOptions(spec, opts) + if errors.Is(err, asm.ErrUnsatisfiedMapReference) { + return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) + } + return prog, err +} + +var ( + coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel)) + // This log message was introduced by ebb676daa1a3 ("bpf: Print function name in + // addition to function id") which first appeared in v4.10 and has remained + // unchanged since. 
+ coreBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel)) + kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase)) +) + +func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { + if len(spec.Instructions) == 0 { + return nil, errors.New("instructions cannot be empty") + } + + if spec.Type == UnspecifiedProgram { + return nil, errors.New("can't load program of unspecified type") + } + + if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian { + return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian) + } + + // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load") + // require the version field to be set to the value of the KERNEL_VERSION + // macro for kprobe-type programs. + // Overwrite Kprobe program version if set to zero or the magic version constant. + kv := spec.KernelVersion + if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { + v, err := internal.KernelVersion() + if err != nil { + return nil, fmt.Errorf("detecting kernel version: %w", err) + } + kv = v.Kernel() + } + + attr := &sys.ProgLoadAttr{ + ProgType: sys.ProgType(spec.Type), + ProgFlags: spec.Flags, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, + } + + if haveObjName() == nil { + attr.ProgName = sys.NewObjName(spec.Name) + } + + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) + + kmodName, err := spec.KernelModule() + if err != nil { + return nil, fmt.Errorf("kernel module search: %w", err) + } + + var targets []*btf.Spec + if opts.KernelTypes != nil { + targets = append(targets, opts.KernelTypes) + } + if kmodName != "" && opts.KernelModuleTypes != nil { + if modBTF, ok := opts.KernelModuleTypes[kmodName]; ok { + targets = append(targets, modBTF) + } + } + + var b btf.Builder + if err := applyRelocations(insns, targets, kmodName, spec.ByteOrder, &b); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) + } + + errExtInfos := haveProgramExtInfos() + if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) { + // There is at least one CO-RE relocation which relies on a stable local + // type ID. + // Return ErrNotSupported instead of E2BIG if there is no BTF support. + return nil, errExtInfos + } + + if errExtInfos == nil { + // Only add func and line info if the kernel supports it. This allows + // BPF compiled with modern toolchains to work on old kernels. 
+ fib, lib, err := btf.MarshalExtInfos(insns, &b) + if err != nil { + return nil, fmt.Errorf("marshal ext_infos: %w", err) + } + + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.NewSlicePointer(fib) + + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = sys.NewSlicePointer(lib) + } + + if !b.Empty() { + handle, err := btf.NewHandle(&b) + if err != nil { + return nil, fmt.Errorf("load BTF: %w", err) + } + defer handle.Close() + + attr.ProgBtfFd = uint32(handle.FD()) + } + + kconfig, err := resolveKconfigReferences(insns) + if err != nil { + return nil, fmt.Errorf("resolve .kconfig: %w", err) + } + defer kconfig.Close() + + if err := fixupAndValidate(insns); err != nil { + return nil, err + } + + handles, err := fixupKfuncs(insns) + if err != nil { + return nil, fmt.Errorf("fixing up kfuncs: %w", err) + } + defer handles.Close() + + if len(handles) > 0 { + fdArray := handles.fdArray() + attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0])) + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + err = insns.Marshal(buf, internal.NativeEndian) + if err != nil { + return nil, err + } + + bytecode := buf.Bytes() + attr.Insns = sys.NewSlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) + + if spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil { + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + if module != nil { + attr.AttachBtfObjFd = uint32(module.FD()) + defer module.Close() + } + } + + // The caller requested a specific verifier log level. Set up the log buffer + // so that there is a chance of loading the program in a single shot. + var logBuf []byte + if !opts.LogDisabled && opts.LogLevel != 0 { + logBuf = make([]byte, minVerifierLogSize) + attr.LogLevel = opts.LogLevel + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = sys.NewSlicePointer(logBuf) + } + + for { + var fd *sys.FD + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + } + + if opts.LogDisabled { + break + } + + if attr.LogTrueSize != 0 && attr.LogSize >= attr.LogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.LogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Logging is enabled and the error is not ENOSPC, so we can infer + // that the log buffer is large enough. + break + } + + if attr.LogLevel == 0 { + // Logging is not enabled but loading the program failed. Enable + // basic logging. + attr.LogLevel = LogLevelBranch + } + + // Make an educated guess how large the buffer should be. Start + // at minVerifierLogSize and then double the size. 
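+		// (For example 64 KiB -> 128 KiB -> 256 KiB and so on, until the
+		// load succeeds, the kernel reports LogTrueSize, or the size
+		// computation overflows.)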
+ logSize := uint32(max(len(logBuf)*2, minVerifierLogSize)) + if int(logSize) < len(logBuf) { + return nil, errors.New("overflow while probing log buffer size") + } + + if attr.LogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. + logSize = attr.LogTrueSize + } + + logBuf = make([]byte, logSize) + attr.LogSize = logSize + attr.LogBuf = sys.NewSlicePointer(logBuf) + } + + end := bytes.IndexByte(logBuf, 0) + if end < 0 { + end = len(logBuf) + } + + tail := logBuf[max(end-256, 0):end] + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + + case errors.Is(err, unix.EINVAL): + if bytes.Contains(tail, coreBadCall) { + err = errBadRelocation + break + } else if bytes.Contains(tail, kfuncBadCall) { + err = errUnknownKfunc + break + } + + case errors.Is(err, unix.EACCES): + if bytes.Contains(tail, coreBadLoad) { + err = errBadRelocation + break + } + } + + // hasFunctionReferences may be expensive, so check it last. + if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) && + hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } + } + + return nil, internal.ErrorWithLog("load program", err, logBuf) +} + +// NewProgramFromFD creates a program from a raw fd. +// +// You should not use fd after calling this function. +// +// Requires at least Linux 4.10. +func NewProgramFromFD(fd int) (*Program, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newProgramFromFD(f) +} + +// NewProgramFromID returns the program for a given id. +// +// Returns ErrNotExist, if there is no eBPF program with the given id. +func NewProgramFromID(id ProgramID) (*Program, error) { + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get program by id: %w", err) + } + + return newProgramFromFD(fd) +} + +func newProgramFromFD(fd *sys.FD) (*Program, error) { + info, err := newProgramInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("discover program type: %w", err) + } + + return &Program{"", fd, info.Name, "", info.Type}, nil +} + +func (p *Program) String() string { + if p.name != "" { + return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd) + } + return fmt.Sprintf("%s(%v)", p.typ, p.fd) +} + +// Type returns the underlying type of the program. +func (p *Program) Type() ProgramType { + return p.typ +} + +// Info returns metadata about the program. +// +// Requires at least 4.10. +func (p *Program) Info() (*ProgramInfo, error) { + return newProgramInfoFromFd(p.fd) +} + +// Handle returns a reference to the program's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. +func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. 
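+//
+// The raw descriptor is useful for plumbing outside this package, for example
+// attaching a socket filter (a sketch; sockFD and error handling are
+// assumptions of this example):
+//
+//	err := unix.SetsockoptInt(sockFD, unix.SOL_SOCKET, unix.SO_ATTACH_BPF, prog.FD())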
+func (p *Program) FD() int { + return p.fd.Int() +} + +// Clone creates a duplicate of the Program. +// +// Closing the duplicate does not affect the original, and vice versa. +// +// Cloning a nil Program returns nil. +func (p *Program) Clone() (*Program, error) { + if p == nil { + return nil, nil + } + + dup, err := p.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone program: %w", err) + } + + return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil +} + +// Pin persists the Program on the BPF virtual file system past the lifetime of +// the process that created it +// +// Calling Pin on a previously pinned program will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (p *Program) Pin(fileName string) error { + if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil { + return err + } + p.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the Program from the BPF virtual filesystem. +// +// Failed calls to Unpin will not alter the state returned by IsPinned. +// +// Unpinning an unpinned Program returns nil. +func (p *Program) Unpin() error { + if err := internal.Unpin(p.pinnedPath); err != nil { + return err + } + p.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Program has a non-empty pinned path. +func (p *Program) IsPinned() bool { + return p.pinnedPath != "" +} + +// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. +func (p *Program) Close() error { + if p == nil { + return nil + } + + return p.fd.Close() +} + +// Various options for Run'ing a Program +type RunOptions struct { + // Program's data input. Required field. + // + // The kernel expects at least 14 bytes input for an ethernet header for + // XDP and SKB programs. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. + ContextOut interface{} + // Minimum number of times to run Program. Optional field. Defaults to 1. + // + // The program may be executed more often than this due to interruptions, e.g. + // when runtime.AllThreadsSyscall is invoked. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + // + // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead. + Reset func() +} + +// Test runs the Program in the kernel with the given input and returns the +// value returned by the eBPF program. +// +// Note: the kernel expects at least 14 bytes input for an ethernet header for +// XDP and SKB programs. +// +// This function requires at least Linux 4.12. +func (p *Program) Test(in []byte) (uint32, []byte, error) { + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. 
Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.run(&opts) + if err != nil { + return ret, nil, fmt.Errorf("test program: %w", err) + } + return ret, opts.DataOut, nil +} + +// Run runs the Program in kernel with given RunOptions. +// +// Note: the same restrictions from Test apply. +func (p *Program) Run(opts *RunOptions) (uint32, error) { + ret, _, err := p.run(opts) + if err != nil { + return ret, fmt.Errorf("run program: %w", err) + } + return ret, nil +} + +// Benchmark runs the Program with the given input for a number of times +// and returns the time taken per iteration. +// +// Returns the result of the last execution of the program and the time per +// run or an error. reset is called whenever the benchmark syscall is +// interrupted, and should be set to testing.B.ResetTimer or similar. +// +// This function requires at least Linux 4.12. +func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.run(&opts) + if err != nil { + return ret, total, fmt.Errorf("benchmark program: %w", err) + } + return ret, total, nil +} + +var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error { + prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels. + Type: SocketFilter, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + // This may be because we lack sufficient permissions, etc. + return err + } + defer prog.Close() + + in := internal.EmptyBPFContext + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.NewSlicePointer(in), + } + + err = sys.ProgRun(&attr) + switch { + case errors.Is(err, unix.EINVAL): + // Check for EINVAL specifically, rather than err != nil since we + // otherwise misdetect due to insufficient permissions. + return internal.ErrNotSupported + + case errors.Is(err, unix.EINTR): + // We know that PROG_TEST_RUN is supported if we get EINTR. + return nil + + case errors.Is(err, sys.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. 
+ return nil + } + + return err +}) + +func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") + } + + if err := haveProgRun(); err != nil { + return 0, 0, err + } + + var ctxBytes []byte + if opts.Context != nil { + ctx := new(bytes.Buffer) + if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } + ctxBytes = ctx.Bytes() + } + + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) + } + + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.NewSlicePointer(opts.Data), + DataOut: sys.NewSlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxBytes)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.NewSlicePointer(ctxBytes), + CtxOut: sys.NewSlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, + } + +retry: + for { + err := sys.ProgRun(&attr) + if err == nil { + break retry + } + + if errors.Is(err, unix.EINTR) { + if attr.Repeat <= 1 { + // Older kernels check whether enough repetitions have been + // executed only after checking for pending signals. + // + // run signal? done? run ... + // + // As a result we can get EINTR for repeat==1 even though + // the program was run exactly once. Treat this as a + // successful run instead. + // + // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code") + // the conditions are reversed: + // run done? signal? ... + break retry + } + + if opts.Reset != nil { + opts.Reset() + } + continue retry + } + + if errors.Is(err, sys.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, err + } + + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] + } + + if len(ctxOut) != 0 { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } + } + + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, total, nil +} + +func unmarshalProgram(buf sysenc.Buffer) (*Program, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + return NewProgramFromID(ProgramID(id)) +} + +func marshalProgram(p *Program, length int) ([]byte, error) { + if length != 4 { + return nil, fmt.Errorf("can't marshal program to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) + return buf, nil +} + +// LoadPinnedProgram loads a Program from a BPF file. +// +// Requires at least Linux 4.11. 
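+//
+// A minimal usage sketch; the pin path below is hypothetical:
+//
+//	prog, err := ebpf.LoadPinnedProgram("/sys/fs/bpf/my_prog", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer prog.Close()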
+func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) {
+	fd, err := sys.ObjGet(&sys.ObjGetAttr{
+		Pathname:  sys.NewStringPointer(fileName),
+		FileFlags: opts.Marshal(),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	info, err := newProgramInfoFromFd(fd)
+	if err != nil {
+		_ = fd.Close()
+		return nil, fmt.Errorf("info for %s: %w", fileName, err)
+	}
+
+	var progName string
+	if haveObjName() == nil {
+		progName = info.Name
+	} else {
+		progName = filepath.Base(fileName)
+	}
+
+	return &Program{"", fd, progName, fileName, info.Type}, nil
+}
+
+// SanitizeName replaces all invalid characters in name with replacement.
+// Passing a negative value for replacement will delete characters instead
+// of replacing them. Use this to automatically generate valid names for maps
+// and programs at runtime.
+//
+// The set of allowed characters depends on the running kernel version.
+// Dots are only allowed as of kernel 5.2.
+func SanitizeName(name string, replacement rune) string {
+	return strings.Map(func(char rune) rune {
+		if invalidBPFObjNameChar(char) {
+			return replacement
+		}
+		return char
+	}, name)
+}
+
+// ProgramGetNextID returns the ID of the next eBPF program.
+//
+// Returns ErrNotExist, if there is no next eBPF program.
+func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
+	// Run the syscall before reading NextId: evaluating both in a single
+	// return statement leaves the read/call order unspecified.
+	attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)}
+	if err := sys.ProgGetNextId(attr); err != nil {
+		return 0, err
+	}
+	return ProgramID(attr.NextId), nil
+}
+
+// BindMap binds a map to the program; the map is only released once the
+// program is released.
+//
+// This may be used in cases where metadata should be associated with a
+// program which otherwise does not contain any references to the map.
+func (p *Program) BindMap(m *Map) error {
+	attr := &sys.ProgBindMapAttr{
+		ProgFd: uint32(p.FD()),
+		MapFd:  uint32(m.FD()),
+	}
+
+	return sys.ProgBindMap(attr)
+}
+
+var errUnrecognizedAttachType = errors.New("unrecognized attach type")
+
+// find an attach target type in the kernel.
+//
+// name, progType and attachType determine which type we need to attach to.
+//
+// The attach target may be in a loaded kernel module.
+// In that case the returned handle will be non-nil.
+// The caller is responsible for closing the handle.
+//
+// Returns errUnrecognizedAttachType if the combination of progType and attachType
+// is not recognised.
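+//
+// For example, (LSM, AttachLSMMac) with name "file_open" resolves the kernel
+// BTF function "bpf_lsm_file_open", and (Tracing, AttachTraceIter) with name
+// "task" resolves "bpf_iter_task".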
+func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var ( + typeName, featureName string + target btf.Type + ) + + switch (match{progType, attachType}) { + case match{LSM, AttachLSMMac}: + typeName = "bpf_lsm_" + name + featureName = name + " LSM hook" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceIter}: + typeName = "bpf_iter_" + name + featureName = name + " iterator" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachModifyReturn}: + typeName = name + featureName = fmt.Sprintf("fmod_ret %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + target = (*btf.Typedef)(nil) + default: + return nil, 0, errUnrecognizedAttachType + } + + spec, err := btf.LoadKernelSpec() + if err != nil { + return nil, 0, fmt.Errorf("load kernel spec: %w", err) + } + + spec, module, err := findTargetInKernel(spec, typeName, &target) + if errors.Is(err, btf.ErrNotFound) { + return nil, 0, &internal.UnsupportedFeatureError{Name: featureName} + } + // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel + // symbols, we should explicitly refuse program loads. They will not reliably + // do what the caller intended. + if errors.Is(err, btf.ErrMultipleMatches) { + return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err) + } + if err != nil { + return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err) + } + + id, err := spec.TypeID(target) + if err != nil { + module.Close() + return nil, 0, err + } + + return module, id, nil +} + +// findTargetInKernel attempts to find a named type in the current kernel. +// +// target will point at the found type after a successful call. Searches both +// vmlinux and any loaded modules. +// +// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound +// if the type wasn't found at all. +func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + err := kernelSpec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + spec, module, err := findTargetInModule(kernelSpec, typeName, target) + if err != nil { + return nil, nil, fmt.Errorf("find target in modules: %w", err) + } + return spec, module, nil + } + if err != nil { + return nil, nil, fmt.Errorf("find target in vmlinux: %w", err) + } + return kernelSpec, nil, err +} + +// findTargetInModule attempts to find a named type in any loaded module. +// +// base must contain the kernel's types and is used to parse kmod BTF. Modules +// are searched in the order they were loaded. +// +// Returns btf.ErrNotFound if the target can't be found in any module. 
+func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + it := new(btf.HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err) + } + + if !info.IsModule() { + continue + } + + spec, err := it.Handle.Spec(base) + if err != nil { + return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err) + } + + err = spec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + continue + } + if err != nil { + return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err) + } + + return spec, it.Take(), nil + } + if err := it.Err(); err != nil { + return nil, nil, fmt.Errorf("iterate modules: %w", err) + } + + return nil, nil, btf.ErrNotFound +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. +func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}, + match{Tracing, AttachTraceFEntry}, + match{Tracing, AttachTraceFExit}: + typeName = name + default: + return 0, errUnrecognizedAttachType + } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return 0, err + } + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + if err != nil { + return 0, fmt.Errorf("find target %s: %w", typeName, err) + } + + return spec.TypeID(targetFunc) +} diff --git a/vendor/github.com/cilium/ebpf/ringbuf/doc.go b/vendor/github.com/cilium/ebpf/ringbuf/doc.go new file mode 100644 index 000000000..9e4501218 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/ringbuf/doc.go @@ -0,0 +1,6 @@ +// Package ringbuf allows interacting with Linux BPF ring buffer. +// +// BPF allows submitting custom events to a BPF ring buffer map set up +// by userspace. This is very useful to push things like packet samples +// from BPF to a daemon running in user space. +package ringbuf diff --git a/vendor/github.com/cilium/ebpf/ringbuf/reader.go b/vendor/github.com/cilium/ebpf/ringbuf/reader.go new file mode 100644 index 000000000..3d3ba0ecf --- /dev/null +++ b/vendor/github.com/cilium/ebpf/ringbuf/reader.go @@ -0,0 +1,197 @@ +package ringbuf + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/epoll" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + ErrClosed = os.ErrClosed + ErrFlushed = epoll.ErrFlushed + errEOR = errors.New("end of ring") + errBusy = errors.New("sample not committed yet") +) + +// ringbufHeader from 'struct bpf_ringbuf_hdr' in kernel/bpf/ringbuf.c +type ringbufHeader struct { + Len uint32 + _ uint32 // pg_off, only used by kernel internals +} + +func (rh *ringbufHeader) isBusy() bool { + return rh.Len&unix.BPF_RINGBUF_BUSY_BIT != 0 +} + +func (rh *ringbufHeader) isDiscard() bool { + return rh.Len&unix.BPF_RINGBUF_DISCARD_BIT != 0 +} + +func (rh *ringbufHeader) dataLen() int { + return int(rh.Len & ^uint32(unix.BPF_RINGBUF_BUSY_BIT|unix.BPF_RINGBUF_DISCARD_BIT)) +} + +type Record struct { + RawSample []byte + + // The minimum number of bytes remaining in the ring buffer after this Record has been read. 
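+	// It can serve, for example, as a back-pressure signal: values that keep
+	// growing indicate that the consumer is falling behind the producers.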
+ Remaining int +} + +// Reader allows reading bpf_ringbuf_output +// from user space. +type Reader struct { + poller *epoll.Poller + + // mu protects read/write access to the Reader structure + mu sync.Mutex + ring *ringbufEventRing + epollEvents []unix.EpollEvent + haveData bool + deadline time.Time + bufferSize int + pendingErr error +} + +// NewReader creates a new BPF ringbuf reader. +func NewReader(ringbufMap *ebpf.Map) (*Reader, error) { + if ringbufMap.Type() != ebpf.RingBuf { + return nil, fmt.Errorf("invalid Map type: %s", ringbufMap.Type()) + } + + maxEntries := int(ringbufMap.MaxEntries()) + if maxEntries == 0 || (maxEntries&(maxEntries-1)) != 0 { + return nil, fmt.Errorf("ringbuffer map size %d is zero or not a power of two", maxEntries) + } + + poller, err := epoll.New() + if err != nil { + return nil, err + } + + if err := poller.Add(ringbufMap.FD(), 0); err != nil { + poller.Close() + return nil, err + } + + ring, err := newRingBufEventRing(ringbufMap.FD(), maxEntries) + if err != nil { + poller.Close() + return nil, fmt.Errorf("failed to create ringbuf ring: %w", err) + } + + return &Reader{ + poller: poller, + ring: ring, + epollEvents: make([]unix.EpollEvent, 1), + bufferSize: ring.size(), + }, nil +} + +// Close frees resources used by the reader. +// +// It interrupts calls to Read. +func (r *Reader) Close() error { + if err := r.poller.Close(); err != nil { + if errors.Is(err, os.ErrClosed) { + return nil + } + return err + } + + // Acquire the lock. This ensures that Read isn't running. + r.mu.Lock() + defer r.mu.Unlock() + + if r.ring != nil { + r.ring.Close() + r.ring = nil + } + + return nil +} + +// SetDeadline controls how long Read and ReadInto will block waiting for samples. +// +// Passing a zero time.Time will remove the deadline. +func (r *Reader) SetDeadline(t time.Time) { + r.mu.Lock() + defer r.mu.Unlock() + + r.deadline = t +} + +// Read the next record from the BPF ringbuf. +// +// Calling [Close] interrupts the method with [os.ErrClosed]. Calling [Flush] +// makes it return all records currently in the ring buffer, followed by [ErrFlushed]. +// +// Returns [os.ErrDeadlineExceeded] if a deadline was set and after all records +// have been read from the ring. +// +// See [ReadInto] for a more efficient version of this method. +func (r *Reader) Read() (Record, error) { + var rec Record + return rec, r.ReadInto(&rec) +} + +// ReadInto is like Read except that it allows reusing Record and associated buffers. +func (r *Reader) ReadInto(rec *Record) error { + r.mu.Lock() + defer r.mu.Unlock() + + if r.ring == nil { + return fmt.Errorf("ringbuffer: %w", ErrClosed) + } + + for { + if !r.haveData { + if pe := r.pendingErr; pe != nil { + r.pendingErr = nil + return pe + } + + _, err := r.poller.Wait(r.epollEvents[:cap(r.epollEvents)], r.deadline) + if errors.Is(err, os.ErrDeadlineExceeded) || errors.Is(err, ErrFlushed) { + // Ignoring this for reading a valid entry after timeout or flush. + // This can occur if the producer submitted to the ring buffer + // with BPF_RB_NO_WAKEUP. 
+				r.pendingErr = err
+			} else if err != nil {
+				return err
+			}
+			r.haveData = true
+		}
+
+		for {
+			err := r.ring.readRecord(rec)
+			// Not using errors.Is, which is quite a bit slower;
+			// for a tight loop it might make a difference.
+			if err == errBusy {
+				continue
+			}
+			if err == errEOR {
+				r.haveData = false
+				break
+			}
+			return err
+		}
+	}
+}
+
+// BufferSize returns the size in bytes of the ring buffer.
+func (r *Reader) BufferSize() int {
+	return r.bufferSize
+}
+
+// Flush unblocks Read/ReadInto; successive Read/ReadInto calls return the
+// samples pending at this point, until an ErrFlushed error is returned.
+func (r *Reader) Flush() error {
+	return r.poller.Flush()
+}
diff --git a/vendor/github.com/cilium/ebpf/ringbuf/ring.go b/vendor/github.com/cilium/ebpf/ringbuf/ring.go
new file mode 100644
index 000000000..8f8f4bce3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/ringbuf/ring.go
@@ -0,0 +1,137 @@
+package ringbuf
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+
+	"github.com/cilium/ebpf/internal"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+type ringbufEventRing struct {
+	prod []byte
+	cons []byte
+	*ringReader
+}
+
+func newRingBufEventRing(mapFD, size int) (*ringbufEventRing, error) {
+	cons, err := unix.Mmap(mapFD, 0, os.Getpagesize(), unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED)
+	if err != nil {
+		return nil, fmt.Errorf("can't mmap consumer page: %w", err)
+	}
+
+	prod, err := unix.Mmap(mapFD, (int64)(os.Getpagesize()), os.Getpagesize()+2*size, unix.PROT_READ, unix.MAP_SHARED)
+	if err != nil {
+		_ = unix.Munmap(cons)
+		return nil, fmt.Errorf("can't mmap data pages: %w", err)
+	}
+
+	cons_pos := (*uint64)(unsafe.Pointer(&cons[0]))
+	prod_pos := (*uint64)(unsafe.Pointer(&prod[0]))
+
+	ring := &ringbufEventRing{
+		prod:       prod,
+		cons:       cons,
+		ringReader: newRingReader(cons_pos, prod_pos, prod[os.Getpagesize():]),
+	}
+	runtime.SetFinalizer(ring, (*ringbufEventRing).Close)
+
+	return ring, nil
+}
+
+func (ring *ringbufEventRing) Close() {
+	runtime.SetFinalizer(ring, nil)
+
+	_ = unix.Munmap(ring.prod)
+	_ = unix.Munmap(ring.cons)
+
+	ring.prod = nil
+	ring.cons = nil
+}
+
+type ringReader struct {
+	// These point into mmap'ed memory and must be accessed atomically.
+	prod_pos, cons_pos *uint64
+	mask               uint64
+	ring               []byte
+}
+
+func newRingReader(cons_ptr, prod_ptr *uint64, ring []byte) *ringReader {
+	return &ringReader{
+		prod_pos: prod_ptr,
+		cons_pos: cons_ptr,
+		// cap is always a power of two
+		mask: uint64(cap(ring)/2 - 1),
+		ring: ring,
+	}
+}
+
+// To be able to wrap around data, data pages in ring buffers are mapped twice in
+// a single contiguous virtual region.
+// Therefore the returned usable size is half the size of the mmapped region.
+func (rr *ringReader) size() int {
+	return cap(rr.ring) / 2
+}
+
+// Read a record from an event ring.
+func (rr *ringReader) readRecord(rec *Record) error {
+	prod := atomic.LoadUint64(rr.prod_pos)
+	cons := atomic.LoadUint64(rr.cons_pos)
+
+	for {
+		if remaining := prod - cons; remaining == 0 {
+			return errEOR
+		} else if remaining < unix.BPF_RINGBUF_HDR_SZ {
+			return fmt.Errorf("read record header: %w", io.ErrUnexpectedEOF)
+		}
+
+		// Read the len field of the header atomically to ensure a happens-before
+		// relationship with the xchg in the kernel. Without this we may see len
+		// without BPF_RINGBUF_BUSY_BIT before the written data is visible.
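+		// (The kernel sets BPF_RINGBUF_BUSY_BIT when a producer reserves a
+		// sample and clears it once the sample is committed or discarded.)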
+ // See https://github.com/torvalds/linux/blob/v6.8/kernel/bpf/ringbuf.c#L484 + start := cons & rr.mask + len := atomic.LoadUint32((*uint32)((unsafe.Pointer)(&rr.ring[start]))) + header := ringbufHeader{Len: len} + + if header.isBusy() { + // the next sample in the ring is not committed yet so we + // exit without storing the reader/consumer position + // and start again from the same position. + return errBusy + } + + cons += unix.BPF_RINGBUF_HDR_SZ + + // Data is always padded to 8 byte alignment. + dataLenAligned := uint64(internal.Align(header.dataLen(), 8)) + if remaining := prod - cons; remaining < dataLenAligned { + return fmt.Errorf("read sample data: %w", io.ErrUnexpectedEOF) + } + + start = cons & rr.mask + cons += dataLenAligned + + if header.isDiscard() { + // when the record header indicates that the data should be + // discarded, we skip it by just updating the consumer position + // to the next record. + atomic.StoreUint64(rr.cons_pos, cons) + continue + } + + if n := header.dataLen(); cap(rec.RawSample) < n { + rec.RawSample = make([]byte, n) + } else { + rec.RawSample = rec.RawSample[:n] + } + + copy(rec.RawSample, rr.ring[start:]) + rec.Remaining = int(prod - cons) + atomic.StoreUint64(rr.cons_pos, cons) + return nil + } +} diff --git a/vendor/github.com/cilium/ebpf/rlimit/rlimit.go b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go new file mode 100644 index 000000000..2a6973744 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go @@ -0,0 +1,123 @@ +// Package rlimit allows raising RLIMIT_MEMLOCK if necessary for the use of BPF. +package rlimit + +import ( + "errors" + "fmt" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + unsupportedMemcgAccounting = &internal.UnsupportedFeatureError{ + MinimumVersion: internal.Version{5, 11, 0}, + Name: "memcg-based accounting for BPF memory", + } + haveMemcgAccounting error + + rlimitMu sync.Mutex +) + +func init() { + // We have to run this feature test at init, since it relies on changing + // RLIMIT_MEMLOCK. Doing so is not safe in a concurrent program. Instead, + // we rely on the initialization order guaranteed by the Go runtime to + // execute the test in a safe environment: + // + // the invocation of init functions happens in a single goroutine, + // sequentially, one package at a time. + // + // This is also the reason why RemoveMemlock is in its own package: + // we only want to run the initializer if RemoveMemlock is called + // from somewhere. + haveMemcgAccounting = detectMemcgAccounting() +} + +func detectMemcgAccounting() error { + // Retrieve the original limit to prevent lowering Max, since + // doing so is a permanent operation when running unprivileged. + var oldLimit unix.Rlimit + if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, nil, &oldLimit); err != nil { + return fmt.Errorf("getting original memlock rlimit: %s", err) + } + + // Drop the current limit to zero, maintaining the old Max value. + // This is always permitted by the kernel for unprivileged users. + // Retrieve a new copy of the old limit tuple to minimize the chances + // of failing the restore operation below. 
+ zeroLimit := unix.Rlimit{Cur: 0, Max: oldLimit.Max} + if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &zeroLimit, &oldLimit); err != nil { + return fmt.Errorf("lowering memlock rlimit: %s", err) + } + + attr := sys.MapCreateAttr{ + MapType: 2, /* Array */ + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + } + + // Creating a map allocates shared (and locked) memory that counts against + // the rlimit on pre-5.11 kernels, but against the memory cgroup budget on + // kernels 5.11 and over. If this call succeeds with the process' memlock + // rlimit set to 0, we can reasonably assume memcg accounting is supported. + fd, mapErr := sys.MapCreate(&attr) + + // Restore old limits regardless of what happened. + if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &oldLimit, nil); err != nil { + return fmt.Errorf("restoring old memlock rlimit: %s", err) + } + + // Map creation successful, memcg accounting supported. + if mapErr == nil { + fd.Close() + return nil + } + + // EPERM shows up when map creation would exceed the memory budget. + if errors.Is(mapErr, unix.EPERM) { + return unsupportedMemcgAccounting + } + + // This shouldn't happen really. + return fmt.Errorf("unexpected error detecting memory cgroup accounting: %s", mapErr) +} + +// RemoveMemlock removes the limit on the amount of memory the current +// process can lock into RAM, if necessary. +// +// This is not required to load eBPF resources on kernel versions 5.11+ +// due to the introduction of cgroup-based memory accounting. On such kernels +// the function is a no-op. +// +// Since the function may change global per-process limits it should be invoked +// at program start up, in main() or init(). +// +// This function exists as a convenience and should only be used when +// permanently raising RLIMIT_MEMLOCK to infinite is appropriate. Consider +// invoking prlimit(2) directly with a more reasonable limit if desired. +// +// Requires CAP_SYS_RESOURCE on kernels < 5.11. +func RemoveMemlock() error { + if haveMemcgAccounting == nil { + return nil + } + + if !errors.Is(haveMemcgAccounting, unsupportedMemcgAccounting) { + return haveMemcgAccounting + } + + rlimitMu.Lock() + defer rlimitMu.Unlock() + + // pid 0 affects the current process. Requires CAP_SYS_RESOURCE. + newLimit := unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY} + if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &newLimit, nil); err != nil { + return fmt.Errorf("failed to set memlock rlimit: %w", err) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go new file mode 100644 index 000000000..4aef7faeb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ -0,0 +1,337 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + "runtime" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + // pre-allocating these here since they may + // get called in hot code paths and cause + // unnecessary memory allocations + sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT) + sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST) + sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP) +) + +// invalidBPFObjNameChar returns true if char may not appear in +// a BPF object name. 
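+//
+// For example, SanitizeName("my-prog", '_') relies on this check and yields
+// "my_prog", since '-' is never an allowed character.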
+func invalidBPFObjNameChar(char rune) bool { + dotAllowed := objNameAllowsDot() == nil + + switch { + case char >= 'A' && char <= 'Z': + return false + case char >= 'a' && char <= 'z': + return false + case char >= '0' && char <= '9': + return false + case dotAllowed && char == '.': + return false + case char == '_': + return false + default: + return true + } +} + +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err + } + bytecode := buf.Bytes() + + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) +} + +var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error { + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + // Invalid file descriptor. + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}) + +var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error { + // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since + // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_RDONLY_PROG, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error { + // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_MMAPABLE, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error { + // This checks BPF_F_INNER_MAP, which appeared in 5.10. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_INNER_MAP, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. 
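+	// The flag only applies to maps that allocate entries on demand, such as
+	// hash maps; array maps are always preallocated.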
+ m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: unix.BPF_F_NO_PREALLOC, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}) + +func wrapMapError(err error) error { + if err == nil { + return nil + } + + if errors.Is(err, unix.ENOENT) { + return sysErrKeyNotExist + } + + if errors.Is(err, unix.EEXIST) { + return sysErrKeyExist + } + + if errors.Is(err, sys.ENOTSUPP) { + return sysErrNotSupported + } + + if errors.Is(err, unix.E2BIG) { + return fmt.Errorf("key too big for map: %w", err) + } + + return err +} + +var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error { + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName("feature_test"), + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + + _ = fd.Close() + return nil +}) + +var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error { + if err := haveObjName(); err != nil { + return err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName(".test"), + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + + _ = fd.Close() + return nil +}) + +var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error { + var maxEntries uint32 = 2 + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: maxEntries, + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + defer fd.Close() + + keys := []uint32{1, 2} + values := []uint32{3, 4} + kp, _ := marshalMapSyscallInput(keys, 8) + vp, _ := marshalMapSyscallInput(values, 8) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + Keys: kp, + Values: vp, + Count: maxEntries, + }) + if err != nil { + return internal.ErrNotSupported + } + return nil +}) + +var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error { + insns := asm.Instructions{ + asm.Mov.Reg(asm.R1, asm.R10), + asm.Add.Imm(asm.R1, -8), + asm.Mov.Imm(asm.R2, 8), + asm.Mov.Imm(asm.R3, 0), + asm.FnProbeReadKernel.Call(), + asm.Return(), + } + + fd, err := progLoad(insns, Kprobe, "GPL") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}) + +var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}) + +var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error { + prefix := internal.PlatformPrefix() + if prefix == "" { + return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Symbol: prefix + "sys_bpf", + Pid: -1, + } + + var err error + args.Group, err = tracefs.RandomGroup("ebpf_probe") + if err != nil { + return err + } + + evt, err := tracefs.NewEvent(args) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + return 
evt.Close()
+})
+
+var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", "5.0", func() error {
+	insns := asm.Instructions{
+		asm.Mov.Imm(asm.R0, 0),
+		asm.Return(),
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+	if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
+		return err
+	}
+	bytecode := buf.Bytes()
+
+	_, err := sys.ProgLoad(&sys.ProgLoadAttr{
+		ProgType:    sys.ProgType(SocketFilter),
+		License:     sys.NewStringPointer("MIT"),
+		Insns:       sys.NewSlicePointer(bytecode),
+		InsnCnt:     uint32(len(bytecode) / asm.InstructionSize),
+		FuncInfoCnt: 1,
+		ProgBtfFd:   math.MaxUint32,
+	})
+
+	if errors.Is(err, unix.EBADF) {
+		return nil
+	}
+
+	if errors.Is(err, unix.E2BIG) {
+		return ErrNotSupported
+	}
+
+	return err
+})
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
new file mode 100644
index 000000000..542c2397c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -0,0 +1,299 @@
+package ebpf
+
+import (
+	"github.com/cilium/ebpf/internal/sys"
+	"github.com/cilium/ebpf/internal/unix"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output types_string.go -type=MapType,ProgramType,PinType
+
+// MapType indicates the type of map structure
+// that will be initialized in the kernel.
+type MapType uint32
+
+// All the various map types that can be created
+const (
+	UnspecifiedMap MapType = iota
+	// Hash is a hash map
+	Hash
+	// Array is an array map
+	Array
+	// ProgramArray - A program array map is a special kind of array map whose map
+	// values contain only file descriptors referring to other eBPF
+	// programs. Thus, both the key_size and value_size must be
+	// exactly four bytes. This map is used in conjunction with the
+	// TailCall helper.
+	ProgramArray
+	// PerfEventArray - A perf event array is used in conjunction with PerfEventRead
+	// and PerfEventOutput calls, to read the raw bpf_perf_data from the registers.
+	PerfEventArray
+	// PerCPUHash - This data structure is useful for people who have high performance
+	// network needs and can reconcile adds at the end of some cycle, so that
+	// hashes can be lock free without the use of XAdd, which can be costly.
+	PerCPUHash
+	// PerCPUArray - This data structure is useful for people who have high performance
+	// network needs and can reconcile adds at the end of some cycle, so that
+	// hashes can be lock free without the use of XAdd, which can be costly.
+	// Each CPU gets a copy of this array, the contents of all of which can be
+	// reconciled later.
+	PerCPUArray
+	// StackTrace - This holds whole user and kernel stack traces; it can be retrieved with
+	// GetStackID
+	StackTrace
+	// CGroupArray - This is a very niche structure used to help SKBInCGroup determine
+	// if an skb is from a socket belonging to a specific cgroup
+	CGroupArray
+	// LRUHash - This allows you to create a small hash structure that will purge the
+	// least recently used items rather than throw an error when you run out of memory
+	LRUHash
+	// LRUCPUHash - This is NOT like PerCPUHash; this structure is shared among the CPUs.
+	// It has more to do with including the CPU id in the LRU calculation, so that if a
+	// particular CPU is using a value over and over again it will be saved, but if a
+	// value is being retrieved a lot yet sparsely across CPUs it is not as important,
+	// basically giving weight to CPU locality over overall usage.
+	LRUCPUHash
+	// LPMTrie - This is an implementation of a Longest-Prefix-Match Trie structure. It is useful
+	// for storing things like IP addresses, which can be bit masked, allowing keys of differing
+	// values to refer to the same reference based on their masks. See Wikipedia for more details.
+	LPMTrie
+	// ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps
+	// itself.
+	ArrayOfMaps
+	// HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
+	// itself.
+	HashOfMaps
+	// DevMap - Specialized map to store references to network devices.
+	DevMap
+	// SockMap - Specialized map to store references to sockets.
+	SockMap
+	// CPUMap - Specialized map to store references to CPUs.
+	CPUMap
+	// XSKMap - Specialized map for XDP programs to store references to open sockets.
+	XSKMap
+	// SockHash - Specialized hash to store references to sockets.
+	SockHash
+	// CGroupStorage - Special map for CGroups.
+	CGroupStorage
+	// ReusePortSockArray - Specialized map to store references to sockets that can be reused.
+	ReusePortSockArray
+	// PerCPUCGroupStorage - Special per CPU map for CGroups.
+	PerCPUCGroupStorage
+	// Queue - FIFO storage for BPF programs.
+	Queue
+	// Stack - LIFO storage for BPF programs.
+	Stack
+	// SkStorage - Specialized map for local storage at SK for BPF programs.
+	SkStorage
+	// DevMapHash - Hash-based indexing scheme for references to network devices.
+	DevMapHash
+	// StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF
+	// program.
+	StructOpsMap
+	// RingBuf - Similar to PerfEventArray, but shared across all CPUs.
+	RingBuf
+	// InodeStorage - Specialized local storage map for inodes.
+	InodeStorage
+	// TaskStorage - Specialized local storage map for task_struct.
+	TaskStorage
+)
+
+// hasPerCPUValue returns true if the Map stores a value per CPU.
+func (mt MapType) hasPerCPUValue() bool {
+	return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage
+}
+
+// canStoreMapOrProgram returns true if the Map stores references to another Map
+// or Program.
+func (mt MapType) canStoreMapOrProgram() bool {
+	return mt.canStoreMap() || mt.canStoreProgram()
+}
+
+// canStoreMap returns true if the map type accepts a map fd
+// for update and returns a map id for lookup.
+func (mt MapType) canStoreMap() bool {
+	return mt == ArrayOfMaps || mt == HashOfMaps
+}
+
+// canStoreProgram returns true if the map type accepts a program fd
+// for update and returns a program id for lookup.
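+//
+// For example, updating a ProgramArray entry takes a program fd, while a
+// lookup on the same entry yields a ProgramID.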
+func (mt MapType) canStoreProgram() bool { + return mt == ProgramArray +} + +// ProgramType of the eBPF program +type ProgramType uint32 + +// eBPF program types +const ( + UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) + SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) + Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE) + SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS) + SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT) + TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT) + XDP = ProgramType(sys.BPF_PROG_TYPE_XDP) + PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT) + CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB) + CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK) + LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN) + LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT) + LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT) + SockOps = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS) + SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB) + CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE) + SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG) + RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT) + CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR) + LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL) + LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2) + SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT) + FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR) + CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL) + RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) + CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT) + Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING) + StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS) + Extension = ProgramType(sys.BPF_PROG_TYPE_EXT) + LSM = ProgramType(sys.BPF_PROG_TYPE_LSM) + SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP) + Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL) + Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER) +) + +// AttachType of the eBPF program, needed to differentiate allowed context accesses in +// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. +// Will cause invalid argument (EINVAL) at program load time if set incorrectly. +type AttachType uint32 + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type AttachType -trimprefix Attach + +// AttachNone is an alias for AttachCGroupInetIngress for readability reasons. 
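+//
+// Both names share the value 0; which semantic applies depends on the program
+// type this attach type is combined with.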
+const AttachNone AttachType = 0 + +const ( + AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) + AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) + AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE) + AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS) + AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER) + AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT) + AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE) + AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT) + AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND) + AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND) + AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT) + AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT) + AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND) + AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND) + AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG) + AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG) + AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2) + AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR) + AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL) + AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG) + AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG) + AttachCGroupGetsockopt = AttachType(sys.BPF_CGROUP_GETSOCKOPT) + AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT) + AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP) + AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY) + AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT) + AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN) + AttachLSMMac = AttachType(sys.BPF_LSM_MAC) + AttachTraceIter = AttachType(sys.BPF_TRACE_ITER) + AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME) + AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME) + AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME) + AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME) + AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP) + AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE) + AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP) + AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP) + AttachXDP = AttachType(sys.BPF_XDP) + AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT) + AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT) + AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) + AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) + AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) + AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) + AttachNetfilter = AttachType(sys.BPF_NETFILTER) + AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS) + AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS) + AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI) + AttachCgroupUnixConnect = AttachType(sys.BPF_CGROUP_UNIX_CONNECT) + AttachCgroupUnixSendmsg = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG) + AttachCgroupUnixRecvmsg = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG) + AttachCgroupUnixGetpeername = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME) + AttachCgroupUnixGetsockname = AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME) + AttachNetkitPrimary = AttachType(sys.BPF_NETKIT_PRIMARY) + AttachNetkitPeer = 
AttachType(sys.BPF_NETKIT_PEER) +) + +// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command +type AttachFlags uint32 + +// PinType determines whether a map is pinned into a BPFFS. +type PinType uint32 + +// Valid pin types. +// +// Mirrors enum libbpf_pin_type. +const ( + PinNone PinType = iota + // Pin an object by using its name as the filename. + PinByName +) + +// LoadPinOptions control how a pinned object is loaded. +type LoadPinOptions struct { + // Request a read-only or write-only object. The default is a read-write + // object. Only one of the flags may be set. + ReadOnly bool + WriteOnly bool + + // Raw flags for the syscall. Other fields of this struct take precedence. + Flags uint32 +} + +// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter. +func (lpo *LoadPinOptions) Marshal() uint32 { + if lpo == nil { + return 0 + } + + flags := lpo.Flags + if lpo.ReadOnly { + flags |= unix.BPF_F_RDONLY + } + if lpo.WriteOnly { + flags |= unix.BPF_F_WRONLY + } + return flags +} + +// BatchOptions batch map operations options +// +// Mirrors libbpf struct bpf_map_batch_opts +// Currently BPF_F_FLAG is the only supported +// flag (for ElemFlags). +type BatchOptions struct { + ElemFlags uint64 + Flags uint64 +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +// These constants can be used for the ProgramOptions.LogLevel field. +type LogLevel = sys.LogLevel + +const ( + // Print verifier state at branch points. + LogLevelBranch = sys.BPF_LOG_LEVEL1 + + // Print verifier state for every instruction. + // Available since Linux v5.2. + LogLevelInstruction = sys.BPF_LOG_LEVEL2 + + // Print verifier errors and stats at the end of the verification process. + // Available since Linux v5.2. + LogLevelStats = sys.BPF_LOG_STATS +) diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go new file mode 100644 index 000000000..ee60b5be5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -0,0 +1,119 @@ +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[UnspecifiedMap-0] + _ = x[Hash-1] + _ = x[Array-2] + _ = x[ProgramArray-3] + _ = x[PerfEventArray-4] + _ = x[PerCPUHash-5] + _ = x[PerCPUArray-6] + _ = x[StackTrace-7] + _ = x[CGroupArray-8] + _ = x[LRUHash-9] + _ = x[LRUCPUHash-10] + _ = x[LPMTrie-11] + _ = x[ArrayOfMaps-12] + _ = x[HashOfMaps-13] + _ = x[DevMap-14] + _ = x[SockMap-15] + _ = x[CPUMap-16] + _ = x[XSKMap-17] + _ = x[SockHash-18] + _ = x[CGroupStorage-19] + _ = x[ReusePortSockArray-20] + _ = x[PerCPUCGroupStorage-21] + _ = x[Queue-22] + _ = x[Stack-23] + _ = x[SkStorage-24] + _ = x[DevMapHash-25] + _ = x[StructOpsMap-26] + _ = x[RingBuf-27] + _ = x[InodeStorage-28] + _ = x[TaskStorage-29] +} + +const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage" + +var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290} + +func (i MapType) String() string { + if i >= MapType(len(_MapType_index)-1) { + return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnspecifiedProgram-0] + _ = x[SocketFilter-1] + _ = x[Kprobe-2] + _ = x[SchedCLS-3] + _ = x[SchedACT-4] + _ = x[TracePoint-5] + _ = x[XDP-6] + _ = x[PerfEvent-7] + _ = x[CGroupSKB-8] + _ = x[CGroupSock-9] + _ = x[LWTIn-10] + _ = x[LWTOut-11] + _ = x[LWTXmit-12] + _ = x[SockOps-13] + _ = x[SkSKB-14] + _ = x[CGroupDevice-15] + _ = x[SkMsg-16] + _ = x[RawTracepoint-17] + _ = x[CGroupSockAddr-18] + _ = x[LWTSeg6Local-19] + _ = x[LircMode2-20] + _ = x[SkReuseport-21] + _ = x[FlowDissector-22] + _ = x[CGroupSysctl-23] + _ = x[RawTracepointWritable-24] + _ = x[CGroupSockopt-25] + _ = x[Tracing-26] + _ = x[StructOps-27] + _ = x[Extension-28] + _ = x[LSM-29] + _ = x[SkLookup-30] + _ = x[Syscall-31] + _ = x[Netfilter-32] +} + +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" + +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} + +func (i ProgramType) String() string { + if i >= ProgramType(len(_ProgramType_index)-1) { + return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[PinNone-0] + _ = x[PinByName-1] +} + +const _PinType_name = "PinNonePinByName" + +var _PinType_index = [...]uint8{0, 7, 16} + +func (i PinType) String() string { + if i >= PinType(len(_PinType_index)-1) { + return "PinType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PinType_name[_PinType_index[i]:_PinType_index[i+1]] +} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
new file mode 100644
index 000000000..0d82a2dd3
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -0,0 +1,679 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+// Note: this is the actual implementation of the CNI specification, which
+// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file.
+// It is typically bundled into runtime providers (e.g. containerd or cri-o would use this
+// before calling runc or hcsshim). It is also bundled into CNI providers, for example,
+// to add an IP to a container, to parse the configuration of the CNI, and so on.
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containernetworking/cni/pkg/invoke"
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/types/create"
+	"github.com/containernetworking/cni/pkg/utils"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+var (
+	CacheDir = "/var/lib/cni"
+)
+
+const (
+	CNICacheV1 = "cniCacheV1"
+)
+
+// A RuntimeConf holds the arguments to one invocation of a CNI plugin,
+// excepting the network configuration, with the nested exception that
+// the `runtimeConfig` from the network configuration is included
+// here.
+type RuntimeConf struct {
+	ContainerID string
+	NetNS       string
+	IfName      string
+	Args        [][2]string
+	// A dictionary of capability-specific data passed by the runtime
+	// to plugins as top-level keys in the 'runtimeConfig' dictionary
+	// of the plugin's stdin data. libcni will ensure that only keys
+	// in this map which match the capabilities of the plugin are passed
+	// to the plugin
+	CapabilityArgs map[string]interface{}
+
+	// DEPRECATED. Will be removed in a future release.
+	CacheDir string
+}
+
+type NetworkConfig struct {
+	Network *types.NetConf
+	Bytes   []byte
+}
+
+type NetworkConfigList struct {
+	Name         string
+	CNIVersion   string
+	DisableCheck bool
+	Plugins      []*NetworkConfig
+	Bytes        []byte
+}
+
+type CNI interface {
+	AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+	CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+	DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+	GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+	GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+	AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+	CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+	DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+	GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+	GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error)
+
+	ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
+	ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
+}
+
+type CNIConfig struct {
+	Path     []string
+	exec     invoke.Exec
+	cacheDir string
+}
+
+// CNIConfig implements the CNI interface
+var _ CNI = &CNIConfig{}
+
+// NewCNIConfig returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+	return NewCNIConfigWithCacheDir(path, "", exec)
+}
+
+// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+// The given cache directory will be used for temporary data storage when needed.
+func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig {
+	return &CNIConfig{
+		Path:     path,
+		cacheDir: cacheDir,
+		exec:     exec,
+	}
+}
+
+func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
+	var err error
+
+	inject := map[string]interface{}{
+		"name":       name,
+		"cniVersion": cniVersion,
+	}
+	// Add previous plugin result
+	if prevResult != nil {
+		inject["prevResult"] = prevResult
+	}
+
+	// Ensure every config uses the same name and version
+	orig, err = InjectConf(orig, inject)
+	if err != nil {
+		return nil, err
+	}
+
+	return injectRuntimeConfig(orig, rt)
+}
+
+// This function takes a libcni RuntimeConf structure and injects values into
+// a "runtimeConfig" dictionary in the CNI network configuration JSON that
+// will be passed to the plugin on stdin.
+//
+// Only "capabilities arguments" passed by the runtime are currently injected.
+// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. +func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + + return ioutil.WriteFile(fname, newBytes, 0600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // 
Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, &newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. 
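+//
+// A hedged illustration, not part of the upstream file: assuming `c`, `netConf`,
+// and `rt` are the same values previously passed to a successful AddNetwork(),
+// a runtime could recover the prior result like this:
+//
+//	prev, err := c.GetNetworkCachedResult(netConf, rt)
+//	if err == nil && prev != nil {
+//		// prev is the types.Result cached by the earlier ADD
+//	}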
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + 
return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, 
net.Network.CNIVersion, net, cachedResult, rt); err != nil {
+		return err
+	}
+	_ = c.cacheDel(net.Network.Name, rt)
+	return nil
+}
+
+// ValidateNetworkList checks that a configuration is reasonably valid.
+// - all the specified plugins exist on disk
+// - every plugin supports the desired version.
+//
+// Returns a list of all capabilities supported by the configuration, or an error.
+func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
+	version := list.CNIVersion
+
+	// holding map for seen caps (in case of duplicates)
+	caps := map[string]interface{}{}
+
+	errs := []error{}
+	for _, net := range list.Plugins {
+		if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil {
+			errs = append(errs, err)
+		}
+		for c, enabled := range net.Network.Capabilities {
+			if !enabled {
+				continue
+			}
+			caps[c] = struct{}{}
+		}
+	}
+
+	if len(errs) > 0 {
+		return nil, fmt.Errorf("%v", errs)
+	}
+
+	// make caps list
+	cc := make([]string, 0, len(caps))
+	for c := range caps {
+		cc = append(cc, c)
+	}
+
+	return cc, nil
+}
+
+// ValidateNetwork checks that a configuration is reasonably valid.
+// It uses the same logic as ValidateNetworkList.
+// Returns a list of capabilities.
+func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
+	caps := []string{}
+	for c, ok := range net.Network.Capabilities {
+		if ok {
+			caps = append(caps, c)
+		}
+	}
+	if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
+		return nil, err
+	}
+	return caps, nil
+}
+
+// validatePlugin checks that an individual plugin's configuration is sane
+func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(pluginName, c.Path)
+	if err != nil {
+		return err
+	}
+	if expectedVersion == "" {
+		expectedVersion = "0.1.0"
+	}
+
+	vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+	if err != nil {
+		return err
+	}
+	for _, vers := range vi.SupportedVersions() {
+		if vers == expectedVersion {
+			return nil
+		}
+	}
+	return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
+}
+
+// GetVersionInfo reports which versions of the CNI spec are supported by
+// the given plugin.
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+}
+
+// =====
+func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
+	return &invoke.Args{
+		Command:     action,
+		ContainerID: rt.ContainerID,
+		NetNS:       rt.NetNS,
+		PluginArgs:  rt.Args,
+		IfName:      rt.IfName,
+		Path:        strings.Join(c.Path, string(os.PathListSeparator)),
+	}
+}
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
new file mode 100644
index 000000000..3cd6a59d1
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -0,0 +1,270 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "github.com/containernetworking/cni/pkg/types" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := 
ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. +func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. 
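+	//
+	// Illustrative example only (values invented for this comment): a plain
+	// config such as
+	//	{"cniVersion": "0.4.0", "name": "mynet", "type": "bridge"}
+	// comes back wrapped in the equivalent list form
+	//	{"name": "mynet", "cniVersion": "0.4.0", "plugins": [{ ...the original config... }]}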
+ + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 000000000..3cdb4bc8d --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (*inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. + env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. 
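+//
+// A small sketch of the behavior, not in the upstream sources: given
+//	[]string{"CNI_COMMAND=ADD", "JUNK", "CNI_COMMAND=DEL"}
+// the output keeps "JUNK" unchanged and a single "CNI_COMMAND=DEL" entry,
+// because later values win; the keyed entries come from a map, so their
+// order in the output is not guaranteed.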
+func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 000000000..8defe4dd3 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + return &DelegateArgs{ + Command: action, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 000000000..3ad07aa8f --- 
/dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
@@ -0,0 +1,187 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/types/create"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+// Exec is an interface that encapsulates all operations that deal with finding
+// and executing a CNI plugin. Tests may provide a fake implementation
+// to avoid writing fake plugins to temporary directories during the test.
+type Exec interface {
+	ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+	FindInPath(plugin string, paths []string) (string, error)
+	Decode(jsonBytes []byte) (version.PluginInfo, error)
+}
+
+// A plugin must return its result in the same version as specified in the
+// netconf; but for backwards-compatibility reasons, if the result version is
+// empty, use the config version (rather than the technically correct 0.1.0).
+// https://github.com/containernetworking/cni/issues/895
+func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
+	versionDecoder := &version.ConfigDecoder{}
+	confVersion, err := versionDecoder.Decode(netconf)
+	if err != nil {
+		return "", nil, err
+	}
+
+	var rawResult map[string]interface{}
+	if err := json.Unmarshal(result, &rawResult); err != nil {
+		return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
+	}
+
+	// plugin output of "null" is successfully unmarshalled, but results in a nil
+	// map which causes a panic when the confVersion is assigned below.
+	if rawResult == nil {
+		rawResult = make(map[string]interface{})
+	}
+
+	// Manually decode Result version; we need to know whether its cniVersion
+	// is empty, while built-in decoders (correctly) substitute 0.1.0 for an
+	// empty version per the CNI spec.
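+	//
+	// For instance (illustrative only): if the netconf carries
+	// "cniVersion": "0.4.0" and the plugin prints {"ips": [...]} with no
+	// cniVersion of its own, the raw result is rewritten below so that it
+	// decodes as a 0.4.0 Result.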
+ if resultVerRaw, ok := rawResult["cniVersion"]; ok { + resultVer, ok := resultVerRaw.(string) + if ok && resultVer != "" { + return resultVer, result, nil + } + } + + // If the cniVersion is not present or empty, assume the result is + // the same CNI spec version as the config + rawResult["cniVersion"] = confVersion + newBytes, err := json.Marshal(rawResult) + if err != nil { + return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err) + } + + return confVersion, newBytes, nil +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes) + if err != nil { + return nil, err + } + + return create.Create(resultVersion, fixedBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. 
For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 000000000..e62029eb7 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,48 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if strings.ContainsRune(plugin, os.PathSeparator) { + return "", fmt.Errorf("invalid plugin name: %s", plugin) + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 000000000..9bcfb4553 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 000000000..7665125b1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 000000000..5ab5cc885 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,88 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package invoke
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+type RawExec struct {
+	Stderr io.Writer
+}
+
+func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+	stdout := &bytes.Buffer{}
+	stderr := &bytes.Buffer{}
+	c := exec.CommandContext(ctx, pluginPath)
+	c.Env = environ
+	c.Stdin = bytes.NewBuffer(stdinData)
+	c.Stdout = stdout
+	c.Stderr = stderr
+
+	// Retry the command on "text file busy" errors
+	for i := 0; i <= 5; i++ {
+		err := c.Run()
+
+		// Command succeeded
+		if err == nil {
+			break
+		}
+
+		// If the plugin binary is currently being written, wait a
+		// second and try it again
+		if strings.Contains(err.Error(), "text file busy") {
+			time.Sleep(time.Second)
+			continue
+		}
+
+		// All errors other than "text file busy"
+		return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes())
+	}
+
+	// Copy stderr to caller's buffer in case plugin printed to both
+	// stdout and stderr for some reason. Ignore failures as stderr is
+	// only informational.
+	if e.Stderr != nil && stderr.Len() > 0 {
+		_, _ = stderr.WriteTo(e.Stderr)
+	}
+	return stdout.Bytes(), nil
+}
+
+func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error {
+	emsg := types.Error{}
+	if len(stdout) == 0 {
+		if len(stderr) == 0 {
+			emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err)
+		} else {
+			emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr))
+		}
+	} else if perr := json.Unmarshal(stdout, &emsg); perr != nil {
+		emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr)
+	}
+	return &emsg
+}
+
+func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) {
+	return FindInPath(plugin, paths)
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
new file mode 100644
index 000000000..99b151ff2
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go
@@ -0,0 +1,189 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.2.0" + +var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010) + convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +// NewResult creates a new Result object from JSON data. The JSON data +// must be compatible with the CNI versions implemented by this type. +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + if result.CNIVersion == "" { + result.CNIVersion = "0.1.0" + } + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +// GetResult converts the given Result object to the ImplementedSpecVersion +// and returns the concrete type or an error +func GetResult(r types.Result) (*Result, error) { + result020, err := convert.Convert(r, ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func convertFrom010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.2.0" { + panic("only converts to version 0.2.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: ImplementedSpecVersion, + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +func convertTo010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.1.0" { + panic("only converts to version 0.1.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: "0.1.0", + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + var routes 
[]types.Route + for _, fromRoute := range i.Routes { + routes = append(routes, *fromRoute.Copy()) + } + return &IPConfig{ + IP: i.IP, + Gateway: i.Gateway, + Routes: routes, + } +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route `json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go new file mode 100644 index 000000000..3633b0eaa --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go @@ -0,0 +1,306 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types040 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types020 "github.com/containernetworking/cni/pkg/types/020" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.4.0" + +var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertInternal) + convert.RegisterConverter("0.3.1", supportedVersions, convertInternal) + + // Down-converters + convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal) + convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := 
resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig { + return &IPConfig{ + Version: ipVersion, + Address: from.IP, + Gateway: from.Gateway, + } +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types020.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + if fromResult.IP4 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4")) + for _, fromRoute := range fromResult.IP4.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + if fromResult.IP6 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6")) + for _, fromRoute := range fromResult.IP6.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + return toResult, nil +} + +func convertInternal(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy()) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, fromIPC.Copy()) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types020.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + } + + for _, fromIP := range fromResult.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if fromIP.Version == "4" && toResult.IP4 == nil { + toResult.IP4 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } else if fromIP.Version == "6" && toResult.IP6 == nil { + toResult.IP6 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } + if toResult.IP4 != nil && toResult.IP6 != nil { + break + } + } + + for _, fromRoute := range fromResult.Routes { + is4 := fromRoute.Dst.IP.To4() != nil + if is4 && toResult.IP4 != nil { + toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } else if !is4 && toResult.IP6 != nil { + toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } + } + + // 0.2.0 and earlier require at least one IP address in the Result + if toResult.IP4 == nil && toResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return toResult, nil +} + +func (r *Result) Version() string { + return 
r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Version: i.Version, + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go new file mode 100644 index 000000000..0e1e8b857 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -0,0 +1,307 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
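To make the converter registrations in the 0.4.0 types above concrete, here is a hedged sketch (assuming the standard vendored import paths) that parses a 0.4.0 result and down-converts it to 0.2.0; as convertTo02x shows, only the first address per IP family survives the conversion:

```go
// Hypothetical sketch: down-converting a 0.4.0 result to 0.2.0 through
// the registered converters. The addresses are made-up example values.
package main

import (
	"fmt"
	"log"

	types020 "github.com/containernetworking/cni/pkg/types/020"
	types040 "github.com/containernetworking/cni/pkg/types/040"
)

func main() {
	raw := []byte(`{
		"cniVersion": "0.4.0",
		"ips": [
			{"version": "4", "address": "10.1.2.3/24", "gateway": "10.1.2.1"},
			{"version": "4", "address": "10.1.2.4/24"}
		]
	}`)

	res, err := types040.NewResult(raw)
	if err != nil {
		log.Fatal(err)
	}

	// GetAsVersion routes through convert.Convert to convertTo02x;
	// the second IPv4 address is dropped in the 0.2.0 representation.
	old, err := res.GetAsVersion("0.2.0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(old.(*types020.Result).IP4.IP.String()) // 10.1.2.3/24
}
```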
+ +package types100 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "1.0.0" + +var supportedVersions = []string{ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + + // Down-converters + convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + result040, err := convert.Convert(from, "0.4.0") + if err != nil { + return nil, err + } + result100, err := convertFrom04x(result040, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return result100, nil +} + +func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig { + to := &IPConfig{ + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceFrom040(from *types040.Interface) *Interface { + return &Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertFrom04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types040.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC)) + } + for _, 
fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertIPConfigTo040(from *IPConfig) *types040.IPConfig { + version := "6" + if from.Address.IP.To4() != nil { + version = "4" + } + to := &types040.IPConfig{ + Version: version, + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceTo040(from *Interface) *types040.Interface { + return &types040.Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertTo04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types040.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + // First convert to 0.4.0 + result040, err := convertTo04x(from, "0.4.0") + if err != nil { + return nil, err + } + result02x, err := convert.Convert(result040, toVersion) + if err != nil { + return nil, err + } + return result02x, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 000000000..7516f03ef --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,122 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Returns the string +func (s *UnmarshallableString) UnmarshalText(data []byte) error { + *s = UnmarshallableString(data) + return nil +} + +// CommonArgs contains the IgnoreUnknown argument +// and must be embedded by all Arg structs +type CommonArgs struct { + IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` +} + +// GetKeyField is a helper function to receive Values +// Values that represent a pointer to a struct +func GetKeyField(keyString string, v reflect.Value) reflect.Value { + return v.Elem().FieldByName(keyString) +} + +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." +type UnmarshalableArgsError struct { + error +} + +// LoadArgs parses args from a string in the form "K=V;K2=V2;..." +func LoadArgs(args string, container interface{}) error { + if args == "" { + return nil + } + + containerValue := reflect.ValueOf(container) + + pairs := strings.Split(args, ";") + unknownArgs := []string{} + for _, pair := range pairs { + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return fmt.Errorf("ARGS: invalid pair %q", pair) + } + keyString := kv[0] + valueString := kv[1] + keyField := GetKeyField(keyString, containerValue) + if !keyField.IsValid() { + unknownArgs = append(unknownArgs, pair) + continue + } + + var keyFieldInterface interface{} + switch { + case keyField.Kind() == reflect.Ptr: + keyField.Set(reflect.New(keyField.Type().Elem())) + keyFieldInterface = keyField.Interface() + case keyField.CanAddr() && keyField.Addr().CanInterface(): + keyFieldInterface = keyField.Addr().Interface() + default: + return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)} + } + u, ok := keyFieldInterface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldInterface))} + } + err := u.UnmarshalText([]byte(valueString)) + if err != nil { + return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err) + } + } + + isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() + if len(unknownArgs) > 0 && !isIgnoreUnknown { + return fmt.Errorf("ARGS: unknown args %q", unknownArgs) + } + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go new file mode 100644 index 000000000..ed28b33e8 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -0,0 +1,56 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
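The LoadArgs helper above parses the semicolon-separated "K=V" string that runtimes pass via CNI_ARGS. A short sketch, assuming a hypothetical argument struct (podArgs and the K8S_POD_NAME key are illustrative; any exported field whose type implements encoding.TextUnmarshaler can be a target):

```go
// Hypothetical sketch: parsing a CNI_ARGS-style string with LoadArgs.
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types"
)

type podArgs struct {
	types.CommonArgs // provides IgnoreUnknown
	K8S_POD_NAME     types.UnmarshallableString
}

func main() {
	parsed := podArgs{}
	// FOO is unknown to podArgs but tolerated because IgnoreUnknown=true.
	args := "IgnoreUnknown=true;K8S_POD_NAME=frontend-0;FOO=bar"
	if err := types.LoadArgs(args, &parsed); err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.K8S_POD_NAME) // frontend-0
}
```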
+ +package create + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +// DecodeVersion returns the CNI version from CNI configuration or result JSON, +// or an error if the operation could not be performed. +func DecodeVersion(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %w", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} + +// Create creates a CNI Result using the given JSON with the expected +// version, or an error if the creation could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + return convert.Create(version, bytes) +} + +// CreateFromBytes creates a CNI Result from the given JSON, automatically +// detecting the CNI spec version of the result. An error is returned if the +// operation could not be performed. +func CreateFromBytes(bytes []byte) (types.Result, error) { + version, err := DecodeVersion(bytes) + if err != nil { + return nil, err + } + return convert.Create(version, bytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go new file mode 100644 index 000000000..bdbe4b0a5 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go @@ -0,0 +1,92 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +// ConvertFn should convert from the given arbitrary Result type into a +// Result implementing CNI specification version passed in toVersion. +// The function is guaranteed to be passed a Result type matching the +// fromVersion it was registered with, and is guaranteed to be +// passed a toVersion matching one of the toVersions it was registered with. 
+type ConvertFn func(from types.Result, toVersion string) (types.Result, error) + +type converter struct { + // fromVersion is the CNI Result spec version that convertFn accepts + fromVersion string + // toVersions is a list of versions that convertFn can convert to + toVersions []string + convertFn ConvertFn +} + +var converters []*converter + +func findConverter(fromVersion, toVersion string) *converter { + for _, c := range converters { + if c.fromVersion == fromVersion { + for _, v := range c.toVersions { + if v == toVersion { + return c + } + } + } + } + return nil +} + +// Convert converts a CNI Result to the requested CNI specification version, +// or returns an error if the conversion could not be performed or failed +func Convert(from types.Result, toVersion string) (types.Result, error) { + if toVersion == "" { + toVersion = "0.1.0" + } + + fromVersion := from.Version() + + // Shortcut for same version + if fromVersion == toVersion { + return from, nil + } + + // Otherwise find the right converter + c := findConverter(fromVersion, toVersion) + if c == nil { + return nil, fmt.Errorf("no converter for CNI result version %s to %s", + fromVersion, toVersion) + } + return c.convertFn(from, toVersion) +} + +// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) { + // Make sure there is no converter already registered for these + // from and to versions + for _, v := range toVersions { + if findConverter(fromVersion, v) != nil { + panic(fmt.Sprintf("converter already registered for %s to %s", + fromVersion, v)) + } + } + converters = append(converters, &converter{ + fromVersion: fromVersion, + toVersions: toVersions, + convertFn: convertFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go new file mode 100644 index 000000000..963630912 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go @@ -0,0 +1,66 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
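The create package earlier, the converter registry above, and the creator registry that follows are wired together through package init() functions in the versioned types packages, so a consumer must import (at least transitively) the Result implementations it wants to decode. A hedged sketch under that assumption:

```go
// Hypothetical sketch: sniffing a result blob's version and decoding it
// via the create package. The JSON payload is a made-up example.
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/types/create"
	// Blank import: registers the 0.2.0/0.4.0/1.0.0 creators and
	// converters via init() (types100 pulls in types040 and types020).
	_ "github.com/containernetworking/cni/pkg/types/100"
)

func main() {
	blob := []byte(`{"cniVersion":"0.3.1","ips":[{"version":"4","address":"10.0.0.5/24"}]}`)

	version, err := create.DecodeVersion(blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("declared version:", version) // 0.3.1

	// CreateFromBytes performs both steps at once: sniff the version,
	// then unmarshal into the Result type registered for it.
	res, err := create.CreateFromBytes(blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("parsed as:", res.Version()) // 0.3.1
}
```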
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +type ResultFactoryFunc func([]byte) (types.Result, error) + +type creator struct { + // CNI Result spec versions that createFn can create a Result for + versions []string + createFn ResultFactoryFunc +} + +var creators []*creator + +func findCreator(version string) *creator { + for _, c := range creators { + for _, v := range c.versions { + if v == version { + return c + } + } + } + return nil +} + +// Create creates a CNI Result using the given JSON, or an error if the creation +// could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + if c := findCreator(version); c != nil { + return c.createFn(bytes) + } + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterCreator(versions []string, createFn ResultFactoryFunc) { + // Make sure there is no creator already registered for these versions + for _, v := range versions { + if findCreator(v) != nil { + panic(fmt.Sprintf("creator already registered for %s", v)) + } + } + creators = append(creators, &creator{ + versions: versions, + createFn: createFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 000000000..fba17dfc0 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,234 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. +type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. 
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +func (d *DNS) Copy() *DNS { + if d == nil { + return nil + } + + to := &DNS{Domain: d.Domain} + for _, ns := range d.Nameservers { + to.Nameservers = append(to.Nameservers, ns) + } + for _, s := range d.Search { + to.Search = append(to.Search, s) + } + for _, o := range d.Options { + to.Options = append(to.Options, o) + } + return to +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +func (r *Route) Copy() *Route { + if r == nil { + return nil + } + + return &Route{ + Dst: r.Dst, + GW: r.GW, + } +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go 
b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go new file mode 100644 index 000000000..b8ec38874 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -0,0 +1,84 @@ +// Copyright 2019 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "fmt" + "regexp" + "unicode" + + "github.com/containernetworking/cni/pkg/types" +) + +const ( + // cniValidNameChars is the regexp used to validate valid characters in + // containerID and networkName + cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + + // maxInterfaceNameLength is the length max of a valid interface name + maxInterfaceNameLength = 15 +) + +var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) + +// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters +func ValidateContainerID(containerID string) *types.Error { + + if containerID == "" { + return types.NewError(types.ErrUnknownContainer, "missing containerID", "") + } + if !cniReg.MatchString(containerID) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) + } + return nil +} + +// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters +func ValidateNetworkName(networkName string) *types.Error { + + if networkName == "" { + return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") + } + if !cniReg.MatchString(networkName) { + return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) + } + return nil +} + +// ValidateInterfaceName will validate the interface name based on the three rules below +// 1. The name must not be empty +// 2. The name must be less than 16 characters +// 3. The name must not be "." or ".." +// 3. The name must not contain / or : or any whitespace characters +// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 +func ValidateInterfaceName(ifName string) *types.Error { + if len(ifName) == 0 { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "") + } + if len(ifName) > maxInterfaceNameLength { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1)) + } + if ifName == "." || ifName == ".." { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . 
or ..", "") + } + for _, r := range bytes.Runes([]byte(ifName)) { + if r == '/' || r == ':' || unicode.IsSpace(r) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "") + } + } + + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 000000000..808c33b83 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,26 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "github.com/containernetworking/cni/pkg/types/create" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + return create.DecodeVersion(jsonBytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 000000000..17b22b6b0 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %w", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { // special case: no version declared == v0.1.0 + return 0, 1, 0, nil + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git 
a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 000000000..25c3810b2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 000000000..1326f8038 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,89 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/types/create" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return types100.ImplementedSpecVersion +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. 
+var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") + +// VersionsFrom returns a list of versions starting from min, inclusive +func VersionsStartingFrom(min string) PluginInfo { + out := []string{} + // cheat, just assume ordered + ok := false + for _, v := range All.SupportedVersions() { + if !ok && v == min { + ok = true + } + if ok { + out = append(out, v) + } + } + return PluginSupports(out...) +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. +func NewResult(version string, resultBytes []byte) (types.Result, error) { + return create.Create(version, resultBytes) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + // Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the + // result version must match the config version, if the Result's version + // is empty, inject the config version. + if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" { + conf.RawPrevResult["CNIVersion"] = conf.CNIVersion + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %w", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %w", err) + } + + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/LICENSE b/vendor/github.com/containernetworking/plugins/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go new file mode 100644 index 000000000..b4db50b9a --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/addr_linux.go @@ -0,0 +1,68 @@ +// Copyright 2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
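+
+// Editor's note: an illustrative sketch (not part of the upstream file) of how
+// a caller might use SettleAddresses below, assuming a hypothetical interface
+// named "eth0" that was just assigned an IPv6 address inside the container
+// namespace:
+//
+//	// Wait up to 10 seconds for DAD to finish on all addresses of eth0.
+//	if err := ip.SettleAddresses("eth0", 10); err != nil {
+//		return fmt.Errorf("IPv6 addresses did not settle: %v", err)
+//	}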
+ +package ip + +import ( + "fmt" + "syscall" + "time" + + "github.com/vishvananda/netlink" +) + +const SETTLE_INTERVAL = 50 * time.Millisecond + +// SettleAddresses waits for all addresses on a link to leave tentative state. +// This is particularly useful for ipv6, where all addresses need to do DAD. +// There is no easy way to wait for this as an event, so just loop until the +// addresses are no longer tentative. +// If any addresses are still tentative after timeout seconds, then error. +func SettleAddresses(ifName string, timeout int) error { + link, err := netlink.LinkByName(ifName) + if err != nil { + return fmt.Errorf("failed to retrieve link: %v", err) + } + + deadline := time.Now().Add(time.Duration(timeout) * time.Second) + for { + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("could not list addresses: %v", err) + } + + if len(addrs) == 0 { + return nil + } + + ok := true + for _, addr := range addrs { + if addr.Flags&(syscall.IFA_F_TENTATIVE|syscall.IFA_F_DADFAILED) > 0 { + ok = false + break // Break out of the `range addrs`, not the `for` + } + } + + if ok { + return nil + } + if time.Now().After(deadline) { + return fmt.Errorf("link %s still has tentative addresses after %d seconds", + ifName, + timeout) + } + + time.Sleep(SETTLE_INTERVAL) + } +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go new file mode 100644 index 000000000..8b380fc74 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/cidr.go @@ -0,0 +1,105 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "math/big" + "net" +) + +// NextIP returns IP incremented by 1, if IP is invalid, return nil +func NextIP(ip net.IP) net.IP { + normalizedIP := normalizeIP(ip) + if normalizedIP == nil { + return nil + } + + i := ipToInt(normalizedIP) + return intToIP(i.Add(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len) +} + +// PrevIP returns IP decremented by 1, if IP is invalid, return nil +func PrevIP(ip net.IP) net.IP { + normalizedIP := normalizeIP(ip) + if normalizedIP == nil { + return nil + } + + i := ipToInt(normalizedIP) + return intToIP(i.Sub(i, big.NewInt(1)), len(normalizedIP) == net.IPv6len) +} + +// Cmp compares two IPs, returning the usual ordering: +// a < b : -1 +// a == b : 0 +// a > b : 1 +// incomparable : -2 +func Cmp(a, b net.IP) int { + normalizedA := normalizeIP(a) + normalizedB := normalizeIP(b) + + if len(normalizedA) == len(normalizedB) && len(normalizedA) != 0 { + return ipToInt(normalizedA).Cmp(ipToInt(normalizedB)) + } + + return -2 +} + +func ipToInt(ip net.IP) *big.Int { + return big.NewInt(0).SetBytes(ip) +} + +func intToIP(i *big.Int, isIPv6 bool) net.IP { + intBytes := i.Bytes() + + if len(intBytes) == net.IPv4len || len(intBytes) == net.IPv6len { + return intBytes + } + + if isIPv6 { + return append(make([]byte, net.IPv6len-len(intBytes)), intBytes...) 
+	}
+
+	return append(make([]byte, net.IPv4len-len(intBytes)), intBytes...)
+}
+
+// normalizeIP will normalize IP by family,
+// IPv4 : 4-byte form
+// IPv6 : 16-byte form
+// others : nil
+func normalizeIP(ip net.IP) net.IP {
+	if ipTo4 := ip.To4(); ipTo4 != nil {
+		return ipTo4
+	}
+	return ip.To16()
+}
+
+// Network masks off the host portion of the IP, if IPNet is invalid,
+// return nil
+func Network(ipn *net.IPNet) *net.IPNet {
+	if ipn == nil {
+		return nil
+	}
+
+	maskedIP := ipn.IP.Mask(ipn.Mask)
+	if maskedIP == nil {
+		return nil
+	}
+
+	return &net.IPNet{
+		IP:   maskedIP,
+		Mask: ipn.Mask,
+	}
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go
new file mode 100644
index 000000000..4469e1b5d
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ip.go
@@ -0,0 +1,105 @@
+// Copyright 2021 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// IP is a CNI maintained type inherited from net.IPNet which can
+// represent a single IP address with or without prefix.
+type IP struct {
+	net.IPNet
+}
+
+// newIP will create an IP with net.IP and net.IPMask
+func newIP(ip net.IP, mask net.IPMask) *IP {
+	return &IP{
+		IPNet: net.IPNet{
+			IP:   ip,
+			Mask: mask,
+		},
+	}
+}
+
+// ParseIP will parse string s as an IP, and return it.
+// The string s must be formed like <ip>[/<prefix>].
+// If s is not a valid textual representation of an IP,
+// will return nil.
+func ParseIP(s string) *IP {
+	if strings.ContainsAny(s, "/") {
+		ip, ipNet, err := net.ParseCIDR(s)
+		if err != nil {
+			return nil
+		}
+		return newIP(ip, ipNet.Mask)
+	} else {
+		ip := net.ParseIP(s)
+		if ip == nil {
+			return nil
+		}
+		return newIP(ip, nil)
+	}
+}
+
+// ToIP will return a net.IP in standard form from this IP.
+// If this IP cannot be converted to a valid net.IP, will return nil.
+func (i *IP) ToIP() net.IP {
+	switch {
+	case i.IP.To4() != nil:
+		return i.IP.To4()
+	case i.IP.To16() != nil:
+		return i.IP.To16()
+	default:
+		return nil
+	}
+}
+
+// String returns the string form of this IP.
+func (i *IP) String() string {
+	if len(i.Mask) > 0 {
+		return i.IPNet.String()
+	}
+	return i.IP.String()
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String,
+// but when len(ip) is zero, will return an empty slice.
+func (i *IP) MarshalText() ([]byte, error) {
+	if len(i.IP) == 0 {
+		return []byte{}, nil
+	}
+	return []byte(i.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The textual bytes are expected in a form accepted by ParseIP,
+// but when len(b) is zero, will return an empty IP.
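+//
+// An illustrative round-trip (editor's sketch, not upstream documentation):
+// because IP implements both TextMarshaler and TextUnmarshaler, it works
+// directly with encoding/json:
+//
+//	var addr IP
+//	_ = json.Unmarshal([]byte(`"10.1.0.5/24"`), &addr)
+//	out, _ := json.Marshal(&addr) // yields "10.1.0.5/24"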
+func (i *IP) UnmarshalText(b []byte) error { + if len(b) == 0 { + *i = IP{} + return nil + } + + ip := ParseIP(string(b)) + if ip == nil { + return fmt.Errorf("invalid IP address %s", string(b)) + } + *i = *ip + return nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go new file mode 100644 index 000000000..0e8b6b691 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipforward_linux.go @@ -0,0 +1,62 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "bytes" + "os" + + current "github.com/containernetworking/cni/pkg/types/100" +) + +func EnableIP4Forward() error { + return echo1("/proc/sys/net/ipv4/ip_forward") +} + +func EnableIP6Forward() error { + return echo1("/proc/sys/net/ipv6/conf/all/forwarding") +} + +// EnableForward will enable forwarding for all configured +// address families +func EnableForward(ips []*current.IPConfig) error { + v4 := false + v6 := false + + for _, ip := range ips { + isV4 := ip.Address.IP.To4() != nil + if isV4 && !v4 { + if err := EnableIP4Forward(); err != nil { + return err + } + v4 = true + } else if !isV4 && !v6 { + if err := EnableIP6Forward(); err != nil { + return err + } + v6 = true + } + } + return nil +} + +func echo1(f string) error { + if content, err := os.ReadFile(f); err == nil { + if bytes.Equal(bytes.TrimSpace(content), []byte("1")) { + return nil + } + } + return os.WriteFile(f, []byte("1"), 0644) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go new file mode 100644 index 000000000..cc640a605 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/ipmasq_linux.go @@ -0,0 +1,126 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
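+
+// Editor's note: a hedged usage sketch, not part of the upstream file, for the
+// SetupIPMasq/TeardownIPMasq pair defined below. The chain name "CNI-mynet",
+// the comment string, and containerIP are hypothetical:
+//
+//	ipn := &net.IPNet{IP: containerIP, Mask: net.CIDRMask(16, 32)}
+//	if err := ip.SetupIPMasq(ipn, "CNI-mynet", "my-container"); err != nil {
+//		return err
+//	}
+//	// ... and on teardown:
+//	_ = ip.TeardownIPMasq(ipn, "CNI-mynet", "my-container")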
+
+package ip
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/coreos/go-iptables/iptables"
+)
+
+// SetupIPMasq installs iptables rules to masquerade traffic
+// coming from ip of ipn and going outside of ipn
+func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error {
+	isV6 := ipn.IP.To4() == nil
+
+	var ipt *iptables.IPTables
+	var err error
+	var multicastNet string
+
+	if isV6 {
+		ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6)
+		multicastNet = "ff00::/8"
+	} else {
+		ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4)
+		multicastNet = "224.0.0.0/4"
+	}
+	if err != nil {
+		return fmt.Errorf("failed to locate iptables: %v", err)
+	}
+
+	// Create the chain if it doesn't exist
+	exists := false
+	chains, err := ipt.ListChains("nat")
+	if err != nil {
+		return fmt.Errorf("failed to list chains: %v", err)
+	}
+	for _, ch := range chains {
+		if ch == chain {
+			exists = true
+			break
+		}
+	}
+	if !exists {
+		if err = ipt.NewChain("nat", chain); err != nil {
+			return err
+		}
+	}
+
+	// Packets to this network should not be touched
+	if err := ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil {
+		return err
+	}
+
+	// Don't masquerade multicast - pods should be able to talk to other pods
+	// on the local network via multicast.
+	if err := ipt.AppendUnique("nat", chain, "!", "-d", multicastNet, "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil {
+		return err
+	}
+
+	// Packets from the specific IP of this network will hit the chain
+	return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment)
+}
+
+// TeardownIPMasq undoes the effects of SetupIPMasq
+func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error {
+	isV6 := ipn.IP.To4() == nil
+
+	var ipt *iptables.IPTables
+	var err error
+
+	if isV6 {
+		ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv6)
+	} else {
+		ipt, err = iptables.NewWithProtocol(iptables.ProtocolIPv4)
+	}
+	if err != nil {
+		return fmt.Errorf("failed to locate iptables: %v", err)
+	}
+
+	err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.IP.String(), "-j", chain, "-m", "comment", "--comment", comment)
+	if err != nil && !isNotExist(err) {
+		return err
+	}
+
+	// for backward compatibility
+	err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment)
+	if err != nil && !isNotExist(err) {
+		return err
+	}
+
+	err = ipt.ClearChain("nat", chain)
+	if err != nil && !isNotExist(err) {
+		return err
+	}
+
+	err = ipt.DeleteChain("nat", chain)
+	if err != nil && !isNotExist(err) {
+		return err
+	}
+
+	return nil
+}
+
+// isNotExist returns true if the error is from iptables indicating
+// that the target does not exist.
+func isNotExist(err error) bool {
+	e, ok := err.(*iptables.Error)
+	if !ok {
+		return false
+	}
+	return e.IsNotExist()
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
new file mode 100644
index 000000000..91f931b57
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ip/link_linux.go
@@ -0,0 +1,261 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+
+	"github.com/safchain/ethtool"
+	"github.com/vishvananda/netlink"
+
+	"github.com/containernetworking/plugins/pkg/ns"
+	"github.com/containernetworking/plugins/pkg/utils/sysctl"
+)
+
+var (
+	ErrLinkNotFound = errors.New("link not found")
+)
+
+// makeVethPair is called from within the container's network namespace
+func makeVethPair(name, peer string, mtu int, mac string, hostNS ns.NetNS) (netlink.Link, error) {
+	veth := &netlink.Veth{
+		LinkAttrs: netlink.LinkAttrs{
+			Name: name,
+			MTU:  mtu,
+		},
+		PeerName:      peer,
+		PeerNamespace: netlink.NsFd(int(hostNS.Fd())),
+	}
+	if mac != "" {
+		m, err := net.ParseMAC(mac)
+		if err != nil {
+			return nil, err
+		}
+		veth.LinkAttrs.HardwareAddr = m
+	}
+	if err := netlink.LinkAdd(veth); err != nil {
+		return nil, err
+	}
+	// Re-fetch the container link to get its creation-time parameters, e.g. index and mac
+	veth2, err := netlink.LinkByName(name)
+	if err != nil {
+		netlink.LinkDel(veth) // try and clean up the link if possible.
+		return nil, err
+	}
+
+	return veth2, nil
+}
+
+func peerExists(name string) bool {
+	if _, err := netlink.LinkByName(name); err != nil {
+		return false
+	}
+	return true
+}
+
+func makeVeth(name, vethPeerName string, mtu int, mac string, hostNS ns.NetNS) (peerName string, veth netlink.Link, err error) {
+	for i := 0; i < 10; i++ {
+		if vethPeerName != "" {
+			peerName = vethPeerName
+		} else {
+			peerName, err = RandomVethName()
+			if err != nil {
+				return
+			}
+		}
+
+		veth, err = makeVethPair(name, peerName, mtu, mac, hostNS)
+		switch {
+		case err == nil:
+			return
+
+		case os.IsExist(err):
+			if peerExists(peerName) && vethPeerName == "" {
+				continue
+			}
+			err = fmt.Errorf("container veth name provided (%v) already exists", name)
+			return
+
+		default:
+			err = fmt.Errorf("failed to make veth pair: %v", err)
+			return
+		}
+	}
+
+	// should really never be hit
+	err = fmt.Errorf("failed to find a unique veth name")
+	return
+}
+
+// RandomVethName returns a name of the form "veth" followed by a random
+// 8-character hex suffix generated from 4 bytes of entropy
+func RandomVethName() (string, error) {
+	entropy := make([]byte, 4)
+	_, err := rand.Read(entropy)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate random veth name: %v", err)
+	}
+
+	// NetworkManager (recent versions) will ignore veth devices that start with "veth"
+	return fmt.Sprintf("veth%x", entropy), nil
+}
+
+func RenameLink(curName, newName string) error {
+	link, err := netlink.LinkByName(curName)
+	if err == nil {
+		err = netlink.LinkSetName(link, newName)
+	}
+	return err
+}
+
+func ifaceFromNetlinkLink(l netlink.Link) net.Interface {
+	a := l.Attrs()
+	return net.Interface{
+		Index:        a.Index,
+		MTU:          a.MTU,
+		Name:         a.Name,
+		HardwareAddr: a.HardwareAddr,
+		Flags:        a.Flags,
+	}
+}
+
+// SetupVethWithName sets up a pair of virtual ethernet devices.
+// Call SetupVethWithName from inside the container netns. It will create both veth
+// devices and move the host-side veth into the provided hostNS namespace.
+// If hostVethName is not specified, the host-side veth name is a random string.
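+// For example (editor's illustrative sketch; the interface names and MTU are
+// hypothetical):
+//
+//	hostIface, contIface, err := ip.SetupVethWithName("eth0", "veth-host0", 1500, "", hostNS)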
+// On success, SetupVethWithName returns (hostVeth, containerVeth, nil) +func SetupVethWithName(contVethName, hostVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + hostVethName, contVeth, err := makeVeth(contVethName, hostVethName, mtu, contVethMac, hostNS) + if err != nil { + return net.Interface{}, net.Interface{}, err + } + + var hostVeth netlink.Link + err = hostNS.Do(func(_ ns.NetNS) error { + hostVeth, err = netlink.LinkByName(hostVethName) + if err != nil { + return fmt.Errorf("failed to lookup %q in %q: %v", hostVethName, hostNS.Path(), err) + } + + if err = netlink.LinkSetUp(hostVeth); err != nil { + return fmt.Errorf("failed to set %q up: %v", hostVethName, err) + } + + // we want to own the routes for this interface + _, _ = sysctl.Sysctl(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0") + return nil + }) + if err != nil { + return net.Interface{}, net.Interface{}, err + } + return ifaceFromNetlinkLink(hostVeth), ifaceFromNetlinkLink(contVeth), nil +} + +// SetupVeth sets up a pair of virtual ethernet devices. +// Call SetupVeth from inside the container netns. It will create both veth +// devices and move the host-side veth into the provided hostNS namespace. +// On success, SetupVeth returns (hostVeth, containerVeth, nil) +func SetupVeth(contVethName string, mtu int, contVethMac string, hostNS ns.NetNS) (net.Interface, net.Interface, error) { + return SetupVethWithName(contVethName, "", mtu, contVethMac, hostNS) +} + +// DelLinkByName removes an interface link. +func DelLinkByName(ifName string) error { + iface, err := netlink.LinkByName(ifName) + if err != nil { + if _, ok := err.(netlink.LinkNotFoundError); ok { + return ErrLinkNotFound + } + return fmt.Errorf("failed to lookup %q: %v", ifName, err) + } + + if err = netlink.LinkDel(iface); err != nil { + return fmt.Errorf("failed to delete %q: %v", ifName, err) + } + + return nil +} + +// DelLinkByNameAddr remove an interface and returns its addresses +func DelLinkByNameAddr(ifName string) ([]*net.IPNet, error) { + iface, err := netlink.LinkByName(ifName) + if err != nil { + if _, ok := err.(netlink.LinkNotFoundError); ok { + return nil, ErrLinkNotFound + } + return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err) + } + + addrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL) + if err != nil { + return nil, fmt.Errorf("failed to get IP addresses for %q: %v", ifName, err) + } + + if err = netlink.LinkDel(iface); err != nil { + return nil, fmt.Errorf("failed to delete %q: %v", ifName, err) + } + + out := []*net.IPNet{} + for _, addr := range addrs { + if addr.IP.IsGlobalUnicast() { + out = append(out, addr.IPNet) + } + } + + return out, nil +} + +// GetVethPeerIfindex returns the veth link object, the peer ifindex of the +// veth, or an error. This peer ifindex will only be valid in the peer's +// network namespace. 
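+//
+// Illustrative use (editor's sketch): from inside the container namespace,
+// resolve the ifindex of the host-side peer of a hypothetical "eth0":
+//
+//	_, peerIndex, err := ip.GetVethPeerIfindex("eth0")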
+func GetVethPeerIfindex(ifName string) (netlink.Link, int, error) { + link, err := netlink.LinkByName(ifName) + if err != nil { + return nil, -1, fmt.Errorf("could not look up %q: %v", ifName, err) + } + if _, ok := link.(*netlink.Veth); !ok { + return nil, -1, fmt.Errorf("interface %q was not a veth interface", ifName) + } + + // veth supports IFLA_LINK (what vishvananda/netlink calls ParentIndex) + // on 4.1 and higher kernels + peerIndex := link.Attrs().ParentIndex + if peerIndex <= 0 { + // Fall back to ethtool for 4.0 and earlier kernels + e, err := ethtool.NewEthtool() + if err != nil { + return nil, -1, fmt.Errorf("failed to initialize ethtool: %v", err) + } + defer e.Close() + + stats, err := e.Stats(link.Attrs().Name) + if err != nil { + return nil, -1, fmt.Errorf("failed to request ethtool stats: %v", err) + } + n, ok := stats["peer_ifindex"] + if !ok { + return nil, -1, fmt.Errorf("failed to find 'peer_ifindex' in ethtool stats") + } + if n > 32767 || n == 0 { + return nil, -1, fmt.Errorf("invalid 'peer_ifindex' %d", n) + } + peerIndex = int(n) + } + + return link, peerIndex, nil +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go new file mode 100644 index 000000000..e92b6c53e --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/route_linux.go @@ -0,0 +1,52 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ip + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +// AddRoute adds a universally-scoped route to a device. +func AddRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { + return netlink.RouteAdd(&netlink.Route{ + LinkIndex: dev.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: ipn, + Gw: gw, + }) +} + +// AddHostRoute adds a host-scoped route to a device. +func AddHostRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { + return netlink.RouteAdd(&netlink.Route{ + LinkIndex: dev.Attrs().Index, + Scope: netlink.SCOPE_HOST, + Dst: ipn, + Gw: gw, + }) +} + +// AddDefaultRoute sets the default route on the given gateway. +func AddDefaultRoute(gw net.IP, dev netlink.Link) error { + var defNet *net.IPNet + if gw.To4() != nil { + _, defNet, _ = net.ParseCIDR("0.0.0.0/0") + } else { + _, defNet, _ = net.ParseCIDR("::/0") + } + return AddRoute(defNet, gw, dev) +} diff --git a/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go new file mode 100644 index 000000000..943117e18 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ip/utils_linux.go @@ -0,0 +1,116 @@ +//go:build linux +// +build linux + +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ip
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/containernetworking/cni/pkg/types"
+	current "github.com/containernetworking/cni/pkg/types/100"
+	"github.com/vishvananda/netlink"
+)
+
+func ValidateExpectedInterfaceIPs(ifName string, resultIPs []*current.IPConfig) error {
+	// Ensure each expected IP is configured on the interface
+	for _, ips := range resultIPs {
+		ourAddr := netlink.Addr{IPNet: &ips.Address}
+		match := false
+
+		link, err := netlink.LinkByName(ifName)
+		if err != nil {
+			return fmt.Errorf("cannot find container link %v", ifName)
+		}
+
+		addrList, err := netlink.AddrList(link, netlink.FAMILY_ALL)
+		if err != nil {
+			return fmt.Errorf("cannot obtain list of IP addresses: %v", err)
+		}
+
+		for _, addr := range addrList {
+			if addr.Equal(ourAddr) {
+				match = true
+				break
+			}
+		}
+		if !match {
+			return fmt.Errorf("failed to match addr %v on interface %v", ourAddr, ifName)
+		}
+
+		// Convert the host/prefixlen to just prefix for route lookup.
+		_, ourPrefix, err := net.ParseCIDR(ourAddr.String())
+		if err != nil {
+			return fmt.Errorf("failed to parse addr %v: %v", ourAddr, err)
+		}
+
+		findGwy := &netlink.Route{Dst: ourPrefix}
+		routeFilter := netlink.RT_FILTER_DST
+
+		family := netlink.FAMILY_V6
+		if ips.Address.IP.To4() != nil {
+			family = netlink.FAMILY_V4
+		}
+
+		gwy, err := netlink.RouteListFiltered(family, findGwy, routeFilter)
+		if err != nil {
+			return fmt.Errorf("error %v trying to find gateway %v for interface %v", err, ips.Gateway, ifName)
+		}
+		if len(gwy) == 0 {
+			return fmt.Errorf("failed to find gateway %v for interface %v", ips.Gateway, ifName)
+		}
+	}
+
+	return nil
+}
+
+func ValidateExpectedRoute(resultRoutes []*types.Route) error {
+	// Ensure that each static route in prevResults is found in the routing table
+	for _, route := range resultRoutes {
+		find := &netlink.Route{Dst: &route.Dst, Gw: route.GW}
+		routeFilter := netlink.RT_FILTER_DST | netlink.RT_FILTER_GW
+		var family int
+
+		switch {
+		case route.Dst.IP.To4() != nil:
+			family = netlink.FAMILY_V4
+			// Default route needs Dst set to nil
+			if route.Dst.String() == "0.0.0.0/0" {
+				find = &netlink.Route{Dst: nil, Gw: route.GW}
+				routeFilter = netlink.RT_FILTER_DST
+			}
+		case len(route.Dst.IP) == net.IPv6len:
+			family = netlink.FAMILY_V6
+			// Default route needs Dst set to nil
+			if route.Dst.String() == "::/0" {
+				find = &netlink.Route{Dst: nil, Gw: route.GW}
+				routeFilter = netlink.RT_FILTER_DST
+			}
+		default:
+			return fmt.Errorf("invalid static route found %v", route)
+		}
+
+		wasFound, err := netlink.RouteListFiltered(family, find, routeFilter)
+		if err != nil {
+			return fmt.Errorf("route table lookup error %v for expected route %v", err, route)
+		}
+		if len(wasFound) == 0 {
+			return fmt.Errorf("expected route %v not found in routing table", route)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/README.md b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
new file mode 100644
index 000000000..1e265c7a0
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/ns/README.md
@@ -0,0 +1,41 @@
+### Namespaces, Threads, and Go
+On Linux each OS thread can have a different network namespace.
Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
+
+### Namespace Switching
+Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
+
+Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
+
+For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
+
+### Do() The Recommended Thing
+The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
+
+```go
+err = targetNs.Do(func(hostNs ns.NetNS) error {
+	dummy := &netlink.Dummy{
+		LinkAttrs: netlink.LinkAttrs{
+			Name: "dummy0",
+		},
+	}
+	return netlink.LinkAdd(dummy)
+})
+```
+
+Note that this requirement to wrap every network call is very onerous: any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. Goroutines spawned from within the `ns.Do()` closure will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()`, which helps to minimize the problem.
+
+When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of Go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled.
+
+In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use Go 1.10 or greater.
+
+
+### Creating network namespaces
+Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`.
A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration. + + +### Further Reading + - https://github.com/golang/go/wiki/LockOSThread + - http://morsmachine.dk/go-scheduler + - https://github.com/containernetworking/cni/issues/262 + - https://golang.org/pkg/runtime/ + - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix diff --git a/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go new file mode 100644 index 000000000..f260f2813 --- /dev/null +++ b/vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go @@ -0,0 +1,234 @@ +// Copyright 2015-2017 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "fmt" + "os" + "runtime" + "sync" + "syscall" + + "golang.org/x/sys/unix" +) + +// Returns an object representing the current OS thread's network namespace +func GetCurrentNS() (NetNS, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return GetNS(getCurrentThreadNetNSPath()) +} + +func getCurrentThreadNetNSPath() string { + // /proc/self/ns/net returns the namespace of the main thread, not + // of whatever thread this goroutine is running on. Make sure we + // use the thread's net namespace since the thread is switching around + return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) +} + +func (ns *netNS) Close() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := ns.file.Close(); err != nil { + return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err) + } + ns.closed = true + + return nil +} + +func (ns *netNS) Set() error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err) + } + + return nil +} + +type NetNS interface { + // Executes the passed closure in this object's network namespace, + // attempting to restore the original namespace before returning. + // However, since each OS thread can have a different network namespace, + // and Go's thread scheduling is highly variable, callers cannot + // guarantee any specific namespace is set unless operations that + // require that namespace are wrapped with Do(). Also, no code called + // from Do() should call runtime.UnlockOSThread(), or the risk + // of executing code in an incorrect namespace will be greater. See + // https://github.com/golang/go/wiki/LockOSThread for further details. + Do(toRun func(NetNS) error) error + + // Sets the current network namespace to this object's network namespace. 
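+	// A hedged sketch of direct use, given some NetNS value netns (editor's
+	// illustration, hypothetical variable name; prefer Do(), which also
+	// restores the original namespace):
+	//
+	//	runtime.LockOSThread() // never unlock: let the runtime scrap this thread
+	//	if err := netns.Set(); err != nil {
+	//		// handle error
+	//	}
+	//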
+ // Note that since Go's thread scheduling is highly variable, callers + // cannot guarantee the requested namespace will be the current namespace + // after this function is called; to ensure this wrap operations that + // require the namespace with Do() instead. + Set() error + + // Returns the filesystem path representing this object's network namespace + Path() string + + // Returns a file descriptor representing this object's network namespace + Fd() uintptr + + // Cleans up this instance of the network namespace; if this instance + // is the last user the namespace will be destroyed + Close() error +} + +type netNS struct { + file *os.File + closed bool +} + +// netNS implements the NetNS interface +var _ NetNS = &netNS{} + +const ( + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h + NSFS_MAGIC = unix.NSFS_MAGIC + PROCFS_MAGIC = unix.PROC_SUPER_MAGIC +) + +type NSPathNotExistErr struct{ msg string } + +func (e NSPathNotExistErr) Error() string { return e.msg } + +type NSPathNotNSErr struct{ msg string } + +func (e NSPathNotNSErr) Error() string { return e.msg } + +func IsNSorErr(nspath string) error { + stat := syscall.Statfs_t{} + if err := syscall.Statfs(nspath, &stat); err != nil { + if os.IsNotExist(err) { + err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)} + } else { + err = fmt.Errorf("failed to Statfs %q: %v", nspath, err) + } + return err + } + + switch stat.Type { + case PROCFS_MAGIC, NSFS_MAGIC: + return nil + default: + return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)} + } +} + +// Returns an object representing the namespace referred to by @path +func GetNS(nspath string) (NetNS, error) { + err := IsNSorErr(nspath) + if err != nil { + return nil, err + } + + fd, err := os.Open(nspath) + if err != nil { + return nil, err + } + + return &netNS{file: fd}, nil +} + +func (ns *netNS) Path() string { + return ns.file.Name() +} + +func (ns *netNS) Fd() uintptr { + return ns.file.Fd() +} + +func (ns *netNS) errorIfClosed() error { + if ns.closed { + return fmt.Errorf("%q has already been closed", ns.file.Name()) + } + return nil +} + +func (ns *netNS) Do(toRun func(NetNS) error) error { + if err := ns.errorIfClosed(); err != nil { + return err + } + + containedCall := func(hostNS NetNS) error { + threadNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("failed to open current netns: %v", err) + } + defer threadNS.Close() + + // switch to target namespace + if err = ns.Set(); err != nil { + return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err) + } + defer func() { + err := threadNS.Set() // switch back + if err == nil { + // Unlock the current thread only when we successfully switched back + // to the original namespace; otherwise leave the thread locked which + // will force the runtime to scrap the current thread, that is maybe + // not as optimal but at least always safe to do. + runtime.UnlockOSThread() + } + }() + + return toRun(hostNS) + } + + // save a handle to current network namespace + hostNS, err := GetCurrentNS() + if err != nil { + return fmt.Errorf("Failed to open current namespace: %v", err) + } + defer hostNS.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Start the callback in a new green thread so that if we later fail + // to switch the namespace back to the original one, we can safely + // leave the thread locked to die without a risk of the current thread + // left lingering with incorrect namespace. 
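+	// (Editor's note: since Go 1.10 the runtime terminates a locked OS thread
+	// when its goroutine exits without unlocking, which is what makes this
+	// "leave it locked" strategy safe.)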
+	var innerError error
+	go func() {
+		defer wg.Done()
+		runtime.LockOSThread()
+		innerError = containedCall(hostNS)
+	}()
+	wg.Wait()
+
+	return innerError
+}
+
+// WithNetNSPath executes the passed closure under the given network
+// namespace, restoring the original namespace afterwards.
+func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
+	ns, err := GetNS(nspath)
+	if err != nil {
+		return err
+	}
+	defer ns.Close()
+	return ns.Do(toRun)
+}
diff --git a/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go
new file mode 100644
index 000000000..469e9be9e
--- /dev/null
+++ b/vendor/github.com/containernetworking/plugins/pkg/utils/sysctl/sysctl_linux.go
@@ -0,0 +1,78 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sysctl
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// Sysctl provides a method to set/get values under /proc/sys, the Linux
+// interface to variables formerly handled by the sysctl syscall.
+// If the optional `params` argument contains exactly one string value, this
+// function sets that value on the corresponding sysctl variable.
+func Sysctl(name string, params ...string) (string, error) {
+	if len(params) > 1 {
+		return "", fmt.Errorf("unexpected additional parameters")
+	} else if len(params) == 1 {
+		return setSysctl(name, params[0])
+	}
+	return getSysctl(name)
+}
+
+func getSysctl(name string) (string, error) {
+	fullName := filepath.Join("/proc/sys", toNormalName(name))
+	data, err := os.ReadFile(fullName)
+	if err != nil {
+		return "", err
+	}
+
+	return string(data[:len(data)-1]), nil
+}
+
+func setSysctl(name, value string) (string, error) {
+	fullName := filepath.Join("/proc/sys", toNormalName(name))
+	if err := os.WriteFile(fullName, []byte(value), 0644); err != nil {
+		return "", err
+	}
+
+	return getSysctl(name)
+}
+
+// Normalize names by using slash as separator
+// Sysctl names can use dots or slashes as separator:
+// - if dots are used, dots and slashes are interchanged.
+// - if slashes are used, slashes and dots are left intact.
+// Separator in use is determined by first occurrence.
+func toNormalName(name string) string {
+	interchange := false
+	for _, c := range name {
+		if c == '.' {
+			interchange = true
+			break
+		}
+		if c == '/' {
+			break
+		}
+	}
+
+	if interchange {
+		r := strings.NewReplacer(".", "/", "/", ".")
+		return r.Replace(name)
+	}
+	return name
+}
diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/github.com/coreos/go-iptables/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE new file mode 100644 index 000000000..23a0ada2f --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go new file mode 100644 index 000000000..85047e59d --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go @@ -0,0 +1,680 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "bytes" + "fmt" + "io" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" +) + +// Adds the output of stderr to exec.ExitError +type Error struct { + exec.ExitError + cmd exec.Cmd + msg string + exitStatus *int //for overriding +} + +func (e *Error) ExitStatus() int { + if e.exitStatus != nil { + return *e.exitStatus + } + return e.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (e *Error) Error() string { + return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) +} + +// IsNotExist returns true if the error is due to the chain or rule not existing +func (e *Error) IsNotExist() bool { + if e.ExitStatus() != 1 { + return false + } + msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" + msgNoChainExist := "No chain/target/match by that name.\n" + return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) +} + +// Protocol to differentiate between IPv4 and IPv6 +type Protocol byte + +const ( + ProtocolIPv4 Protocol = iota + ProtocolIPv6 +) + +type IPTables struct { + path string + proto Protocol + hasCheck bool + hasWait bool + waitSupportSecond bool + hasRandomFully bool + v1 int + v2 int + v3 int + mode string // the underlying iptables operating mode, e.g. nf_tables + timeout int // time to wait for the iptables lock, default waits forever +} + +// Stat represents a structured statistic entry. +type Stat struct { + Packets uint64 `json:"pkts"` + Bytes uint64 `json:"bytes"` + Target string `json:"target"` + Protocol string `json:"prot"` + Opt string `json:"opt"` + Input string `json:"in"` + Output string `json:"out"` + Source *net.IPNet `json:"source"` + Destination *net.IPNet `json:"destination"` + Options string `json:"options"` +} + +type option func(*IPTables) + +func IPFamily(proto Protocol) option { + return func(ipt *IPTables) { + ipt.proto = proto + } +} + +func Timeout(timeout int) option { + return func(ipt *IPTables) { + ipt.timeout = timeout + } +} + +// New creates a new IPTables configured with the options passed as parameter. +// For backwards compatibility, by default always uses IPv4 and timeout 0. +// i.e. 
you can create an IPv6 IPTables using a timeout of 5 seconds passing +// the IPFamily and Timeout options as follow: +// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5)) +func New(opts ...option) (*IPTables, error) { + + ipt := &IPTables{ + proto: ProtocolIPv4, + timeout: 0, + } + + for _, opt := range opts { + opt(ipt) + } + + path, err := exec.LookPath(getIptablesCommand(ipt.proto)) + if err != nil { + return nil, err + } + ipt.path = path + + vstring, err := getIptablesVersionString(path) + if err != nil { + return nil, fmt.Errorf("could not get iptables version: %v", err) + } + v1, v2, v3, mode, err := extractIptablesVersion(vstring) + if err != nil { + return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err) + } + ipt.v1 = v1 + ipt.v2 = v2 + ipt.v3 = v3 + ipt.mode = mode + + checkPresent, waitPresent, waitSupportSecond, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + ipt.hasCheck = checkPresent + ipt.hasWait = waitPresent + ipt.waitSupportSecond = waitSupportSecond + ipt.hasRandomFully = randomFullyPresent + + return ipt, nil +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". +func NewWithProtocol(proto Protocol) (*IPTables, error) { + return New(IPFamily(proto), Timeout(0)) +} + +// Proto returns the protocol used by this IPTables. +func (ipt *IPTables) Proto() Protocol { + return ipt.proto +} + +// Exists checks if given rulespec in specified table/chain exists +func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { + if !ipt.hasCheck { + return ipt.existsForOldIptables(table, chain, rulespec) + + } + cmd := append([]string{"-t", table, "-C", chain}, rulespec...) + err := ipt.run(cmd...) + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Insert inserts rulespec to specified table/chain (in specified pos) +func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { + cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) + return ipt.run(cmd...) +} + +// Append appends rulespec to specified table/chain +func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-A", chain}, rulespec...) + return ipt.run(cmd...) +} + +// AppendUnique acts like Append except that it won't add a duplicate +func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Append(table, chain, rulespec...) + } + + return nil +} + +// Delete removes rulespec in specified table/chain +func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-D", chain}, rulespec...) + return ipt.run(cmd...) +} + +func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err == nil && exists { + err = ipt.Delete(table, chain, rulespec...) 
+ } + return err +} + +// List rules in specified table/chain +func (ipt *IPTables) List(table, chain string) ([]string, error) { + args := []string{"-t", table, "-S", chain} + return ipt.executeList(args) +} + +// List rules (with counters) in specified table/chain +func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { + args := []string{"-t", table, "-v", "-S", chain} + return ipt.executeList(args) +} + +// ListChains returns a slice containing the name of each chain in the specified table. +func (ipt *IPTables) ListChains(table string) ([]string, error) { + args := []string{"-t", table, "-S"} + + result, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + // Iterate over rules to find all default (-P) and user-specified (-N) chains. + // Chains definition always come before rules. + // Format is the following: + // -P OUTPUT ACCEPT + // -N Custom + var chains []string + for _, val := range result { + if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { + chains = append(chains, strings.Fields(val)[1]) + } else { + break + } + } + return chains, nil +} + +// '-S' is fine with non existing rule index as long as the chain exists +// therefore pass index 1 to reduce overhead for large chains +func (ipt *IPTables) ChainExists(table, chain string) (bool, error) { + err := ipt.run("-t", table, "-S", chain, "1") + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Stats lists rules including the byte and packet counts +func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { + args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} + lines, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + appendSubnet := func(addr string) string { + if strings.IndexByte(addr, byte('/')) < 0 { + if strings.IndexByte(addr, '.') < 0 { + return addr + "/128" + } + return addr + "/32" + } + return addr + } + + ipv6 := ipt.proto == ProtocolIPv6 + + rows := [][]string{} + for i, line := range lines { + // Skip over chain name and field header + if i < 2 { + continue + } + + // Fields: + // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + // The ip6tables verbose output cannot be naively split due to the default "opt" + // field containing 2 single spaces. + if ipv6 { + // Check if field 6 is "opt" or "source" address + dest := fields[6] + ip, _, _ := net.ParseCIDR(dest) + if ip == nil { + ip = net.ParseIP(dest) + } + + // If we detected a CIDR or IP, the "opt" field is empty.. insert it. + if ip != nil { + f := []string{} + f = append(f, fields[:4]...) + f = append(f, " ") // Empty "opt" field for ip6tables + f = append(f, fields[4:]...) + fields = f + } + } + + // Adjust "source" and "destination" to include netmask, to match regular + // List output + fields[7] = appendSubnet(fields[7]) + fields[8] = appendSubnet(fields[8]) + + // Combine "options" fields 9... into a single space-delimited field. + options := fields[9:] + fields = fields[:9] + fields = append(fields, strings.Join(options, " ")) + rows = append(rows, fields) + } + return rows, nil +} + +// ParseStat parses a single statistic row into a Stat struct. The input should +// be a string slice that is returned from calling the Stat method. 
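+// A minimal usage sketch (illustrative only; it assumes iptables is installed
+// and that the "INPUT" chain of the "filter" table is readable):
+//
+//	ipt, err := iptables.New()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	rows, err := ipt.Stats("filter", "INPUT")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, row := range rows {
+//		stat, err := ipt.ParseStat(row)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Printf("%d packets, %d bytes -> %s\n", stat.Packets, stat.Bytes, stat.Target)
+//	}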
+func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) { + // For forward-compatibility, expect at least 10 fields in the stat + if len(stat) < 10 { + return parsed, fmt.Errorf("stat contained fewer fields than expected") + } + + // Convert the fields that are not plain strings + parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse packets") + } + parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse bytes") + } + _, parsed.Source, err = net.ParseCIDR(stat[7]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse source") + } + _, parsed.Destination, err = net.ParseCIDR(stat[8]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse destination") + } + + // Put the fields that are strings + parsed.Target = stat[2] + parsed.Protocol = stat[3] + parsed.Opt = stat[4] + parsed.Input = stat[5] + parsed.Output = stat[6] + parsed.Options = stat[9] + + return parsed, nil +} + +// StructuredStats returns statistics as structured data which may be further +// parsed and marshaled. +func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) { + rawStats, err := ipt.Stats(table, chain) + if err != nil { + return nil, err + } + + structStats := []Stat{} + for _, rawStat := range rawStats { + stat, err := ipt.ParseStat(rawStat) + if err != nil { + return nil, err + } + structStats = append(structStats, stat) + } + + return structStats, nil +} + +func (ipt *IPTables) executeList(args []string) ([]string, error) { + var stdout bytes.Buffer + if err := ipt.runWithOutput(args, &stdout); err != nil { + return nil, err + } + + rules := strings.Split(stdout.String(), "\n") + + // strip trailing newline + if len(rules) > 0 && rules[len(rules)-1] == "" { + rules = rules[:len(rules)-1] + } + + for i, rule := range rules { + rules[i] = filterRuleOutput(rule) + } + + return rules, nil +} + +// NewChain creates a new chain in the specified table. +// If the chain already exists, it will result in an error. +func (ipt *IPTables) NewChain(table, chain string) error { + return ipt.run("-t", table, "-N", chain) +} + +const existsErr = 1 + +// ClearChain flushed (deletes all rules) in the specified table/chain. +// If the chain does not exist, a new one will be created +func (ipt *IPTables) ClearChain(table, chain string) error { + err := ipt.NewChain(table, chain) + + eerr, eok := err.(*Error) + switch { + case err == nil: + return nil + case eok && eerr.ExitStatus() == existsErr: + // chain already exists. Flush (clear) it. + return ipt.run("-t", table, "-F", chain) + default: + return err + } +} + +// RenameChain renames the old chain to the new one. +func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { + return ipt.run("-t", table, "-E", oldChain, newChain) +} + +// DeleteChain deletes the chain in the specified table. 
+// The chain must be empty +func (ipt *IPTables) DeleteChain(table, chain string) error { + return ipt.run("-t", table, "-X", chain) +} + +func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error { + exists, err := ipt.ChainExists(table, chain) + if err != nil || !exists { + return err + } + err = ipt.run("-t", table, "-F", chain) + if err == nil { + err = ipt.run("-t", table, "-X", chain) + } + return err +} + +func (ipt *IPTables) ClearAll() error { + return ipt.run("-F") +} + +func (ipt *IPTables) DeleteAll() error { + return ipt.run("-X") +} + +// ChangePolicy changes policy on chain to target +func (ipt *IPTables) ChangePolicy(table, chain, target string) error { + return ipt.run("-t", table, "-P", chain, target) +} + +// Check if the underlying iptables command supports the --random-fully flag +func (ipt *IPTables) HasRandomFully() bool { + return ipt.hasRandomFully +} + +// Return version components of the underlying iptables command +func (ipt *IPTables) GetIptablesVersion() (int, int, int) { + return ipt.v1, ipt.v2, ipt.v3 +} + +// run runs an iptables command with the given arguments, ignoring +// any stdout output +func (ipt *IPTables) run(args ...string) error { + return ipt.runWithOutput(args, nil) +} + +// runWithOutput runs an iptables command with the given arguments, +// writing any stdout output to the given writer +func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { + args = append([]string{ipt.path}, args...) + if ipt.hasWait { + args = append(args, "--wait") + if ipt.timeout != 0 && ipt.waitSupportSecond { + args = append(args, strconv.Itoa(ipt.timeout)) + } + } else { + fmu, err := newXtablesFileLock() + if err != nil { + return err + } + ul, err := fmu.tryLock() + if err != nil { + syscall.Close(fmu.fd) + return err + } + defer ul.Unlock() + } + + var stderr bytes.Buffer + cmd := exec.Cmd{ + Path: ipt.path, + Args: args, + Stdout: stdout, + Stderr: &stderr, + } + + if err := cmd.Run(); err != nil { + switch e := err.(type) { + case *exec.ExitError: + return &Error{*e, cmd, stderr.String(), nil} + default: + return err + } + } + + return nil +} + +// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". +func getIptablesCommand(proto Protocol) string { + if proto == ProtocolIPv6 { + return "ip6tables" + } else { + return "iptables" + } +} + +// Checks if iptables has the "-C" and "--wait" flag +func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool, bool) { + return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesWaitSupportSecond(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3) +} + +// getIptablesVersion returns the first three components of the iptables version +// and the operating mode (e.g. nf_tables or legacy) +// e.g. 
"iptables v1.3.66" would return (1, 3, 66, legacy, nil) +func extractIptablesVersion(str string) (int, int, int, string, error) { + versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) + result := versionMatcher.FindStringSubmatch(str) + if result == nil { + return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) + } + + v1, err := strconv.Atoi(result[1]) + if err != nil { + return 0, 0, 0, "", err + } + + v2, err := strconv.Atoi(result[2]) + if err != nil { + return 0, 0, 0, "", err + } + + v3, err := strconv.Atoi(result[3]) + if err != nil { + return 0, 0, 0, "", err + } + + mode := "legacy" + if result[4] != "" { + mode = result[4] + } + return v1, v2, v3, mode, nil +} + +// Runs "iptables --version" to get the version string +func getIptablesVersionString(path string) (string, error) { + cmd := exec.Command(path, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} + +// Checks if an iptables version is after 1.4.11, when --check was added +func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 11 { + return true + } + return false +} + +// Checks if an iptables version is after 1.4.20, when --wait was added +func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 20 { + return true + } + return false +} + +//Checks if an iptablse version is after 1.6.0, when --wait support second +func iptablesWaitSupportSecond(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 >= 6 { + return true + } + return false +} + +// Checks if an iptables version is after 1.6.2, when --random-fully was added +func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 6 { + return true + } + if v1 == 1 && v2 == 6 && v3 >= 2 { + return true + } + return false +} + +// Checks if a rule specification exists for a table +func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { + rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") + args := []string{"-t", table, "-S"} + var stdout bytes.Buffer + err := ipt.runWithOutput(args, &stdout) + if err != nil { + return false, err + } + return strings.Contains(stdout.String(), rs), nil +} + +// counterRegex is the regex used to detect nftables counter format +var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) + +// filterRuleOutput works around some inconsistencies in output. +// For example, when iptables is in legacy vs. nftables mode, it produces +// different results. 
+func filterRuleOutput(rule string) string { + out := rule + + // work around an output difference in nftables mode where counters + // are output in iptables-save format, rather than iptables -S format + // The string begins with "[0:0]" + // + // Fixes #49 + if groups := counterRegex.FindStringSubmatch(out); groups != nil { + // drop the brackets + out = out[len(groups[0]):] + out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) + } + + return out +} diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go new file mode 100644 index 000000000..a88e92b4e --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/lock.go @@ -0,0 +1,84 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "os" + "sync" + "syscall" +) + +const ( + // In earlier versions of iptables, the xtables lock was implemented + // via a Unix socket, but now flock is used via this lockfile: + // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 + // Note the LSB-conforming "/run" directory does not exist on old + // distributions, so assume "/var" is symlinked + xtablesLockFilePath = "/var/run/xtables.lock" + + defaultFilePerm = 0600 +) + +type Unlocker interface { + Unlock() error +} + +type nopUnlocker struct{} + +func (_ nopUnlocker) Unlock() error { return nil } + +type fileLock struct { + // mu is used to protect against concurrent invocations from within this process + mu sync.Mutex + fd int +} + +// tryLock takes an exclusive lock on the xtables lock file without blocking. +// This is best-effort only: if the exclusive lock would block (i.e. because +// another process already holds it), no error is returned. Otherwise, any +// error encountered during the locking operation is returned. +// The returned Unlocker should be used to release the lock when the caller is +// done invoking iptables commands. +func (l *fileLock) tryLock() (Unlocker, error) { + l.mu.Lock() + err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) + switch err { + case syscall.EWOULDBLOCK: + l.mu.Unlock() + return nopUnlocker{}, nil + case nil: + return l, nil + default: + l.mu.Unlock() + return nil, err + } +} + +// Unlock closes the underlying file, which implicitly unlocks it as well. It +// also unlocks the associated mutex. 
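+// A usage sketch mirroring how runWithOutput in iptables.go takes the lock
+// when the iptables binary lacks --wait (names as defined in this package):
+//
+//	fmu, err := newXtablesFileLock()
+//	if err != nil {
+//		return err
+//	}
+//	ul, err := fmu.tryLock()
+//	if err != nil {
+//		syscall.Close(fmu.fd)
+//		return err
+//	}
+//	defer ul.Unlock()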
+func (l *fileLock) Unlock() error { + defer l.mu.Unlock() + return syscall.Close(l.fd) +} + +// newXtablesFileLock opens a new lock on the xtables lockfile without +// acquiring the lock +func newXtablesFileLock() (*fileLock, error) { + fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) + if err != nil { + return nil, err + } + return &fileLock{fd: fd}, nil +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 000000000..1cade6cef --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..42bf32aab --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,16 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{ + blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) 
+} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..8a290f197 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,382 @@ +package md2man + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + firstDD bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB" + codespanCloseTag = "\\fR" + codeTag = "\n.EX\n" + codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us on). + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + dtTag = "\n.TP\n" + dd2Tag = "\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" + tablePreprocessor = `'\" t` +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r *roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // We need to walk the tree to check if there are any tables. + // If there are, we need to enable the roff table preprocessor. 
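+	// (The preprocessor hint emitted below is the tablePreprocessor constant
+	// `'\" t`; by roff convention it must be the very first line of the page
+	// so that man(1) runs the tbl preprocessor over the tables.)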
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Table { + out(w, tablePreprocessor+"\n") + return blackfriday.Terminate + } + return blackfriday.GoToNext + }) + + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + walkAction := blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + escapeSpecialChars(w, node.Literal) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) + } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. + escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if r.listDepth > 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.HTMLSpan: + // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte(" Response + s.IsResponse = true + + // Validate SIP Version + s.Version, err = GetSIPVersion(splits[0]) + if err != nil { + return err + } + + // Compute code + s.ResponseCode, err = strconv.Atoi(splits[1]) + if err != nil { + return err + } + + // Compute status line + s.ResponseStatus = splits[2] + + } else { + + // --> Request + + // Validate method + s.Method, err = GetSIPMethod(splits[0]) + if err != nil { + return err + } + + s.RequestURI = splits[1] + + // Validate SIP Version + s.Version, err = 
GetSIPVersion(splits[2]) + if err != nil { + return err + } + } + + return nil +} + +// ParseHeader will parse a SIP Header +// SIP Headers are quite simple, there are colon separated name and value +// Headers can be spread over multiple lines +// +// Examples of header : +// +// CSeq: 1 REGISTER +// Via: SIP/2.0/UDP there.com:5060 +// Authorization:Digest username="UserB", +// realm="MCI WorldCom SIP", +// nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="", +// uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824" +func (s *SIP) ParseHeader(header []byte) (err error) { + + // Ignore empty headers + if len(header) == 0 { + return + } + + // Check if this is the following of last header + // RFC 3261 - 7.3.1 - Header Field Format specify that following lines of + // multiline headers must begin by SP or TAB + if header[0] == '\t' || header[0] == ' ' { + + header = bytes.TrimSpace(header) + s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header)) + return + } + + // Find the ':' to separate header name and value + index := bytes.Index(header, []byte(":")) + if index >= 0 { + + headerName := strings.ToLower(string(bytes.Trim(header[:index], " "))) + headerValue := string(bytes.Trim(header[index+1:], " ")) + + // Add header to object + s.Headers[headerName] = append(s.Headers[headerName], headerValue) + s.lastHeaderParsed = headerName + + // Compute specific headers + err = s.ParseSpecificHeaders(headerName, headerValue) + if err != nil { + return err + } + } + + return nil +} + +// ParseSpecificHeaders will parse some specific key values from +// specific headers like CSeq or Content-Length integer values +func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) { + + switch headerName { + case "cseq": + + // CSeq header value is formatted like that : + // CSeq: 123 INVITE + // We split the value to parse Cseq integer value, and method + splits := strings.Split(headerValue, " ") + if len(splits) > 1 { + + // Parse Cseq + s.cseq, err = strconv.ParseInt(splits[0], 10, 64) + if err != nil { + return err + } + + // Validate method + if s.IsResponse { + s.Method, err = GetSIPMethod(splits[1]) + if err != nil { + return err + } + } + } + + case "content-length": + + // Parse Content-Length + s.contentLength, err = strconv.ParseInt(headerValue, 10, 64) + if err != nil { + return err + } + } + + return nil +} + +// GetAllHeaders will return the full headers of the +// current SIP packets in a map[string][]string +func (s *SIP) GetAllHeaders() map[string][]string { + return s.Headers +} + +// GetHeader will return all the headers with +// the specified name. +func (s *SIP) GetHeader(headerName string) []string { + headerName = strings.ToLower(headerName) + h := make([]string, 0) + if _, ok := s.Headers[headerName]; ok { + return s.Headers[headerName] + } + compactHeader := compactSipHeadersCorrespondance[headerName] + if _, ok := s.Headers[compactHeader]; ok { + return s.Headers[compactHeader] + } + return h +} + +// GetFirstHeader will return the first header with +// the specified name. If the current SIP packet has multiple +// headers with the same name, it returns the first. 
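+// A short usage sketch (assuming s is a *SIP layer decoded from a packet):
+//
+//	if callID := s.GetFirstHeader("Call-ID"); callID != "" {
+//		fmt.Println("Call-ID:", callID)
+//	}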
+func (s *SIP) GetFirstHeader(headerName string) string { + headers := s.GetHeader(headerName) + if len(headers) > 0 { + return headers[0] + } + return "" +} + +// +// Some handy getters for most used SIP headers +// + +// GetAuthorization will return the Authorization +// header of the current SIP packet +func (s *SIP) GetAuthorization() string { + return s.GetFirstHeader("Authorization") +} + +// GetFrom will return the From +// header of the current SIP packet +func (s *SIP) GetFrom() string { + return s.GetFirstHeader("From") +} + +// GetTo will return the To +// header of the current SIP packet +func (s *SIP) GetTo() string { + return s.GetFirstHeader("To") +} + +// GetContact will return the Contact +// header of the current SIP packet +func (s *SIP) GetContact() string { + return s.GetFirstHeader("Contact") +} + +// GetCallID will return the Call-ID +// header of the current SIP packet +func (s *SIP) GetCallID() string { + return s.GetFirstHeader("Call-ID") +} + +// GetUserAgent will return the User-Agent +// header of the current SIP packet +func (s *SIP) GetUserAgent() string { + return s.GetFirstHeader("User-Agent") +} + +// GetContentLength will return the parsed integer +// Content-Length header of the current SIP packet +func (s *SIP) GetContentLength() int64 { + return s.contentLength +} + +// GetCSeq will return the parsed integer CSeq header +// header of the current SIP packet +func (s *SIP) GetCSeq() int64 { + return s.cseq +} diff --git a/vendor/github.com/gopacket/gopacket/layers/stp.go b/vendor/github.com/gopacket/gopacket/layers/stp.go new file mode 100644 index 000000000..5ced41281 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/stp.go @@ -0,0 +1,150 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/gopacket/gopacket" +) + +type STPSwitchID struct { + Priority uint16 // Bridge priority + SysID uint16 // VLAN ID + HwAddr net.HardwareAddr +} + +// STP decode spanning tree protocol packets to transport BPDU (bridge protocol data unit) message. +type STP struct { + BaseLayer + ProtocolID uint16 + Version uint8 + Type uint8 + TC, TCA bool // TC: Topologie change ; TCA: Topologie change ack + RouteID, BridgeID STPSwitchID + Cost uint32 + PortID uint16 + MessageAge uint16 + MaxAge uint16 + HelloTime uint16 + FDelay uint16 +} + +// LayerType returns gopacket.LayerTypeSTP. +func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP } + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (s *STP) CanDecode() gopacket.LayerClass { + return LayerTypeSTP +} + +// DecodeFromBytes decodes the given bytes into this layer. 
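+// An illustrative sketch (assuming raw holds the bytes of a BPDU, for example
+// the payload of a captured LLC frame):
+//
+//	var stp layers.STP
+//	if err := stp.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("root:", stp.RouteID.HwAddr, "cost:", stp.Cost)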
+func (stp *STP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + stpLength := 35 + if len(data) < stpLength { + df.SetTruncated() + return fmt.Errorf("STP length %d too short", len(data)) + } + + stp.ProtocolID = binary.BigEndian.Uint16(data[:2]) + stp.Version = uint8(data[2]) + stp.Type = uint8(data[3]) + stp.TC = data[4]&0x01 != 0 + stp.TCA = data[4]&0x80 != 0 + stp.RouteID.Priority = binary.BigEndian.Uint16(data[5:7]) & 0xf000 + stp.RouteID.SysID = binary.BigEndian.Uint16(data[5:7]) & 0x0fff + stp.RouteID.HwAddr = net.HardwareAddr(data[7:13]) + stp.Cost = binary.BigEndian.Uint32(data[13:17]) + stp.BridgeID.Priority = binary.BigEndian.Uint16(data[17:19]) & 0xf000 + stp.BridgeID.SysID = binary.BigEndian.Uint16(data[17:19]) & 0x0fff + stp.BridgeID.HwAddr = net.HardwareAddr(data[19:25]) + stp.PortID = binary.BigEndian.Uint16(data[25:27]) + stp.MessageAge = binary.BigEndian.Uint16(data[27:29]) + stp.MaxAge = binary.BigEndian.Uint16(data[29:31]) + stp.HelloTime = binary.BigEndian.Uint16(data[31:33]) + stp.FDelay = binary.BigEndian.Uint16(data[33:35]) + stp.Contents = data[:stpLength] + stp.Payload = data[stpLength:] + + return nil +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (stp *STP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// Check if the priority value is correct. +func checkPriority(prio uint16) (uint16, error) { + if prio == 0 { + return prio, errors.New("Invalid Priority value must be in the rage <4096-61440> with an increment of 4096") + } + if prio%4096 == 0 { + return prio, nil + } else { + return prio, errors.New("Invalid Priority value must be in the rage <4096-61440> with an increment of 4096") + } +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
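+// Note that, as implemented below, SerializeTo panics rather than returning an
+// error when a priority is not a non-zero multiple of 4096 in <4096-61440> or
+// when a SysID (VLAN ID) is >= 4096, per checkPriority above. A minimal sketch:
+//
+//	stp.RouteID.Priority, stp.BridgeID.Priority = 32768, 32768
+//	buf := gopacket.NewSerializeBuffer()
+//	err := stp.SerializeTo(buf, gopacket.SerializeOptions{})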
+func (s *STP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var flags uint8 = 0x00 + bytes, err := b.PrependBytes(35) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, s.ProtocolID) + bytes[2] = s.Version + bytes[3] = s.Type + if s.TC { + flags |= 0x01 + } + if s.TCA { + flags |= 0x80 + } + bytes[4] = flags + + prioRoot, err := checkPriority(s.RouteID.Priority) + if err != nil { + panic(err) + } + if s.RouteID.SysID >= 4096 { + panic("Invalid VlanID value ..!") + } + binary.BigEndian.PutUint16(bytes[5:7], prioRoot|s.RouteID.SysID) + copy(bytes[7:13], s.RouteID.HwAddr) + + binary.BigEndian.PutUint32(bytes[13:17], s.Cost) + + prioBridge, err := checkPriority(s.BridgeID.Priority) + if err != nil { + panic(err) + } + if s.BridgeID.SysID >= 4096 { + panic("Invalid VlanID value ..!") + } + binary.BigEndian.PutUint16(bytes[17:19], prioBridge|s.BridgeID.SysID) + copy(bytes[19:25], s.BridgeID.HwAddr) + + binary.BigEndian.PutUint16(bytes[25:27], s.PortID) + binary.BigEndian.PutUint16(bytes[27:29], s.MessageAge) + binary.BigEndian.PutUint16(bytes[29:31], s.MaxAge) + binary.BigEndian.PutUint16(bytes[31:33], s.HelloTime) + binary.BigEndian.PutUint16(bytes[33:35], s.FDelay) + + return nil +} + +func decodeSTP(data []byte, p gopacket.PacketBuilder) error { + stp := &STP{} + return decodingLayerDecoder(stp, data, p) +} diff --git a/vendor/github.com/gopacket/gopacket/layers/tcp.go b/vendor/github.com/gopacket/gopacket/layers/tcp.go new file mode 100644 index 000000000..b38858950 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tcp.go @@ -0,0 +1,361 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/gopacket/gopacket" +) + +// TCP is the layer for TCP headers. +type TCP struct { + BaseLayer + SrcPort, DstPort TCPPort + Seq uint32 + Ack uint32 + DataOffset uint8 + FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool + Window uint16 + Checksum uint16 + Urgent uint16 + sPort, dPort []byte + Options []TCPOption + Padding []byte + opts [4]TCPOption + tcpipchecksum +} + +// TCPOptionKind represents a TCP option code. 
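+// For example, an MSS option advertising a 1460-byte segment size would be
+// represented (using the TCPOption type defined below) as:
+//
+//	opt := layers.TCPOption{
+//		OptionType:   layers.TCPOptionKindMSS,
+//		OptionLength: 4,
+//		OptionData:   []byte{0x05, 0xb4}, // 0x05b4 == 1460
+//	}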
+type TCPOptionKind uint8 + +const ( + TCPOptionKindEndList = 0 + TCPOptionKindNop = 1 + TCPOptionKindMSS = 2 // len = 4 + TCPOptionKindWindowScale = 3 // len = 3 + TCPOptionKindSACKPermitted = 4 // len = 2 + TCPOptionKindSACK = 5 // len = n + TCPOptionKindEcho = 6 // len = 6, obsolete + TCPOptionKindEchoReply = 7 // len = 6, obsolete + TCPOptionKindTimestamps = 8 // len = 10 + TCPOptionKindPartialOrderConnectionPermitted = 9 // len = 2, obsolete + TCPOptionKindPartialOrderServiceProfile = 10 // len = 3, obsolete + TCPOptionKindCC = 11 // obsolete + TCPOptionKindCCNew = 12 // obsolete + TCPOptionKindCCEcho = 13 // obsolete + TCPOptionKindAltChecksum = 14 // len = 3, obsolete + TCPOptionKindAltChecksumData = 15 // len = n, obsolete +) + +func (k TCPOptionKind) String() string { + switch k { + case TCPOptionKindEndList: + return "EndList" + case TCPOptionKindNop: + return "NOP" + case TCPOptionKindMSS: + return "MSS" + case TCPOptionKindWindowScale: + return "WindowScale" + case TCPOptionKindSACKPermitted: + return "SACKPermitted" + case TCPOptionKindSACK: + return "SACK" + case TCPOptionKindEcho: + return "Echo" + case TCPOptionKindEchoReply: + return "EchoReply" + case TCPOptionKindTimestamps: + return "Timestamps" + case TCPOptionKindPartialOrderConnectionPermitted: + return "PartialOrderConnectionPermitted" + case TCPOptionKindPartialOrderServiceProfile: + return "PartialOrderServiceProfile" + case TCPOptionKindCC: + return "CC" + case TCPOptionKindCCNew: + return "CCNew" + case TCPOptionKindCCEcho: + return "CCEcho" + case TCPOptionKindAltChecksum: + return "AltChecksum" + case TCPOptionKindAltChecksumData: + return "AltChecksumData" + default: + return fmt.Sprintf("Unknown(%d)", k) + } +} + +type TCPOption struct { + OptionType TCPOptionKind + OptionLength uint8 + OptionData []byte +} + +func (t TCPOption) String() string { + hd := hex.EncodeToString(t.OptionData) + if len(hd) > 0 { + hd = " 0x" + hd + } + switch t.OptionType { + case TCPOptionKindMSS: + if len(t.OptionData) >= 2 { + return fmt.Sprintf("TCPOption(%s:%v%s)", + t.OptionType, + binary.BigEndian.Uint16(t.OptionData), + hd) + } + + case TCPOptionKindTimestamps: + if len(t.OptionData) == 8 { + return fmt.Sprintf("TCPOption(%s:%v/%v%s)", + t.OptionType, + binary.BigEndian.Uint32(t.OptionData[:4]), + binary.BigEndian.Uint32(t.OptionData[4:8]), + hd) + } + } + return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd) +} + +// LayerType returns gopacket.LayerTypeTCP +func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP } + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
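+// A typical usage sketch (the common gopacket serialization pattern; ip is an
+// *IPv4 or *IPv6 layer and payload a []byte, both assumed):
+//
+//	tcp := &layers.TCP{SrcPort: 51000, DstPort: 443, SYN: true}
+//	if err := tcp.SetNetworkLayerForChecksum(ip); err != nil {
+//		log.Fatal(err)
+//	}
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//	err := gopacket.SerializeLayers(buf, opts, ip, tcp, gopacket.Payload(payload))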
+func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var optionLength int + for _, o := range t.Options { + switch o.OptionType { + case 0, 1: + optionLength += 1 + default: + optionLength += 2 + len(o.OptionData) + } + } + if opts.FixLengths { + if rem := optionLength % 4; rem != 0 { + t.Padding = lotsOfZeros[:4-rem] + } + t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4) + } + bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding)) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort)) + binary.BigEndian.PutUint32(bytes[4:], t.Seq) + binary.BigEndian.PutUint32(bytes[8:], t.Ack) + binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset()) + binary.BigEndian.PutUint16(bytes[14:], t.Window) + binary.BigEndian.PutUint16(bytes[18:], t.Urgent) + start := 20 + for _, o := range t.Options { + bytes[start] = byte(o.OptionType) + switch o.OptionType { + case 0, 1: + start++ + default: + if opts.FixLengths { + o.OptionLength = uint8(len(o.OptionData) + 2) + } + bytes[start+1] = o.OptionLength + copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData) + start += len(o.OptionData) + 2 + } + } + copy(bytes[start:], t.Padding) + if opts.ComputeChecksums { + // zero out checksum bytes in current serialization. + bytes[16] = 0 + bytes[17] = 0 + csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP) + if err != nil { + return err + } + t.Checksum = gopacket.FoldChecksum(csum) + } + binary.BigEndian.PutUint16(bytes[16:], t.Checksum) + return nil +} + +func (t *TCP) ComputeChecksum() (uint16, error) { + csum, err := t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP) + if err != nil { + return 0, err + } + return gopacket.FoldChecksum(csum), nil +} + +func (t *TCP) flagsAndOffset() uint16 { + f := uint16(t.DataOffset) << 12 + if t.FIN { + f |= 0x0001 + } + if t.SYN { + f |= 0x0002 + } + if t.RST { + f |= 0x0004 + } + if t.PSH { + f |= 0x0008 + } + if t.ACK { + f |= 0x0010 + } + if t.URG { + f |= 0x0020 + } + if t.ECE { + f |= 0x0040 + } + if t.CWR { + f |= 0x0080 + } + if t.NS { + f |= 0x0100 + } + return f +} + +func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data)) + } + tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2])) + tcp.sPort = data[0:2] + tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4])) + tcp.dPort = data[2:4] + tcp.Seq = binary.BigEndian.Uint32(data[4:8]) + tcp.Ack = binary.BigEndian.Uint32(data[8:12]) + tcp.DataOffset = data[12] >> 4 + tcp.FIN = data[13]&0x01 != 0 + tcp.SYN = data[13]&0x02 != 0 + tcp.RST = data[13]&0x04 != 0 + tcp.PSH = data[13]&0x08 != 0 + tcp.ACK = data[13]&0x10 != 0 + tcp.URG = data[13]&0x20 != 0 + tcp.ECE = data[13]&0x40 != 0 + tcp.CWR = data[13]&0x80 != 0 + tcp.NS = data[12]&0x01 != 0 + tcp.Window = binary.BigEndian.Uint16(data[14:16]) + tcp.Checksum = binary.BigEndian.Uint16(data[16:18]) + tcp.Urgent = binary.BigEndian.Uint16(data[18:20]) + if tcp.Options == nil { + // Pre-allocate to avoid allocating a slice. 
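+		// (tcp.opts is the fixed-size [4]TCPOption array embedded in the
+		// struct; reslicing it to length zero means packets with up to four
+		// options decode without any heap allocation.)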
+ tcp.Options = tcp.opts[:0] + } else { + tcp.Options = tcp.Options[:0] + } + tcp.Padding = tcp.Padding[:0] + if tcp.DataOffset < 5 { + return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset) + } + dataStart := int(tcp.DataOffset) * 4 + if dataStart > len(data) { + df.SetTruncated() + tcp.Payload = nil + tcp.Contents = data + return errors.New("TCP data offset greater than packet length") + } + tcp.Contents = data[:dataStart] + tcp.Payload = data[dataStart:] + // From here on, data points just to the header options. + data = data[20:dataStart] +OPTIONS: + for len(data) > 0 { + tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])}) + opt := &tcp.Options[len(tcp.Options)-1] + switch opt.OptionType { + case TCPOptionKindEndList: // End of options + opt.OptionLength = 1 + tcp.Padding = data[1:] + break OPTIONS + case TCPOptionKindNop: // 1 byte padding + opt.OptionLength = 1 + default: + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data)) + } + opt.OptionLength = data[1] + if opt.OptionLength < 2 { + return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength) + } else if int(opt.OptionLength) > len(data) { + df.SetTruncated() + return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data)) + } + opt.OptionData = data[2:opt.OptionLength] + } + data = data[opt.OptionLength:] + } + return nil +} + +func (t *TCP) CanDecode() gopacket.LayerClass { + return LayerTypeTCP +} + +func (t *TCP) NextLayerType() gopacket.LayerType { + lt := t.DstPort.LayerType() + if lt == gopacket.LayerTypePayload { + lt = t.SrcPort.LayerType() + } + return lt +} + +func decodeTCP(data []byte, p gopacket.PacketBuilder) error { + tcp := &TCP{} + err := tcp.DecodeFromBytes(data, p) + p.AddLayer(tcp) + p.SetTransportLayer(tcp) + if err != nil { + return err + } + if p.DecodeOptions().DecodeStreamsAsDatagrams { + return p.NextDecoder(tcp.NextLayerType()) + } else { + return p.NextDecoder(gopacket.LayerTypePayload) + } +} + +func (t *TCP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort) +} + +// For testing only +func (t *TCP) SetInternalPortsForTesting() { + t.sPort = make([]byte, 2) + t.dPort = make([]byte, 2) + binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort)) + binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort)) +} + +func (t *TCP) VerifyChecksum() (error, gopacket.ChecksumVerificationResult) { + bytes := append(t.Contents, t.Payload...) + + existing := t.Checksum + verification, err := t.computeChecksum(bytes, IPProtocolTCP) + if err != nil { + return err, gopacket.ChecksumVerificationResult{} + } + correct := gopacket.FoldChecksum(verification - uint32(existing)) + return nil, gopacket.ChecksumVerificationResult{ + Valid: correct == existing, + Correct: uint32(correct), + Actual: uint32(existing), + } +} diff --git a/vendor/github.com/gopacket/gopacket/layers/tcpip.go b/vendor/github.com/gopacket/gopacket/layers/tcpip.go new file mode 100644 index 000000000..90584b32a --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tcpip.go @@ -0,0 +1,85 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "errors" + "fmt" + + "github.com/gopacket/gopacket" +) + +// Checksum computation for TCP/UDP. +type tcpipchecksum struct { + pseudoheader tcpipPseudoHeader +} + +type tcpipPseudoHeader interface { + pseudoheaderChecksum() (uint32, error) +} + +func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) { + if err := ip.AddressTo4(); err != nil { + return 0, err + } + csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8 + csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3]) + csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8 + csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3]) + return csum, nil +} + +func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) { + if err := ip.AddressTo16(); err != nil { + return 0, err + } + for i := 0; i < 16; i += 2 { + csum += uint32(ip.SrcIP[i]) << 8 + csum += uint32(ip.SrcIP[i+1]) + csum += uint32(ip.DstIP[i]) << 8 + csum += uint32(ip.DstIP[i+1]) + } + return csum, nil +} + +// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the +// serialized TCP or UDP header plus its payload, with the checksum zero'd +// out. headerProtocol is the IP protocol number of the upper-layer header. +// The returned 32bit checksum may need to be folded. +func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint32, error) { + if c.pseudoheader == nil { + return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use") + } + length := uint32(len(headerAndPayload)) + csum, err := c.pseudoheader.pseudoheaderChecksum() + if err != nil { + return 0, err + } + csum += uint32(headerProtocol) + csum += length & 0xffff + csum += length >> 16 + + csum = gopacket.ComputeChecksum(headerAndPayload, csum) + return csum, nil +} + +// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it. +// This is needed for computing the checksum when serializing, since TCP/IP transport +// layer checksums depends on fields in the IPv4 or IPv6 layer that contains it. +// The passed in layer must be an *IPv4 or *IPv6. +func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error { + switch v := l.(type) { + case *IPv4: + i.pseudoheader = v + case *IPv6: + i.pseudoheader = v + default: + return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType()) + } + return nil +} diff --git a/vendor/github.com/gopacket/gopacket/layers/test_creator.py b/vendor/github.com/gopacket/gopacket/layers/test_creator.py new file mode 100644 index 000000000..c92d2765a --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/test_creator.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# Copyright 2012 Google, Inc. All rights reserved. + +"""TestCreator creates test templates from pcap files.""" + +import argparse +import base64 +import glob +import re +import string +import subprocess +import sys + + +class Packet(object): + """Helper class encapsulating packet from a pcap file.""" + + def __init__(self, packet_lines): + self.packet_lines = packet_lines + self.data = self._DecodeText(packet_lines) + + @classmethod + def _DecodeText(cls, packet_lines): + packet_bytes = [] + # First line is timestamp and stuff, skip it. + # Format: 0x0010: 0000 0020 3aff 3ffe 0000 0000 0000 0000 ....:.?......... 
+ + for line in packet_lines[1:]: + m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE) + if m is None: continue + for hexpart in m.group(1).split(): + packet_bytes.append(base64.b16decode(hexpart.upper())) + return ''.join(packet_bytes) + + def Test(self, name, link_type): + """Yields a test using this packet, as a set of lines.""" + yield '// testPacket%s is the packet:' % name + for line in self.packet_lines: + yield '// ' + line + yield 'var testPacket%s = []byte{' % name + data = list(self.data) + while data: + linebytes, data = data[:16], data[16:] + yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes]) + yield '}' + yield 'func TestPacket%s(t *testing.T) {' % name + yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type) + yield '\tif p.ErrorLayer() != nil {' + yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())' + yield '\t}' + yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type + yield '}' + yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name + yield '\tfor i := 0; i < b.N; i++ {' + yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type) + yield '\t}' + yield '}' + + + +def GetTcpdumpOutput(filename): + """Runs tcpdump on the given file, returning output as string.""" + return subprocess.check_output( + ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename]) + + +def TcpdumpOutputToPackets(output): + """Reads a pcap file with TCPDump, yielding Packet objects.""" + pdata = [] + for line in output.splitlines(): + if line[0] not in string.whitespace and pdata: + yield Packet(pdata) + pdata = [] + pdata.append(line) + if pdata: + yield Packet(pdata) + + +def main(): + class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter): + def _format_usage(self, usage, actions, groups, prefix=None): + header =('TestCreator creates gopacket tests using a pcap file.\n\n' + 'Tests are written to standard out... they can then be \n' + 'copied into the file of your choice and modified as \n' + 'you see.\n\n') + return header + argparse.ArgumentDefaultsHelpFormatter._format_usage( + self, usage, actions, groups, prefix) + + parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter) + parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)') + parser.add_argument('--name', default='Packet%d', help='the layer type, must have "%d" inside it') + parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process') + + args = parser.parse_args() + + for arg in args.files: + for path in glob.glob(arg): + for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))): + print '\n'.join(packet.Test( + args.name % i, args.link_type)) + +if __name__ == '__main__': + main() diff --git a/vendor/github.com/gopacket/gopacket/layers/tls.go b/vendor/github.com/gopacket/gopacket/layers/tls.go new file mode 100644 index 000000000..b5c041f6a --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tls.go @@ -0,0 +1,283 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/gopacket/gopacket" +) + +// TLSType defines the type of data after the TLS Record +type TLSType uint8 + +// TLSType known values. +const ( + TLSChangeCipherSpec TLSType = 20 + TLSAlert TLSType = 21 + TLSHandshake TLSType = 22 + TLSApplicationData TLSType = 23 + TLSUnknown TLSType = 255 +) + +// String shows the register type nicely formatted +func (tt TLSType) String() string { + switch tt { + default: + return "Unknown" + case TLSChangeCipherSpec: + return "Change Cipher Spec" + case TLSAlert: + return "Alert" + case TLSHandshake: + return "Handshake" + case TLSApplicationData: + return "Application Data" + } +} + +// TLSVersion represents the TLS version in numeric format +type TLSVersion uint16 + +// Strings shows the TLS version nicely formatted +func (tv TLSVersion) String() string { + switch tv { + default: + return "Unknown" + case 0x0200: + return "SSL 2.0" + case 0x0300: + return "SSL 3.0" + case 0x0301: + return "TLS 1.0" + case 0x0302: + return "TLS 1.1" + case 0x0303: + return "TLS 1.2" + case 0x0304: + return "TLS 1.3" + } +} + +// TLS is specified in RFC 5246 +// +// TLS Record Protocol +// 0 1 2 3 4 5 6 7 8 +// +--+--+--+--+--+--+--+--+ +// | Content Type | +// +--+--+--+--+--+--+--+--+ +// | Version (major) | +// +--+--+--+--+--+--+--+--+ +// | Version (minor) | +// +--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+ + +// TLS is actually a slide of TLSrecord structures +type TLS struct { + BaseLayer + + // TLS Records + ChangeCipherSpec []TLSChangeCipherSpecRecord + Handshake []TLSHandshakeRecord + AppData []TLSAppDataRecord + Alert []TLSAlertRecord +} + +// TLSRecordHeader contains all the information that each TLS Record types should have +type TLSRecordHeader struct { + ContentType TLSType + Version TLSVersion + Length uint16 +} + +// LayerType returns gopacket.LayerTypeTLS. +func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS } + +// decodeTLS decodes the byte slice into a TLS type. It also +// setups the application Layer in PacketBuilder. +func decodeTLS(data []byte, p gopacket.PacketBuilder) error { + t := &TLS{} + err := t.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(t) + p.SetApplicationLayer(t) + return nil +} + +// DecodeFromBytes decodes the slice into the TLS struct. 
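+// A short usage sketch (assuming data holds one or more complete TLS records,
+// e.g. the payload of a reassembled TCP stream):
+//
+//	var tls layers.TLS
+//	if err := tls.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%d handshake, %d appdata records\n", len(tls.Handshake), len(tls.AppData))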
+func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + t.BaseLayer.Contents = data + t.BaseLayer.Payload = nil + + t.ChangeCipherSpec = t.ChangeCipherSpec[:0] + t.Handshake = t.Handshake[:0] + t.AppData = t.AppData[:0] + t.Alert = t.Alert[:0] + + return t.decodeTLSRecords(data, df) +} + +func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 5 { + df.SetTruncated() + return errors.New("TLS record too short") + } + + // since there are no further layers, the baselayer's content is + // pointing to this layer + // TODO: Consider removing this + t.BaseLayer = BaseLayer{Contents: data[:len(data)]} + + var h TLSRecordHeader + h.ContentType = TLSType(data[0]) + h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3])) + h.Length = binary.BigEndian.Uint16(data[3:5]) + + if h.ContentType.String() == "Unknown" { + return errors.New("Unknown TLS record type") + } + + hl := 5 // header length + tl := hl + int(h.Length) + if len(data) < tl { + df.SetTruncated() + return errors.New("TLS packet length mismatch") + } + + switch h.ContentType { + default: + return errors.New("Unknown TLS record type") + case TLSChangeCipherSpec: + var r TLSChangeCipherSpecRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.ChangeCipherSpec = append(t.ChangeCipherSpec, r) + case TLSAlert: + var r TLSAlertRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.Alert = append(t.Alert, r) + case TLSHandshake: + var r TLSHandshakeRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.Handshake = append(t.Handshake, r) + case TLSApplicationData: + var r TLSAppDataRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.AppData = append(t.AppData, r) + } + + if len(data) == tl { + return nil + } + return t.decodeTLSRecords(data[tl:len(data)], df) +} + +// CanDecode implements gopacket.DecodingLayer. +func (t *TLS) CanDecode() gopacket.LayerClass { + return LayerTypeTLS +} + +// NextLayerType implements gopacket.DecodingLayer. +func (t *TLS) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord +func (t *TLS) Payload() []byte { + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. 
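+// Note that handshake records are currently written back as bare 5-byte
+// headers (see the TODOs below). A minimal sketch serializing a close_notify
+// alert:
+//
+//	t := &layers.TLS{Alert: []layers.TLSAlertRecord{{
+//		TLSRecordHeader: layers.TLSRecordHeader{ContentType: layers.TLSAlert, Version: 0x0303},
+//		Level:           layers.TLSAlertWarning,
+//		Description:     layers.TLSAlertCloseNotify,
+//	}}}
+//	buf := gopacket.NewSerializeBuffer()
+//	err := t.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true})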
+func (t *TLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + totalLength := 0 + for _, record := range t.ChangeCipherSpec { + if opts.FixLengths { + record.Length = 1 + } + totalLength += 5 + 1 // length of header + record + } + for range t.Handshake { + totalLength += 5 + // TODO + } + for _, record := range t.AppData { + if opts.FixLengths { + record.Length = uint16(len(record.Payload)) + } + totalLength += 5 + len(record.Payload) + } + for _, record := range t.Alert { + if len(record.EncryptedMsg) == 0 { + if opts.FixLengths { + record.Length = 2 + } + totalLength += 5 + 2 + } else { + if opts.FixLengths { + record.Length = uint16(len(record.EncryptedMsg)) + } + totalLength += 5 + len(record.EncryptedMsg) + } + } + data, err := b.PrependBytes(totalLength) + if err != nil { + return err + } + off := 0 + for _, record := range t.ChangeCipherSpec { + off = encodeHeader(record.TLSRecordHeader, data, off) + data[off] = byte(record.Message) + off++ + } + for _, record := range t.Handshake { + off = encodeHeader(record.TLSRecordHeader, data, off) + // TODO + } + for _, record := range t.AppData { + off = encodeHeader(record.TLSRecordHeader, data, off) + copy(data[off:], record.Payload) + off += len(record.Payload) + } + for _, record := range t.Alert { + off = encodeHeader(record.TLSRecordHeader, data, off) + if len(record.EncryptedMsg) == 0 { + data[off] = byte(record.Level) + data[off+1] = byte(record.Description) + off += 2 + } else { + copy(data[off:], record.EncryptedMsg) + off += len(record.EncryptedMsg) + } + } + return nil +} + +func encodeHeader(header TLSRecordHeader, data []byte, offset int) int { + data[offset] = byte(header.ContentType) + binary.BigEndian.PutUint16(data[offset+1:], uint16(header.Version)) + binary.BigEndian.PutUint16(data[offset+3:], header.Length) + + return offset + 5 +} diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_alert.go b/vendor/github.com/gopacket/gopacket/layers/tls_alert.go new file mode 100644 index 000000000..15cc2e16d --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tls_alert.go @@ -0,0 +1,165 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
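// --- Example (editor's sketch, not part of the vendored code) ---------------
// Serializing a single application-data record with SerializeTo above. The
// record's Length is set by hand: note that the FixLengths loops above assign
// to loop-local copies of the records, so a zero stored Length would otherwise
// be written into the header as-is.
package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	t := &layers.TLS{
		AppData: []layers.TLSAppDataRecord{{
			TLSRecordHeader: layers.TLSRecordHeader{
				ContentType: layers.TLSApplicationData,
				Version:     0x0303, // TLS 1.2
				Length:      5,
			},
			Payload: []byte("hello"),
		}},
	}
	buf := gopacket.NewSerializeBuffer()
	if err := t.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 17 03 03 00 05 68 65 6c 6c 6f
}

// --- end example -------------------------------------------------------------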
+ +package layers + +import ( + "errors" + "fmt" + + "github.com/gopacket/gopacket" +) + +// TLSAlertLevel defines the alert level data type +type TLSAlertLevel uint8 + +// TLSAlertDescr defines the alert descrption data type +type TLSAlertDescr uint8 + +const ( + TLSAlertWarning TLSAlertLevel = 1 + TLSAlertFatal TLSAlertLevel = 2 + TLSAlertUnknownLevel TLSAlertLevel = 255 + + TLSAlertCloseNotify TLSAlertDescr = 0 + TLSAlertUnexpectedMessage TLSAlertDescr = 10 + TLSAlertBadRecordMac TLSAlertDescr = 20 + TLSAlertDecryptionFailedRESERVED TLSAlertDescr = 21 + TLSAlertRecordOverflow TLSAlertDescr = 22 + TLSAlertDecompressionFailure TLSAlertDescr = 30 + TLSAlertHandshakeFailure TLSAlertDescr = 40 + TLSAlertNoCertificateRESERVED TLSAlertDescr = 41 + TLSAlertBadCertificate TLSAlertDescr = 42 + TLSAlertUnsupportedCertificate TLSAlertDescr = 43 + TLSAlertCertificateRevoked TLSAlertDescr = 44 + TLSAlertCertificateExpired TLSAlertDescr = 45 + TLSAlertCertificateUnknown TLSAlertDescr = 46 + TLSAlertIllegalParameter TLSAlertDescr = 47 + TLSAlertUnknownCa TLSAlertDescr = 48 + TLSAlertAccessDenied TLSAlertDescr = 49 + TLSAlertDecodeError TLSAlertDescr = 50 + TLSAlertDecryptError TLSAlertDescr = 51 + TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60 + TLSAlertProtocolVersion TLSAlertDescr = 70 + TLSAlertInsufficientSecurity TLSAlertDescr = 71 + TLSAlertInternalError TLSAlertDescr = 80 + TLSAlertUserCanceled TLSAlertDescr = 90 + TLSAlertNoRenegotiation TLSAlertDescr = 100 + TLSAlertUnsupportedExtension TLSAlertDescr = 110 + TLSAlertUnknownDescription TLSAlertDescr = 255 +) + +// TLS Alert +// 0 1 2 3 4 5 6 7 8 +// +--+--+--+--+--+--+--+--+ +// | Level | +// +--+--+--+--+--+--+--+--+ +// | Description | +// +--+--+--+--+--+--+--+--+ + +// TLSAlertRecord contains all the information that each Alert Record type should have +type TLSAlertRecord struct { + TLSRecordHeader + + Level TLSAlertLevel + Description TLSAlertDescr + + EncryptedMsg []byte +} + +// DecodeFromBytes decodes the slice into the TLS struct. 
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if len(data) < 2 { + df.SetTruncated() + return errors.New("TLS Alert packet too short") + } + + if t.Length == 2 { + t.Level = TLSAlertLevel(data[0]) + t.Description = TLSAlertDescr(data[1]) + } else { + t.Level = TLSAlertUnknownLevel + t.Description = TLSAlertUnknownDescription + t.EncryptedMsg = data + } + + return nil +} + +// Strings shows the TLS alert level nicely formatted +func (al TLSAlertLevel) String() string { + switch al { + default: + return fmt.Sprintf("Unknown(%d)", al) + case TLSAlertWarning: + return "Warning" + case TLSAlertFatal: + return "Fatal" + } +} + +// Strings shows the TLS alert description nicely formatted +func (ad TLSAlertDescr) String() string { + switch ad { + default: + return "Unknown" + case TLSAlertCloseNotify: + return "close_notify" + case TLSAlertUnexpectedMessage: + return "unexpected_message" + case TLSAlertBadRecordMac: + return "bad_record_mac" + case TLSAlertDecryptionFailedRESERVED: + return "decryption_failed_RESERVED" + case TLSAlertRecordOverflow: + return "record_overflow" + case TLSAlertDecompressionFailure: + return "decompression_failure" + case TLSAlertHandshakeFailure: + return "handshake_failure" + case TLSAlertNoCertificateRESERVED: + return "no_certificate_RESERVED" + case TLSAlertBadCertificate: + return "bad_certificate" + case TLSAlertUnsupportedCertificate: + return "unsupported_certificate" + case TLSAlertCertificateRevoked: + return "certificate_revoked" + case TLSAlertCertificateExpired: + return "certificate_expired" + case TLSAlertCertificateUnknown: + return "certificate_unknown" + case TLSAlertIllegalParameter: + return "illegal_parameter" + case TLSAlertUnknownCa: + return "unknown_ca" + case TLSAlertAccessDenied: + return "access_denied" + case TLSAlertDecodeError: + return "decode_error" + case TLSAlertDecryptError: + return "decrypt_error" + case TLSAlertExportRestrictionRESERVED: + return "export_restriction_RESERVED" + case TLSAlertProtocolVersion: + return "protocol_version" + case TLSAlertInsufficientSecurity: + return "insufficient_security" + case TLSAlertInternalError: + return "internal_error" + case TLSAlertUserCanceled: + return "user_canceled" + case TLSAlertNoRenegotiation: + return "no_renegotiation" + case TLSAlertUnsupportedExtension: + return "unsupported_extension" + } +} diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_appdata.go b/vendor/github.com/gopacket/gopacket/layers/tls_appdata.go new file mode 100644 index 000000000..cc6b095b5 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tls_appdata.go @@ -0,0 +1,34 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + + "github.com/gopacket/gopacket" +) + +// TLSAppDataRecord contains all the information that each AppData Record types should have +type TLSAppDataRecord struct { + TLSRecordHeader + Payload []byte +} + +// DecodeFromBytes decodes the slice into the TLS struct. 
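// --- Example (editor's sketch, not part of the vendored code) ---------------
// Decoding a plaintext alert record: level 2 (Fatal), description 40
// (handshake_failure). Encrypted alerts land in EncryptedMsg instead.
package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	raw := []byte{0x15, 0x03, 0x03, 0x00, 0x02, 0x02, 0x28}
	var tls layers.TLS
	if err := tls.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	a := tls.Alert[0]
	fmt.Printf("%v: %v\n", a.Level, a.Description) // Fatal: handshake_failure
}

// --- end example -------------------------------------------------------------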
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if len(data) != int(t.Length) { + return errors.New("TLS Application Data length mismatch") + } + + t.Payload = data + return nil +} diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go b/vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go new file mode 100644 index 000000000..5f9312ab4 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go @@ -0,0 +1,64 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + + "github.com/gopacket/gopacket" +) + +// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record +type TLSchangeCipherSpec uint8 + +const ( + TLSChangecipherspecMessage TLSchangeCipherSpec = 1 + TLSChangecipherspecUnknown TLSchangeCipherSpec = 255 +) + +// TLS Change Cipher Spec +// 0 1 2 3 4 5 6 7 8 +// +--+--+--+--+--+--+--+--+ +// | Message | +// +--+--+--+--+--+--+--+--+ + +// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record +type TLSChangeCipherSpecRecord struct { + TLSRecordHeader + + Message TLSchangeCipherSpec +} + +// DecodeFromBytes decodes the slice into the TLS struct. +func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if len(data) != 1 { + df.SetTruncated() + return errors.New("TLS Change Cipher Spec record incorrect length") + } + + t.Message = TLSchangeCipherSpec(data[0]) + if t.Message != TLSChangecipherspecMessage { + t.Message = TLSChangecipherspecUnknown + } + + return nil +} + +// String shows the message value nicely formatted +func (ccs TLSchangeCipherSpec) String() string { + switch ccs { + default: + return "Unknown" + case TLSChangecipherspecMessage: + return "Change Cipher Spec Message" + } +} diff --git a/vendor/github.com/gopacket/gopacket/layers/tls_handshake.go b/vendor/github.com/gopacket/gopacket/layers/tls_handshake.go new file mode 100644 index 000000000..dba0c4c75 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/tls_handshake.go @@ -0,0 +1,148 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/gopacket/gopacket" +) + +/*refer to https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.4*/ +const ( + TLSHandshakeHelloRequest = 0 + TLSHandshakeClientHello = 1 + TLSHandshakeServerHello = 2 + TLSHandsharkHelloVerirfyRequest = 3 + TLSHandshakeCertificate = 11 + TLSHandshakeServerKeyExchange = 12 + TLSHandshakeCertificateRequest = 13 + TLSHandshakeServerHelloDone = 14 + TLSHandshakeCertificateVerify = 15 + TLSHandshakeClientKeyExchange = 16 + TLSHandshakeFinished = 20 +) + +var handShakeTypeMap = map[uint8]string{ + TLSHandshakeHelloRequest: "Hello Request", + TLSHandshakeClientHello: "Client Hello", + TLSHandshakeServerHello: "Server Hello", + TLSHandsharkHelloVerirfyRequest: "Hello Verify Request", + TLSHandshakeCertificate: "Certificate", + TLSHandshakeServerKeyExchange: "Server Key Exchange", + TLSHandshakeCertificateRequest: "Certificate Request", + TLSHandshakeServerHelloDone: "Server Hello Done", + TLSHandshakeCertificateVerify: "Certificate Verify", + TLSHandshakeClientKeyExchange: "Client Key Exchange", + TLSHandshakeFinished: "Finished", +} + +type TLSHandshakeRecordClientHello struct { + HandshakeType uint8 + Length uint32 + ProtocolVersion TLSVersion + Random []uint8 + SessionIDLength uint8 + SessionID []uint8 + CipherSuitsLength uint16 + CipherSuits []uint8 + CompressionMethodsLength uint8 + CompressionMethods []uint8 + ExtensionsLength uint16 + Extensions []uint8 +} + +type TLSHandshakeRecordClientKeyChange struct { +} + +// TLSHandshakeRecord defines the structure of a Handshare Record +type TLSHandshakeRecord struct { + TLSRecordHeader + ClientHello TLSHandshakeRecordClientHello + ClientKeyChange TLSHandshakeRecordClientKeyChange +} + +func (t *TLSHandshakeRecordClientHello) decodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + t.HandshakeType = data[0] + d := make([]byte, 4) + for k, v := range data[1:4] { + d[k+1] = v + } + t.Length = binary.BigEndian.Uint32(d) + t.ProtocolVersion = TLSVersion(binary.BigEndian.Uint16(data[4:6])) + t.Random = data[6:38] + t.SessionIDLength = data[38] + t.SessionID = data[39 : 39+t.SessionIDLength] + t.CipherSuitsLength = binary.BigEndian.Uint16(data[39+t.SessionIDLength : 39+t.SessionIDLength+2]) + t.CipherSuits = data[39+t.SessionIDLength+2 : (39 + uint16(t.SessionIDLength) + 2 + t.CipherSuitsLength)] + t.CompressionMethodsLength = data[(39 + uint16(t.SessionIDLength) + 2 + t.CipherSuitsLength)] + t.CompressionMethods = data[(39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1 : (39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength)] + t.ExtensionsLength = binary.BigEndian.Uint16(data[(39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength) : (39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength)+2]) + t.Extensions = data[((39 + uint16(t.SessionIDLength) + 2 + t.CipherSuitsLength) + 1 + uint16(t.CompressionMethodsLength) + 2) : ((39+uint16(t.SessionIDLength)+2+t.CipherSuitsLength)+1+uint16(t.CompressionMethodsLength)+2)+t.ExtensionsLength] + return nil +} +func (t *TLSHandshakeRecordClientKeyChange) decodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + /*TBD*/ + return nil +} + +/** + * Checks whether a handshake message seems encrypted and cannot be dissected. 
+ */ +func (t TLSHandshakeRecord) isEncryptedHandshakeMessage(h TLSRecordHeader, data []byte) bool { + if h.Length < 16 { + /* + * Encrypted data has additional overhead. For TLS 1.0/1.1 with stream + * and block ciphers, there is at least a MAC which is at minimum 16 + * bytes for MD5. In TLS 1.2, AEAD adds an explicit nonce and auth tag. + * For AES-GCM/CCM the auth tag is 16 bytes. AES_CCM_8 (RFC 6655) uses 8 + * byte auth tags, but the explicit nonce is also 8 (sums up to 16). + * + * So anything smaller than 16 bytes is assumed to be plaintext. + */ + return false + } + maybeType := data[0] + d := make([]byte, 4) + for k, v := range data[1:4] { + d[k+1] = v + } + if uint32(h.Length)-binary.BigEndian.Uint32(d) != 4 { + return true + } + if _, ok := handShakeTypeMap[maybeType]; !ok { + return true + } + return false +} + +// DecodeFromBytes decodes the slice into the TLS struct. +func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error { + // TLS Record Header + t.ContentType = h.ContentType + t.Version = h.Version + t.Length = h.Length + + if t.isEncryptedHandshakeMessage(h, data) { + fmt.Printf("encrypted message\n") + return nil + } + handshakeType := data[0] + switch handshakeType { + case TLSHandshakeClientHello: + t.ClientHello.decodeFromBytes(data, df) + case TLSHandshakeClientKeyExchange: + t.ClientKeyChange.decodeFromBytes(data, df) + default: + return errors.New("Unknown TLS handshake type") + // TODO + } + + return nil +} diff --git a/vendor/github.com/gopacket/gopacket/layers/udp.go b/vendor/github.com/gopacket/gopacket/layers/udp.go new file mode 100644 index 000000000..ed489130e --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/udp.go @@ -0,0 +1,158 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + + "github.com/gopacket/gopacket" +) + +// UDP is the layer for UDP headers. +type UDP struct { + BaseLayer + SrcPort, DstPort UDPPort + Length uint16 + Checksum uint16 + sPort, dPort []byte + tcpipchecksum +} + +// LayerType returns gopacket.LayerTypeUDP +func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP } + +func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data)) + } + udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2])) + udp.sPort = data[0:2] + udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4])) + udp.dPort = data[2:4] + udp.Length = binary.BigEndian.Uint16(data[4:6]) + udp.Checksum = binary.BigEndian.Uint16(data[6:8]) + udp.BaseLayer = BaseLayer{Contents: data[:8]} + switch { + case udp.Length >= 8: + hlen := int(udp.Length) + if hlen > len(data) { + df.SetTruncated() + hlen = len(data) + } + udp.Payload = data[8:hlen] + case udp.Length == 0: // Jumbogram, use entire rest of data + udp.Payload = data[8:] + default: + return fmt.Errorf("UDP packet too small: %d bytes", udp.Length) + } + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
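// --- Example (editor's sketch, not part of the vendored code) ---------------
// Decoding the fixed 8-byte UDP header above, outside any packet machinery.
// The bytes are hypothetical: source port 4660, destination port 22136,
// length 12, checksum zero, and a 4-byte payload.
package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	raw := []byte{0x12, 0x34, 0x56, 0x78, 0x00, 0x0c, 0x00, 0x00, 'p', 'i', 'n', 'g'}
	var udp layers.UDP
	if err := udp.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Printf("%v -> %v, payload %q\n", udp.SrcPort, udp.DstPort, udp.Payload)
}

// --- end example -------------------------------------------------------------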
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var jumbo bool + + payload := b.Bytes() + if _, ok := u.pseudoheader.(*IPv6); ok { + if len(payload)+8 > 65535 { + jumbo = true + } + } + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort)) + if opts.FixLengths { + if jumbo { + u.Length = 0 + } else { + u.Length = uint16(len(payload)) + 8 + } + } + binary.BigEndian.PutUint16(bytes[4:], u.Length) + if opts.ComputeChecksums { + // zero out checksum bytes + bytes[6] = 0 + bytes[7] = 0 + + csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP) + if err != nil { + return err + } + csumFolded := gopacket.FoldChecksum(csum) + + // RFC768: If the computed checksum is zero, it is transmitted as all ones (the + // equivalent in one's complement arithmetic). An all zero transmitted + // checksum value means that the transmitter generated no checksum. + if csumFolded == 0 { + csumFolded = 0xffff + } + u.Checksum = csumFolded + } + binary.BigEndian.PutUint16(bytes[6:], u.Checksum) + return nil +} + +func (u *UDP) CanDecode() gopacket.LayerClass { + return LayerTypeUDP +} + +// NextLayerType use the destination port to select the +// right next decoder. It tries first to decode via the +// destination port, then the source port. +func (u *UDP) NextLayerType() gopacket.LayerType { + if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload { + return lt + } + return u.SrcPort.LayerType() +} + +func decodeUDP(data []byte, p gopacket.PacketBuilder) error { + udp := &UDP{} + err := udp.DecodeFromBytes(data, p) + p.AddLayer(udp) + p.SetTransportLayer(udp) + if err != nil { + return err + } + return p.NextDecoder(udp.NextLayerType()) +} + +func (u *UDP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort) +} + +// For testing only +func (u *UDP) SetInternalPortsForTesting() { + u.sPort = make([]byte, 2) + u.dPort = make([]byte, 2) + binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort)) + binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort)) +} + +func (u *UDP) VerifyChecksum() (error, gopacket.ChecksumVerificationResult) { + bytes := append(u.Contents, u.Payload...) + + existing := u.Checksum + verification, err := u.computeChecksum(bytes, IPProtocolUDP) + if err != nil { + return err, gopacket.ChecksumVerificationResult{} + } + correct := gopacket.FoldChecksum(verification - uint32(existing)) + return nil, gopacket.ChecksumVerificationResult{ + Valid: existing == 0 || correct == existing, + Correct: uint32(correct), + Actual: uint32(existing), + } +} diff --git a/vendor/github.com/gopacket/gopacket/layers/udplite.go b/vendor/github.com/gopacket/gopacket/layers/udplite.go new file mode 100644 index 000000000..6566e17b8 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/udplite.go @@ -0,0 +1,45 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + + "github.com/gopacket/gopacket" +) + +// UDPLite is the layer for UDP-Lite headers (rfc 3828). 
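// --- Example (editor's sketch, not part of the vendored code) ---------------
// Building a UDP/IPv4 packet: SetNetworkLayerForChecksum supplies the
// pseudo-header that computeChecksum above folds into the UDP checksum, and
// FixLengths fills in the UDP (and IPv4) length fields. Addresses and ports
// are made up.
package main

import (
	"fmt"
	"net"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	ip := &layers.IPv4{
		Version:  4,
		TTL:      64,
		Protocol: layers.IPProtocolUDP,
		SrcIP:    net.IP{10, 0, 0, 1},
		DstIP:    net.IP{10, 0, 0, 2},
	}
	udp := &layers.UDP{SrcPort: 53000, DstPort: 53}
	if err := udp.SetNetworkLayerForChecksum(ip); err != nil {
		panic(err)
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, ip, udp, gopacket.Payload("ping")); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes: % x\n", len(buf.Bytes()), buf.Bytes())
}

// --- end example -------------------------------------------------------------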
+type UDPLite struct { + BaseLayer + SrcPort, DstPort UDPLitePort + ChecksumCoverage uint16 + Checksum uint16 + sPort, dPort []byte +} + +// LayerType returns gopacket.LayerTypeUDPLite +func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite } + +func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error { + udp := &UDPLite{ + SrcPort: UDPLitePort(binary.BigEndian.Uint16(data[0:2])), + sPort: data[0:2], + DstPort: UDPLitePort(binary.BigEndian.Uint16(data[2:4])), + dPort: data[2:4], + ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]), + Checksum: binary.BigEndian.Uint16(data[6:8]), + BaseLayer: BaseLayer{data[:8], data[8:]}, + } + p.AddLayer(udp) + p.SetTransportLayer(udp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +func (u *UDPLite) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort) +} diff --git a/vendor/github.com/gopacket/gopacket/layers/usb.go b/vendor/github.com/gopacket/gopacket/layers/usb.go new file mode 100644 index 000000000..b04791d5b --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/usb.go @@ -0,0 +1,293 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/gopacket/gopacket" +) + +type USBEventType uint8 + +const ( + USBEventTypeSubmit USBEventType = 'S' + USBEventTypeComplete USBEventType = 'C' + USBEventTypeError USBEventType = 'E' +) + +func (a USBEventType) String() string { + switch a { + case USBEventTypeSubmit: + return "SUBMIT" + case USBEventTypeComplete: + return "COMPLETE" + case USBEventTypeError: + return "ERROR" + default: + return "Unknown event type" + } +} + +type USBRequestBlockSetupRequest uint8 + +const ( + USBRequestBlockSetupRequestGetStatus USBRequestBlockSetupRequest = 0x00 + USBRequestBlockSetupRequestClearFeature USBRequestBlockSetupRequest = 0x01 + USBRequestBlockSetupRequestSetFeature USBRequestBlockSetupRequest = 0x03 + USBRequestBlockSetupRequestSetAddress USBRequestBlockSetupRequest = 0x05 + USBRequestBlockSetupRequestGetDescriptor USBRequestBlockSetupRequest = 0x06 + USBRequestBlockSetupRequestSetDescriptor USBRequestBlockSetupRequest = 0x07 + USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08 + USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09 + USBRequestBlockSetupRequestSetIdle USBRequestBlockSetupRequest = 0x0a +) + +func (a USBRequestBlockSetupRequest) String() string { + switch a { + case USBRequestBlockSetupRequestGetStatus: + return "GET_STATUS" + case USBRequestBlockSetupRequestClearFeature: + return "CLEAR_FEATURE" + case USBRequestBlockSetupRequestSetFeature: + return "SET_FEATURE" + case USBRequestBlockSetupRequestSetAddress: + return "SET_ADDRESS" + case USBRequestBlockSetupRequestGetDescriptor: + return "GET_DESCRIPTOR" + case USBRequestBlockSetupRequestSetDescriptor: + return "SET_DESCRIPTOR" + case USBRequestBlockSetupRequestGetConfiguration: + return "GET_CONFIGURATION" + case USBRequestBlockSetupRequestSetConfiguration: + return "SET_CONFIGURATION" + case USBRequestBlockSetupRequestSetIdle: + return "SET_IDLE" + default: + return "UNKNOWN" + } +} + +type USBTransportType uint8 + +const ( + USBTransportTypeTransferIn USBTransportType = 0x80 // Indicates send or receive + USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur 
continuously and periodically. They typically contain time sensitive information, such as an audio or video stream. + USBTransportTypeInterrupt USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards. + USBTransportTypeControl USBTransportType = 0x02 // Control transfers are typically used for command and status operations. + USBTransportTypeBulk USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers. +) + +type USBDirectionType uint8 + +const ( + USBDirectionTypeUnknown USBDirectionType = iota + USBDirectionTypeIn + USBDirectionTypeOut +) + +func (a USBDirectionType) String() string { + switch a { + case USBDirectionTypeIn: + return "In" + case USBDirectionTypeOut: + return "Out" + default: + return "Unknown direction type" + } +} + +// The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol. +type USB struct { + BaseLayer + ID uint64 + EventType USBEventType + TransferType USBTransportType + Direction USBDirectionType + EndpointNumber uint8 + DeviceAddress uint8 + BusID uint16 + TimestampSec int64 + TimestampUsec int32 + Setup bool + Data bool + Status int32 + UrbLength uint32 + UrbDataLength uint32 + + UrbInterval uint32 + UrbStartFrame uint32 + UrbCopyOfTransferFlags uint32 + IsoNumDesc uint32 +} + +func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB } + +func (m *USB) NextLayerType() gopacket.LayerType { + if m.Setup { + return LayerTypeUSBRequestBlockSetup + } else if m.Data { + } + + return m.TransferType.LayerType() +} + +func decodeUSB(data []byte, p gopacket.PacketBuilder) error { + d := &USB{} + + return decodingLayerDecoder(d, data, p) +} + +func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 40 { + df.SetTruncated() + return errors.New("USB < 40 bytes") + } + m.ID = binary.LittleEndian.Uint64(data[0:8]) + m.EventType = USBEventType(data[8]) + m.TransferType = USBTransportType(data[9]) + + m.EndpointNumber = data[10] & 0x7f + if data[10]&uint8(USBTransportTypeTransferIn) > 0 { + m.Direction = USBDirectionTypeIn + } else { + m.Direction = USBDirectionTypeOut + } + + m.DeviceAddress = data[11] + m.BusID = binary.LittleEndian.Uint16(data[12:14]) + + if uint(data[14]) == 0 { + m.Setup = true + } + + if uint(data[15]) == 0 { + m.Data = true + } + + m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24])) + m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28])) + m.Status = int32(binary.LittleEndian.Uint32(data[28:32])) + m.UrbLength = binary.LittleEndian.Uint32(data[32:36]) + m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40]) + + m.Contents = data[:40] + m.Payload = data[40:] + + if m.Setup { + m.Payload = data[40:] + } else if m.Data { + m.Payload = data[uint32(len(data))-m.UrbDataLength:] + } + + // if 64 bit, dissect_linux_usb_pseudo_header_ext + if false { + m.UrbInterval = binary.LittleEndian.Uint32(data[40:44]) + m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48]) + m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52]) + m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56]) + m.Contents = data[:56] + m.Payload = data[56:] + } + + // crc5 or crc16 + // eop (end of packet) + + return nil +} + +type USBRequestBlockSetup struct { + BaseLayer + RequestType uint8 + Request 
USBRequestBlockSetupRequest + Value uint16 + Index uint16 + Length uint16 +} + +func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup } + +func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.RequestType = data[0] + m.Request = USBRequestBlockSetupRequest(data[1]) + m.Value = binary.LittleEndian.Uint16(data[2:4]) + m.Index = binary.LittleEndian.Uint16(data[4:6]) + m.Length = binary.LittleEndian.Uint16(data[6:8]) + m.Contents = data[:8] + m.Payload = data[8:] + return nil +} + +func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error { + d := &USBRequestBlockSetup{} + return decodingLayerDecoder(d, data, p) +} + +type USBControl struct { + BaseLayer +} + +func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl } + +func (m *USBControl) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error { + d := &USBControl{} + return decodingLayerDecoder(d, data, p) +} + +type USBInterrupt struct { + BaseLayer +} + +func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt } + +func (m *USBInterrupt) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error { + d := &USBInterrupt{} + return decodingLayerDecoder(d, data, p) +} + +type USBBulk struct { + BaseLayer +} + +func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk } + +func (m *USBBulk) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error { + d := &USBBulk{} + return decodingLayerDecoder(d, data, p) +} diff --git a/vendor/github.com/gopacket/gopacket/layers/vrrp.go b/vendor/github.com/gopacket/gopacket/layers/vrrp.go new file mode 100644 index 000000000..b512e6f82 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layers/vrrp.go @@ -0,0 +1,156 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "net" + + "github.com/gopacket/gopacket" +) + +/* + This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2. + https://tools.ietf.org/html/rfc3768#section-5 + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |Version| Type | Virtual Rtr ID| Priority | Count IP Addrs| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Adver Int | Checksum | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | IP Address (1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | . | + | . 
| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | IP Address (n) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Authentication Data (1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Authentication Data (2) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ + +type VRRPv2Type uint8 +type VRRPv2AuthType uint8 + +const ( + VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement +) + +// String conversions for VRRP message types +func (v VRRPv2Type) String() string { + switch v { + case VRRPv2Advertisement: + return "VRRPv2 Advertisement" + default: + return "" + } +} + +const ( + VRRPv2AuthNoAuth VRRPv2AuthType = 0x00 // No Authentication + VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1 + VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2 +) + +func (v VRRPv2AuthType) String() string { + switch v { + case VRRPv2AuthNoAuth: + return "No Authentication" + case VRRPv2AuthReserved1: + return "Reserved" + case VRRPv2AuthReserved2: + return "Reserved" + default: + return "" + } +} + +// VRRPv2 represents an VRRP v2 message. +type VRRPv2 struct { + BaseLayer + Version uint8 // The version field specifies the VRRP protocol version of this packet (v2) + Type VRRPv2Type // The type field specifies the type of this VRRP packet. The only type defined in v2 is ADVERTISEMENT + VirtualRtrID uint8 // identifies the virtual router this packet is reporting status for + Priority uint8 // specifies the sending VRRP router's priority for the virtual router (100 = default) + CountIPAddr uint8 // The number of IP addresses contained in this VRRP advertisement. + AuthType VRRPv2AuthType // identifies the authentication method being utilized + AdverInt uint8 // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS. The default is 1 second + Checksum uint16 // used to detect data corruption in the VRRP message. + IPAddress []net.IP // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field. +} + +// LayerType returns LayerTypeVRRP for VRRP v2 message. +func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP } + +func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + v.BaseLayer = BaseLayer{Contents: data[:len(data)]} + v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2 + + v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement) + if v.Type != 1 { + // rfc3768: A packet with unknown type MUST be discarded. + return errors.New("Unrecognized VRRPv2 type field.") + } + + v.VirtualRtrID = data[1] + v.Priority = data[2] + + v.CountIPAddr = data[3] + if v.CountIPAddr < 1 { + return errors.New("VRRPv2 number of IP addresses is not valid.") + } + + v.AuthType = VRRPv2AuthType(data[4]) + v.AdverInt = uint8(data[5]) + v.Checksum = binary.BigEndian.Uint16(data[6:8]) + + // populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field + // offset references the starting byte containing the list of ip addresses + offset := 8 + for i := uint8(0); i < v.CountIPAddr; i++ { + v.IPAddress = append(v.IPAddress, data[offset:offset+4]) + offset += 4 + } + + // any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC + // + // 5.3.10. 
+	// Authentication Data
+	//
+	// The authentication string is currently only used to maintain
+	// backwards compatibility with RFC 2338. It SHOULD be set to zero on
+	// transmission and ignored on reception.
+	return nil
+}
+
+// CanDecode specifies the layer type in which we are attempting to unwrap.
+func (v *VRRPv2) CanDecode() gopacket.LayerClass {
+	return LayerTypeVRRP
+}
+
+// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we set to 0
+func (v *VRRPv2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// The VRRP packet does not include payload data, so Payload returns nil
+func (v *VRRPv2) Payload() []byte {
+	return nil
+}
+
+// decodeVRRP will parse VRRP v2
+func decodeVRRP(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		return errors.New("Not a valid VRRP packet. Packet length is too small.")
+	}
+	v := &VRRPv2{}
+	return decodingLayerDecoder(v, data, p)
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers/vxlan.go b/vendor/github.com/gopacket/gopacket/layers/vxlan.go
new file mode 100644
index 000000000..f7cdc040a
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers/vxlan.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/gopacket/gopacket"
+)
+
+// VXLAN is specified in RFC 7348 https://tools.ietf.org/html/rfc7348
+// G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//  0             8               16              24              32
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R|        Group Policy ID        |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//  |     24 bit VXLAN Network Identifier           |   Reserved    |
+//  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// VXLAN is a VXLAN packet header
+type VXLAN struct {
+	BaseLayer
+	ValidIDFlag      bool   // 'I' bit per RFC 7348
+	VNI              uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348
+	GBPExtension     bool   // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	GBPDontLearn     bool   // 'D' bit per Group Policy
+	GBPApplied       bool   // 'A' bit per Group Policy
+	GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy
+}
+
+// LayerType returns LayerTypeVXLAN
+func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN }
+
+// CanDecode returns the layer type this DecodingLayer can decode
+func (vx *VXLAN) CanDecode() gopacket.LayerClass {
+	return LayerTypeVXLAN
+}
+
+// NextLayerType returns the next layer we should see after vxlan
+func (vx *VXLAN) NextLayerType() gopacket.LayerType {
+	return LayerTypeEthernet
+}
+
+// DecodeFromBytes takes a byte buffer and decodes it into the VXLAN struct
+func (vx *VXLAN) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		return errors.New("vxlan packet too small")
+	}
+	// VNI is a 24bit number, Uint32 requires 32 bits
+	var buf [4]byte
+	copy(buf[1:], data[4:7])
+
+	// RFC 7348 https://tools.ietf.org/html/rfc7348
+	vx.ValidIDFlag = data[0]&0x08 > 0        // 'I' bit per RFC7348
+	vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348
+
+	// Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	vx.GBPExtension = data[0]&0x80 > 0                       // 'G' bit per the group policy draft
+	vx.GBPDontLearn = data[1]&0x40 > 0                       // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame.
+	vx.GBPApplied = data[1]&0x80 > 0                         // 'A' bit - indicates that the group policy has already been applied to this packet.
+	vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft
+
+	// Layer information
+	const vxlanLength = 8
+	vx.Contents = data[:vxlanLength]
+	vx.Payload = data[vxlanLength:]
+
+	return nil
+
+}
+
+func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error {
+	vx := &VXLAN{}
+	err := vx.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	p.AddLayer(vx)
+	return p.NextDecoder(LinkTypeEthernet)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+
+	// PrependBytes does not guarantee that bytes are zeroed. Setting flags via OR requires that they start off at zero
+	bytes[0] = 0
+	bytes[1] = 0
+
+	if vx.ValidIDFlag {
+		bytes[0] |= 0x08
+	}
+	if vx.GBPExtension {
+		bytes[0] |= 0x80
+	}
+	if vx.GBPDontLearn {
+		bytes[1] |= 0x40
+	}
+	if vx.GBPApplied {
+		bytes[1] |= 0x80
+	}
+
+	binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID)
+	if vx.VNI >= 1<<24 {
+		return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI)
+	}
+	binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8)
+	return nil
+}
diff --git a/vendor/github.com/gopacket/gopacket/layers_decoder.go b/vendor/github.com/gopacket/gopacket/layers_decoder.go
new file mode 100644
index 000000000..8c1f108cf
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/layers_decoder.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+
+package gopacket
+
+// Created by gen.go, don't edit manually
+// Generated at 2019-06-18 11:37:31.308731293 +0600 +06 m=+0.000842599
+
+// LayersDecoder returns DecodingLayerFunc for specified
+// DecodingLayerContainer, LayerType value to start decoding with and
+// some DecodeFeedback.
+func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	firstDec, ok := dl.Decoder(first)
+	if !ok {
+		return func([]byte, *[]LayerType) (LayerType, error) {
+			return first, nil
+		}
+	}
+	if dlc, ok := dl.(DecodingLayerSparse); ok {
+		return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncated decoded layers.
+			typ := first
+			decoder := firstDec
+			for {
+				if err := decoder.DecodeFromBytes(data, df); err != nil {
+					return LayerTypeZero, err
+				}
+				*decoded = append(*decoded, typ)
+				typ = decoder.NextLayerType()
+				if data = decoder.LayerPayload(); len(data) == 0 {
+					break
+				}
+				if decoder, ok = dlc.Decoder(typ); !ok {
+					return typ, nil
+				}
+			}
+			return LayerTypeZero, nil
+		}
+	}
+	if dlc, ok := dl.(DecodingLayerArray); ok {
+		return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncated decoded layers.
+ typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + if dlc, ok := dl.(DecodingLayerMap); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + dlc := dl + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } +} diff --git a/vendor/github.com/gopacket/gopacket/layertype.go b/vendor/github.com/gopacket/gopacket/layertype.go new file mode 100644 index 000000000..3abfee1e9 --- /dev/null +++ b/vendor/github.com/gopacket/gopacket/layertype.go @@ -0,0 +1,111 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" + "strconv" +) + +// LayerType is a unique identifier for each type of layer. This enumeration +// does not match with any externally available numbering scheme... it's solely +// usable/useful within this library as a means for requesting layer types +// (see Packet.Layer) and determining which types of layers have been decoded. +// +// New LayerTypes may be created by calling gopacket.RegisterLayerType. +type LayerType int64 + +// LayerTypeMetadata contains metadata associated with each LayerType. +type LayerTypeMetadata struct { + // Name is the string returned by each layer type's String method. + Name string + // Decoder is the decoder to use when the layer type is passed in as a + // Decoder. + Decoder Decoder +} + +type layerTypeMetadata struct { + inUse bool + LayerTypeMetadata +} + +// DecodersByLayerName maps layer names to decoders for those layers. +// This allows users to specify decoders by name to a program and have that +// program pick the correct decoder accordingly. +var DecodersByLayerName = map[string]Decoder{} + +const maxLayerType = 2000 + +var ltMeta [maxLayerType]layerTypeMetadata +var ltMetaMap = map[LayerType]layerTypeMetadata{} + +// RegisterLayerType creates a new layer type and registers it globally. +// The number passed in must be unique, or a runtime panic will occur. Numbers +// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be +// used for common application-specific types, and are very fast. 
+// Any other number (negative or >= 2000) may be used for uncommon
+// application-specific types, and are somewhat slower (they require a map
+// lookup over an array index).
+func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		if ltMeta[num].inUse {
+			panic("Layer type already exists")
+		}
+	} else {
+		if ltMetaMap[LayerType(num)].inUse {
+			panic("Layer type already exists")
+		}
+	}
+	return OverrideLayerType(num, meta)
+}
+
+// OverrideLayerType acts like RegisterLayerType, except that if the layer type
+// has already been registered, it overrides the metadata with the passed-in
+// metadata instead of panicking.
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		ltMeta[num] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	} else {
+		ltMetaMap[LayerType(num)] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	}
+	DecodersByLayerName[meta.Name] = meta.Decoder
+	return LayerType(num)
+}
+
+// Decode decodes the given data using the decoder registered with the layer
+// type.
+func (t LayerType) Decode(data []byte, c PacketBuilder) error {
+	var d Decoder
+	if 0 <= int(t) && int(t) < maxLayerType {
+		d = ltMeta[int(t)].Decoder
+	} else {
+		d = ltMetaMap[t].Decoder
+	}
+	if d != nil {
+		return d.Decode(data, c)
+	}
+	return fmt.Errorf("Layer type %v has no associated decoder", t)
+}
+
+// String returns the string associated with this layer type.
+func (t LayerType) String() (s string) {
+	if 0 <= int(t) && int(t) < maxLayerType {
+		s = ltMeta[int(t)].Name
+	} else {
+		s = ltMetaMap[t].Name
+	}
+	if s == "" {
+		s = strconv.Itoa(int(t))
+	}
+	return
+}
diff --git a/vendor/github.com/gopacket/gopacket/packet.go b/vendor/github.com/gopacket/gopacket/packet.go
new file mode 100644
index 000000000..93563437f
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/packet.go
@@ -0,0 +1,1029 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+)
+
+const maximumMTU = 1500
+
+var (
+	ErrNoLayersAdded = errors.New("NextDecoder called, but no layers added yet")
+	poolPackedPool   = &sync.Pool{
+		New: func() interface{} {
+			b := make([]byte, maximumMTU)
+			return &b
+		},
+	}
+)
+
+// CaptureInfo provides standardized information about a packet captured off
+// the wire or read from a file.
+type CaptureInfo struct {
+	// Timestamp is the time the packet was captured, if that is known.
+	Timestamp time.Time
+	// CaptureLength is the total number of bytes read off of the wire.
+	CaptureLength int
+	// Length is the size of the original packet. Should always be >=
+	// CaptureLength.
+	Length int
+	// InterfaceIndex
+	InterfaceIndex int
+	// The packet source can place ancillary data of various types here.
+	// For example, the afpacket source can report the VLAN of captured
+	// packets this way.
+	AncillaryData []interface{}
+}
+
+// PacketMetadata contains metadata for a packet.
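// --- Example (editor's sketch, not part of the vendored code) ---------------
// Registering a hypothetical application-specific layer type in the fast
// 1000-1999 range described above. The number, name, and decoder are made up.
package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
)

// decodeMyProto is a stub; a real decoder would parse data and call
// p.AddLayer with a concrete Layer implementation.
func decodeMyProto(data []byte, p gopacket.PacketBuilder) error {
	return nil
}

var LayerTypeMyProto = gopacket.RegisterLayerType(1050, gopacket.LayerTypeMetadata{
	Name:    "MyProto",
	Decoder: gopacket.DecodeFunc(decodeMyProto),
})

func main() {
	fmt.Println(LayerTypeMyProto) // String() resolves to the registered name
}

// --- end example -------------------------------------------------------------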
+type PacketMetadata struct { + CaptureInfo + // Truncated is true if packet decoding logic detects that there are fewer + // bytes in the packet than are detailed in various headers (for example, if + // the number of bytes in the IPv4 contents/payload is less than IPv4.Length). + // This is also set automatically for packets captured off the wire if + // CaptureInfo.CaptureLength < CaptureInfo.Length. + Truncated bool +} + +// Packet is the primary object used by gopacket. Packets are created by a +// Decoder's Decode call. A packet is made up of a set of Data, which +// is broken into a number of Layers as it is decoded. +type Packet interface { + //// Functions for outputting the packet as a human-readable string: + //// ------------------------------------------------------------------ + // String returns a human-readable string representation of the packet. + // It uses LayerString on each layer to output the layer. + String() string + // Dump returns a verbose human-readable string representation of the packet, + // including a hex dump of all layers. It uses LayerDump on each layer to + // output the layer. + Dump() string + + //// Functions for accessing arbitrary packet layers: + //// ------------------------------------------------------------------ + // Layers returns all layers in this packet, computing them as necessary + Layers() []Layer + // Layer returns the first layer in this packet of the given type, or nil + Layer(LayerType) Layer + // LayerClass returns the first layer in this packet of the given class, + // or nil. + LayerClass(LayerClass) Layer + + //// Functions for accessing specific types of packet layers. These functions + //// return the first layer of each type found within the packet. + //// ------------------------------------------------------------------ + // LinkLayer returns the first link layer in the packet + LinkLayer() LinkLayer + // NetworkLayer returns the first network layer in the packet + NetworkLayer() NetworkLayer + // TransportLayer returns the first transport layer in the packet + TransportLayer() TransportLayer + // ApplicationLayer returns the first application layer in the packet + ApplicationLayer() ApplicationLayer + // ErrorLayer is particularly useful, since it returns nil if the packet + // was fully decoded successfully, and non-nil if an error was encountered + // in decoding and the packet was only partially decoded. Thus, its output + // can be used to determine if the entire packet was able to be decoded. + ErrorLayer() ErrorLayer + + //// Functions for accessing data specific to the packet: + //// ------------------------------------------------------------------ + // Data returns the set of bytes that make up this entire packet. + Data() []byte + // Metadata returns packet metadata associated with this packet. + Metadata() *PacketMetadata + + //// Functions for verifying specific aspects of the packet: + //// ------------------------------------------------------------------ + // VerifyChecksums verifies the checksums of all layers in this packet, + // that have one, and returns all found checksum mismatches. + VerifyChecksums() (error, []ChecksumMismatch) +} + +type PooledPacket interface { + Packet + Dispose() +} + +// packet contains all the information we need to fulfill the Packet interface, +// and its two "subclasses" (yes, no such thing in Go, bear with me), +// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the +// various functions needed to access this information. 
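// --- Example (editor's sketch, not part of the vendored code) ---------------
// Exercising the Packet interface above on a minimal hypothetical frame: a
// 14-byte Ethernet header claiming IPv4 plus one stray payload byte, so
// decoding stops partway and ErrorLayer reports the failure.
package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	frame := append(make([]byte, 12), 0x08, 0x00, 0x45) // zero MACs, EtherType IPv4, truncated IP
	pkt := gopacket.NewPacket(frame, layers.LayerTypeEthernet, gopacket.Default)
	for _, l := range pkt.Layers() {
		fmt.Println("decoded:", l.LayerType())
	}
	if errLayer := pkt.ErrorLayer(); errLayer != nil {
		fmt.Println("partial decode:", errLayer.Error())
	}
}

// --- end example -------------------------------------------------------------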
+type packet struct { + // data contains the entire packet data for a packet + data []byte + // initialLayers is space for an initial set of layers already created inside + // the packet. + initialLayers [6]Layer + // layers contains each layer we've already decoded + layers []Layer + // last is the last layer added to the packet + last Layer + // metadata is the PacketMetadata for this packet + metadata PacketMetadata + + decodeOptions DecodeOptions + + // Pointers to the various important layers + link LinkLayer + network NetworkLayer + transport TransportLayer + application ApplicationLayer + failure ErrorLayer +} + +func (p *packet) SetTruncated() { + p.metadata.Truncated = true +} + +func (p *packet) SetLinkLayer(l LinkLayer) { + if p.link == nil { + p.link = l + } +} + +func (p *packet) SetNetworkLayer(l NetworkLayer) { + if p.network == nil { + p.network = l + } +} + +func (p *packet) SetTransportLayer(l TransportLayer) { + if p.transport == nil { + p.transport = l + } +} + +func (p *packet) SetApplicationLayer(l ApplicationLayer) { + if p.application == nil { + p.application = l + } +} + +func (p *packet) SetErrorLayer(l ErrorLayer) { + if p.failure == nil { + p.failure = l + } +} + +func (p *packet) AddLayer(l Layer) { + p.layers = append(p.layers, l) + p.last = l +} + +func (p *packet) DumpPacketData() { + fmt.Fprint(os.Stderr, p.packetDump()) + os.Stderr.Sync() +} + +func (p *packet) Metadata() *PacketMetadata { + return &p.metadata +} + +func (p *packet) Data() []byte { + return p.data +} + +func (p *packet) DecodeOptions() *DecodeOptions { + return &p.decodeOptions +} + +func (p *packet) VerifyChecksums() (error, []ChecksumMismatch) { + mismatches := make([]ChecksumMismatch, 0) + for i, l := range p.layers { + if lwc, ok := l.(LayerWithChecksum); ok { + // Verify checksum for that layer + err, res := lwc.VerifyChecksum() + if err != nil { + return fmt.Errorf("couldn't verify checksum for layer %d (%s): %w", + i+1, l.LayerType(), err), nil + } + + if !res.Valid { + mismatches = append(mismatches, ChecksumMismatch{ + ChecksumVerificationResult: res, + Layer: l, + LayerIndex: i, + }) + } + } + } + + return nil, mismatches +} + +func (p *packet) addFinalDecodeError(err error, stack []byte) { + fail := &DecodeFailure{err: err, stack: stack} + if p.last == nil { + fail.data = p.data + } else { + fail.data = p.last.LayerPayload() + } + p.AddLayer(fail) + p.SetErrorLayer(fail) +} + +func (p *packet) recoverDecodeError() { + if !p.decodeOptions.SkipDecodeRecovery { + if r := recover(); r != nil { + p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack()) + } + } +} + +type pooledPacket struct { + Packet + origData *[]byte +} + +func (p pooledPacket) Dispose() { + poolPackedPool.Put(p.origData) +} + +// LayerString outputs an individual layer as a string. The layer is output +// in a single line, with no trailing newline. This function is specifically +// designed to do the right thing for most layers... it follows the following +// rules: +// - If the Layer has a String function, just output that. +// - Otherwise, output all exported fields in the layer, recursing into +// exported slices and structs. +// +// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported +// and unexported fields... many times packet layers contain unexported stuff +// that would just mess up the output of the layer, see for example the +// Payload layer and it's internal 'data' field, which contains a large byte +// array that would really mess up formatting. 
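// --- Example (editor's sketch, not part of the vendored code) ---------------
// What LayerString and LayerDump produce for a small decoded layer. The raw
// bytes are a hypothetical 8-byte UDP header (port 53 to port 53, length 8).
package main

import (
	"fmt"

	"github.com/gopacket/gopacket"
	"github.com/gopacket/gopacket/layers"
)

func main() {
	raw := []byte{0x00, 0x35, 0x00, 0x35, 0x00, 0x08, 0x00, 0x00}
	var udp layers.UDP
	if err := udp.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Println(gopacket.LayerString(&udp)) // single line, exported fields only
	fmt.Print(gopacket.LayerDump(&udp))     // LayerString plus a hex dump
}

// --- end example -------------------------------------------------------------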
+func LayerString(l Layer) string { + return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false)) +} + +// Dumper dumps verbose information on a value. If a layer type implements +// Dumper, then its LayerDump() string will include the results in its output. +type Dumper interface { + Dump() string +} + +// LayerDump outputs a very verbose string representation of a layer. Its +// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()). +// It contains newlines and ends with a newline. +func LayerDump(l Layer) string { + var b bytes.Buffer + b.WriteString(LayerString(l)) + b.WriteByte('\n') + if d, ok := l.(Dumper); ok { + dump := d.Dump() + if dump != "" { + b.WriteString(dump) + if dump[len(dump)-1] != '\n' { + b.WriteByte('\n') + } + } + } + b.WriteString(hex.Dump(l.LayerContents())) + return b.String() +} + +// layerString outputs, recursively, a layer in a "smart" way. See docs for +// LayerString for more details. +// +// Params: +// +// i - value to write out +// anonymous: if we're currently recursing an anonymous member of a struct +// writeSpace: if we've already written a value in a struct, and need to +// write a space before writing more. This happens when we write various +// anonymous values, and need to keep writing more. +func layerString(v reflect.Value, anonymous bool, writeSpace bool) string { + // Let String() functions take precedence. + if v.CanInterface() { + if s, ok := v.Interface().(fmt.Stringer); ok { + return s.String() + } + } + // Reflect, and spit out all the exported fields as key=value. + switch v.Type().Kind() { + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return "nil" + } + r := v.Elem() + return layerString(r, anonymous, writeSpace) + case reflect.Struct: + var b bytes.Buffer + typ := v.Type() + if !anonymous { + b.WriteByte('{') + } + for i := 0; i < v.NumField(); i++ { + // Check if this is upper-case. + ftype := typ.Field(i) + f := v.Field(i) + if ftype.Anonymous { + anonStr := layerString(f, true, writeSpace) + writeSpace = writeSpace || anonStr != "" + b.WriteString(anonStr) + } else if ftype.PkgPath == "" { // exported + if writeSpace { + b.WriteByte(' ') + } + writeSpace = true + fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace)) + } + } + if !anonymous { + b.WriteByte('}') + } + return b.String() + case reflect.Slice: + var b bytes.Buffer + b.WriteByte('[') + if v.Len() > 4 { + fmt.Fprintf(&b, "..%d..", v.Len()) + } else { + for j := 0; j < v.Len(); j++ { + if j != 0 { + b.WriteString(", ") + } + b.WriteString(layerString(v.Index(j), false, false)) + } + } + b.WriteByte(']') + return b.String() + } + return fmt.Sprintf("%v", v.Interface()) +} + +const ( + longBytesLength = 128 +) + +// LongBytesGoString returns a string representation of the byte slice shortened +// using the format '{ ... ( bytes)}' if it +// exceeds a predetermined length. Can be used to avoid filling the display with +// very long byte strings. +func LongBytesGoString(buf []byte) string { + if len(buf) < longBytesLength { + return fmt.Sprintf("%#v", buf) + } + s := fmt.Sprintf("%#v", buf[:longBytesLength-1]) + s = strings.TrimSuffix(s, "}") + return fmt.Sprintf("%s ... 
(%d bytes)}", s, len(buf)) +} + +func baseLayerString(value reflect.Value) string { + t := value.Type() + content := value.Field(0) + c := make([]byte, content.Len()) + for i := range c { + c[i] = byte(content.Index(i).Uint()) + } + payload := value.Field(1) + p := make([]byte, payload.Len()) + for i := range p { + p[i] = byte(payload.Index(i).Uint()) + } + return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(), + LongBytesGoString(c), + LongBytesGoString(p)) +} + +func layerGoString(i interface{}, b *bytes.Buffer) { + if s, ok := i.(fmt.GoStringer); ok { + b.WriteString(s.GoString()) + return + } + + var v reflect.Value + var ok bool + if v, ok = i.(reflect.Value); !ok { + v = reflect.ValueOf(i) + } + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.Kind() == reflect.Ptr { + b.WriteByte('&') + } + layerGoString(v.Elem().Interface(), b) + case reflect.Struct: + t := v.Type() + b.WriteString(t.String()) + b.WriteByte('{') + for i := 0; i < v.NumField(); i++ { + if i > 0 { + b.WriteString(", ") + } + if t.Field(i).Name == "BaseLayer" { + fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i))) + } else if v.Field(i).Kind() == reflect.Struct { + fmt.Fprintf(b, "%s:", t.Field(i).Name) + layerGoString(v.Field(i), b) + } else if v.Field(i).Kind() == reflect.Ptr { + b.WriteByte('&') + layerGoString(v.Field(i), b) + } else { + fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i)) + } + } + b.WriteByte('}') + default: + fmt.Fprintf(b, "%#v", i) + } +} + +// LayerGoString returns a representation of the layer in Go syntax, +// taking care to shorten "very long" BaseLayer byte slices +func LayerGoString(l Layer) string { + b := new(bytes.Buffer) + layerGoString(l, b) + return b.String() +} + +func (p *packet) packetString() string { + var b bytes.Buffer + fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data())) + if p.metadata.Truncated { + b.WriteString(", truncated") + } + if p.metadata.Length > 0 { + fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength) + } + if !p.metadata.Timestamp.IsZero() { + fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp) + } + b.WriteByte('\n') + for i, l := range p.layers { + fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l)) + } + return b.String() +} + +func (p *packet) packetDump() string { + var b bytes.Buffer + fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data)) + for i, l := range p.layers { + fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l)) + } + return b.String() +} + +// eagerPacket is a packet implementation that does eager decoding. Upon +// initial construction, it decodes all the layers it can from packet data. +// eagerPacket implements Packet and PacketBuilder. +type eagerPacket struct { + packet +} + +var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type") + +func (p *eagerPacket) NextDecoder(next Decoder) error { + if next == nil { + return errNilDecoder + } + if p.last == nil { + return ErrNoLayersAdded + } + d := p.last.LayerPayload() + if len(d) == 0 { + return nil + } + // Since we're eager, immediately call the next decoder. 
+ return next.Decode(d, p) +} +func (p *eagerPacket) initialDecode(dec Decoder) { + defer p.recoverDecodeError() + err := dec.Decode(p.data, p) + if err != nil { + p.addFinalDecodeError(err, nil) + } +} +func (p *eagerPacket) LinkLayer() LinkLayer { + return p.link +} +func (p *eagerPacket) NetworkLayer() NetworkLayer { + return p.network +} +func (p *eagerPacket) TransportLayer() TransportLayer { + return p.transport +} +func (p *eagerPacket) ApplicationLayer() ApplicationLayer { + return p.application +} +func (p *eagerPacket) ErrorLayer() ErrorLayer { + return p.failure +} +func (p *eagerPacket) Layers() []Layer { + return p.layers +} +func (p *eagerPacket) Layer(t LayerType) Layer { + for _, l := range p.layers { + if l.LayerType() == t { + return l + } + } + return nil +} +func (p *eagerPacket) LayerClass(lc LayerClass) Layer { + for _, l := range p.layers { + if lc.Contains(l.LayerType()) { + return l + } + } + return nil +} +func (p *eagerPacket) String() string { return p.packetString() } +func (p *eagerPacket) Dump() string { return p.packetDump() } + +// lazyPacket does lazy decoding on its packet data. On construction it does +// no initial decoding. For each function call, it decodes only as many layers +// as are necessary to compute the return value for that function. +// lazyPacket implements Packet and PacketBuilder. +type lazyPacket struct { + packet + next Decoder +} + +func (p *lazyPacket) NextDecoder(next Decoder) error { + if next == nil { + return errNilDecoder + } + p.next = next + return nil +} +func (p *lazyPacket) decodeNextLayer() { + if p.next == nil { + return + } + d := p.data + if p.last != nil { + d = p.last.LayerPayload() + } + next := p.next + p.next = nil + // We've just set p.next to nil, so if we see we have no data, this should be + // the final call we get to decodeNextLayer if we return here. 
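+	// If the decode below succeeds, the decoder is expected to call
+	// NextDecoder on this packet, which re-populates p.next for the
+	// following decodeNextLayer call.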
+ if len(d) == 0 { + return + } + defer p.recoverDecodeError() + err := next.Decode(d, p) + if err != nil { + p.addFinalDecodeError(err, nil) + } +} +func (p *lazyPacket) LinkLayer() LinkLayer { + for p.link == nil && p.next != nil { + p.decodeNextLayer() + } + return p.link +} +func (p *lazyPacket) NetworkLayer() NetworkLayer { + for p.network == nil && p.next != nil { + p.decodeNextLayer() + } + return p.network +} +func (p *lazyPacket) TransportLayer() TransportLayer { + for p.transport == nil && p.next != nil { + p.decodeNextLayer() + } + return p.transport +} +func (p *lazyPacket) ApplicationLayer() ApplicationLayer { + for p.application == nil && p.next != nil { + p.decodeNextLayer() + } + return p.application +} +func (p *lazyPacket) ErrorLayer() ErrorLayer { + for p.failure == nil && p.next != nil { + p.decodeNextLayer() + } + return p.failure +} +func (p *lazyPacket) Layers() []Layer { + for p.next != nil { + p.decodeNextLayer() + } + return p.layers +} +func (p *lazyPacket) Layer(t LayerType) Layer { + for _, l := range p.layers { + if l.LayerType() == t { + return l + } + } + numLayers := len(p.layers) + for p.next != nil { + p.decodeNextLayer() + for _, l := range p.layers[numLayers:] { + if l.LayerType() == t { + return l + } + } + numLayers = len(p.layers) + } + return nil +} +func (p *lazyPacket) LayerClass(lc LayerClass) Layer { + for _, l := range p.layers { + if lc.Contains(l.LayerType()) { + return l + } + } + numLayers := len(p.layers) + for p.next != nil { + p.decodeNextLayer() + for _, l := range p.layers[numLayers:] { + if lc.Contains(l.LayerType()) { + return l + } + } + numLayers = len(p.layers) + } + return nil +} +func (p *lazyPacket) String() string { p.Layers(); return p.packetString() } +func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() } + +// DecodeOptions tells gopacket how to decode a packet. +type DecodeOptions struct { + // Lazy decoding decodes the minimum number of layers needed to return data + // for a packet at each function call. Be careful using this with concurrent + // packet processors, as each call to packet.* could mutate the packet, and + // two concurrent function calls could interact poorly. + Lazy bool + // NoCopy decoding doesn't copy its input buffer into storage that's owned by + // the packet. If you can guarantee that the bytes underlying the slice + // passed into NewPacket aren't going to be modified, this can be faster. If + // there's any chance that those bytes WILL be changed, this will invalidate + // your packets. + NoCopy bool + // Pool decoding only applies if NoCopy is false. + // Instead of always allocating new memory it takes the memory from a pool. + // NewPacket then will return a PooledPacket instead of a Packet. + // As soon as you're done with the PooledPacket you should call PooledPacket.Dispose() to return it to the pool. + Pool bool + // SkipDecodeRecovery skips over panic recovery during packet decoding. + // Normally, when packets decode, if a panic occurs, that panic is captured + // by a recover(), and a DecodeFailure layer is added to the packet detailing + // the issue. If this flag is set, panics are instead allowed to continue up + // the stack. + SkipDecodeRecovery bool + // DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP + // decoder. If true, we should try to decode layers after TCP in single packets. + // This is disabled by default because the reassembly package drives the decoding + // of TCP payload data after reassembly. 
+	DecodeStreamsAsDatagrams bool
+}
+
+// Default decoding provides the safest (but slowest) method for decoding
+// packets. It eagerly processes all layers (so it's concurrency-safe) and it
+// copies its input buffer upon creation of the packet (so the packet remains
+// valid if the underlying slice is modified). Both of these take time,
+// though, so beware. If you can guarantee that the packet will only be used
+// by one goroutine at a time, set Lazy decoding. If you can guarantee that
+// the underlying slice won't change, set NoCopy decoding.
+var Default = DecodeOptions{}
+
+// Lazy is a DecodeOptions with just Lazy set.
+var Lazy = DecodeOptions{Lazy: true}
+
+// NoCopy is a DecodeOptions with just NoCopy set.
+var NoCopy = DecodeOptions{NoCopy: true}
+
+// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
+var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
+
+// NewPacket creates a new Packet object from a set of bytes. The
+// firstLayerDecoder tells it how to interpret the first layer from the bytes,
+// future layers will be generated from that first layer automatically.
+func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) (p Packet) {
+	if !options.NoCopy {
+		var (
+			poolMemory *[]byte
+			dataCopy   []byte
+		)
+		if options.Pool && len(data) <= maximumMTU {
+			poolMemory = poolPackedPool.Get().(*[]byte)
+			dataCopy = (*poolMemory)[:len(data)]
+			copy(dataCopy, data)
+			data = dataCopy
+			defer func() {
+				p = &pooledPacket{Packet: p, origData: poolMemory}
+			}()
+		} else {
+			dataCopy = make([]byte, len(data))
+			copy(dataCopy, data)
+			data = dataCopy
+		}
+	}
+	if options.Lazy {
+		lp := &lazyPacket{
+			packet: packet{data: data, decodeOptions: options},
+			next:   firstLayerDecoder,
+		}
+		lp.layers = lp.initialLayers[:0]
+		// Crazy craziness:
+		// If the following return statement is REMOVED, and Lazy is FALSE, then
+		// eager packet processing becomes 17% FASTER. No, there is no logical
+		// explanation for this. However, it's such a hacky micro-optimization that
+		// we really can't rely on it. It appears to have to do with the size the
+		// compiler guesses for this function's stack space, since one symptom is
+		// that with the return statement in place, we more than double calls to
+		// runtime.morestack/runtime.lessstack. We'll hope the compiler gets better
+		// over time and we get this optimization for free. Until then, we'll have
+		// to live with slower packet processing.
+		return lp
+	}
+	ep := &eagerPacket{
+		packet: packet{data: data, decodeOptions: options},
+	}
+	ep.layers = ep.initialLayers[:0]
+	ep.initialDecode(firstLayerDecoder)
+	return ep
+}
+
+// PacketDataSource is an interface for some source of packet data. Users may
+// create their own implementations, or use the existing implementations in
+// gopacket/pcap (libpcap, allows reading from live interfaces or from
+// pcap files) or gopacket/pfring (PF_RING, allows reading from live
+// interfaces).
+type PacketDataSource interface {
+	// ReadPacketData returns the next packet available from this data source.
+	// It returns:
+	//	data: The bytes of an individual packet.
+	//	ci: Metadata about the capture
+	//	err: An error encountered while reading packet data. If err != nil,
+	//	  then data/ci will be ignored.
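+	//
+	// A minimal sketch of a custom implementation (hypothetical, not part of
+	// gopacket) that replays packets from an in-memory slice and signals the
+	// end of data with io.EOF:
+	//
+	//	type sliceSource struct {
+	//		pkts [][]byte
+	//	}
+	//
+	//	func (s *sliceSource) ReadPacketData() ([]byte, gopacket.CaptureInfo, error) {
+	//		if len(s.pkts) == 0 {
+	//			return nil, gopacket.CaptureInfo{}, io.EOF
+	//		}
+	//		data := s.pkts[0]
+	//		s.pkts = s.pkts[1:]
+	//		ci := gopacket.CaptureInfo{Timestamp: time.Now(), CaptureLength: len(data), Length: len(data)}
+	//		return data, ci, nil
+	//	}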
+ ReadPacketData() (data []byte, ci CaptureInfo, err error) +} + +// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set +// of internal PacketDataSources, each of which will stop with io.EOF after +// reading a finite number of packets. The returned PacketDataSource will +// return all packets from the first finite source, followed by all packets from +// the second, etc. Once all finite sources have returned io.EOF, the returned +// source will as well. +func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource { + c := concat(pds) + return &c +} + +type concat []PacketDataSource + +func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) { + for len(*c) > 0 { + data, ci, err = (*c)[0].ReadPacketData() + if errors.Is(err, io.EOF) { + *c = (*c)[1:] + continue + } + return + } + return nil, CaptureInfo{}, io.EOF +} + +// ZeroCopyPacketDataSource is an interface to pull packet data from sources +// that allow data to be returned without copying to a user-controlled buffer. +// It's very similar to PacketDataSource, except that the caller must be more +// careful in how the returned buffer is handled. +type ZeroCopyPacketDataSource interface { + // ZeroCopyReadPacketData returns the next packet available from this data source. + // It returns: + // data: The bytes of an individual packet. Unlike with + // PacketDataSource's ReadPacketData, the slice returned here points + // to a buffer owned by the data source. In particular, the bytes in + // this buffer may be changed by future calls to + // ZeroCopyReadPacketData. Do not use the returned buffer after + // subsequent ZeroCopyReadPacketData calls. + // ci: Metadata about the capture + // err: An error encountered while reading packet data. If err != nil, + // then data/ci will be ignored. + ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error) +} + +type PacketSourceOption interface { + apply(ps *PacketSource) +} + +type packetSourceOptionFunc func(ps *PacketSource) + +func (f packetSourceOptionFunc) apply(ps *PacketSource) { + f(ps) +} + +func WithLazy(lazy bool) packetSourceOptionFunc { + return func(ps *PacketSource) { + ps.Lazy = lazy + } +} + +func WithNoCopy(noCopy bool) packetSourceOptionFunc { + return func(ps *PacketSource) { + ps.NoCopy = noCopy + } +} + +func WithPool(pool bool) packetSourceOptionFunc { + return func(ps *PacketSource) { + ps.Pool = pool + } +} + +func WithSkipDecodeRecovery(skipDecodeRecovery bool) packetSourceOptionFunc { + return func(ps *PacketSource) { + ps.SkipDecodeRecovery = skipDecodeRecovery + } +} + +func WithDecodeStreamsAsDatagrams(decodeStreamsAsDatagrams bool) packetSourceOptionFunc { + return func(ps *PacketSource) { + ps.DecodeStreamsAsDatagrams = decodeStreamsAsDatagrams + } +} + +// PacketSource reads in packets from a PacketDataSource, decodes them, and +// returns them. +// +// There are currently two different methods for reading packets in through +// a PacketSource: +// +// # Reading With Packets Function +// +// This method is the most convenient and easiest to code, but lacks +// flexibility. Packets returns a 'chan Packet', then asynchronously writes +// packets into that channel. Packets uses a blocking channel, and closes +// it if an io.EOF is returned by the underlying PacketDataSource. All other +// PacketDataSource errors are ignored and discarded. +// +// for packet := range packetSource.Packets() { +// ... 
+// } +// +// # Reading With NextPacket Function +// +// This method is the most flexible, and exposes errors that may be +// encountered by the underlying PacketDataSource. It's also the fastest +// in a tight loop, since it doesn't have the overhead of a channel +// read/write. However, it requires the user to handle errors, most +// importantly the io.EOF error in cases where packets are being read from +// a file. +// +// for { +// packet, err := packetSource.NextPacket() +// if err == io.EOF { +// break +// } else if err != nil { +// log.Println("Error:", err) +// continue +// } +// handlePacket(packet) // Do something with each packet. +// } +type PacketSource struct { + zeroCopy bool + source func() (data []byte, ci CaptureInfo, err error) + decoder Decoder + // DecodeOptions is the set of options to use for decoding each piece + // of packet data. This can/should be changed by the user to reflect the + // way packets should be decoded. + DecodeOptions + c chan Packet +} + +// NewZeroCopyPacketSource creates a zero copy packet data source. +func NewZeroCopyPacketSource(source ZeroCopyPacketDataSource, decoder Decoder, opts ...PacketSourceOption) *PacketSource { + ps := &PacketSource{ + source: source.ZeroCopyReadPacketData, + decoder: decoder, + } + + for idx := range opts { + opts[idx].apply(ps) + } + + return ps +} + +// NewPacketSource creates a packet data source. +func NewPacketSource(source PacketDataSource, decoder Decoder, opts ...PacketSourceOption) *PacketSource { + ps := &PacketSource{ + source: source.ReadPacketData, + decoder: decoder, + } + + for idx := range opts { + opts[idx].apply(ps) + } + + return ps +} + +// NextPacket returns the next decoded packet from the PacketSource. On error, +// it returns a nil packet and a non-nil error. +func (p *PacketSource) NextPacket() (Packet, error) { + data, ci, err := p.source() + if err != nil { + return nil, err + } + packet := NewPacket(data, p.decoder, p.DecodeOptions) + m := packet.Metadata() + m.CaptureInfo = ci + m.Truncated = m.Truncated || ci.CaptureLength < ci.Length + return packet, nil +} + +// packetsToChannel reads in all packets from the packet source and sends them +// to the given channel. This routine terminates when a non-temporary error +// is returned by NextPacket(). +func (p *PacketSource) packetsToChannel(ctx context.Context) { + defer close(p.c) + for ctx.Err() == nil { + packet, err := p.NextPacket() + if err == nil { + select { + case p.c <- packet: + continue + case <-ctx.Done(): + return + } + } + + // if timeout error sleep briefly and retry + var netErr net.Error + if ok := errors.As(err, &netErr); ok && netErr.Timeout() { + time.Sleep(time.Millisecond * time.Duration(5)) + continue + } + + // Immediately break for known unrecoverable errors + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) || + errors.Is(err, io.ErrNoProgress) || errors.Is(err, io.ErrClosedPipe) || errors.Is(err, io.ErrShortBuffer) || + errors.Is(err, syscall.EBADF) || + strings.Contains(err.Error(), "use of closed file") { + break + } + + // Sleep briefly and try again + time.Sleep(time.Millisecond * time.Duration(5)) + } +} + +// Packets returns a channel of packets, allowing easy iterating over +// packets. Packets will be asynchronously read in from the underlying +// PacketDataSource and written to the returned channel. If the underlying +// PacketDataSource returns an io.EOF error, the channel will be closed. +// If any other error is encountered, it is ignored. 
+//
+//	for packet := range packetSource.Packets() {
+//		handlePacket(packet) // Do something with each packet
+//	}
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) Packets() chan Packet {
+	return p.PacketsCtx(context.Background())
+}
+
+// PacketsCtx returns a channel of packets, allowing easy iterating over
+// packets. Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel. If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+// The background goroutine will be canceled as soon as the given context
+// returns an error, either because it got canceled or because it has reached
+// its deadline.
+//
+//	for packet := range packetSource.PacketsCtx(context.Background()) {
+//		handlePacket(packet) // Do something with each packet.
+//	}
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) PacketsCtx(ctx context.Context) chan Packet {
+	if p.DecodeOptions.NoCopy && p.zeroCopy {
+		panic("PacketSource uses a zero copy datasource and NoCopy decoder option activated - Packets() uses a buffered channel hence packets are most likely overwritten")
+	}
+
+	const defaultPacketChannelSize = 1000
+	if p.c == nil {
+		p.c = make(chan Packet, defaultPacketChannelSize)
+		go p.packetsToChannel(ctx)
+	}
+	return p.c
+}
diff --git a/vendor/github.com/gopacket/gopacket/parser.go b/vendor/github.com/gopacket/gopacket/parser.go
new file mode 100644
index 000000000..8bc6a68d4
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/parser.go
@@ -0,0 +1,351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// decodingLayerElem is a container for a single LayerType->DecodingLayer mapping.
+type decodingLayerElem struct {
+	typ LayerType
+	dec DecodingLayer
+}
+
+// DecodingLayer is an interface for packet layers that can decode themselves.
+//
+// The important part of a DecodingLayer is that it decodes itself in-place.
+// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
+// the new state defined by the data passed in. A returned error leaves the
+// DecodingLayer in an unknown intermediate state, thus its fields should not be
+// trusted.
+//
+// Because the DecodingLayer is resetting its own fields, a call to
+// DecodeFromBytes should normally not require any memory allocation.
+type DecodingLayer interface {
+	// DecodeFromBytes resets the internal state of this layer to the state
+	// defined by the passed-in bytes. Slices in the DecodingLayer may
+	// reference the passed-in data, so care should be taken to copy it
+	// first should later modification of data be required before the
+	// DecodingLayer is discarded.
+	DecodeFromBytes(data []byte, df DecodeFeedback) error
+	// CanDecode returns the set of LayerTypes this DecodingLayer can
+	// decode. For Layers that are also DecodingLayers, this will most
+	// often be that Layer's LayerType().
+	CanDecode() LayerClass
+	// NextLayerType returns the LayerType which should be used to decode
+	// the LayerPayload.
+	NextLayerType() LayerType
+	// LayerPayload is the set of bytes remaining to decode after a call to
+	// DecodeFromBytes.
+	LayerPayload() []byte
+}
+
+// DecodingLayerFunc decodes given packet and stores decoded LayerType
+// values into specified slice. Returns either first encountered
+// unsupported LayerType value or decoding error. In case of success,
+// returns (LayerTypeZero, nil).
+type DecodingLayerFunc func([]byte, *[]LayerType) (LayerType, error)
+
+// DecodingLayerContainer stores all DecodingLayer-s and serves as a
+// searching tool for DecodingLayerParser.
+type DecodingLayerContainer interface {
+	// Put adds new DecodingLayer to container. The new instance of
+	// the same DecodingLayerContainer is returned so it may be
+	// implemented as a value receiver.
+	Put(DecodingLayer) DecodingLayerContainer
+	// Decoder returns DecodingLayer to decode given LayerType and
+	// true if it was found. If no decoder is found, it returns false.
+	Decoder(LayerType) (DecodingLayer, bool)
+	// LayersDecoder returns DecodingLayerFunc which decodes given
+	// packet, starting with specified LayerType and DecodeFeedback.
+	LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc
+}
+
+// DecodingLayerSparse is a sparse array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is addressed in an
+// allocated slice by LayerType value itself. Though this is the
+// fastest container, it may be memory-consuming if used with big
+// LayerType values.
+type DecodingLayerSparse []DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Put(d DecodingLayer) DecodingLayerContainer {
+	maxLayerType := LayerType(len(dl) - 1)
+	for _, typ := range d.CanDecode().LayerTypes() {
+		if typ > maxLayerType {
+			maxLayerType = typ
+		}
+	}
+
+	if extra := maxLayerType - LayerType(len(dl)) + 1; extra > 0 {
+		dl = append(dl, make([]DecodingLayer, extra)...)
+	}
+
+	for _, typ := range d.CanDecode().LayerTypes() {
+		dl[typ] = d
+	}
+	return dl
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Decoder(typ LayerType) (DecodingLayer, bool) {
+	if int64(typ) < int64(len(dl)) {
+		decoder := dl[typ]
+		return decoder, decoder != nil
+	}
+	return nil, false
+}
+
+// DecodingLayerArray is an array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched linearly in
+// an allocated slice in one-by-one fashion.
+type DecodingLayerArray []decodingLayerElem
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Put(d DecodingLayer) DecodingLayerContainer {
+TYPES:
+	for _, typ := range d.CanDecode().LayerTypes() {
+		for i := range dl {
+			if dl[i].typ == typ {
+				dl[i].dec = d
+				continue TYPES
+			}
+		}
+		dl = append(dl, decodingLayerElem{typ, d})
+	}
+	return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Decoder(typ LayerType) (DecodingLayer, bool) {
+	for i := range dl {
+		if dl[i].typ == typ {
+			return dl[i].dec, true
+		}
+	}
+	return nil, false
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// DecodingLayerMap is a map-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched in a map
+// hashed by LayerType value.
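+//
+// A short usage sketch (assuming eth and ip4 are DecodingLayers in scope,
+// for example layers.Ethernet and layers.IPv4 from gopacket/layers):
+//
+//	var dlc DecodingLayerContainer = DecodingLayerMap(nil)
+//	dlc = dlc.Put(&eth)
+//	dlc = dlc.Put(&ip4)
+//	if dec, ok := dlc.Decoder(layers.LayerTypeEthernet); ok {
+//		_ = dec // decode with dec.DecodeFromBytes(...)
+//	}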
+type DecodingLayerMap map[LayerType]DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Put(d DecodingLayer) DecodingLayerContainer {
+	for _, typ := range d.CanDecode().LayerTypes() {
+		if dl == nil {
+			dl = make(map[LayerType]DecodingLayer)
+		}
+		dl[typ] = d
+	}
+	return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Decoder(typ LayerType) (DecodingLayer, bool) {
+	d, ok := dl[typ]
+	return d, ok
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// Static code check.
+var (
+	_ = []DecodingLayerContainer{
+		DecodingLayerSparse(nil),
+		DecodingLayerMap(nil),
+		DecodingLayerArray(nil),
+	}
+)
+
+// DecodingLayerParser parses a given set of layer types. See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+	// DecodingLayerParserOptions is the set of options available to the
+	// user to define the parser's behavior.
+	DecodingLayerParserOptions
+	dlc   DecodingLayerContainer
+	first LayerType
+	df    DecodeFeedback
+
+	decodeFunc DecodingLayerFunc
+
+	// Truncated is set when a decode layer detects that the packet has been
+	// truncated.
+	Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser. This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+	l.SetDecodingLayerContainer(l.dlc.Put(d))
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser. Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+	l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder. Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+//
+// NewDecodingLayerParser uses DecodingLayerMap container by
+// default.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+	dlp := &DecodingLayerParser{first: first}
+	dlp.df = dlp // Cast this once to the interface
+	// default container
+	dlc := DecodingLayerContainer(DecodingLayerMap(make(map[LayerType]DecodingLayer)))
+	for _, d := range decoders {
+		dlc = dlc.Put(d)
+	}
+
+	dlp.SetDecodingLayerContainer(dlc)
+	return dlp
+}
+
+// SetDecodingLayerContainer specifies container with decoders. This
+// call replaces all decoders already registered in given instance of
+// DecodingLayerParser.
+func (l *DecodingLayerParser) SetDecodingLayerContainer(dlc DecodingLayerContainer) {
+	l.dlc = dlc
+	l.decodeFunc = l.dlc.LayersDecoder(l.first, l.df)
+}
+
+// DecodeLayers decodes as many layers as possible from the given data. It
+// initially treats the data as layer type 'typ', then uses NextLayerType on
+// each subsequent decoded layer until it gets to a layer type it doesn't know
+// how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated. This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+//
+//	func main() {
+//		var eth layers.Ethernet
+//		var ip4 layers.IPv4
+//		var ip6 layers.IPv6
+//		var tcp layers.TCP
+//		var udp layers.UDP
+//		var payload gopacket.Payload
+//		parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload)
+//		var source gopacket.PacketDataSource = getMyDataSource()
+//		decodedLayers := make([]gopacket.LayerType, 0, 10)
+//		for {
+//			data, _, err := source.ReadPacketData()
+//			if err != nil {
+//				fmt.Println("Error reading packet data: ", err)
+//				continue
+//			}
+//			fmt.Println("Decoding packet")
+//			err = parser.DecodeLayers(data, &decodedLayers)
+//			for _, typ := range decodedLayers {
+//				fmt.Println("  Successfully decoded layer type", typ)
+//				switch typ {
+//				case layers.LayerTypeEthernet:
+//					fmt.Println("    Eth ", eth.SrcMAC, eth.DstMAC)
+//				case layers.LayerTypeIPv4:
+//					fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+//				case layers.LayerTypeIPv6:
+//					fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+//				case layers.LayerTypeTCP:
+//					fmt.Println("    TCP ", tcp.SrcPort, tcp.DstPort)
+//				case layers.LayerTypeUDP:
+//					fmt.Println("    UDP ", udp.SrcPort, udp.DstPort)
+//				}
+//			}
+//			if parser.Truncated {
+//				fmt.Println("  Packet has been truncated")
+//			}
+//			if err != nil {
+//				fmt.Println("  Error encountered:", err)
+//			}
+//		}
+//	}
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+	l.Truncated = false
+	if !l.IgnorePanic {
+		defer panicToError(&err)
+	}
+	typ, err := l.decodeFunc(data, decoded)
+	if typ != LayerTypeZero {
+		// no decoder
+		if l.IgnoreUnsupported {
+			return nil
+		}
+		return UnsupportedLayerType(typ)
+	}
+	return err
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+	return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+	if r := recover(); r != nil {
+		*e = fmt.Errorf("panic: %v", r)
+	}
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a given
+// DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+	// IgnorePanic determines whether a DecodingLayerParser should stop
+	// panics on its own (by returning them as an error from DecodeLayers)
+	// or should allow them to raise up the stack. Handling errors does add
+	// latency to the process of decoding layers, but is much safer for
+	// callers. IgnorePanic defaults to false, thus if the caller does
+	// nothing decode panics will be returned as errors.
+	IgnorePanic bool
+	// IgnoreUnsupported will stop parsing and return a nil error when it
+	// encounters a layer it doesn't have a parser for, instead of returning an
+	// UnsupportedLayerType error. If this is true, it's up to the caller to make
+	// sure that all expected layers have been parsed (by checking the decoded
+	// slice).
+	IgnoreUnsupported bool
+}
diff --git a/vendor/github.com/gopacket/gopacket/time.go b/vendor/github.com/gopacket/gopacket/time.go
new file mode 100644
index 000000000..6d116cdfb
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
+type TimestampResolution struct {
+	Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+	return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
+func (t TimestampResolution) ToDuration() time.Duration {
+	if t.Base == 0 {
+		return 0
+	}
+	if t.Exponent == 0 {
+		return time.Second
+	}
+	switch t.Base {
+	case 10:
+		return time.Duration(math.Pow10(t.Exponent + 9))
+	case 2:
+		if t.Exponent < 0 {
+			return time.Second >> uint(-t.Exponent)
+		}
+		return time.Second << uint(t.Exponent)
+	default:
+		// this might lose precision
+		return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+	}
+}
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have TimestampResolutionNanosecond due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
+type PacketSourceResolution interface {
+	// Resolution returns the timestamp resolution of acquired timestamps before scaling to TimestampResolutionNanosecond.
+	Resolution() TimestampResolution
+}
diff --git a/vendor/github.com/gopacket/gopacket/writer.go b/vendor/github.com/gopacket/gopacket/writer.go
new file mode 100644
index 000000000..9a463f047
--- /dev/null
+++ b/vendor/github.com/gopacket/gopacket/writer.go
@@ -0,0 +1,233 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using a SerializeBuffer, most conveniently via SerializeLayers.
+type SerializableLayer interface {
+	// SerializeTo writes this layer to a slice, growing that slice if necessary
+	// to make it fit the layer's data.
+	// Args:
+	//	b: SerializeBuffer to write this layer on to. When called, b.Bytes()
+	//	  is the payload this layer should wrap, if any. Note that this
+	//	  layer can either prepend itself (common), append itself
+	//	  (uncommon), or both (sometimes padding or footers are required at
+	//	  the end of packet data). It's also possible (though probably very
+	//	  rarely needed) to overwrite any bytes in the current payload.
+	//	  After this call, b.Bytes() should return the byte encoding of
+	//	  this layer wrapping the original b.Bytes() payload.
+	//	opts: options to use while writing out data.
+	// Returns:
+	//	error if a problem was encountered during encoding. If an error is
+	//	returned, the bytes in data should be considered invalidated, and
+	//	not used.
+	//
+	// SerializeTo calls SHOULD entirely ignore LayerContents and
+	// LayerPayload. It just serializes based on struct fields, neither
+	// modifying nor using contents/payload.
+	SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+	// LayerType returns the type of the layer that is being serialized to the buffer
+	LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+	// FixLengths determines whether, during serialization, layers should fix
+	// the values for any length field that depends on the payload.
+	FixLengths bool
+	// ComputeChecksums determines whether, during serialization, layers
+	// should recompute checksums based on their payloads.
+	ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zeroed out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work out, meaning that we often need to prepend bytes. This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
+//
+// It can be reused via Clear. Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+// 1. Reusing a write buffer is generally much faster than creating a new one,
+// and with the default implementation it avoids additional memory allocations.
+// 2. If a byte slice from a previous Bytes() call will continue to be used,
+// it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer. 
IE: if you make a set of +// Prepend/Append calls, then clear, then make the same calls with the same +// sizes, the second round (and all future similar rounds) shouldn't allocate +// any new memory. +type SerializeBuffer interface { + // Bytes returns the contiguous set of bytes collected so far by Prepend/Append + // calls. The slice returned by Bytes will be modified by future Clear calls, + // so if you're planning on clearing this SerializeBuffer, you may want to copy + // Bytes somewhere safe first. + Bytes() []byte + // PrependBytes returns a set of bytes which prepends the current bytes in this + // buffer. These bytes start in an indeterminate state, so they should be + // overwritten by the caller. The caller must only call PrependBytes if they + // know they're going to immediately overwrite all bytes returned. + PrependBytes(num int) ([]byte, error) + // AppendBytes returns a set of bytes which appends the current bytes in this + // buffer. These bytes start in an indeterminate state, so they should be + // overwritten by the caller. The caller must only call AppendBytes if they + // know they're going to immediately overwrite all bytes returned. + AppendBytes(num int) ([]byte, error) + // Clear resets the SerializeBuffer to a new, empty buffer. After a call to clear, + // the byte slice returned by any previous call to Bytes() for this buffer + // should be considered invalidated. + Clear() error + // Layers returns all the Layers that have been successfully serialized into this buffer + // already. + Layers() []LayerType + // PushLayer adds the current Layer to the list of Layers that have been serialized + // into this buffer. + PushLayer(LayerType) +} + +type serializeBuffer struct { + data []byte + start int + prepended, appended int + layers []LayerType +} + +// NewSerializeBuffer creates a new instance of the default implementation of +// the SerializeBuffer interface. +func NewSerializeBuffer() SerializeBuffer { + return &serializeBuffer{} +} + +// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an +// expected number of bytes prepended/appended. This tends to decrease the +// number of memory allocations made by the buffer during writes. 
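+//
+// A usage sketch (the sizes are illustrative, assuming roughly
+// Ethernet+IPv4+TCP headers will be prepended around an empty payload):
+//
+//	buf := NewSerializeBufferExpectedSize(14+20+20, 0)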
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer { + return &serializeBuffer{ + data: make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength), + start: expectedPrependLength, + prepended: expectedPrependLength, + appended: expectedAppendLength, + } +} + +func (w *serializeBuffer) Bytes() []byte { + return w.data[w.start:] +} + +func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) { + if num < 0 { + panic("num < 0") + } + if w.start < num { + toPrepend := w.prepended + if toPrepend < num { + toPrepend = num + } + w.prepended += toPrepend + length := cap(w.data) + toPrepend + newData := make([]byte, length) + newStart := w.start + toPrepend + copy(newData[newStart:], w.data[w.start:]) + w.start = newStart + w.data = newData[:toPrepend+len(w.data)] + } + w.start -= num + return w.data[w.start : w.start+num], nil +} + +func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) { + if num < 0 { + panic("num < 0") + } + initialLength := len(w.data) + if cap(w.data)-initialLength < num { + toAppend := w.appended + if toAppend < num { + toAppend = num + } + w.appended += toAppend + newData := make([]byte, cap(w.data)+toAppend) + copy(newData[w.start:], w.data[w.start:]) + w.data = newData[:initialLength] + } + // Grow the buffer. We know it'll be under capacity given above. + w.data = w.data[:initialLength+num] + return w.data[initialLength:], nil +} + +func (w *serializeBuffer) Clear() error { + w.start = w.prepended + w.data = w.data[:w.start] + w.layers = w.layers[:0] + return nil +} + +func (w *serializeBuffer) Layers() []LayerType { + return w.layers +} + +func (w *serializeBuffer) PushLayer(l LayerType) { + w.layers = append(w.layers, l) +} + +// SerializeLayers clears the given write buffer, then writes all layers into it so +// they correctly wrap each other. Note that by clearing the buffer, it +// invalidates all slices previously returned by w.Bytes() +// +// Example: +// +// buf := gopacket.NewSerializeBuffer() +// opts := gopacket.SerializeOptions{} +// gopacket.SerializeLayers(buf, opts, a, b, c) +// firstPayload := buf.Bytes() // contains byte representation of a(b(c)) +// gopacket.SerializeLayers(buf, opts, d, e, f) +// secondPayload := buf.Bytes() // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf. +func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error { + w.Clear() + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + err := layer.SerializeTo(w, opts) + if err != nil { + return err + } + w.PushLayer(layer.LayerType()) + } + return nil +} + +// SerializePacket is a convenience function that calls SerializeLayers +// on packet's Layers(). +// It returns an error if one of the packet layers is not a SerializableLayer. +func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error { + sls := []SerializableLayer{} + for _, layer := range packet.Layers() { + sl, ok := layer.(SerializableLayer) + if !ok { + return fmt.Errorf("layer %s is not serializable", layer.LayerType().String()) + } + sls = append(sls, sl) + } + return SerializeLayers(buf, opts, sls...) 
+} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE new file mode 100644 index 000000000..364516251 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 000000000..b8fbb2b77 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@org_golang_google_grpc//grpclog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go new file mode 100644 index 000000000..3cd937295 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -0,0 +1,121 @@ +package httprule + +import ( + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +) + +const ( + opcodeVersion = 1 +) + +// Template is a compiled representation of path templates. +type Template struct { + // Version is the version number of the format. + Version int + // OpCodes is a sequence of operations. + OpCodes []int + // Pool is a constant pool + Pool []string + // Verb is a VERB part in the template. 
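+	// For example, for the template "/v1/books:get", Verb is "get".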
+ Verb string + // Fields is a list of field paths bound in this template. + Fields []string + // Original template (example: /v1/a_bit_of_everything) + Template string +} + +// Compiler compiles utilities representation of path templates into marshallable operations. +// They can be unmarshalled by runtime.NewPattern. +type Compiler interface { + Compile() Template +} + +type op struct { + // code is the opcode of the operation + code utilities.OpCode + + // str is a string operand of the code. + // num is ignored if str is not empty. + str string + + // num is a numeric operand of the code. + num int +} + +func (w wildcard) compile() []op { + return []op{ + {code: utilities.OpPush}, + } +} + +func (w deepWildcard) compile() []op { + return []op{ + {code: utilities.OpPushM}, + } +} + +func (l literal) compile() []op { + return []op{ + { + code: utilities.OpLitPush, + str: string(l), + }, + } +} + +func (v variable) compile() []op { + var ops []op + for _, s := range v.segments { + ops = append(ops, s.compile()...) + } + ops = append(ops, op{ + code: utilities.OpConcatN, + num: len(v.segments), + }, op{ + code: utilities.OpCapture, + str: v.path, + }) + + return ops +} + +func (t template) Compile() Template { + var rawOps []op + for _, s := range t.segments { + rawOps = append(rawOps, s.compile()...) + } + + var ( + ops []int + pool []string + fields []string + ) + consts := make(map[string]int) + for _, op := range rawOps { + ops = append(ops, int(op.code)) + if op.str == "" { + ops = append(ops, op.num) + } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } + if _, ok := consts[op.str]; !ok { + consts[op.str] = len(pool) + pool = append(pool, op.str) + } + ops = append(ops, consts[op.str]) + } + if op.code == utilities.OpCapture { + fields = append(fields, op.str) + } + } + return Template{ + Version: opcodeVersion, + OpCodes: ops, + Pool: pool, + Verb: t.verb, + Fields: fields, + Template: t.template, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go new file mode 100644 index 000000000..c056bd305 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go @@ -0,0 +1,11 @@ +//go:build gofuzz +// +build gofuzz + +package httprule + +func Fuzz(data []byte) int { + if _, err := Parse(string(data)); err != nil { + return 0 + } + return 0 +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go new file mode 100644 index 000000000..65ffcf5cf --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -0,0 +1,368 @@ +package httprule + +import ( + "errors" + "fmt" + "strings" +) + +// InvalidTemplateError indicates that the path template is not valid. 
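+// For example, Parse("v1/books") fails with an InvalidTemplateError because
+// the template lacks a leading "/"; its Error() reads "no leading /: v1/books".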
+type InvalidTemplateError struct { + tmpl string + msg string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.tmpl) +} + +// Parse parses the string representation of path template +func Parse(tmpl string) (Compiler, error) { + if !strings.HasPrefix(tmpl, "/") { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"} + } + tokens, verb := tokenize(tmpl[1:]) + + p := parser{tokens: tokens} + segs, err := p.topLevelSegments() + if err != nil { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()} + } + + return template{ + segments: segs, + verb: verb, + template: tmpl, + }, nil +} + +func tokenize(path string) (tokens []string, verb string) { + if path == "" { + return []string{eof}, "" + } + + const ( + init = iota + field + nested + ) + st := init + for path != "" { + var idx int + switch st { + case init: + idx = strings.IndexAny(path, "/{") + case field: + idx = strings.IndexAny(path, ".=}") + case nested: + idx = strings.IndexAny(path, "/}") + } + if idx < 0 { + tokens = append(tokens, path) + break + } + switch r := path[idx]; r { + case '/', '.': + case '{': + st = field + case '=': + st = nested + case '}': + st = init + } + if idx == 0 { + tokens = append(tokens, path[idx:idx+1]) + } else { + tokens = append(tokens, path[:idx], path[idx:idx+1]) + } + path = path[idx+1:] + } + + l := len(tokens) + // See + // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ; + // although normal and backwards-compat logic here is to use the last index + // of a colon, if the final segment is a variable followed by a colon, the + // part following the colon must be a verb. Hence if the previous token is + // an end var marker, we switch the index we're looking for to Index instead + // of LastIndex, so that we correctly grab the remaining part of the path as + // the verb. + var penultimateTokenIsEndVar bool + switch l { + case 0, 1: + // Not enough to be variable so skip this logic and don't result in an + // invalid index + default: + penultimateTokenIsEndVar = tokens[l-2] == "}" + } + t := tokens[l-1] + var idx int + if penultimateTokenIsEndVar { + idx = strings.Index(t, ":") + } else { + idx = strings.LastIndex(t, ":") + } + if idx == 0 { + tokens, verb = tokens[:l-1], t[1:] + } else if idx > 0 { + tokens[l-1], verb = t[:idx], t[idx+1:] + } + tokens = append(tokens, eof) + return tokens, verb +} + +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. +type parser struct { + tokens []string + accepted []string +} + +// topLevelSegments is the target of this parser. 
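+// For example (a sketch of the intermediate representation), the tokens for
+// "/v1/{name=books/*}" parse into a literal segment "v1" followed by a
+// variable segment whose path is "name" and whose sub-segments are the
+// literal "books" and a wildcard.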
+func (p *parser) topLevelSegments() ([]segment, error) {
+	if _, err := p.accept(typeEOF); err == nil {
+		p.tokens = p.tokens[:0]
+		return []segment{literal(eof)}, nil
+	}
+	segs, err := p.segments()
+	if err != nil {
+		return nil, err
+	}
+	if _, err := p.accept(typeEOF); err != nil {
+		return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+	}
+	return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+	s, err := p.segment()
+	if err != nil {
+		return nil, err
+	}
+
+	segs := []segment{s}
+	for {
+		if _, err := p.accept("/"); err != nil {
+			return segs, nil
+		}
+		s, err := p.segment()
+		if err != nil {
+			return segs, err
+		}
+		segs = append(segs, s)
+	}
+}
+
+func (p *parser) segment() (segment, error) {
+	if _, err := p.accept("*"); err == nil {
+		return wildcard{}, nil
+	}
+	if _, err := p.accept("**"); err == nil {
+		return deepWildcard{}, nil
+	}
+	if l, err := p.literal(); err == nil {
+		return l, nil
+	}
+
+	v, err := p.variable()
+	if err != nil {
+		return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
+	}
+	return v, nil
+}
+
+func (p *parser) literal() (segment, error) {
+	lit, err := p.accept(typeLiteral)
+	if err != nil {
+		return nil, err
+	}
+	return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+	if _, err := p.accept("{"); err != nil {
+		return nil, err
+	}
+
+	path, err := p.fieldPath()
+	if err != nil {
+		return nil, err
+	}
+
+	var segs []segment
+	if _, err := p.accept("="); err == nil {
+		segs, err = p.segments()
+		if err != nil {
+			return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
+		}
+	} else {
+		segs = []segment{wildcard{}}
+	}
+
+	if _, err := p.accept("}"); err != nil {
+		return nil, fmt.Errorf("unterminated variable segment: %s", path)
+	}
+	return variable{
+		path:     path,
+		segments: segs,
+	}, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+	c, err := p.accept(typeIdent)
+	if err != nil {
+		return "", err
+	}
+	components := []string{c}
+	for {
+		if _, err := p.accept("."); err != nil {
+			return strings.Join(components, "."), nil
+		}
+		c, err := p.accept(typeIdent)
+		if err != nil {
+			return "", fmt.Errorf("invalid field path component: %w", err)
+		}
+		components = append(components, c)
+	}
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of the valid values of termType.
+// They improve readability of parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+	typeIdent   = termType("ident")
+	typeLiteral = termType("literal")
+	typeEOF     = termType("$")
+)
+
+// eof is the terminal symbol which always appears at the end of token sequence.
+const eof = "\u0000"
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches the specified "term".
+// If it doesn't match, the function does not consume any tokens and returns an error.
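+//
+// For example (a sketch of the internal flow): with tokens ["v1", "/", "*", eof],
+// accept(typeLiteral) consumes and returns "v1", after which accept("/") consumes
+// the separator.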
+func (p *parser) accept(term termType) (string, error) {
+	t := p.tokens[0]
+	switch term {
+	case "/", "*", "**", ".", "=", "{", "}":
+		if t != string(term) {
+			return "", fmt.Errorf("expected %q but got %q", term, t)
+		}
+	case typeEOF:
+		if t != eof {
+			return "", fmt.Errorf("expected EOF but got %q", t)
+		}
+	case typeIdent:
+		if err := expectIdent(t); err != nil {
+			return "", err
+		}
+	case typeLiteral:
+		if err := expectPChars(t); err != nil {
+			return "", err
+		}
+	default:
+		return "", fmt.Errorf("unknown termType %q", term)
+	}
+	p.tokens = p.tokens[1:]
+	p.accepted = append(p.accepted, t)
+	return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//
+//	pchar       = unreserved / pct-encoded / sub-delims / ":" / "@"
+//	unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
+//	sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
+//	            / "*" / "+" / "," / ";" / "="
+//	pct-encoded = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+	const (
+		init = iota
+		pct1
+		pct2
+	)
+	st := init
+	for _, r := range t {
+		if st != init {
+			if !isHexDigit(r) {
+				return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+			}
+			switch st {
+			case pct1:
+				st = pct2
+			case pct2:
+				st = init
+			}
+			continue
+		}
+
+		// unreserved
+		switch {
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		}
+		switch r {
+		case '-', '.', '_', '~':
+			// unreserved
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+			// sub-delims
+		case ':', '@':
+			// rest of pchar
+		case '%':
+			// pct-encoded
+			st = pct1
+		default:
+			return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+		}
+	}
+	if st != init {
+		return fmt.Errorf("invalid percent-encoding in %q", t)
+	}
+	return nil
+}
+
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
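
expectPChars is what ultimately decides which literal segments are legal. Here is a small in-package sketch of its percent-encoding handling, again with expectations derived from the code rather than from upstream fixtures:

package httprule

import "testing"

// TestExpectPCharsSketch: "%2F" is a well-formed pct-encoded octet, while
// "%2G" must be rejected because 'G' is not a hex digit.
func TestExpectPCharsSketch(t *testing.T) {
	if err := expectPChars("foo%2Fbar"); err != nil {
		t.Errorf("expected foo%%2Fbar to be accepted, got %v", err)
	}
	if err := expectPChars("foo%2Gbar"); err == nil {
		t.Error("expected foo%2Gbar to be rejected")
	}
}
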
+func expectIdent(ident string) error { + if ident == "" { + return errors.New("empty identifier") + } + for pos, r := range ident { + switch { + case '0' <= r && r <= '9': + if pos == 0 { + return fmt.Errorf("identifier starting with digit: %s", ident) + } + continue + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case r == '_': + continue + default: + return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident) + } + } + return nil +} + +func isHexDigit(r rune) bool { + switch { + case '0' <= r && r <= '9': + return true + case 'A' <= r && r <= 'F': + return true + case 'a' <= r && r <= 'f': + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go new file mode 100644 index 000000000..5a814a000 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go @@ -0,0 +1,60 @@ +package httprule + +import ( + "fmt" + "strings" +) + +type template struct { + segments []segment + verb string + template string +} + +type segment interface { + fmt.Stringer + compile() (ops []op) +} + +type wildcard struct{} + +type deepWildcard struct{} + +type literal string + +type variable struct { + path string + segments []segment +} + +func (wildcard) String() string { + return "*" +} + +func (deepWildcard) String() string { + return "**" +} + +func (l literal) String() string { + return string(l) +} + +func (v variable) String() string { + var segs []string + for _, s := range v.segments { + segs = append(segs, s.String()) + } + return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/")) +} + +func (t template) String() string { + var segs []string + for _, s := range t.segments { + segs = append(segs, s.String()) + } + str := strings.Join(segs, "/") + if t.verb != "" { + str = fmt.Sprintf("%s:%s", str, t.verb) + } + return "/" + str +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 000000000..a65d88eb8 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,97 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + 
"@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go new file mode 100644 index 000000000..2f2b34243 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -0,0 +1,417 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound +// header isn't present. If the value is 0 the sent `context` will not have a timeout. +var DefaultContextTimeout = 0 * time.Second + +// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. +// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. 
+var malformedHTTPHeaders = map[string]struct{}{ + "connection": {}, +} + +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + httpPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func isValidGRPCMetadataKey(key string) bool { + // Must be a valid gRPC "Header-Name" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means 0-9 a-z _ - . + // Only lowercase letters are valid in the wire protocol, but the client library will normalize + // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable. + bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode. + for _, ch := range bytes { + validLowercaseLetter := ch >= 'a' && ch <= 'z' + validUppercaseLetter := ch >= 'A' && ch <= 'Z' + validDigit := ch >= '0' && ch <= '9' + validOther := ch == '.' || ch == '-' || ch == '_' + if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther { + return false + } + } + return true +} + +func isValidGRPCMetadataTextValue(textValue string) bool { + // Must be a valid gRPC "ASCII-Value" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. + bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. 
+ for _, ch := range bytes { + if ch < 0x20 || ch > 0x7E { + return false + } + } + return true +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + var pairs []string + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + if !isValidGRPCMetadataKey(h) { + grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h) + continue + } + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } else if !isValidGRPCMetadataTextValue(val) { + grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h) + continue + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + xff := req.Header.Values(xForwardedFor) + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + xff = append(xff, remoteIP) + } + } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. 
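
ServerMetadata is how response hooks get at the backend's headers and trailers. Below is a sketch of wiring a forward-response option that copies one assumed backend header ("x-request-id") onto the HTTP response; WithForwardResponseOption and NewServeMux are part of this package's public API, defined in mux.go.

package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/proto"
)

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithForwardResponseOption(func(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
			if md, ok := runtime.ServerMetadataFromContext(ctx); ok {
				// "x-request-id" is an assumed backend header name.
				if vals := md.HeaderMD.Get("x-request-id"); len(vals) > 0 {
					w.Header().Set("X-Request-Id", vals[0])
				}
			}
			return nil
		}),
	)
}

func main() { _ = newMux() }
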
+type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. +func (s *ServerTransportStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + return + } +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permanent request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} + +// isMalformedHTTPHeader checks whether header belongs to the list of +// "malformed headers" and would be rejected by the gRPC server. +func isMalformedHTTPHeader(header string) bool { + _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] + return isMalformed +} + +// RPCMethod returns the method string for the server context. The returned +// string is in the format of "/package.service/method". +func RPCMethod(ctx context.Context) (string, bool) { + m := ctx.Value(rpcMethodKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { + return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) +} + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. 
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+	m := ctx.Value(httpPathPatternKey{})
+	if m == nil {
+		return "", false
+	}
+	ms, ok := m.(string)
+	if !ok {
+		return "", false
+	}
+	return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+	return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
+
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) {
+	v, ok := ctx.Value(httpPatternKey{}).(Pattern)
+	return v, ok
+}
+
+func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context {
+	return context.WithValue(ctx, httpPatternKey{}, httpPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 000000000..d7b15fcfb
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It exists only for symmetry with the other conversion functions.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// The sequence may be encoded in either standard or URL-safe base64.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in standard or
+// URL-safe base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamppb.Timestamp.
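
These conversion helpers are what the generated gateway stubs call to decode path and query parameters, but they are exported and usable directly. A small sketch with arbitrary inputs:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	ids, err := runtime.Int64Slice("1,2,3", ",")
	fmt.Println(ids, err) // [1 2 3] <nil>

	n, err := runtime.Uint32("0x10")
	fmt.Println(n, err) // 16 <nil> (ParseUint with base 0 accepts 0x prefixes)

	raw, err := runtime.Bytes("aGVsbG8=") // standard base64 for "hello"
	fmt.Println(string(raw), err)         // hello <nil>
}
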
+func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +} + +// Duration converts the given string into a timestamp.Duration. +func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +} + +// Enum converts the given string into an int32 that should be type casted into the +// correct enum proto type. +func Enum(val string, enumValMap map[string]int32) (int32, error) { + e, ok := enumValMap[val] + if ok { + return e, nil + } + + i, err := Int32(val) + if err != nil { + return 0, fmt.Errorf("%s is not valid", val) + } + for _, v := range enumValMap { + if v == i { + return i, nil + } + } + return 0, fmt.Errorf("%s is not valid", val) +} + +// EnumSlice converts 'val' where individual enums are separated by 'sep' +// into a int32 slice. Each individual int32 should be type casted into the +// correct enum proto type. +func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Support for google.protobuf.wrappers on top of primitive types + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrapperspb.StringValue, error) { + return wrapperspb.String(val), nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrapperspb.FloatValue, error) { + parsedVal, err := Float32(val) + return wrapperspb.Float(parsedVal), err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { + parsedVal, err := Float64(val) + return wrapperspb.Double(parsedVal), err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrapperspb.BoolValue, error) { + parsedVal, err := Bool(val) + return wrapperspb.Bool(parsedVal), err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrapperspb.Int32Value, error) { + parsedVal, err := Int32(val) + return wrapperspb.Int32(parsedVal), err +} + +// UInt32Value well-known type support as wrapper around uint32 type +func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { + parsedVal, err := Uint32(val) + return wrapperspb.UInt32(parsedVal), err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrapperspb.Int64Value, error) { + parsedVal, err := Int64(val) + return wrapperspb.Int64(parsedVal), err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { + parsedVal, err := Uint64(val) + return wrapperspb.UInt64(parsedVal), err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrapperspb.BytesValue, error) { + parsedVal, err := Bytes(val) + return wrapperspb.Bytes(parsedVal), err +} diff --git 
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go new file mode 100644 index 000000000..b6e5ddf7a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which protoc-gen-grpc-gateway generates. +*/ +package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go new file mode 100644 index 000000000..01f573419 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -0,0 +1,191 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ErrorHandlerFunc is the signature used to configure error handling. +type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +// StreamErrorHandlerFunc is the signature used to configure stream error handling. +type StreamErrorHandlerFunc func(context.Context, error) *status.Status + +// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. +type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) + +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return 499 + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + default: + grpclog.Warningf("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError + } +} + +// HTTPError uses the mux-configured error handler. +func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + mux.errorHandler(ctx, mux, marshaler, w, r, err) +} + +// DefaultHTTPErrorHandler is the default error handler. 
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body written by this function is a Status message marshaled by the Marshaler. +func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` + + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + + s := status.Convert(err) + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) + w.Header().Set("Content-Type", contentType) + + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + + buf, merr := marshaler.Marshal(respRw) + if merr != nil { + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + return status.Convert(err) +} + +// DefaultRoutingErrorHandler is our default handler for routing errors. 
+// By default http error codes mapped on the following error codes: +// +// NotFound -> grpc.NotFound +// StatusBadRequest -> grpc.InvalidArgument +// MethodNotAllowed -> grpc.Unimplemented +// Other -> grpc.Internal, method is not expecting to be called for anything else +func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { + case http.StatusBadRequest: + sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus)) + case http.StatusMethodNotAllowed: + sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus)) + case http.StatusNotFound: + sterr = status.Error(codes.NotFound, http.StatusText(httpStatus)) + } + mux.errorHandler(ctx, mux, marshaler, w, r, sterr) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go new file mode 100644 index 000000000..9005d6a0b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -0,0 +1,168 @@ +package runtime + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { + fd := fields.ByName(protoreflect.Name(name)) + if fd != nil { + return fd + } + + return fields.ByJSONName(name) +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + + if err := json.NewDecoder(r).Decode(&root); err != nil { + if errors.Is(err, io.EOF) { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + m, ok := item.node.(map[string]interface{}) + switch { + case ok && len(m) > 0: + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { + return nil, errors.New("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) + if fd == nil { + return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName()) + } + + if isDynamicProtoMessage(fd.Message()) { + for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." + newPath + } + queue = append(queue, fieldMaskPathItem{path: newPath}) + } + continue + } + + if isProtobufAnyMessage(fd.Message()) && !fd.IsList() { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + + child := fieldMaskPathItem{ + node: v, + } + if item.path == "" { + child.path = string(fd.FullName().Name()) + } else { + child.path = item.path + "." 
+ string(fd.FullName().Name()) + } + + switch { + case fd.IsList(), fd.IsMap(): + // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86 + // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop. + fm.Paths = append(fm.Paths, child.path) + case fd.Message() != nil: + child.msg = item.msg.Get(fd).Message() + fallthrough + default: + queue = append(queue, child) + } + } + case ok && len(m) == 0: + fallthrough + case len(item.path) > 0: + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + // Sort for deterministic output in the presence + // of repeated fields. + sort.Strings(fm.Paths) + + return fm, nil +} + +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + +func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") +} + +// buildPathsBlindly does not attempt to match proto field names to the +// json value keys. Instead it relies completely on the structure of +// the unmarshalled json contained within in. +// Returns a slice containing all subpaths with the root at the +// passed in name and json value. +func buildPathsBlindly(name string, in interface{}) []string { + m, ok := in.(map[string]interface{}) + if !ok { + return []string{name} + } + + var paths []string + queue := []fieldMaskPathItem{{path: name, node: m}} + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + m, ok := cur.node.(map[string]interface{}) + if !ok { + // This should never happen since we should always check that we only add + // nodes of type map[string]interface{} to the queue. + continue + } + for k, v := range m { + if mi, ok := v.(map[string]interface{}); ok { + queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi}) + } else { + // This is not a struct, so there are no more levels to descend. + curPath := cur.path + "." + k + paths = append(paths, curPath) + } + } + } + return paths +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // parent message + msg protoreflect.Message +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go new file mode 100644 index 000000000..9f50a569e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -0,0 +1,247 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" + + "google.golang.org/genproto/googleapis/api/httpbody" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// ForwardResponseStream forwards the stream from gRPC server to REST client. 
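
FieldMaskFromRequestBody is normally invoked by generated PATCH handlers, but it can be exercised directly. A runnable sketch follows, using durationpb.Duration as a stand-in for a generated request message:

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Duration has two scalar fields, "seconds" and "nanos", so both JSON
	// keys become complete field-mask paths.
	body := strings.NewReader(`{"seconds": 1, "nanos": 500}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, &durationpb.Duration{})
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.Paths) // [nanos seconds], paths come back sorted
}
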
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + rc := http.NewResponseController(w) + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if errors.Is(err, io.EOF) { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) + } + + var buf []byte + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) + switch { + case respRw == nil: + buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) + case isHTTPBody: + buf = httpBody.GetData() + default: + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { + result["result"] = rb.XXX_ResponseBody() + } + + buf, err = marshaler.Marshal(result) + } + + if err != nil { + grpclog.Errorf("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) + return + } + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h)) + } + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +// responseBody interface contains 
method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(req) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + contentType := marshaler.ContentType(resp) + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + if rb, ok := respRw.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(respRw) + } + if err != nil { + grpclog.Errorf("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + + if _, err = w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) + } + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } +} + +func errorChunk(st *status.Status) map[string]proto.Message { + return 
map[string]proto.Message{"error": st.Proto()} +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go new file mode 100644 index 000000000..6de2e220c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go @@ -0,0 +1,32 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType returns its specified content type in case v is a +// google.api.HttpBody message, otherwise it will fall back to the default Marshalers +// content type. +func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType(v) +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetData(), nil + } + return h.Marshaler.Marshal(v) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go new file mode 100644 index 000000000..fe52081ab --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -0,0 +1,50 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go new file mode 100644 index 000000000..8376d1e0e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -0,0 +1,349 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "google.golang.org/protobuf/encoding/protojson" marshaler. +// It supports the full functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb struct { + protojson.MarshalOptions + protojson.UnmarshalOptions +} + +// ContentType always returns "application/json". +func (*JSONPb) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } + _, err = w.Write(buf) + return err + } + + b, err := j.MarshalOptions.Marshal(p) + if err != nil { + return err + } + + _, err = w.Write(b) + return err +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshal arbitrary data structures into JSON, +// it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. 
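
A sketch of JSONPb on both of its paths: a proto message goes through protojson (so well-known types keep their special JSON forms), while a plain Go map takes the marshalNonProtoField route described above. The inputs are arbitrary:

package main

import (
	"fmt"
	"time"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := &runtime.JSONPb{}

	b, err := m.Marshal(durationpb.New(90 * time.Second))
	fmt.Println(string(b), err) // "90s" <nil>, protojson's Duration form

	b, err = m.Marshal(map[string]int{"a": 1})
	fmt.Println(string(b), err) // {"a":1} <nil>, the non-proto fallback
}
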
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitUnpopulated { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + var err error + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, j.UnmarshalOptions, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{ + Decoder: d, + UnmarshalOptions: j.UnmarshalOptions, + } +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder + protojson.UnmarshalOptions +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
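
The reverse direction: NewDecoder wraps *json.Decoder so that Decode understands protos. A sketch decoding a protojson-encoded Duration from a stream; the input literal is arbitrary:

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	dec := (&runtime.JSONPb{}).NewDecoder(strings.NewReader(`"120s"`))
	var d durationpb.Duration
	if err := dec.Decode(&d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds()) // 120
}
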
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, unmarshaler, v) +} + +func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, unmarshaler, v) + } + + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), p) +} + +func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if v == nil { + null := json.RawMessage("null") + v = &null + } + if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if rv.Kind() == reflect.Slice { + if rv.Type().Elem().Kind() == reflect.Uint8 { + var sl []byte + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.SetBytes(sl) + } + return nil + } + + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) + } + for _, item := range sl { + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.Set(reflect.Append(rv, bv.Elem())) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch v := repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? 
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+ case float64:
+ rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+ return nil
+ default:
+ return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+ }
+ }
+ return d.Decode(v)
+}
+
+type protoEnum interface {
+ fmt.Stringer
+ EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+ return []byte("\n")
+}
+
+var (
+ convFromType = map[reflect.Kind]reflect.Value{
+ reflect.String: reflect.ValueOf(String),
+ reflect.Bool: reflect.ValueOf(Bool),
+ reflect.Float64: reflect.ValueOf(Float64),
+ reflect.Float32: reflect.ValueOf(Float32),
+ reflect.Int64: reflect.ValueOf(Int64),
+ reflect.Int32: reflect.ValueOf(Int32),
+ reflect.Uint64: reflect.ValueOf(Uint64),
+ reflect.Uint32: reflect.ValueOf(Uint32),
+ reflect.Slice: reflect.ValueOf(Bytes),
+ }
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 000000000..398c780dc
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+ "errors"
+ "io"
+
+ "google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+ return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return nil, errors.New("unable to marshal non proto field")
+ }
+ return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return errors.New("unable to unmarshal non proto field")
+ }
+ return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+ return DecoderFunc(func(value interface{}) error {
+ buffer, err := io.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ return marshaller.Unmarshal(buffer, value)
+ })
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+ return EncoderFunc(func(value interface{}) error {
+ buffer, err := marshaller.Marshal(value)
+ if err != nil {
+ return err
+ }
+ if _, err := writer.Write(buffer); err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 000000000..2c0d25ff4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+ "io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+ // Marshal marshals "v" into byte sequence.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal unmarshals "data" into "v".
+ // "v" must be a pointer value.
+ Unmarshal(data []byte, v interface{}) error
+ // NewDecoder returns a Decoder which reads byte sequence from "r".
+ NewDecoder(r io.Reader) Decoder
+ // NewEncoder returns an Encoder which writes byte sequence into "w".
+ NewEncoder(w io.Writer) Encoder
+ // ContentType returns the Content-Type which this marshaler is responsible for.
+ // The parameter describes the type which is being marshalled, which can sometimes
+ // affect the content type returned.
+ ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence.
+type Decoder interface {
+ Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+ Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+ // Delimiter returns the record separator for the stream.
+ Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 000000000..0b051e6e8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+ "errors"
+ "mime"
+ "net/http"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+ acceptHeader = http.CanonicalHeaderKey("Accept")
+ contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+ defaultMarshaler = &HTTPBodyMarshaler{
+ Marshaler: &JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ EmitUnpopulated: true,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ }
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one that it can
+// exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
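A wiring sketch for the negotiation described above (hypothetical, using only options defined in this diff): register the ProtoMarshaller for a binary MIME type; MarshalerForRequest then selects the inbound marshaler from Content-Type, the outbound one from Accept, and falls back to the "*" entry (the JSONPb-based default).

    mux := runtime.NewServeMux(
        runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
    )
    // A request sent with Content-Type: application/octet-stream is now decoded
    // as raw proto; responses still default to JSON unless Accept matches too.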
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+ for _, acceptVal := range r.Header[acceptHeader] {
+ if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+ outbound = m
+ break
+ }
+ }
+
+ for _, contentTypeVal := range r.Header[contentTypeHeader] {
+ contentType, _, err := mime.ParseMediaType(contentTypeVal)
+ if err != nil {
+ grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+ continue
+ }
+ if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+ inbound = m
+ break
+ }
+ }
+
+ if inbound == nil {
+ inbound = mux.marshalers.mimeMap[MIMEWildcard]
+ }
+ if outbound == nil {
+ outbound = inbound
+ }
+
+ return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+ mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+ if len(mime) == 0 {
+ return errors.New("empty MIME type")
+ }
+
+ m.mimeMap[mime] = marshaler
+
+ return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+ return marshalerRegistry{
+ mimeMap: map[string]Marshaler{
+ MIMEWildcard: defaultMarshaler,
+ },
+ }
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+ return func(mux *ServeMux) {
+ if err := mux.marshalers.add(mime, marshaler); err != nil {
+ panic(err)
+ }
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 000000000..60c2065dd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,537 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/textproto"
+ "regexp"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+ // UnescapingModeLegacy is the default V2 behavior, which escapes the entire
+ // path string before doing any routing.
+ UnescapingModeLegacy UnescapingMode = iota
+
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+ // reserved characters.
+ UnescapingModeAllExceptReserved
+
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
+ UnescapingModeAllExceptSlash
+
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
+ UnescapingModeAllCharacters + + // UnescapingModeDefault is the default escaping type. + // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's + // reference implementation + UnescapingModeDefault = UnescapingModeLegacy +) + +var encodedPathSplitter = regexp.MustCompile("(/|%2F)") + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + middlewares []Middleware + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + outgoingTrailerMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + errorHandler ErrorHandlerFunc + streamErrorHandler StreamErrorHandlerFunc + routingErrorHandler RoutingErrorHandlerFunc + disablePathLengthFallback bool + unescapingMode UnescapingMode +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. +func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode +// for more information. 
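A usage sketch for the option below (illustrative, not part of the vendored diff): opting into per-parameter unescaping so that an encoded slash inside a path variable survives routing as "%2F".

    mux := runtime.NewServeMux(
        runtime.WithUnescapingMode(runtime.UnescapingModeAllExceptSlash),
    )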
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.unescapingMode = mode + } +} + +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + +// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. +// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be +// done with careful consideration. +func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { + return func(serveMux *ServeMux) { + currentQueryParser = queryParameterParser + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function. +// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'. +// Other headers are not added to the gRPC metadata. +func DefaultHeaderMatcher(key string) (string, bool) { + switch key = textproto.CanonicalMIMEHeaderKey(key); { + case isPermanentHTTPHeader(key): + return MetadataPrefix + key, true + case strings.HasPrefix(key, MetadataHeaderPrefix): + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +func defaultOutgoingHeaderMatcher(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true +} + +func defaultOutgoingTrailerMatcher(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + for _, header := range fn.matchedMalformedHeaders() { + grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) + } + + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server. 
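A matcher sketch for the WithIncomingHeaderMatcher option above (the header name is hypothetical; assumes net/textproto): forward one extra request header into gRPC metadata while delegating everything else to DefaultHeaderMatcher.

    func customMatcher(key string) (string, bool) {
        if textproto.CanonicalMIMEHeaderKey(key) == "X-Request-Id" {
            return "x-request-id", true // gRPC metadata keys are lowercase
        }
        return runtime.DefaultHeaderMatcher(key)
    }
    // mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(customMatcher))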
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingHeaderMatcher = fn
+ }
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingTrailerMatcher = fn
+ }
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+ }
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.errorHandler = fn
+ }
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.streamErrorHandler = fn
+ }
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// This handler is called for errors that can happen before a gRPC route is selected or executed,
+// with one of the following error codes: StatusMethodNotAllowed, StatusNotFound, StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.routingErrorHandler = fn
+ }
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
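Looking back at the WithMetadata option above, a sketch of the cookie-to-metadata use case its comment mentions (cookie and key names are hypothetical; assumes context, net/http, and google.golang.org/grpc/metadata):

    annotator := func(ctx context.Context, r *http.Request) metadata.MD {
        if c, err := r.Cookie("session"); err == nil {
            return metadata.Pairs("session-token", c.Value)
        }
        return nil
    }
    // mux := runtime.NewServeMux(runtime.WithMetadata(annotator))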
+func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. +// When called the handler will forward the request to the upstream grpc service health check (defined in the +// gRPC Health Checking Protocol). +// +// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how +// to setup the protocol in the grpc server. +// +// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest. +func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { + return func(s *ServeMux) { + // error can be ignored since pattern is definitely valid + _ = s.HandlePath( + http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, + ) { + _, outboundMarshaler := MarshalerForRequest(s, r) + + resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + Service: r.URL.Query().Get("service"), + }) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + w.Header().Set("Content-Type", "application/json") + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + switch resp.GetStatus() { + case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: + err = status.Error(codes.Unavailable, resp.String()) + case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: + err = status.Error(codes.NotFound, resp.String()) + } + + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + _ = outboundMarshaler.NewEncoder(w).Encode(resp) + }) + } +} + +// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. +// +// See WithHealthEndpointAt for the general implementation. +func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { + return WithHealthEndpointAt(healthCheckClient, "/healthz") +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher + } + if serveMux.outgoingTrailerMatcher == nil { + serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. 
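Before Handle and HandlePath below, a route sketch (path and handler are hypothetical): HandlePath parses a google.api.http-style template and registers a plain HTTP handler on the gateway mux without a gRPC backing method.

    err := mux.HandlePath(http.MethodGet, "/v1/assets/{name}",
        func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
            fmt.Fprintf(w, "asset: %s\n", pathParams["name"])
        })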
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } + s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) +} + +// HandlePath allows users to configure custom path handlers. +// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/ +func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error { + compiler, err := httprule.Parse(pathPattern) + if err != nil { + return fmt.Errorf("parsing path pattern: %w", err) + } + tp := compiler.Compile() + pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb) + if err != nil { + return fmt.Errorf("creating new pattern: %w", err) + } + s.Handle(meth, pattern, h) + return nil +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path. +func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest) + return + } + + // TODO(v3): remove UnescapingModeLegacy + if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" { + path = r.URL.RawPath + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + r.Method = strings.ToUpper(override) + } + + var pathComponents []string + // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F" + // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the + // path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default + // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved + if s.unescapingMode == UnescapingModeAllCharacters { + pathComponents = encodedPathSplitter.Split(path[1:], -1) + } else { + pathComponents = strings.Split(path[1:], "/") + } + + lastPathComponent := pathComponents[len(pathComponents)-1] + + for _, h := range s.handlers[r.Method] { + // If the pattern has a verb, explicitly look for a suffix in the last + // component that matches a colon plus the verb. This allows us to + // handle some cases that otherwise can't be correctly handled by the + // former LastIndex case, such as when the verb literal itself contains + // a colon. This should work for all cases that have run through the + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. 
+ + var verb string + patVerb := h.pat.Verb() + + idx := -1 + if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { + idx = len(lastPathComponent) - len(patVerb) - 1 + } + if idx == 0 { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) + return + } + + comps := make([]string, len(pathComponents)) + copy(comps, pathComponents) + + if idx > 0 { + comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] + } + + pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } + continue + } + s.handleHandler(h, w, r, pathParams) + return + } + + // if no handler has found for the request, lookup for other methods + // to handle POST -> GET fallback if the request is subject to path + // length fallback. + // Note we are not eagerly checking the request here as we want to return the + // right HTTP status code, and we need to process the fallback candidates in + // order to do that. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + var verb string + patVerb := h.pat.Verb() + + idx := -1 + if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { + idx = len(lastPathComponent) - len(patVerb) - 1 + } + + comps := make([]string, len(pathComponents)) + copy(comps, pathComponents) + + if idx > 0 { + comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] + } + + pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } + continue + } + + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + // Also, only consider POST -> GET fallbacks, and avoid falling back to + // potentially dangerous operations like DELETE. + if s.isPathLengthFallback(r) && m == http.MethodGet { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + s.handleHandler(h, w, r, pathParams) + return + } + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed) + return + } + } + + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
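An aside on the Middleware plumbing that chainMiddlewares below implements (hypothetical logging middleware; note mws[0] ends up outermost): middlewares registered via WithMiddlewares wrap every HandlerFunc installed on the mux.

    logging := func(next runtime.HandlerFunc) runtime.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
            log.Printf("gateway: %s %s", r.Method, r.URL.Path)
            next(w, r, pathParams)
        }
    }
    // mux := runtime.NewServeMux(runtime.WithMiddlewares(logging))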
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go new file mode 100644 index 000000000..e54507145 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -0,0 +1,381 @@ +package runtime + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type MalformedSequenceError string + +func (e MalformedSequenceError) Error() string { + return "malformed path escape " + strconv.Quote(string(e)) +} + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string +} + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
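A construction sketch for NewPattern below (hand-assembled opcodes, shown only for illustration; generated gateway code normally emits these): the template "/v1/users/{id}" compiles to two literal pushes, a wildcard push, a single-element concat, and a capture.

    pool := []string{"v1", "users", "id"}
    ops := []int{
        int(utilities.OpLitPush), 0, // match literal "v1"
        int(utilities.OpLitPush), 1, // match literal "users"
        int(utilities.OpPush), 0,    // push the wildcard component
        int(utilities.OpConcatN), 1, // join the captured segment
        int(utilities.OpCapture), 2, // bind it to "id"
    }
    pat := runtime.MustPattern(runtime.NewPattern(1, ops, pool, ""))
    params, _ := pat.MatchAndEscape([]string{"v1", "users", "42"}, "", runtime.UnescapingModeDefault)
    // params == map[string]string{"id": "42"}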
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { + if version != 1 { + grpclog.Errorf("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Errorf("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Error("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Errorf("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Errorf("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Error("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Errorf("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Error("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Errorf("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// MatchAndEscape examines components to determine if they match to a Pattern. +// MatchAndEscape will return an error if no Patterns matched or if a pattern +// matched but contained malformed escape sequences. If successful, the function +// returns a mapping from field paths to their captured values. +func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) { + if p.verb != verb { + if p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb + } + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + var err error + + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } else if op.code == utilities.OpPush { + if c, err = unescape(c, unescapingMode, false); err != nil { + return nil, err + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + c := strings.Join(components[pos:end], "/") + if c, err = unescape(c, unescapingMode, true); err != nil { + return nil, err + } + stack = append(stack, c) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// MatchAndEscape examines components to determine if they match to a Pattern. +// It will never perform per-component unescaping (see: UnescapingModeLegacy). +// MatchAndEscape will return an error if no Patterns matched. If successful, +// the function returns a mapping from field paths to their captured values. +// +// Deprecated: Use MatchAndEscape. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + return p.MatchAndEscape(components, verb, UnescapingModeDefault) +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +/* + * The following code is adopted and modified from Go's standard library + * and carries the attached license. + * + * Copyright 2009 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file. 
+ */ + +// ishex returns whether or not the given byte is a valid hex character +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func isRFC6570Reserved(c byte) bool { + switch c { + case '!', '#', '$', '&', '\'', '(', ')', '*', + '+', ',', '/', ':', ';', '=', '?', '@', '[', ']': + return true + default: + return false + } +} + +// unhex converts a hex point to the bit representation +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +// shouldUnescapeWithMode returns true if the character is escapable with the +// given mode +func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool { + switch mode { + case UnescapingModeAllExceptReserved: + if isRFC6570Reserved(c) { + return false + } + case UnescapingModeAllExceptSlash: + if c == '/' { + return false + } + case UnescapingModeAllCharacters: + return true + } + return true +} + +// unescape unescapes a path string using the provided mode +func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) { + // TODO(v3): remove UnescapingModeLegacy + if mode == UnescapingModeLegacy { + return s, nil + } + + if !multisegment { + mode = UnescapingModeAllCharacters + } + + // Count %, check that they're well-formed. + n := 0 + for i := 0; i < len(s); { + if s[i] == '%' { + n++ + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + s = s[i:] + if len(s) > 3 { + s = s[:3] + } + + return "", MalformedSequenceError(s) + } + i += 3 + } else { + i++ + } + } + + if n == 0 { + return s, nil + } + + var t strings.Builder + t.Grow(len(s)) + for i := 0; i < len(s); i++ { + switch s[i] { + case '%': + c := unhex(s[i+1])<<4 | unhex(s[i+2]) + if shouldUnescapeWithMode(c, mode) { + t.WriteByte(c) + i += 2 + continue + } + fallthrough + default: + t.WriteByte(s[i]) + } + } + + return t.String(), nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go new file mode 100644 index 000000000..d549407f2 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "google.golang.org/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. 
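An aside on the unescape helper above (a behavior sketch derived from its mode switch, not additional API; with multisegment=false the mode is forced to UnescapingModeAllCharacters):

    // unescape("a%2Fb", UnescapingModeAllCharacters,     true) -> "a/b", nil
    // unescape("a%2Fb", UnescapingModeAllExceptSlash,    true) -> "a%2Fb", nil ('/' stays escaped)
    // unescape("a%21b", UnescapingModeAllExceptReserved, true) -> "a%21b", nil ('!' is RFC 6570 reserved)
    // unescape("a%2",   UnescapingModeAllCharacters,     true) -> "", MalformedSequenceError("%2")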
+func Float32P(val string) (*float32, error) { + f, err := Float32(val) + if err != nil { + return nil, err + } + return proto.Float32(f), nil +} + +// Int64P parses the given string representation of an integer +// and returns a pointer to a int64 whose value is same as the parsed integer. +func Int64P(val string) (*int64, error) { + i, err := Int64(val) + if err != nil { + return nil, err + } + return proto.Int64(i), nil +} + +// Int32P parses the given string representation of an integer +// and returns a pointer to a int32 whose value is same as the parsed integer. +func Int32P(val string) (*int32, error) { + i, err := Int32(val) + if err != nil { + return nil, err + } + return proto.Int32(i), err +} + +// Uint64P parses the given string representation of an integer +// and returns a pointer to a uint64 whose value is same as the parsed integer. +func Uint64P(val string) (*uint64, error) { + i, err := Uint64(val) + if err != nil { + return nil, err + } + return proto.Uint64(i), err +} + +// Uint32P parses the given string representation of an integer +// and returns a pointer to a uint32 whose value is same as the parsed integer. +func Uint32P(val string) (*uint32, error) { + i, err := Uint32(val) + if err != nil { + return nil, err + } + return proto.Uint32(i), err +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go new file mode 100644 index 000000000..fe634174b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -0,0 +1,372 @@ +package runtime + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) + +var currentQueryParser QueryParameterParser = &DefaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +// DefaultQueryParser is a QueryParameterParser which implements the default +// query parameters parsing behavior. +// +// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context. +type DefaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) 
+ } + + msgValue := msg.ProtoReflect() + fieldPath := normalizeFieldPath(msgValue, strings.Split(key, ".")) + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) +} + +func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string { + newFieldPath := make([]string, 0, len(fieldPath)) + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + fieldDesc := fields.ByTextName(fieldName) + if fieldDesc == nil { + fieldDesc = fields.ByJSONName(fieldName) + } + if fieldDesc == nil { + // return initial field path values if no matching message field was found + return fieldPath + } + + newFieldPath = append(newFieldPath, string(fieldDesc.Name())) + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated { + return fieldPath + } + + // Get the nested message + msgValue = msgValue.Get(fieldDesc).Message() + } + + return newFieldPath +} + +func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + + var fieldDescriptor protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + + // Get field by name + fieldDescriptor = fields.ByName(protoreflect.Name(fieldName)) + if fieldDescriptor == nil { + fieldDescriptor = fields.ByJSONName(fieldName) + if fieldDescriptor == nil { + // We're not returning an error here because this could just be + // an extra query parameter that isn't part of the request. 
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + // Get the nested message + msgValue = msgValue.Mutable(fieldDescriptor).Message() + } + + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil { + if f := msgValue.WhichOneof(of); f != nil { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + + switch { + case fieldDescriptor.IsList(): + return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) + case fieldDescriptor.IsMap(): + return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values) + } + + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", ")) + } + + return populateField(fieldDescriptor, msgValue, values[0]) +} + +func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err) + } + + msgValue.Set(fieldDescriptor, v) + return nil +} + +func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err) + } + list.Append(v) + } + + return nil +} + +func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName()) + } + + key, err := parseField(fieldDescriptor.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err) + } + + value, err := parseField(fieldDescriptor.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err) + } + + mp.Set(key.MapKey(), value) + + return nil +} + +func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fieldDescriptor.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) + if err != nil { + if errors.Is(err, protoregistry.NotFound) { + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) + } + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number + if v = 
enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fieldDescriptor.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind())) + } +} + +func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case "google.protobuf.Duration": + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = durationpb.New(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Double(v) + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Float(float32(v)) + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int64(v) + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int32(int32(v)) + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt64(v) + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt32(uint32(v)) + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = 
wrapperspb.Bool(v) + case "google.protobuf.StringValue": + msg = wrapperspb.String(value) + case "google.protobuf.BytesValue": + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Bytes(v) + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm + case "google.protobuf.Value": + var v structpb.Value + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + case "google.protobuf.Struct": + var v structpb.Struct + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } + + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 000000000..b89409465 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go new file mode 100644 index 000000000..cf79a4d58 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. +package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go new file mode 100644 index 000000000..dfe7de486 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -0,0 +1,22 @@ +package utilities + +// An OpCode is an opcode of compiled path patterns. +type OpCode int + +// These constants are the valid values of OpCode. +const ( + // OpNop does nothing + OpNop = OpCode(iota) + // OpPush pushes a component to the stack + OpPush + // OpLitPush pushes a component to the stack if it matches the literal + OpLitPush + // OpPushM concatenates the remaining components and pushes them to the stack + OpPushM + // OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack + OpConcatN + // OpCapture pops an item and binds it to the variable + OpCapture + // OpEnd is the least positive invalid opcode.
+ OpEnd +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go new file mode 100644 index 000000000..01d26edae --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go @@ -0,0 +1,19 @@ +package utilities + +import ( + "bytes" + "io" +) + +// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins +// at the start of the stream +func IOReaderFactory(r io.Reader) (func() io.Reader, error) { + b, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + return func() io.Reader { + return bytes.NewReader(b) + }, nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go new file mode 100644 index 000000000..d224ab776 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -0,0 +1,33 @@ +package utilities + +import ( + "flag" + "strings" +) + +// flagInterface is a cut-down interface to `flag` +type flagInterface interface { + Var(value flag.Value, name string, usage string) +} + +// StringArrayFlag defines a flag with the specified name and usage string. +// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag. +func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags { + value := &StringArrayFlags{} + f.Var(value, name, usage) + return value +} + +// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var` +type StringArrayFlags []string + +// String returns a string representation of `StringArrayFlags` +func (i *StringArrayFlags) String() string { + return strings.Join(*i, ",") +} + +// Set appends a value to `StringArrayFlags` +func (i *StringArrayFlags) Set(value string) error { + *i = append(*i, value) + return nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go new file mode 100644 index 000000000..dd99b0ed2 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go @@ -0,0 +1,174 @@ +package utilities + +import ( + "sort" +) + +// DoubleArray is a Double Array implementation of a trie on sequences of strings. +type DoubleArray struct { + // Encoding keeps an encoding from string to int + Encoding map[string]int + // Base is the base array of Double Array + Base []int + // Check is the check array of Double Array + Check []int +} + +// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + encoded := make([]int, 0, len(seq)) + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + return k < len(sj) +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/heptiolabs/healthcheck/.gitignore b/vendor/github.com/heptiolabs/healthcheck/.gitignore new file mode 100644 index 000000000..61ead8666 --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/.gitignore @@ -0,0 +1 @@ +/vendor diff --git a/vendor/github.com/heptiolabs/healthcheck/.travis.yml b/vendor/github.com/heptiolabs/healthcheck/.travis.yml new file mode 100644 index 000000000..df442613f --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/.travis.yml @@ -0,0 +1,13 @@ +language: go +go_import_path: github.com/heptiolabs/healthcheck +go: + - 1.9.x + +sudo: false + +install: + - go get -u github.com/golang/dep/cmd/dep + && dep ensure -vendor-only -v + +script: + - go test -v -cover . diff --git a/vendor/github.com/heptiolabs/healthcheck/CODE_OF_CONDUCT.md b/vendor/github.com/heptiolabs/healthcheck/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..560c4004f --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/CODE_OF_CONDUCT.md @@ -0,0 +1,37 @@ +# Community Code of Conduct + +## Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of fostering +an open and welcoming community, we pledge to respect all people who contribute +through reporting issues, posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free experience for +everyone, regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, body size, race, ethnicity, age, +religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic addresses, + without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are not +aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers +commit themselves to fairly and consistently applying these principles to every aspect +of managing this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainer(s). + +This Code of Conduct is adapted from the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and [Contributor Covenant](http://contributor-covenant.org/version/1/2/0/), version 1.2.0. 
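Reviewer note on the `utilities` package vendored above: the `DoubleArray` type in trie.go is the double-array trie the grpc-gateway runtime uses to match sequences of string tokens (for example, field paths) against a registered set. A minimal sketch of how it behaves, using only the `NewDoubleArray` and `HasCommonPrefix` APIs added in this diff:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Register two token sequences in the double-array trie.
	da := utilities.NewDoubleArray([][]string{
		{"user", "name"},
		{"user", "address", "city"},
	})

	// HasCommonPrefix reports whether any registered sequence is a
	// prefix of the query sequence.
	fmt.Println(da.HasCommonPrefix([]string{"user", "name", "first"})) // true
	fmt.Println(da.HasCommonPrefix([]string{"order", "id"}))           // false
}
```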
diff --git a/vendor/github.com/heptiolabs/healthcheck/CONTRIBUTING.md b/vendor/github.com/heptiolabs/healthcheck/CONTRIBUTING.md new file mode 100644 index 000000000..2c0e8806b --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/CONTRIBUTING.md @@ -0,0 +1,57 @@ +## DCO Sign off + +All authors to the project retain copyright to their work. However, to ensure +that they are only submitting work that they have rights to, we are requiring +everyone to acknowledge this by signing their work. + +Any copyright notices in this repo should specify the authors as "the contributors". + +To sign your work, just add a line like this at the end of your commit message: + +``` +Signed-off-by: Joe Beda +``` + +This can easily be done with the `--signoff` option to `git commit`. + +By doing this you state that you can certify the following (from https://developercertificate.org/): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` \ No newline at end of file diff --git a/vendor/github.com/heptiolabs/healthcheck/Gopkg.lock b/vendor/github.com/heptiolabs/healthcheck/Gopkg.lock new file mode 100644 index 000000000..28121b796 --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/Gopkg.lock @@ -0,0 +1,87 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "925541529c1fa6821df4e44ce2723319eb2be768" + version = "v1.0.0" + +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "c3324c1198cf3374996e9d3098edd46a6b55afc9" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "6fb6fce6f8b75884b92e1889c150403fc0872c5e" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "7186fbf4eb22a031bcd4657c8adc3fc39acf6d8a" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert"] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[[projects]] + name = "gopkg.in/DATA-DOG/go-sqlmock.v1" + packages = ["."] + revision = "d76b18b42f285b792bf985118980ce9eacea9d10" + version = "v1.3.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "739553432f1d3b8bc633e5e10f18d31e57b1f8af15c9e916398429bd4d7bd826" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/heptiolabs/healthcheck/Gopkg.toml b/vendor/github.com/heptiolabs/healthcheck/Gopkg.toml new file mode 100644 index 000000000..25c33552e --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/Gopkg.toml @@ -0,0 +1,10 @@ +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. + +[[constraint]] + name = "github.com/prometheus/client_golang" + branch = "master" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" diff --git a/vendor/github.com/heptiolabs/healthcheck/LICENSE b/vendor/github.com/heptiolabs/healthcheck/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
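Reviewer note on the `runtime` query.go code at the top of this hunk: `parseField` and `parseMessage` form the coercion layer that turns raw URL strings into typed proto values, including well-known wrappers. A minimal sketch of driving them through `runtime.PopulateFieldFromPath`, the exported wrapper that fronts these helpers upstream (its presence is an assumption here, since this hunk only shows the unexported functions):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Bind the raw string "42" onto the uint32 `value` field;
	// parseField picks the right strconv conversion from the field kind.
	msg := &wrapperspb.UInt32Value{}
	if err := runtime.PopulateFieldFromPath(msg, "value", "42"); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetValue()) // 42
}
```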
diff --git a/vendor/github.com/heptiolabs/healthcheck/README.md b/vendor/github.com/heptiolabs/healthcheck/README.md new file mode 100644 index 000000000..1e916cf56 --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/README.md @@ -0,0 +1,92 @@ +# healthcheck +[![Build Status](https://travis-ci.org/heptiolabs/healthcheck.svg?branch=master)](https://travis-ci.org/heptiolabs/healthcheck) +[![Go Report Card](https://goreportcard.com/badge/github.com/heptiolabs/healthcheck)](https://goreportcard.com/report/github.com/heptiolabs/healthcheck) +[![GoDoc](https://godoc.org/github.com/heptiolabs/healthcheck?status.svg)](https://godoc.org/github.com/heptiolabs/healthcheck) + +Healthcheck is a library for implementing Kubernetes [liveness and readiness](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) probe handlers in your Go application. + +## Features + + - Integrates easily with Kubernetes. This library explicitly separates liveness vs. readiness checks instead of lumping everything into a single category of check. + + - Optionally exposes each check as a [Prometheus gauge](https://prometheus.io/docs/concepts/metric_types/#gauge) metric. This allows for cluster-wide monitoring and alerting on individual checks. + + - Supports asynchronous checks, which run in a background goroutine at a fixed interval. These are useful for expensive checks whose latency you don't want to add to the liveness and readiness endpoints. + + - Includes a small library of generically useful checks for validating upstream DNS, TCP, HTTP, and database dependencies as well as checking basic health of the Go runtime. + +## Usage + +See the [GoDoc examples](https://godoc.org/github.com/heptiolabs/healthcheck) for more detail. + + - Install with `go get` or your favorite Go dependency manager: `go get -u github.com/heptiolabs/healthcheck` + + - Import the package: `import "github.com/heptiolabs/healthcheck"` + + - Create a `healthcheck.Handler`: + ```go + health := healthcheck.NewHandler() + ``` + + - Configure some application-specific liveness checks (whether the app itself is unhealthy): + ```go + // Our app is not happy if we've got more than 100 goroutines running. + health.AddLivenessCheck("goroutine-threshold", healthcheck.GoroutineCountCheck(100)) + ``` + + - Configure some application-specific readiness checks (whether the app is ready to serve requests): + ```go + // Our app is not ready if we can't resolve our upstream dependency in DNS. + health.AddReadinessCheck( + "upstream-dep-dns", + healthcheck.DNSResolveCheck("upstream.example.com", 50*time.Millisecond)) + + // Our app is not ready if we can't connect to our database (`var db *sql.DB`) in <1s.
+ health.AddReadinessCheck("database", healthcheck.DatabasePingCheck(db, 1*time.Second)) + ``` + + - Expose the `/live` and `/ready` endpoints over HTTP (on port 8086): + ```go + go http.ListenAndServe("0.0.0.0:8086", health) + ``` + + - Configure your Kubernetes container with HTTP liveness and readiness probes (see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) for more detail): + ```yaml + # this is a bare bones example + # copy and paste livenessProbe and readinessProbe as appropriate for your app + apiVersion: v1 + kind: Pod + metadata: + name: heptio-healthcheck-example + spec: + containers: + - name: liveness + image: your-registry/your-container + + # define a liveness probe that checks every 5 seconds, starting after 5 seconds + livenessProbe: + httpGet: + path: /live + port: 8086 + initialDelaySeconds: 5 + periodSeconds: 5 + + # define a readiness probe that checks every 5 seconds + readinessProbe: + httpGet: + path: /ready + port: 8086 + periodSeconds: 5 + ``` + + - If one of your readiness checks fails, Kubernetes will stop routing traffic to that pod within a few seconds (depending on `periodSeconds` and other factors). + + - If one of your liveness checks fails or your app becomes totally unresponsive, Kubernetes will restart your container. + + ## HTTP Endpoints + When you run `go http.ListenAndServe("0.0.0.0:8086", health)`, two HTTP endpoints are exposed: + + - **`/live`**: liveness endpoint (HTTP 200 if healthy, HTTP 503 if unhealthy) + - **`/ready`**: readiness endpoint (HTTP 200 if healthy, HTTP 503 if unhealthy) + +Pass the `?full=1` query parameter to see the full check results as JSON. These are omitted by default for performance. \ No newline at end of file diff --git a/vendor/github.com/heptiolabs/healthcheck/async.go b/vendor/github.com/heptiolabs/healthcheck/async.go new file mode 100644 index 000000000..2c6bb9f20 --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/async.go @@ -0,0 +1,85 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheck + +import ( + "context" + "errors" + "time" +) + +// ErrNoData is returned if the first call of an Async() wrapped Check has not +// yet returned. +var ErrNoData = errors.New("no data yet") + +// Async converts a Check into an asynchronous check that runs in a background +// goroutine at a fixed interval. The check is called at a fixed rate, not with +// a fixed delay between invocations. If your check takes longer than the +// interval to execute, the next execution will happen immediately. +// +// Note: if you need to clean up the background goroutine, use AsyncWithContext(). +func Async(check Check, interval time.Duration) Check { + return AsyncWithContext(context.Background(), check, interval) +} + +// AsyncWithContext converts a Check into an asynchronous check that runs in a +// background goroutine at a fixed interval.
The check is called at a fixed +// rate, not with a fixed delay between invocations. If your check takes longer +// than the interval to execute, the next execution will happen immediately. +// +// Note: if you don't need to cancel execution (because this runs forever), use Async() +func AsyncWithContext(ctx context.Context, check Check, interval time.Duration) Check { + // create a chan that will buffer the most recent check result + result := make(chan error, 1) + + // fill it with ErrNoData so we'll start in an initially failing state + // (we don't want to be ready/live until we've actually executed the check + // once, but that might be slow). + result <- ErrNoData + + // make a wrapper that runs the check, and swaps out the current head of + // the channel with the latest result + update := func() { + err := check() + <-result + result <- err + } + + // spawn a background goroutine to run the check + go func() { + // call once right away (time.Tick() doesn't always tick immediately + // but we want an initial result as soon as possible) + update() + + // loop forever or until the context is canceled + ticker := time.Tick(interval) + for { + select { + case <-ticker: + update() + case <-ctx.Done(): + return + } + } + }() + + // return a Check function that closes over our result channel + return func() error { + // peek at the head of the channel, then put it back + err := <-result + result <- err + return err + } +} diff --git a/vendor/github.com/heptiolabs/healthcheck/checks.go b/vendor/github.com/heptiolabs/healthcheck/checks.go new file mode 100644 index 000000000..4f18bb54f --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/checks.go @@ -0,0 +1,120 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheck + +import ( + "context" + "database/sql" + "fmt" + "net" + "net/http" + "runtime" + "time" +) + +// TCPDialCheck returns a Check that checks TCP connectivity to the provided +// endpoint. +func TCPDialCheck(addr string, timeout time.Duration) Check { + return func() error { + conn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return err + } + return conn.Close() + } +} + +// HTTPGetCheck returns a Check that performs an HTTP GET request against the +// specified URL. The check fails if the response times out or returns a non-200 +// status code. +func HTTPGetCheck(url string, timeout time.Duration) Check { + client := http.Client{ + Timeout: timeout, + // never follow redirects + CheckRedirect: func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + }, + } + return func() error { + resp, err := client.Get(url) + if err != nil { + return err + } + resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("returned status %d", resp.StatusCode) + } + return nil + } +} + +// DatabasePingCheck returns a Check that validates connectivity to a +// database/sql.DB using Ping().
+func DatabasePingCheck(database *sql.DB, timeout time.Duration) Check { + return func() error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if database == nil { + return fmt.Errorf("database is nil") + } + return database.PingContext(ctx) + } +} + +// DNSResolveCheck returns a Check that makes sure the provided host can resolve +// to at least one IP address within the specified timeout. +func DNSResolveCheck(host string, timeout time.Duration) Check { + resolver := net.Resolver{} + return func() error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + addrs, err := resolver.LookupHost(ctx, host) + if err != nil { + return err + } + if len(addrs) < 1 { + return fmt.Errorf("could not resolve host") + } + return nil + } +} + +// GoroutineCountCheck returns a Check that fails if too many goroutines are +// running (which could indicate a resource leak). +func GoroutineCountCheck(threshold int) Check { + return func() error { + count := runtime.NumGoroutine() + if count > threshold { + return fmt.Errorf("too many goroutines (%d > %d)", count, threshold) + } + return nil + } +} + +// GCMaxPauseCheck returns a Check that fails if any recent Go garbage +// collection pause exceeds the provided threshold. +func GCMaxPauseCheck(threshold time.Duration) Check { + thresholdNanoseconds := uint64(threshold.Nanoseconds()) + return func() error { + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + for _, pause := range stats.PauseNs { + if pause > thresholdNanoseconds { + return fmt.Errorf("recent GC cycle took %s > %s", time.Duration(pause), threshold) + } + } + return nil + } +} diff --git a/vendor/github.com/heptiolabs/healthcheck/doc.go b/vendor/github.com/heptiolabs/healthcheck/doc.go new file mode 100644 index 000000000..1920098bb --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/doc.go @@ -0,0 +1,24 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package healthcheck helps you implement Kubernetes liveness and readiness checks +for your application. It supports synchronous and asynchronous (background) +checks. It can optionally report each check's status as a set of Prometheus +gauge metrics for cluster-wide monitoring and alerting. + +It also includes a small library of generic checks for DNS, TCP, and HTTP +reachability as well as Goroutine usage. +*/ +package healthcheck diff --git a/vendor/github.com/heptiolabs/healthcheck/handler.go b/vendor/github.com/heptiolabs/healthcheck/handler.go new file mode 100644 index 000000000..6ea97407d --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/handler.go @@ -0,0 +1,103 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheck + +import ( + "encoding/json" + "net/http" + "sync" +) + +// basicHandler is a basic Handler implementation. +type basicHandler struct { + http.ServeMux + checksMutex sync.RWMutex + livenessChecks map[string]Check + readinessChecks map[string]Check +} + +// NewHandler creates a new basic Handler +func NewHandler() Handler { + h := &basicHandler{ + livenessChecks: make(map[string]Check), + readinessChecks: make(map[string]Check), + } + h.Handle("/live", http.HandlerFunc(h.LiveEndpoint)) + h.Handle("/ready", http.HandlerFunc(h.ReadyEndpoint)) + return h +} + +func (s *basicHandler) LiveEndpoint(w http.ResponseWriter, r *http.Request) { + s.handle(w, r, s.livenessChecks) +} + +func (s *basicHandler) ReadyEndpoint(w http.ResponseWriter, r *http.Request) { + s.handle(w, r, s.readinessChecks, s.livenessChecks) +} + +func (s *basicHandler) AddLivenessCheck(name string, check Check) { + s.checksMutex.Lock() + defer s.checksMutex.Unlock() + s.livenessChecks[name] = check +} + +func (s *basicHandler) AddReadinessCheck(name string, check Check) { + s.checksMutex.Lock() + defer s.checksMutex.Unlock() + s.readinessChecks[name] = check +} + +func (s *basicHandler) collectChecks(checks map[string]Check, resultsOut map[string]string, statusOut *int) { + s.checksMutex.RLock() + defer s.checksMutex.RUnlock() + for name, check := range checks { + if err := check(); err != nil { + *statusOut = http.StatusServiceUnavailable + resultsOut[name] = err.Error() + } else { + resultsOut[name] = "OK" + } + } +} + +func (s *basicHandler) handle(w http.ResponseWriter, r *http.Request, checks ...map[string]Check) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + checkResults := make(map[string]string) + status := http.StatusOK + for _, checks := range checks { + s.collectChecks(checks, checkResults, &status) + } + + // write out the response code and content type header + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(status) + + // unless ?full=1, return an empty body. Kubernetes only cares about the + // HTTP status code, so we won't waste bytes on the full body. + if r.URL.Query().Get("full") != "1" { + w.Write([]byte("{}\n")) + return + } + + // otherwise, write the JSON body ignoring any encoding errors (which + // shouldn't really be possible since we're encoding a map[string]string). + encoder := json.NewEncoder(w) + encoder.SetIndent("", " ") + encoder.Encode(checkResults) +} diff --git a/vendor/github.com/heptiolabs/healthcheck/metrics_handler.go b/vendor/github.com/heptiolabs/healthcheck/metrics_handler.go new file mode 100644 index 000000000..e95be0f04 --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/metrics_handler.go @@ -0,0 +1,76 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheck + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +type metricsHandler struct { + handler Handler + registry prometheus.Registerer + namespace string +} + +// NewMetricsHandler returns a healthcheck Handler that also exposes metrics +// into the provided Prometheus registry. +func NewMetricsHandler(registry prometheus.Registerer, namespace string) Handler { + return &metricsHandler{ + handler: NewHandler(), + registry: registry, + namespace: namespace, + } +} + +func (h *metricsHandler) AddLivenessCheck(name string, check Check) { + h.handler.AddLivenessCheck(name, h.wrap(name, check)) +} + +func (h *metricsHandler) AddReadinessCheck(name string, check Check) { + h.handler.AddReadinessCheck(name, h.wrap(name, check)) +} + +func (h *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.handler.ServeHTTP(w, r) +} + +func (h *metricsHandler) LiveEndpoint(w http.ResponseWriter, r *http.Request) { + h.handler.LiveEndpoint(w, r) +} + +func (h *metricsHandler) ReadyEndpoint(w http.ResponseWriter, r *http.Request) { + h.handler.ReadyEndpoint(w, r) +} + +func (h *metricsHandler) wrap(name string, check Check) Check { + h.registry.MustRegister(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: h.namespace, + Subsystem: "healthcheck", + Name: "status", + Help: "Current check status (0 indicates success, 1 indicates failure)", + ConstLabels: prometheus.Labels{"check": name}, + }, + func() float64 { + if check() == nil { + return 0 + } + return 1 + }, + )) + return check +} diff --git a/vendor/github.com/heptiolabs/healthcheck/timeout.go b/vendor/github.com/heptiolabs/healthcheck/timeout.go new file mode 100644 index 000000000..86bf21d9a --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/timeout.go @@ -0,0 +1,52 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheck + +import ( + "fmt" + "time" +) + +// TimeoutError is the error returned when a Timeout-wrapped Check takes too long +type timeoutError time.Duration + +func (e timeoutError) Error() string { + return fmt.Sprintf("timed out after %s", time.Duration(e).String()) +} + +// Timeout returns whether this error is a timeout (always true for timeoutError) +func (e timeoutError) Timeout() bool { + return true +} + +// Temporary returns whether this error is temporary (always true for timeoutError) +func (e timeoutError) Temporary() bool { + return true +} + +// Timeout adds a timeout to a Check. If the underlying check takes longer than +// the timeout, it returns an error. 
+func Timeout(check Check, timeout time.Duration) Check { + return func() error { + c := make(chan error, 1) + go func() { c <- check() }() + select { + case err := <-c: + return err + case <-time.After(timeout): + return timeoutError(timeout) + } + } +} diff --git a/vendor/github.com/heptiolabs/healthcheck/types.go b/vendor/github.com/heptiolabs/healthcheck/types.go new file mode 100644 index 000000000..714294b28 --- /dev/null +++ b/vendor/github.com/heptiolabs/healthcheck/types.go @@ -0,0 +1,52 @@ +// Copyright 2017 by the contributors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package healthcheck + +import ( + "net/http" +) + +// Check is a health/readiness check. +type Check func() error + +// Handler is an http.Handler with additional methods that register health and +// readiness checks. It handles the "/live" and "/ready" HTTP +// endpoints. +type Handler interface { + // The Handler is an http.Handler, so it can be exposed directly and handle + // /live and /ready endpoints. + http.Handler + + // AddLivenessCheck adds a check that indicates that this instance of the + // application should be destroyed or restarted. A failed liveness check + // indicates that this instance is unhealthy, not some upstream dependency. + // Every liveness check is also included as a readiness check. + AddLivenessCheck(name string, check Check) + + // AddReadinessCheck adds a check that indicates that this instance of the + // application is currently unable to serve requests because of an upstream + // or some transient failure. If a readiness check fails, this instance + // should no longer receive requests, but should not be restarted or + // destroyed. + AddReadinessCheck(name string, check Check) + + // LiveEndpoint is the HTTP handler for just the /live endpoint, which is + // useful if you need to attach it into your own HTTP handler tree. + LiveEndpoint(http.ResponseWriter, *http.Request) + + // ReadyEndpoint is the HTTP handler for just the /ready endpoint, which is + // useful if you need to attach it into your own HTTP handler tree.
+ ReadyEndpoint(http.ResponseWriter, *http.Request) +} diff --git a/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml b/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml new file mode 100644 index 000000000..77285093d --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/.readthedocs.yaml @@ -0,0 +1,32 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + # You can also specify other tool versions: + # nodejs: "19" + # rust: "1.64" + # golang: "1.19" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +# formats: +# - pdf +# - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt \ No newline at end of file diff --git a/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT b/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT new file mode 100644 index 000000000..c96a21a2a --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/LICENSE.TXT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 - 2024 IP2Location.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
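Reviewer note before the ip2location files continue: because every healthcheck `Check` is just a `func() error`, the helpers vendored above compose directly. A small, hypothetical sketch (names, addresses, and thresholds are illustrative) combining `Async`, `Timeout`, and the stock checks with the handler:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/heptiolabs/healthcheck"
)

func main() {
	health := healthcheck.NewHandler()

	// Liveness: fail (and let Kubernetes restart the pod) on a goroutine leak.
	health.AddLivenessCheck("goroutines", healthcheck.GoroutineCountCheck(200))

	// Readiness: probe an upstream dependency, but keep the probe handler
	// fast by running the dial in the background (Async) and bounding each
	// run with Timeout.
	health.AddReadinessCheck("upstream-tcp",
		healthcheck.Async(
			healthcheck.Timeout(
				healthcheck.TCPDialCheck("upstream.example.com:443", 2*time.Second),
				3*time.Second),
			10*time.Second))

	// Serves /live and /ready as described in the README above.
	log.Fatal(http.ListenAndServe("0.0.0.0:8086", health))
}
```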
diff --git a/vendor/github.com/ip2location/ip2location-go/v9/README.md b/vendor/github.com/ip2location/ip2location-go/v9/README.md new file mode 100644 index 000000000..93876e20d --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/README.md @@ -0,0 +1,28 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/ip2location/ip2location-go/v9)](https://goreportcard.com/report/github.com/ip2location/ip2location-go/v9) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/ip2location/ip2location-go/v9)](https://pkg.go.dev/github.com/ip2location/ip2location-go/v9) + +# IP2Location Go Package + +This Go package provides a fast lookup of country, region, city, latitude, longitude, ZIP code, time zone, ISP, domain name, connection type, IDD code, area code, weather station code, station name, mcc, mnc, mobile brand, elevation, usage type, address type, IAB category, district, autonomous system number (ASN) and autonomous system (AS) from an IP address by using the IP2Location database. This package uses a file-based database available at IP2Location.com. This database simply contains IP blocks as keys, and other information such as country, region, city, latitude, longitude, ZIP code, time zone, ISP, domain name, connection type, IDD code, area code, weather station code, station name, mcc, mnc, mobile brand, elevation, usage type, address type, IAB category, district, autonomous system number (ASN) and autonomous system (AS) as values. It supports both IPv4 and IPv6 addresses. + +This package can be used in many types of projects such as: + + - select the geographically closest mirror + - analyze your web server logs to determine the countries of your visitors + - credit card fraud detection + - software export controls + - display native language and currency + - prevent password sharing and abuse of service + - geotargeting in advertisement + +The database is updated on a monthly basis for greater accuracy. Free LITE databases are available at https://lite.ip2location.com/ upon registration. + +The paid databases are available at https://www.ip2location.com under the Premium subscription package. + +As an alternative, this package can also call the IP2Location Web Service. This requires an API key. If you don't have an existing API key, you can subscribe for one below: +https://www.ip2location.com/web-service/ip2location + +Developer Documentation +===================== + +To learn more about installation, usage, and code examples, please visit the developer documentation at [https://ip2location-go.readthedocs.io/en/latest/](https://ip2location-go.readthedocs.io/en/latest/). \ No newline at end of file diff --git a/vendor/github.com/ip2location/ip2location-go/v9/country.go b/vendor/github.com/ip2location/ip2location-go/v9/country.go new file mode 100644 index 000000000..202ce316c --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/country.go @@ -0,0 +1,129 @@ +package ip2location + +import ( + "encoding/csv" + "errors" + "os" +) + +// The CountryInfoRecord struct stores all of the available +// country info found in the country information CSV file.
+type CountryInfoRecord struct { + Country_code string + Country_name string + Country_alpha3_code string + Country_numeric_code string + Capital string + Country_demonym string + Total_area string + Population string + Idd_code string + Currency_code string + Currency_name string + Currency_symbol string + Lang_code string + Lang_name string + Cctld string +} + +// The CI struct is the main object used to read the country information CSV. +type CI struct { + resultsArr []CountryInfoRecord + resultsMap map[string]CountryInfoRecord +} + +// OpenCountryInfo initializes with the path to the country information CSV file. +func OpenCountryInfo(csvFile string) (*CI, error) { + var ci = &CI{} + + _, err := os.Stat(csvFile) + if os.IsNotExist(err) { + return nil, errors.New("The CSV file '" + csvFile + "' is not found.") + } + + f, err := os.Open(csvFile) + if err != nil { + return nil, errors.New("Unable to read '" + csvFile + "'.") + } + + defer f.Close() + + csvReader := csv.NewReader(f) + data, err := csvReader.ReadAll() + if err != nil { + return nil, errors.New("Unable to read '" + csvFile + "'.") + } + + ci.resultsMap = make(map[string]CountryInfoRecord) + var headerArr []string + + for i, line := range data { + if i == 0 { // headers + for _, field := range line { + headerArr = append(headerArr, field) + } + } else { + var rec CountryInfoRecord + for j, field := range line { + switch headerArr[j] { + case "country_code": + rec.Country_code = field + case "country_name": + rec.Country_name = field + case "country_alpha3_code": + rec.Country_alpha3_code = field + case "country_numeric_code": + rec.Country_numeric_code = field + case "capital": + rec.Capital = field + case "country_demonym": + rec.Country_demonym = field + case "total_area": + rec.Total_area = field + case "population": + rec.Population = field + case "idd_code": + rec.Idd_code = field + case "currency_code": + rec.Currency_code = field + case "currency_name": + rec.Currency_name = field + case "currency_symbol": + rec.Currency_symbol = field + case "lang_code": + rec.Lang_code = field + case "lang_name": + rec.Lang_name = field + case "cctld": + rec.Cctld = field + } + } + if rec.Country_code == "" { + return nil, errors.New("Invalid country information CSV file.") + } + ci.resultsArr = append(ci.resultsArr, rec) + ci.resultsMap[rec.Country_code] = rec + } + } + return ci, nil +} + +// GetCountryInfo returns the country information for the specified country or all countries if not specified +func (c *CI) GetCountryInfo(countryCode ...string) ([]CountryInfoRecord, error) { + if len(c.resultsArr) == 0 { + return nil, errors.New("No record available.") + } + + if len(countryCode) == 1 { + cc := countryCode[0] + if rec, ok := c.resultsMap[cc]; ok { + var x []CountryInfoRecord + x = append(x, rec) + return x, nil // return record + } else { + return nil, errors.New("No record found.") + } + } else { + return c.resultsArr, nil // return all countries + } +} diff --git a/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go b/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go new file mode 100644 index 000000000..417536a73 --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/ip2location.go @@ -0,0 +1,1194 @@ +// This ip2location package provides a fast lookup of country, region, city, latitude, longitude, ZIP code, time zone, +// ISP, domain name, connection type, IDD code, area code, weather station code, station name, MCC, MNC, +// mobile brand, elevation, usage type, address type, IAB 
category, district, autonomous system number (ASN) and +// autonomous system (AS) from IP address by using IP2Location database. +package ip2location + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "lukechampine.com/uint128" + "math" + "math/big" + "net" + "os" + "strconv" + "unsafe" +) + +type DBReader interface { + io.ReadCloser + io.ReaderAt +} + +type ip2locationmeta struct { + databasetype uint8 + databasecolumn uint8 + databaseday uint8 + databasemonth uint8 + databaseyear uint8 + ipv4databasecount uint32 + ipv4databaseaddr uint32 + ipv6databasecount uint32 + ipv6databaseaddr uint32 + ipv4indexed bool + ipv6indexed bool + ipv4indexbaseaddr uint32 + ipv6indexbaseaddr uint32 + ipv4columnsize uint32 + ipv6columnsize uint32 + productcode uint8 + producttype uint8 + filesize uint32 +} + +// The IP2Locationrecord struct stores all of the available +// geolocation info found in the IP2Location database. +type IP2Locationrecord struct { + Country_short string + Country_long string + Region string + City string + Isp string + Latitude float32 + Longitude float32 + Domain string + Zipcode string + Timezone string + Netspeed string + Iddcode string + Areacode string + Weatherstationcode string + Weatherstationname string + Mcc string + Mnc string + Mobilebrand string + Elevation float32 + Usagetype string + Addresstype string + Category string + District string + Asn string + As string +} + +type DB struct { + f DBReader + meta ip2locationmeta + + country_position_offset uint32 + region_position_offset uint32 + city_position_offset uint32 + isp_position_offset uint32 + domain_position_offset uint32 + zipcode_position_offset uint32 + latitude_position_offset uint32 + longitude_position_offset uint32 + timezone_position_offset uint32 + netspeed_position_offset uint32 + iddcode_position_offset uint32 + areacode_position_offset uint32 + weatherstationcode_position_offset uint32 + weatherstationname_position_offset uint32 + mcc_position_offset uint32 + mnc_position_offset uint32 + mobilebrand_position_offset uint32 + elevation_position_offset uint32 + usagetype_position_offset uint32 + addresstype_position_offset uint32 + category_position_offset uint32 + district_position_offset uint32 + asn_position_offset uint32 + as_position_offset uint32 + + country_enabled bool + region_enabled bool + city_enabled bool + isp_enabled bool + domain_enabled bool + zipcode_enabled bool + latitude_enabled bool + longitude_enabled bool + timezone_enabled bool + netspeed_enabled bool + iddcode_enabled bool + areacode_enabled bool + weatherstationcode_enabled bool + weatherstationname_enabled bool + mcc_enabled bool + mnc_enabled bool + mobilebrand_enabled bool + elevation_enabled bool + usagetype_enabled bool + addresstype_enabled bool + category_enabled bool + district_enabled bool + asn_enabled bool + as_enabled bool + + metaok bool +} + +var defaultDB = &DB{} + +var country_position = [27]uint8{0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2} +var region_position = [27]uint8{0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3} +var city_position = [27]uint8{0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4} +var isp_position = [27]uint8{0, 0, 3, 0, 5, 0, 7, 5, 7, 0, 8, 0, 9, 0, 9, 0, 9, 0, 9, 7, 9, 0, 9, 7, 9, 9, 9} +var latitude_position = [27]uint8{0, 0, 0, 0, 0, 5, 5, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5} +var longitude_position = [27]uint8{0, 0, 0, 0, 0, 6, 6, 0, 6, 6, 6, 6, 6, 
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6} +var domain_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 6, 8, 0, 9, 0, 10, 0, 10, 0, 10, 0, 10, 8, 10, 0, 10, 8, 10, 10, 10} +var zipcode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 0, 7, 7, 7, 0, 7, 0, 7, 7, 7, 0, 7, 7, 7} +var timezone_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 7, 8, 8, 8, 7, 8, 0, 8, 8, 8, 0, 8, 8, 8} +var netspeed_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 11, 0, 11, 8, 11, 0, 11, 0, 11, 0, 11, 11, 11} +var iddcode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 12, 0, 12, 0, 12, 9, 12, 0, 12, 12, 12} +var areacode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 13, 0, 13, 0, 13, 10, 13, 0, 13, 13, 13} +var weatherstationcode_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 14, 0, 14, 0, 14, 0, 14, 14, 14} +var weatherstationname_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 15, 0, 15, 0, 15, 0, 15, 15, 15} +var mcc_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 16, 0, 16, 9, 16, 16, 16} +var mnc_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 17, 0, 17, 10, 17, 17, 17} +var mobilebrand_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 18, 0, 18, 11, 18, 18, 18} +var elevation_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 19, 0, 19, 19, 19} +var usagetype_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 20, 20, 20} +var addresstype_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 21} +var category_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 22} +var district_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23} +var asn_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24} +var as_position = [27]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25} + +const api_version string = "9.7.0" + +var max_ipv4_range = uint128.From64(4294967295) +var max_ipv6_range = uint128.From64(0) +var from_v4mapped = uint128.From64(281470681743360) +var to_v4mapped = uint128.From64(281474976710655) +var from_6to4 = uint128.From64(0) +var to_6to4 = uint128.From64(0) +var from_teredo = uint128.From64(0) +var to_teredo = uint128.From64(0) +var last_32bits = uint128.From64(4294967295) + +const countryshort uint32 = 0x0000001 +const countrylong uint32 = 0x0000002 +const region uint32 = 0x0000004 +const city uint32 = 0x0000008 +const isp uint32 = 0x0000010 +const latitude uint32 = 0x0000020 +const longitude uint32 = 0x0000040 +const domain uint32 = 0x0000080 +const zipcode uint32 = 0x0000100 +const timezone uint32 = 0x0000200 +const netspeed uint32 = 0x0000400 +const iddcode uint32 = 0x0000800 +const areacode uint32 = 0x0001000 +const weatherstationcode uint32 = 0x0002000 +const weatherstationname uint32 = 0x0004000 +const mcc uint32 = 0x0008000 +const mnc uint32 = 0x0010000 +const mobilebrand uint32 = 0x0020000 +const elevation uint32 = 0x0040000 +const usagetype uint32 = 0x0080000 +const addresstype uint32 = 0x0100000 +const category uint32 = 0x0200000 +const district uint32 = 0x0400000 +const asn uint32 = 0x0800000 +const as uint32 = 0x1000000 + +const all uint32 = countryshort | 
countrylong | region | city | isp | latitude | longitude | domain | zipcode | timezone | netspeed | iddcode | areacode | weatherstationcode | weatherstationname | mcc | mnc | mobilebrand | elevation | usagetype | addresstype | category | district | asn | as + +const invalid_address string = "Invalid IP address." +const missing_file string = "Invalid database file." +const not_supported string = "This parameter is unavailable for selected data file. Please upgrade the data file." +const invalid_bin string = "Incorrect IP2Location BIN file format. Please make sure that you are using the latest IP2Location BIN file." + +func reverseBytes(s []byte) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} + +// get IP type and calculate IP number; calculates index too if exists +func (d *DB) checkip(ip string) (iptype uint32, ipnum uint128.Uint128, ipindex uint32) { + iptype = 0 + ipnum = uint128.From64(0) + ipnumtmp := uint128.From64(0) + ipindex = 0 + ipaddress := net.ParseIP(ip) + + if ipaddress != nil { + v4 := ipaddress.To4() + + if v4 != nil { + iptype = 4 + ipnum = uint128.From64(uint64(binary.BigEndian.Uint32(v4))) + } else { + v6 := ipaddress.To16() + + if v6 != nil { + iptype = 6 + reverseBytes(v6) + ipnum = uint128.FromBytes(v6) + + if ipnum.Cmp(from_v4mapped) >= 0 && ipnum.Cmp(to_v4mapped) <= 0 { + // ipv4-mapped ipv6 should treat as ipv4 and read ipv4 data section + iptype = 4 + ipnum = ipnum.Sub(from_v4mapped) + } else if ipnum.Cmp(from_6to4) >= 0 && ipnum.Cmp(to_6to4) <= 0 { + // 6to4 so need to remap to ipv4 + iptype = 4 + ipnum = ipnum.Rsh(80) + ipnum = ipnum.And(last_32bits) + } else if ipnum.Cmp(from_teredo) >= 0 && ipnum.Cmp(to_teredo) <= 0 { + // Teredo so need to remap to ipv4 + iptype = 4 + ipnum = uint128.Uint128{^ipnum.Lo, ^ipnum.Hi} + ipnum = ipnum.And(last_32bits) + } + } + } + } + if iptype == 4 { + if d.meta.ipv4indexed { + ipnumtmp = ipnum.Rsh(16) + ipnumtmp = ipnumtmp.Lsh(3) + ipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv4indexbaseaddr))).Lo) + } + } else if iptype == 6 { + if d.meta.ipv6indexed { + ipnumtmp = ipnum.Rsh(112) + ipnumtmp = ipnumtmp.Lsh(3) + ipindex = uint32(ipnumtmp.Add(uint128.From64(uint64(d.meta.ipv6indexbaseaddr))).Lo) + } + } + return +} + +// read byte +func (d *DB) readuint8(pos int64) (uint8, error) { + var retval uint8 + data := make([]byte, 1) + _, err := d.f.ReadAt(data, pos-1) + if err != nil { + return 0, err + } + retval = data[0] + return retval, nil +} + +// read row +func (d *DB) read_row(pos uint32, size uint32) ([]byte, error) { + pos2 := int64(pos) + data := make([]byte, size) + _, err := d.f.ReadAt(data, pos2-1) + if err != nil { + return nil, err + } + return data, nil +} + +// read unsigned 32-bit integer from slices +func (d *DB) readuint32_row(row []byte, pos uint32) uint32 { + var retval uint32 + data := row[pos : pos+4] + retval = binary.LittleEndian.Uint32(data) + return retval +} + +// read unsigned 32-bit integer +func (d *DB) readuint32(pos uint32) (uint32, error) { + pos2 := int64(pos) + var retval uint32 + data := make([]byte, 4) + _, err := d.f.ReadAt(data, pos2-1) + if err != nil { + return 0, err + } + buf := bytes.NewReader(data) + err = binary.Read(buf, binary.LittleEndian, &retval) + if err != nil { + fmt.Printf("binary read failed: %v", err) + } + return retval, nil +} + +// read unsigned 128-bit integer from slices +func (d *DB) readuint128_row(row []byte, pos uint32) uint128.Uint128 { + retval := uint128.From64(0) + data := row[pos : pos+16] + + // little endian 
to big endian
+	// for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 {
+	// 	data[i], data[j] = data[j], data[i]
+	// }
+	retval = uint128.FromBytes(data)
+	return retval
+}
+
+// read unsigned 128-bit integer
+func (d *DB) readuint128(pos uint32) (uint128.Uint128, error) {
+	pos2 := int64(pos)
+	retval := uint128.From64(0)
+	data := make([]byte, 16)
+	_, err := d.f.ReadAt(data, pos2-1)
+	if err != nil {
+		return uint128.From64(0), err
+	}
+
+	// little endian to big endian
+	// for i, j := 0, len(data)-1; i < j; i, j = i+1, j-1 {
+	// 	data[i], data[j] = data[j], data[i]
+	// }
+	retval = uint128.FromBytes(data)
+	return retval, nil
+}
+
+// read string
+func (d *DB) readstr(pos uint32) (string, error) {
+	pos2 := int64(pos)
+	readlen := 256 // max size of string field + 1 byte for the length
+	var retval string
+	data := make([]byte, readlen)
+	_, err := d.f.ReadAt(data, pos2)
+	if err != nil && err.Error() != "EOF" { // bypass the EOF error because we are reading 256 bytes, which may hit EOF
+		return "", err
+	}
+	strlen := data[0]
+	retval = convertBytesToString(data[1:(strlen + 1)])
+	return retval, nil
+}
+
+// read float from slices
+func (d *DB) readfloat_row(row []byte, pos uint32) float32 {
+	var retval float32
+	data := row[pos : pos+4]
+	bits := binary.LittleEndian.Uint32(data)
+	retval = math.Float32frombits(bits)
+	return retval
+}
+
+func fatal(db *DB, err error) (*DB, error) {
+	_ = db.f.Close()
+	return nil, err
+}
+
+// OpenDB takes the path to the IP2Location BIN database file. It will read all the metadata required to
+// be able to extract the embedded geolocation data, and return the underlying DB object.
+func OpenDB(dbpath string) (*DB, error) {
+	f, err := os.Open(dbpath)
+	if err != nil {
+		return nil, err
+	}
+
+	return OpenDBWithReader(f)
+}
+
+// OpenDBWithReader takes a DBReader to the IP2Location BIN database file. It will read all the metadata required to
+// be able to extract the embedded geolocation data, and return the underlying DB object.
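+//
+// A minimal lookup sketch (the BIN filename and the IP address below are
+// placeholders, not values shipped with this package):
+//
+//	db, err := ip2location.OpenDB("./IP2LOCATION-LITE-DB1.BIN")
+//	if err != nil {
+//		return
+//	}
+//	results, err := db.Get_all("8.8.8.8")
+//	if err == nil {
+//		ip2location.Printrecord(results)
+//	}
+//	db.Close()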
+func OpenDBWithReader(reader DBReader) (*DB, error) { + var db = &DB{} + + _max_ipv6_range := big.NewInt(0) + _max_ipv6_range.SetString("340282366920938463463374607431768211455", 10) + max_ipv6_range = uint128.FromBig(_max_ipv6_range) + + _from_6to4 := big.NewInt(0) + _from_6to4.SetString("42545680458834377588178886921629466624", 10) + from_6to4 = uint128.FromBig(_from_6to4) + + _to_6to4 := big.NewInt(0) + _to_6to4.SetString("42550872755692912415807417417958686719", 10) + to_6to4 = uint128.FromBig(_to_6to4) + + _from_teredo := big.NewInt(0) + _from_teredo.SetString("42540488161975842760550356425300246528", 10) + from_teredo = uint128.FromBig(_from_teredo) + + _to_teredo := big.NewInt(0) + _to_teredo.SetString("42540488241204005274814694018844196863", 10) + to_teredo = uint128.FromBig(_to_teredo) + + db.f = reader + + var row []byte + var err error + readlen := uint32(64) // 64-byte header + + row, err = db.read_row(1, readlen) + if err != nil { + return fatal(db, err) + } + db.meta.databasetype = row[0] + db.meta.databasecolumn = row[1] + db.meta.databaseyear = row[2] + db.meta.databasemonth = row[3] + db.meta.databaseday = row[4] + db.meta.ipv4databasecount = db.readuint32_row(row, 5) + db.meta.ipv4databaseaddr = db.readuint32_row(row, 9) + db.meta.ipv6databasecount = db.readuint32_row(row, 13) + db.meta.ipv6databaseaddr = db.readuint32_row(row, 17) + db.meta.ipv4indexbaseaddr = db.readuint32_row(row, 21) + db.meta.ipv6indexbaseaddr = db.readuint32_row(row, 25) + db.meta.productcode = row[29] + db.meta.producttype = row[30] + db.meta.filesize = db.readuint32_row(row, 31) + + // check if is correct BIN (should be 1 for IP2Location BIN file), also checking for zipped file (PK being the first 2 chars) + if (db.meta.productcode != 1 && db.meta.databaseyear >= 21) || (db.meta.databasetype == 80 && db.meta.databasecolumn == 75) { // only BINs from Jan 2021 onwards have this byte set + return fatal(db, errors.New(invalid_bin)) + } + + if db.meta.ipv4indexbaseaddr > 0 { + db.meta.ipv4indexed = true + } + + if db.meta.ipv6databasecount > 0 && db.meta.ipv6indexbaseaddr > 0 { + db.meta.ipv6indexed = true + } + + db.meta.ipv4columnsize = uint32(db.meta.databasecolumn << 2) // 4 bytes each column + db.meta.ipv6columnsize = uint32(16 + ((db.meta.databasecolumn - 1) << 2)) // 4 bytes each column, except IPFrom column which is 16 bytes + + dbt := db.meta.databasetype + + if country_position[dbt] != 0 { + db.country_position_offset = uint32(country_position[dbt]-2) << 2 + db.country_enabled = true + } + if region_position[dbt] != 0 { + db.region_position_offset = uint32(region_position[dbt]-2) << 2 + db.region_enabled = true + } + if city_position[dbt] != 0 { + db.city_position_offset = uint32(city_position[dbt]-2) << 2 + db.city_enabled = true + } + if isp_position[dbt] != 0 { + db.isp_position_offset = uint32(isp_position[dbt]-2) << 2 + db.isp_enabled = true + } + if domain_position[dbt] != 0 { + db.domain_position_offset = uint32(domain_position[dbt]-2) << 2 + db.domain_enabled = true + } + if zipcode_position[dbt] != 0 { + db.zipcode_position_offset = uint32(zipcode_position[dbt]-2) << 2 + db.zipcode_enabled = true + } + if latitude_position[dbt] != 0 { + db.latitude_position_offset = uint32(latitude_position[dbt]-2) << 2 + db.latitude_enabled = true + } + if longitude_position[dbt] != 0 { + db.longitude_position_offset = uint32(longitude_position[dbt]-2) << 2 + db.longitude_enabled = true + } + if timezone_position[dbt] != 0 { + db.timezone_position_offset = uint32(timezone_position[dbt]-2) << 2 + 
db.timezone_enabled = true + } + if netspeed_position[dbt] != 0 { + db.netspeed_position_offset = uint32(netspeed_position[dbt]-2) << 2 + db.netspeed_enabled = true + } + if iddcode_position[dbt] != 0 { + db.iddcode_position_offset = uint32(iddcode_position[dbt]-2) << 2 + db.iddcode_enabled = true + } + if areacode_position[dbt] != 0 { + db.areacode_position_offset = uint32(areacode_position[dbt]-2) << 2 + db.areacode_enabled = true + } + if weatherstationcode_position[dbt] != 0 { + db.weatherstationcode_position_offset = uint32(weatherstationcode_position[dbt]-2) << 2 + db.weatherstationcode_enabled = true + } + if weatherstationname_position[dbt] != 0 { + db.weatherstationname_position_offset = uint32(weatherstationname_position[dbt]-2) << 2 + db.weatherstationname_enabled = true + } + if mcc_position[dbt] != 0 { + db.mcc_position_offset = uint32(mcc_position[dbt]-2) << 2 + db.mcc_enabled = true + } + if mnc_position[dbt] != 0 { + db.mnc_position_offset = uint32(mnc_position[dbt]-2) << 2 + db.mnc_enabled = true + } + if mobilebrand_position[dbt] != 0 { + db.mobilebrand_position_offset = uint32(mobilebrand_position[dbt]-2) << 2 + db.mobilebrand_enabled = true + } + if elevation_position[dbt] != 0 { + db.elevation_position_offset = uint32(elevation_position[dbt]-2) << 2 + db.elevation_enabled = true + } + if usagetype_position[dbt] != 0 { + db.usagetype_position_offset = uint32(usagetype_position[dbt]-2) << 2 + db.usagetype_enabled = true + } + if addresstype_position[dbt] != 0 { + db.addresstype_position_offset = uint32(addresstype_position[dbt]-2) << 2 + db.addresstype_enabled = true + } + if category_position[dbt] != 0 { + db.category_position_offset = uint32(category_position[dbt]-2) << 2 + db.category_enabled = true + } + if district_position[dbt] != 0 { + db.district_position_offset = uint32(district_position[dbt]-2) << 2 + db.district_enabled = true + } + if asn_position[dbt] != 0 { + db.asn_position_offset = uint32(asn_position[dbt]-2) << 2 + db.asn_enabled = true + } + if as_position[dbt] != 0 { + db.as_position_offset = uint32(as_position[dbt]-2) << 2 + db.as_enabled = true + } + + db.metaok = true + + return db, nil +} + +// Open takes the path to the IP2Location BIN database file. It will read all the metadata required to +// be able to extract the embedded geolocation data. +// +// Deprecated: No longer being updated. +func Open(dbpath string) { + db, err := OpenDB(dbpath) + if err != nil { + return + } + + defaultDB = db +} + +// Close will close the file handle to the BIN file. +// +// Deprecated: No longer being updated. +func Close() { + defaultDB.Close() +} + +// Api_version returns the version of the component. +func Api_version() string { + return api_version +} + +// PackageVersion returns the database type. +func (d *DB) PackageVersion() string { + return strconv.Itoa(int(d.meta.databasetype)) +} + +// DatabaseVersion returns the database version. +func (d *DB) DatabaseVersion() string { + return "20" + strconv.Itoa(int(d.meta.databaseyear)) + "." + strconv.Itoa(int(d.meta.databasemonth)) + "." 
+ strconv.Itoa(int(d.meta.databaseday)) +} + +// populate record with message +func loadmessage(mesg string) IP2Locationrecord { + var x IP2Locationrecord + + x.Country_short = mesg + x.Country_long = mesg + x.Region = mesg + x.City = mesg + x.Isp = mesg + x.Domain = mesg + x.Zipcode = mesg + x.Timezone = mesg + x.Netspeed = mesg + x.Iddcode = mesg + x.Areacode = mesg + x.Weatherstationcode = mesg + x.Weatherstationname = mesg + x.Mcc = mesg + x.Mnc = mesg + x.Mobilebrand = mesg + x.Usagetype = mesg + x.Addresstype = mesg + x.Category = mesg + x.District = mesg + x.Asn = mesg + x.As = mesg + + return x +} + +func handleError(rec IP2Locationrecord, err error) IP2Locationrecord { + if err != nil { + fmt.Print(err) + } + return rec +} + +// convertBytesToString provides a no-copy []byte to string conversion. +// This implementation is adopted by official strings.Builder. +// Reference: https://github.com/golang/go/issues/25484 +func convertBytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// Get_all will return all geolocation fields based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_all(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, all)) +} + +// Get_country_short will return the ISO-3166 country code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_country_short(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, countryshort)) +} + +// Get_country_long will return the country name based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_country_long(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, countrylong)) +} + +// Get_region will return the region name based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_region(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, region)) +} + +// Get_city will return the city name based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_city(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, city)) +} + +// Get_isp will return the Internet Service Provider name based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_isp(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, isp)) +} + +// Get_latitude will return the latitude based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_latitude(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, latitude)) +} + +// Get_longitude will return the longitude based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_longitude(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, longitude)) +} + +// Get_domain will return the domain name based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_domain(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, domain)) +} + +// Get_zipcode will return the postal code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_zipcode(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, zipcode)) +} + +// Get_timezone will return the time zone based on the queried IP address. 
+// +// Deprecated: No longer being updated. +func Get_timezone(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, timezone)) +} + +// Get_netspeed will return the Internet connection speed based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_netspeed(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, netspeed)) +} + +// Get_iddcode will return the International Direct Dialing code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_iddcode(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, iddcode)) +} + +// Get_areacode will return the area code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_areacode(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, areacode)) +} + +// Get_weatherstationcode will return the weather station code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_weatherstationcode(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, weatherstationcode)) +} + +// Get_weatherstationname will return the weather station name based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_weatherstationname(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, weatherstationname)) +} + +// Get_mcc will return the mobile country code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_mcc(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, mcc)) +} + +// Get_mnc will return the mobile network code based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_mnc(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, mnc)) +} + +// Get_mobilebrand will return the mobile carrier brand based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_mobilebrand(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, mobilebrand)) +} + +// Get_elevation will return the elevation in meters based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_elevation(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, elevation)) +} + +// Get_usagetype will return the usage type based on the queried IP address. +// +// Deprecated: No longer being updated. +func Get_usagetype(ipaddress string) IP2Locationrecord { + return handleError(defaultDB.query(ipaddress, usagetype)) +} + +// Get_all will return all geolocation fields based on the queried IP address. +func (d *DB) Get_all(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, all) +} + +// Get_country_short will return the ISO-3166 country code based on the queried IP address. +func (d *DB) Get_country_short(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, countryshort) +} + +// Get_country_long will return the country name based on the queried IP address. +func (d *DB) Get_country_long(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, countrylong) +} + +// Get_region will return the region name based on the queried IP address. 
+func (d *DB) Get_region(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, region) +} + +// Get_city will return the city name based on the queried IP address. +func (d *DB) Get_city(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, city) +} + +// Get_isp will return the Internet Service Provider name based on the queried IP address. +func (d *DB) Get_isp(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, isp) +} + +// Get_latitude will return the latitude based on the queried IP address. +func (d *DB) Get_latitude(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, latitude) +} + +// Get_longitude will return the longitude based on the queried IP address. +func (d *DB) Get_longitude(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, longitude) +} + +// Get_domain will return the domain name based on the queried IP address. +func (d *DB) Get_domain(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, domain) +} + +// Get_zipcode will return the postal code based on the queried IP address. +func (d *DB) Get_zipcode(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, zipcode) +} + +// Get_timezone will return the time zone based on the queried IP address. +func (d *DB) Get_timezone(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, timezone) +} + +// Get_netspeed will return the Internet connection speed based on the queried IP address. +func (d *DB) Get_netspeed(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, netspeed) +} + +// Get_iddcode will return the International Direct Dialing code based on the queried IP address. +func (d *DB) Get_iddcode(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, iddcode) +} + +// Get_areacode will return the area code based on the queried IP address. +func (d *DB) Get_areacode(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, areacode) +} + +// Get_weatherstationcode will return the weather station code based on the queried IP address. +func (d *DB) Get_weatherstationcode(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, weatherstationcode) +} + +// Get_weatherstationname will return the weather station name based on the queried IP address. +func (d *DB) Get_weatherstationname(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, weatherstationname) +} + +// Get_mcc will return the mobile country code based on the queried IP address. +func (d *DB) Get_mcc(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, mcc) +} + +// Get_mnc will return the mobile network code based on the queried IP address. +func (d *DB) Get_mnc(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, mnc) +} + +// Get_mobilebrand will return the mobile carrier brand based on the queried IP address. +func (d *DB) Get_mobilebrand(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, mobilebrand) +} + +// Get_elevation will return the elevation in meters based on the queried IP address. +func (d *DB) Get_elevation(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, elevation) +} + +// Get_usagetype will return the usage type based on the queried IP address. 
+func (d *DB) Get_usagetype(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, usagetype) +} + +// Get_addresstype will return the address type based on the queried IP address. +func (d *DB) Get_addresstype(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, addresstype) +} + +// Get_category will return the category based on the queried IP address. +func (d *DB) Get_category(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, category) +} + +// Get_district will return the district name based on the queried IP address. +func (d *DB) Get_district(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, district) +} + +// Get_asn will return the autonomous system number (ASN) based on the queried IP address. +func (d *DB) Get_asn(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, asn) +} + +// Get_as will return the autonomous system (AS) based on the queried IP address. +func (d *DB) Get_as(ipaddress string) (IP2Locationrecord, error) { + return d.query(ipaddress, as) +} + +// main query +func (d *DB) query(ipaddress string, mode uint32) (IP2Locationrecord, error) { + x := loadmessage(not_supported) // default message + + // read metadata + if !d.metaok { + x = loadmessage(missing_file) + return x, nil + } + + // check IP type and return IP number & index (if exists) + iptype, ipno, ipindex := d.checkip(ipaddress) + + if iptype == 0 { + x = loadmessage(invalid_address) + return x, nil + } + + var err error + var colsize uint32 + var baseaddr uint32 + var low uint32 + var high uint32 + var mid uint32 + var rowoffset uint32 + var firstcol uint32 = 4 // 4 bytes for ip from + var row []byte + var fullrow []byte + var readlen uint32 + ipfrom := uint128.From64(0) + ipto := uint128.From64(0) + maxip := uint128.From64(0) + + if iptype == 4 { + baseaddr = d.meta.ipv4databaseaddr + high = d.meta.ipv4databasecount + maxip = max_ipv4_range + colsize = d.meta.ipv4columnsize + } else { + firstcol = 16 // 16 bytes for ip from + baseaddr = d.meta.ipv6databaseaddr + high = d.meta.ipv6databasecount + maxip = max_ipv6_range + colsize = d.meta.ipv6columnsize + } + + // reading index + if ipindex > 0 { + row, err = d.read_row(ipindex, 8) // 4 bytes each for IP From and IP To + if err != nil { + return x, err + } + low = d.readuint32_row(row, 0) + high = d.readuint32_row(row, 4) + } + + if ipno.Cmp(maxip) >= 0 { + ipno = ipno.Sub(uint128.From64(1)) + } + + for low <= high { + mid = ((low + high) >> 1) + rowoffset = baseaddr + (mid * colsize) + + // reading IP From + whole row + next IP From + readlen = colsize + firstcol + fullrow, err = d.read_row(rowoffset, readlen) + if err != nil { + return x, err + } + + if iptype == 4 { + ipfrom32 := d.readuint32_row(fullrow, 0) + ipfrom = uint128.From64(uint64(ipfrom32)) + + ipto32 := d.readuint32_row(fullrow, colsize) + ipto = uint128.From64(uint64(ipto32)) + } else { + ipfrom = d.readuint128_row(fullrow, 0) + + ipto = d.readuint128_row(fullrow, colsize) + } + + if ipno.Cmp(ipfrom) >= 0 && ipno.Cmp(ipto) < 0 { + rowlen := colsize - firstcol + row = fullrow[firstcol:(firstcol + rowlen)] // extract the actual row data + + if mode&countryshort == 1 && d.country_enabled { + if x.Country_short, err = d.readstr(d.readuint32_row(row, d.country_position_offset)); err != nil { + return x, err + } + } + + if mode&countrylong != 0 && d.country_enabled { + if x.Country_long, err = d.readstr(d.readuint32_row(row, d.country_position_offset) + 3); err != nil { + return x, 
err
+				}
+			}
+
+			if mode&region != 0 && d.region_enabled {
+				if x.Region, err = d.readstr(d.readuint32_row(row, d.region_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&city != 0 && d.city_enabled {
+				if x.City, err = d.readstr(d.readuint32_row(row, d.city_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&isp != 0 && d.isp_enabled {
+				if x.Isp, err = d.readstr(d.readuint32_row(row, d.isp_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&latitude != 0 && d.latitude_enabled {
+				x.Latitude = d.readfloat_row(row, d.latitude_position_offset)
+			}
+
+			if mode&longitude != 0 && d.longitude_enabled {
+				x.Longitude = d.readfloat_row(row, d.longitude_position_offset)
+			}
+
+			if mode&domain != 0 && d.domain_enabled {
+				if x.Domain, err = d.readstr(d.readuint32_row(row, d.domain_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&zipcode != 0 && d.zipcode_enabled {
+				if x.Zipcode, err = d.readstr(d.readuint32_row(row, d.zipcode_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&timezone != 0 && d.timezone_enabled {
+				if x.Timezone, err = d.readstr(d.readuint32_row(row, d.timezone_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&netspeed != 0 && d.netspeed_enabled {
+				if x.Netspeed, err = d.readstr(d.readuint32_row(row, d.netspeed_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&iddcode != 0 && d.iddcode_enabled {
+				if x.Iddcode, err = d.readstr(d.readuint32_row(row, d.iddcode_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&areacode != 0 && d.areacode_enabled {
+				if x.Areacode, err = d.readstr(d.readuint32_row(row, d.areacode_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&weatherstationcode != 0 && d.weatherstationcode_enabled {
+				if x.Weatherstationcode, err = d.readstr(d.readuint32_row(row, d.weatherstationcode_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&weatherstationname != 0 && d.weatherstationname_enabled {
+				if x.Weatherstationname, err = d.readstr(d.readuint32_row(row, d.weatherstationname_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&mcc != 0 && d.mcc_enabled {
+				if x.Mcc, err = d.readstr(d.readuint32_row(row, d.mcc_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&mnc != 0 && d.mnc_enabled {
+				if x.Mnc, err = d.readstr(d.readuint32_row(row, d.mnc_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&mobilebrand != 0 && d.mobilebrand_enabled {
+				if x.Mobilebrand, err = d.readstr(d.readuint32_row(row, d.mobilebrand_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&elevation != 0 && d.elevation_enabled {
+				res, err := d.readstr(d.readuint32_row(row, d.elevation_position_offset))
+				if err != nil {
+					return x, err
+				}
+
+				f, _ := strconv.ParseFloat(res, 32)
+				x.Elevation = float32(f)
+			}
+
+			if mode&usagetype != 0 && d.usagetype_enabled {
+				if x.Usagetype, err = d.readstr(d.readuint32_row(row, d.usagetype_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&addresstype != 0 && d.addresstype_enabled {
+				if x.Addresstype, err = d.readstr(d.readuint32_row(row, d.addresstype_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&category != 0 && d.category_enabled {
+				if x.Category, err = d.readstr(d.readuint32_row(row, d.category_position_offset)); err != nil {
+					return x, err
+				}
+			}
+
+			if mode&district != 0 && d.district_enabled {
+				if x.District, err = d.readstr(d.readuint32_row(row, d.district_position_offset)); 
err != nil { + return x, err + } + } + + if mode&asn != 0 && d.asn_enabled { + if x.Asn, err = d.readstr(d.readuint32_row(row, d.asn_position_offset)); err != nil { + return x, err + } + } + + if mode&as != 0 && d.as_enabled { + if x.As, err = d.readstr(d.readuint32_row(row, d.as_position_offset)); err != nil { + return x, err + } + } + + return x, nil + } else { + if ipno.Cmp(ipfrom) < 0 { + high = mid - 1 + } else { + low = mid + 1 + } + } + } + return x, nil +} + +func (d *DB) Close() { + _ = d.f.Close() +} + +// Printrecord is used to output the geolocation data for debugging purposes. +func Printrecord(x IP2Locationrecord) { + fmt.Printf("country_short: %s\n", x.Country_short) + fmt.Printf("country_long: %s\n", x.Country_long) + fmt.Printf("region: %s\n", x.Region) + fmt.Printf("city: %s\n", x.City) + fmt.Printf("isp: %s\n", x.Isp) + fmt.Printf("latitude: %f\n", x.Latitude) + fmt.Printf("longitude: %f\n", x.Longitude) + fmt.Printf("domain: %s\n", x.Domain) + fmt.Printf("zipcode: %s\n", x.Zipcode) + fmt.Printf("timezone: %s\n", x.Timezone) + fmt.Printf("netspeed: %s\n", x.Netspeed) + fmt.Printf("iddcode: %s\n", x.Iddcode) + fmt.Printf("areacode: %s\n", x.Areacode) + fmt.Printf("weatherstationcode: %s\n", x.Weatherstationcode) + fmt.Printf("weatherstationname: %s\n", x.Weatherstationname) + fmt.Printf("mcc: %s\n", x.Mcc) + fmt.Printf("mnc: %s\n", x.Mnc) + fmt.Printf("mobilebrand: %s\n", x.Mobilebrand) + fmt.Printf("elevation: %f\n", x.Elevation) + fmt.Printf("usagetype: %s\n", x.Usagetype) + fmt.Printf("addresstype: %s\n", x.Addresstype) + fmt.Printf("category: %s\n", x.Category) + fmt.Printf("district: %s\n", x.District) + fmt.Printf("asn: %s\n", x.Asn) + fmt.Printf("as: %s\n", x.As) +} diff --git a/vendor/github.com/ip2location/ip2location-go/v9/ip2locationwebservice.go b/vendor/github.com/ip2location/ip2location-go/v9/ip2locationwebservice.go new file mode 100644 index 000000000..4d8c5db00 --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/ip2locationwebservice.go @@ -0,0 +1,227 @@ +package ip2location + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" +) + +// The IP2LocationResult struct stores all of the available +// geolocation info found in the IP2Location Web Service. 
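+// Which fields are actually populated depends on the subscribed package and
+// the addons requested in the query; fields absent from the JSON response
+// keep their Go zero values.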
+type IP2LocationResult struct { + Response string `json:"response"` + CountryCode string `json:"country_code"` + CountryName string `json:"country_name"` + RegionName string `json:"region_name"` + CityName string `json:"city_name"` + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + ZipCode string `json:"zip_code"` + TimeZone string `json:"time_zone"` + Isp string `json:"isp"` + Domain string `json:"domain"` + NetSpeed string `json:"net_speed"` + IddCode string `json:"idd_code"` + AreaCode string `json:"area_code"` + WeatherStationCode string `json:"weather_station_code"` + WeatherStationName string `json:"weather_station_name"` + Mcc string `json:"mcc"` + Mnc string `json:"mnc"` + MobileBrand string `json:"mobile_brand"` + Elevation int `json:"elevation"` + UsageType string `json:"usage_type"` + AddressType string `json:"address_type"` + Category string `json:"category"` + CategoryName string `json:"category_name"` + Geotargeting struct { + Metro string `json:"metro"` + } `json:"geotargeting"` + Continent struct { + Name string `json:"name"` + Code string `json:"code"` + Hemisphere []string `json:"hemisphere"` + } `json:"continent"` + Country struct { + Name string `json:"name"` + Alpha3Code string `json:"alpha3_code"` + NumericCode string `json:"numeric_code"` + Demonym string `json:"demonym"` + Flag string `json:"flag"` + Capital string `json:"capital"` + TotalArea string `json:"total_area"` + Population string `json:"population"` + Currency struct { + Code string `json:"code"` + Name string `json:"name"` + Symbol string `json:"symbol"` + } `json:"currency"` + Language struct { + Code string `json:"code"` + Name string `json:"name"` + } `json:"language"` + IddCode string `json:"idd_code"` + Tld string `json:"tld"` + IsEu bool `json:"is_eu"` + } `json:"country"` + CountryGroupings []struct { + Acronym string `json:"acronym"` + Name string `json:"name"` + } `json:"country_groupings"` + Region struct { + Name string `json:"name"` + Code string `json:"code"` + } `json:"region"` + City struct { + Name string `json:"name"` + } `json:"city"` + TimeZoneInfo struct { + Olson string `json:"olson"` + CurrentTime string `json:"current_time"` + GmtOffset int `json:"gmt_offset"` + IsDst string `json:"is_dst"` + Sunrise string `json:"sunrise"` + Sunset string `json:"sunset"` + } `json:"time_zone_info"` + CreditsConsumed int `json:"credits_consumed"` +} + +// The IP2LocationCreditResult struct stores the +// credit balance for the IP2Location Web Service. +type IP2LocationCreditResult struct { + Response int `json:"response"` +} + +// The WS struct is the main object used to query the IP2Location Web Service. +type WS struct { + apiKey string + apiPackage string + useSSL bool +} + +var regexAPIKey = regexp.MustCompile(`^[\dA-Z]{10}$`) +var regexAPIPackage = regexp.MustCompile(`^WS\d+$`) + +const baseURL = "api.ip2location.com/v2/" +const msgInvalidAPIKey = "Invalid API key." +const msgInvalidAPIPackage = "Invalid package name." 
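+
+// A minimal usage sketch for the web service. The API key and package name
+// below are placeholders (a real key is a 10-character string of digits and
+// uppercase letters issued by IP2Location), not working credentials:
+//
+//	ws, err := ip2location.OpenWS("YOUR10CHAR", "WS25", true)
+//	if err != nil {
+//		// handle the error
+//	}
+//	res, err := ws.LookUp("8.8.8.8", "", "")
+//	if err == nil {
+//		fmt.Println(res.CountryCode, res.CityName)
+//	}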
+ +// OpenWS initializes with the web service API key, API package and whether to use SSL +func OpenWS(apikey string, apipackage string, usessl bool) (*WS, error) { + var ws = &WS{} + ws.apiKey = apikey + ws.apiPackage = apipackage + ws.useSSL = usessl + + err := ws.checkParams() + + if err != nil { + return nil, err + } + + return ws, nil +} + +func (w *WS) checkParams() error { + if !regexAPIKey.MatchString(w.apiKey) { + return errors.New(msgInvalidAPIKey) + } + + if !regexAPIPackage.MatchString(w.apiPackage) { + return errors.New(msgInvalidAPIPackage) + } + + return nil +} + +// LookUp will return all geolocation fields based on the queried IP address, addon, lang +func (w *WS) LookUp(ipAddress string, addOn string, lang string) (IP2LocationResult, error) { + var res IP2LocationResult + err := w.checkParams() + + if err != nil { + return res, err + } + + protocol := "https" + + if !w.useSSL { + protocol = "http" + } + + // lang param not supported yet due to the inconsistent data type being returned by the API + myUrl := protocol + "://" + baseURL + "?key=" + w.apiKey + "&package=" + w.apiPackage + "&ip=" + url.QueryEscape(ipAddress) + "&addon=" + url.QueryEscape(addOn) + + resp, err := http.Get(myUrl) + + if err != nil { + return res, err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + bodyBytes, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return res, err + } + + err = json.Unmarshal(bodyBytes, &res) + + if err != nil { + return res, err + } + + return res, nil + } + + return res, errors.New("Error HTTP " + strconv.Itoa(int(resp.StatusCode))) +} + +// GetCredit will return the web service credit balance. +func (w *WS) GetCredit() (IP2LocationCreditResult, error) { + var res IP2LocationCreditResult + err := w.checkParams() + + if err != nil { + return res, err + } + + protocol := "https" + + if !w.useSSL { + protocol = "http" + } + + myUrl := protocol + "://" + baseURL + "?key=" + w.apiKey + "&check=true" + + resp, err := http.Get(myUrl) + + if err != nil { + return res, err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + bodyBytes, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return res, err + } + + err = json.Unmarshal(bodyBytes, &res) + + if err != nil { + return res, err + } + + return res, nil + } + + return res, errors.New("Error HTTP " + strconv.Itoa(int(resp.StatusCode))) +} diff --git a/vendor/github.com/ip2location/ip2location-go/v9/iptools.go b/vendor/github.com/ip2location/ip2location-go/v9/iptools.go new file mode 100644 index 000000000..9808698a3 --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/iptools.go @@ -0,0 +1,491 @@ +package ip2location + +import ( + "encoding/hex" + "errors" + "fmt" + "math" + "math/big" + "net" + "regexp" + "sort" + "strconv" + "strings" +) + +// The IPTools struct is the main object to access the IP address tools +type IPTools struct { + max_ipv4_range *big.Int + max_ipv6_range *big.Int +} + +// OpenTools initializes some variables +func OpenTools() *IPTools { + var t = &IPTools{} + t.max_ipv4_range = big.NewInt(4294967295) + t.max_ipv6_range = big.NewInt(0) + t.max_ipv6_range.SetString("340282366920938463463374607431768211455", 10) + return t +} + +// IsIPv4 returns true if the IP address provided is an IPv4. 
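+// For example, IsIPv4("8.8.8.8") returns true, while IsIPv4("2001:db8::1")
+// returns false.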
+func (t *IPTools) IsIPv4(IP string) bool { + ipaddr := net.ParseIP(IP) + + if ipaddr == nil { + return false + } + + v4 := ipaddr.To4() + + if v4 == nil { + return false + } + + return true +} + +// IsIPv6 returns true if the IP address provided is an IPv6. +func (t *IPTools) IsIPv6(IP string) bool { + if t.IsIPv4(IP) { + return false + } + + ipaddr := net.ParseIP(IP) + + if ipaddr == nil { + return false + } + + v6 := ipaddr.To16() + + if v6 == nil { + return false + } + + return true +} + +// IPv4ToDecimal returns the IP number for the supplied IPv4 address. +func (t *IPTools) IPv4ToDecimal(IP string) (*big.Int, error) { + if !t.IsIPv4(IP) { + return nil, errors.New("Not a valid IPv4 address.") + } + + ipnum := big.NewInt(0) + ipaddr := net.ParseIP(IP) + + if ipaddr != nil { + v4 := ipaddr.To4() + + if v4 != nil { + ipnum.SetBytes(v4) + } + } + + return ipnum, nil +} + +// IPv6ToDecimal returns the IP number for the supplied IPv6 address. +func (t *IPTools) IPv6ToDecimal(IP string) (*big.Int, error) { + if !t.IsIPv6(IP) { + return nil, errors.New("Not a valid IPv6 address.") + } + + ipnum := big.NewInt(0) + ipaddr := net.ParseIP(IP) + + if ipaddr != nil { + v6 := ipaddr.To16() + + if v6 != nil { + ipnum.SetBytes(v6) + } + } + + return ipnum, nil +} + +// DecimalToIPv4 returns the IPv4 address for the supplied IP number. +func (t *IPTools) DecimalToIPv4(IPNum *big.Int) (string, error) { + if IPNum.Cmp(big.NewInt(0)) < 0 || IPNum.Cmp(t.max_ipv4_range) > 0 { + return "", errors.New("Invalid IP number.") + } + + buf := make([]byte, 4) + bytes := IPNum.FillBytes(buf) + + ip := net.IP(bytes) + return ip.String(), nil +} + +// DecimalToIPv6 returns the IPv6 address for the supplied IP number. +func (t *IPTools) DecimalToIPv6(IPNum *big.Int) (string, error) { + if IPNum.Cmp(big.NewInt(0)) < 0 || IPNum.Cmp(t.max_ipv6_range) > 0 { + return "", errors.New("Invalid IP number.") + } + + buf := make([]byte, 16) + bytes := IPNum.FillBytes(buf) + + ip := net.IP(bytes) + return ip.String(), nil +} + +// CompressIPv6 returns the compressed form of the supplied IPv6 address. +func (t *IPTools) CompressIPv6(IP string) (string, error) { + if !t.IsIPv6(IP) { + return "", errors.New("Not a valid IPv6 address.") + } + + ipaddr := net.ParseIP(IP) + + if ipaddr == nil { + return "", errors.New("Not a valid IPv6 address.") + } + + return ipaddr.String(), nil +} + +// ExpandIPv6 returns the expanded form of the supplied IPv6 address. +func (t *IPTools) ExpandIPv6(IP string) (string, error) { + if !t.IsIPv6(IP) { + return "", errors.New("Not a valid IPv6 address.") + } + + ipaddr := net.ParseIP(IP) + + ipstr := hex.EncodeToString(ipaddr) + re := regexp.MustCompile(`(.{4})`) + ipstr = re.ReplaceAllString(ipstr, "$1:") + ipstr = strings.TrimSuffix(ipstr, ":") + + return ipstr, nil +} + +// IPv4ToCIDR returns the CIDR for the supplied IPv4 range. 
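+// For example, the range 10.0.0.0 to 10.0.0.255 collapses to the single CIDR
+// 10.0.0.0/24, while 10.0.0.0 to 10.0.1.3 yields 10.0.0.0/24 and 10.0.1.0/30.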
+func (t *IPTools) IPv4ToCIDR(IPFrom string, IPTo string) ([]string, error) { + if !t.IsIPv4(IPFrom) || !t.IsIPv4(IPTo) { + return nil, errors.New("Not a valid IPv4 address.") + } + + startipbig, _ := t.IPv4ToDecimal(IPFrom) + endipbig, _ := t.IPv4ToDecimal(IPTo) + startip := startipbig.Uint64() + endip := endipbig.Uint64() + var result []string + var maxsize float64 + var maxdiff float64 + + for endip >= startip { + maxsize = 32 + + for maxsize > 0 { + mask := math.Pow(2, 32) - math.Pow(2, 32-(maxsize-1)) + maskbase := startip & uint64(mask) + + if maskbase != startip { + break + } + + maxsize = maxsize - 1 + } + + x := math.Log(float64(endip)-float64(startip)+1) / math.Log(2) + maxdiff = 32 - math.Floor(x) + + if maxsize < maxdiff { + maxsize = maxdiff + } + + bn := big.NewInt(0) + + bn.SetString(fmt.Sprintf("%v", startip), 10) + + ip, _ := t.DecimalToIPv4(bn) + result = append(result, ip+"/"+fmt.Sprintf("%v", maxsize)) + startip = startip + uint64(math.Pow(2, 32-maxsize)) + } + + return result, nil +} + +// converts IPv6 address to binary string representation. +func (t *IPTools) ipToBinary(ip string) (string, error) { + if !t.IsIPv6(ip) { + return "", errors.New("Not a valid IPv6 address.") + } + + ipaddr := net.ParseIP(ip) + + binstr := "" + for i, j := 0, len(ipaddr); i < j; i = i + 1 { + binstr += fmt.Sprintf("%08b", ipaddr[i]) + } + + return binstr, nil +} + +// converts binary string representation to IPv6 address. +func (t *IPTools) binaryToIP(binstr string) (string, error) { + re := regexp.MustCompile(`^[01]{128}$`) + if !re.MatchString(binstr) { + return "", errors.New("Not a valid binary string.") + } + + re2 := regexp.MustCompile(`(.{8})`) + + bytes := make([]byte, 16) + i := 0 + matches := re2.FindAllStringSubmatch(binstr, -1) + for _, v := range matches { + x, _ := strconv.ParseUint(v[1], 2, 8) + bytes[i] = byte(x) + i = i + 1 + } + + ipaddr := net.IP(bytes) + + return ipaddr.String(), nil +} + +// returns the min and max for the array +func (t *IPTools) minMax(array []int) (int, int) { + var max int = array[0] + var min int = array[0] + for _, value := range array { + if max < value { + max = value + } + if min > value { + min = value + } + } + return min, max +} + +// IPv6ToCIDR returns the CIDR for the supplied IPv6 range. 
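+// When IPFrom and IPTo are the same address, the result is a single /128
+// entry for that address.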
+func (t *IPTools) IPv6ToCIDR(IPFrom string, IPTo string) ([]string, error) { + if !t.IsIPv6(IPFrom) || !t.IsIPv6(IPTo) { + return nil, errors.New("Not a valid IPv6 address.") + } + + ipfrombin, err := t.ipToBinary(IPFrom) + + if err != nil { + return nil, errors.New("Not a valid IPv6 address.") + } + + iptobin, err := t.ipToBinary(IPTo) + + if err != nil { + return nil, errors.New("Not a valid IPv6 address.") + } + + var result []string + + networksize := 0 + shift := 0 + unpadded := "" + padded := "" + networks := make(map[string]int) + n := 0 + + if ipfrombin == iptobin { + result = append(result, IPFrom+"/128") + return result, nil + } + + if ipfrombin > iptobin { + tmp := ipfrombin + ipfrombin = iptobin + iptobin = tmp + } + + for { + if string(ipfrombin[len(ipfrombin)-1]) == "1" { + unpadded = ipfrombin[networksize:128] + padded = fmt.Sprintf("%-128s", unpadded) // pad right with spaces + padded = strings.ReplaceAll(padded, " ", "0") // replace spaces + networks[padded] = 128 - networksize + n = strings.LastIndex(ipfrombin, "0") + if n == 0 { + ipfrombin = "" + } else { + ipfrombin = ipfrombin[0:n] + } + ipfrombin = ipfrombin + "1" + ipfrombin = fmt.Sprintf("%-128s", ipfrombin) // pad right with spaces + ipfrombin = strings.ReplaceAll(ipfrombin, " ", "0") // replace spaces + } + + if string(iptobin[len(iptobin)-1]) == "0" { + unpadded = iptobin[networksize:128] + padded = fmt.Sprintf("%-128s", unpadded) // pad right with spaces + padded = strings.ReplaceAll(padded, " ", "0") // replace spaces + networks[padded] = 128 - networksize + n = strings.LastIndex(iptobin, "1") + if n == 0 { + iptobin = "" + } else { + iptobin = iptobin[0:n] + } + iptobin = iptobin + "0" + iptobin = fmt.Sprintf("%-128s", iptobin) // pad right with spaces + iptobin = strings.ReplaceAll(iptobin, " ", "1") // replace spaces + } + + if iptobin < ipfrombin { + // special logic for Go due to lack of do-while + if ipfrombin >= iptobin { + break + } + continue + } + + values := []int{strings.LastIndex(ipfrombin, "0"), strings.LastIndex(iptobin, "1")} + _, max := t.minMax(values) + shift = 128 - max + unpadded = ipfrombin[0 : 128-shift] + ipfrombin = fmt.Sprintf("%0128s", unpadded) + unpadded = iptobin[0 : 128-shift] + iptobin = fmt.Sprintf("%0128s", unpadded) + + networksize = networksize + shift + + if ipfrombin == iptobin { + unpadded = ipfrombin[networksize:128] + padded = fmt.Sprintf("%-128s", unpadded) // pad right with spaces + padded = strings.ReplaceAll(padded, " ", "0") // replace spaces + networks[padded] = 128 - networksize + } + + if ipfrombin >= iptobin { + break + } + } + + keys := make([]string, 0, len(networks)) + for k := range networks { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + str, _ := t.binaryToIP(k) + result = append(result, str+"/"+fmt.Sprintf("%d", networks[k])) + } + + return result, nil +} + +// CIDRToIPv4 returns the IPv4 range for the supplied CIDR. 
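+// For example, CIDRToIPv4("10.0.0.0/24") returns []string{"10.0.0.0", "10.0.0.255"}.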
+func (t *IPTools) CIDRToIPv4(CIDR string) ([]string, error) { + if strings.Index(CIDR, "/") == -1 { + return nil, errors.New("Not a valid CIDR.") + } + + re := regexp.MustCompile(`^[0-9]{1,2}$`) + arr := strings.Split(CIDR, "/") + + if len(arr) != 2 || !t.IsIPv4(arr[0]) || !re.MatchString(arr[1]) { + return nil, errors.New("Not a valid CIDR.") + } + + ip := arr[0] + + prefix, err := strconv.Atoi(arr[1]) + if err != nil || prefix > 32 { + return nil, errors.New("Not a valid CIDR.") + } + + ipstartbn, err := t.IPv4ToDecimal(ip) + if err != nil { + return nil, errors.New("Not a valid CIDR.") + } + ipstartlong := ipstartbn.Int64() + + ipstartlong = ipstartlong & (-1 << (32 - prefix)) + + bn := big.NewInt(0) + bn.SetString(strconv.Itoa(int(ipstartlong)), 10) + + ipstart, _ := t.DecimalToIPv4(bn) + + var total int64 = 1 << (32 - prefix) + + ipendlong := ipstartlong + total - 1 + + if ipendlong > 4294967295 { + ipendlong = 4294967295 + } + + bn.SetString(strconv.Itoa(int(ipendlong)), 10) + ipend, _ := t.DecimalToIPv4(bn) + + result := []string{ipstart, ipend} + + return result, nil +} + +// CIDRToIPv6 returns the IPv6 range for the supplied CIDR. +func (t *IPTools) CIDRToIPv6(CIDR string) ([]string, error) { + if strings.Index(CIDR, "/") == -1 { + return nil, errors.New("Not a valid CIDR.") + } + + re := regexp.MustCompile(`^[0-9]{1,3}$`) + arr := strings.Split(CIDR, "/") + + if len(arr) != 2 || !t.IsIPv6(arr[0]) || !re.MatchString(arr[1]) { + return nil, errors.New("Not a valid CIDR.") + } + + ip := arr[0] + + prefix, err := strconv.Atoi(arr[1]) + if err != nil || prefix > 128 { + return nil, errors.New("Not a valid CIDR.") + } + + expand, _ := t.ExpandIPv6(ip) + parts := strings.Split(expand, ":") + + bitStart := strings.Repeat("1", prefix) + strings.Repeat("0", 128-prefix) + bitEnd := strings.Repeat("0", prefix) + strings.Repeat("1", 128-prefix) + + n := 16 // split string into 16-char parts + floors := []string{} + for i := 0; i < len(bitStart); i += n { + end := i + n + if end > len(bitStart) { + end = len(bitStart) + } + floors = append(floors, bitStart[i:end]) + } + ceilings := []string{} + for i := 0; i < len(bitEnd); i += n { + end := i + n + if end > len(bitEnd) { + end = len(bitEnd) + } + ceilings = append(ceilings, bitEnd[i:end]) + } + + start := []string{} + end := []string{} + + for i := 0; i < 8; i += 1 { + p, _ := strconv.ParseUint(parts[i], 16, 64) + f, _ := strconv.ParseUint(floors[i], 2, 64) + c, _ := strconv.ParseUint(ceilings[i], 2, 64) + start = append(start, strconv.FormatUint(p&f, 16)) + end = append(end, strconv.FormatUint(p|c, 16)) + } + + hexstartaddress, _ := t.ExpandIPv6(strings.Join(start, ":")) + hexendaddress, _ := t.ExpandIPv6(strings.Join(end, ":")) + result := []string{hexstartaddress, hexendaddress} + + return result, nil +} diff --git a/vendor/github.com/ip2location/ip2location-go/v9/region.go b/vendor/github.com/ip2location/ip2location-go/v9/region.go new file mode 100644 index 000000000..c06e3303e --- /dev/null +++ b/vendor/github.com/ip2location/ip2location-go/v9/region.go @@ -0,0 +1,96 @@ +package ip2location + +import ( + "encoding/csv" + "errors" + "os" + "strings" +) + +// The RegionInfoRecord struct stores all of the available +// region info found in the region information CSV file. +type RegionInfoRecord struct { + Country_code string + Name string + Code string +} + +// The RI struct is the main object used to read the region information CSV. 
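+//
+// A minimal usage sketch (the CSV filename below is a placeholder):
+//
+//	ri, err := ip2location.OpenRegionInfo("IP2LOCATION-ISO3166-2.CSV")
+//	if err != nil {
+//		// handle the error
+//	}
+//	code, err := ri.GetRegionCode("US", "California")
+//	if err == nil {
+//		fmt.Println(code)
+//	}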
+type RI struct { + resultsMap map[string][]RegionInfoRecord +} + +// OpenRegionInfo initializes with the path to the region information CSV file. +func OpenRegionInfo(csvFile string) (*RI, error) { + var ri = &RI{} + + _, err := os.Stat(csvFile) + if os.IsNotExist(err) { + return nil, errors.New("The CSV file '" + csvFile + "' is not found.") + } + + f, err := os.Open(csvFile) + if err != nil { + return nil, errors.New("Unable to read '" + csvFile + "'.") + } + + defer f.Close() + + csvReader := csv.NewReader(f) + data, err := csvReader.ReadAll() + if err != nil { + return nil, errors.New("Unable to read '" + csvFile + "'.") + } + + ri.resultsMap = make(map[string][]RegionInfoRecord) + var headerArr []string + var resultsArr []RegionInfoRecord + + for i, line := range data { + if i == 0 { // headers + for _, field := range line { + headerArr = append(headerArr, field) + } + } else { + var rec RegionInfoRecord + for j, field := range line { + switch headerArr[j] { + case "country_code": + rec.Country_code = field + case "subdivision_name": + rec.Name = field + case "code": + rec.Code = field + } + } + if rec.Name == "" { + return nil, errors.New("Invalid region information CSV file.") + } + resultsArr = append(resultsArr, rec) + } + } + for _, elem := range resultsArr { + if _, ok := ri.resultsMap[elem.Country_code]; !ok { + var arr []RegionInfoRecord + ri.resultsMap[elem.Country_code] = arr + } + ri.resultsMap[elem.Country_code] = append(ri.resultsMap[elem.Country_code], elem) + } + return ri, nil +} + +// GetRegionCode returns the region code for the specified country and region name +func (r *RI) GetRegionCode(countryCode string, regionName string) (string, error) { + if len(r.resultsMap) == 0 { + return "", errors.New("No record available.") + } + + if arr, ok := r.resultsMap[countryCode]; ok { + for _, elem := range arr { + if strings.ToUpper(elem.Name) == strings.ToUpper(regionName) { + return elem.Code, nil + } + } + } + return "", errors.New("No record found.") +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 000000000..66d1657d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1017 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
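The `HuffmanOnly` mode documented above is exercised through `NewWriter`, defined later in this file; a minimal sketch of the case it is meant for (input that is already LZ-compressed but has a skewed byte distribution):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Skip match searching, keep entropy coding: fast and still RFC 1951 valid.
	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
	if err != nil {
		panic(err)
	}
	w.Write(bytes.Repeat([]byte("some skewed byte distribution "), 512))
	w.Close()
	fmt.Println("compressed to", buf.Len(), "bytes")
}
```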
+ + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 28 + + skipNever = math.MaxInt32 + + debugDeflate = false +) + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-6 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, +} + +// advancedState contains state for the advanced levels, with bigger hash tables, etc. +type advancedState struct { + // deflate state + length int + offset int + maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. + + // input window: unprocessed data is window[index:windowEnd] + index int + hashMatch [maxMatchLength + minMatchLength]uint32 + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 +} + +type compressor struct { + compressionLevel + + h *huffmanEncoder + w *huffmanBitWriter + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + + window []byte + windowEnd int + blockStart int // window index where current tokens start + err error + + // queued output tokens + tokens tokens + fast fastEnc + state *advancedState + + sync bool // requesting flush + byteAvailable bool // if true, still need to process window[index-1]. 
+} + +func (d *compressor) fillDeflate(b []byte) int { + s := d.state + if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) + s.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + s.hashOffset += windowSize + if s.hashOffset > maxHashOffset { + delta := s.hashOffset - 1 + s.hashOffset -= delta + s.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). + for i, v := range s.hashPrev[:] { + if int(v) > delta { + s.hashPrev[i] = uint32(int(v) - delta) + } else { + s.hashPrev[i] = 0 + } + } + for i, v := range s.hashHead[:] { + if int(v) > delta { + s.hashHead[i] = uint32(int(v) - delta) + } else { + s.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window, d.sync) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok, eof, window, d.sync) + } + } else { + d.w.writeBlock(tok, eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only or huffman mode. + if d.level <= 0 && d.level > -MinCustomWindowSize { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() + return + } + s := d.state + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. 
+ s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. + s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + // Update window information. + d.windowEnd += n + s.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = minMatchLength - 1 + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + + // Minimum gain to accept a match. + cGain := 4 + + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 3 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + // Calculate gain. Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return hash4u(binary.LittleEndian.Uint32(b), hashBits) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. 
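As an aside, a standalone sketch of the multiplicative hashing used throughout these encoders (the constant below is copied from this package's `prime4bytes`): multiply by a large odd constant so the input bits diffuse upward, then keep only the top `h` bits as the table index.

```go
package main

import "fmt"

const prime4bytes = 2654435761 // same constant as the vendored code

// hash maps a 4-byte value to an index in a 2^h-entry table.
func hash(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> (32 - h)
}

func main() {
	// Adjacent inputs land in well-separated buckets of a 2^17 table.
	for _, u := range []uint32{0x61626364, 0x61626365, 0x61626366} {
		fmt.Printf("%#08x -> %d\n", u, hash(u, 17))
	}
}
```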
+func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < 4 { + return + } + hb := binary.LittleEndian.Uint32(b) + + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 + for i := 1; i < end; i++ { + hb = (hb >> 8) | uint32(b[i+3])<<24 + dst[i] = hash4u(hb, hashBits) + } +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.byteAvailable = false + d.err = nil + if d.state == nil { + return + } + s := d.state + s.index = 0 + s.hashOffset = 1 + s.length = minMatchLength - 1 + s.offset = 0 + s.chainHead = -1 +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + s := d.state + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = debugDeflate + + if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { + return + } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } + + s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + + for { + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - s.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + return + } + } + if s.index < s.maxInsertIndex { + // Update the hash + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[hash] = uint32(s.index + s.hashOffset) + } + prevLength := s.length + prevOffset := s.offset + s.length = minMatchLength - 1 + s.offset = 0 + minIndex := s.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { + s.length = newLength + s.offset = newOffset + } + } + + if prevLength >= minMatchLength && s.length <= prevLength { + // No better match, but check for better match at end... + // + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. + const checkOff = 2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. 
+ h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... + for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } + } + } + } + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + newIndex := s.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > s.maxInsertIndex { + end = s.maxInsertIndex + } + end += minMatchLength - 1 + startindex := s.index + 1 + if startindex > s.maxInsertIndex { + startindex = s.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. 
+ s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + + s.index = newIndex + d.byteAvailable = false + s.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.ii = 0 + } else { + // Reset, if we got a match this run. + if s.length >= minMatchLength { + s.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + s.ii++ + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when s.ii overflows after 64KB. + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) + for j := 0; j < n; j++ { + if s.index >= d.windowEnd-1 { + break + } + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + // Index... + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + s.index++ + } + // Flush last byte + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + } + } else { + s.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) store() { + if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.windowEnd = 0 + } +} + +// fillWindow will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillBlock(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeFast will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeFast() { + // We only compress if we have maxStoreBlockSize. + if d.windowEnd < len(d.window) { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 + d.fast.Reset() + return + } + } + + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if d.tokens.n == 0 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. 
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.w.logNewTablePenalty = 10 + d.window = make([]byte, 32<<10) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level == DefaultCompression: + level = 5 + fallthrough + case level >= 1 && level <= 6: + d.w.logNewTablePenalty = 7 + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logNewTablePenalty = 8 + d.state = &advancedState{} + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + d.step = (*compressor).deflateLazy + case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize: + d.w.logNewTablePenalty = 7 + d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize} + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + d.level = level + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.fast != nil { + d.fast.Reset() + d.windowEnd = 0 + d.tokens.Reset() + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. + d.windowEnd = 0 + default: + s := d.state + s.chainHead = -1 + for i := range s.hashHead { + s.hashHead[i] = 0 + } + for i := range s.hashPrev { + s.hashPrev[i] = 0 + } + s.hashOffset = 1 + s.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.Reset() + s.length = minMatchLength - 1 + s.offset = 0 + s.ii = 0 + s.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + d.w.reset(nil) + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. 
+// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + zw, err := NewWriter(w, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = 32 + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = windowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("flate: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize") + } + var dw Writer + if err := dw.d.init(w, -windowSize); err != nil { + return nil, err + } + return &dw, nil +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. 
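A round-trip sketch of the `Writer` API defined here, including the sync-flush behavior documented for `Flush` (this package also provides `NewReader` in its inflate code):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	w, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, "))
	w.Flush() // emit a sync marker so a reader can decode everything so far
	w.Write([]byte("world"))
	w.Close()

	r := flate.NewReader(&buf)
	defer r.Close()
	out, _ := io.ReadAll(r)
	fmt.Println(string(out)) // hello, world
}
```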
+func (w *Writer) Reset(dst io.Writer) { + if len(w.dict) > 0 { + // w was created with NewWriterDict + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 000000000..bb36351a5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. 
+func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. 
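The overlapping forward copy that `writeCopy` and `tryWriteCopy` implement is easiest to see in isolation: when `length` exceeds `dist`, bytes produced earlier in the copy feed the later ones, which is how LZ77 expresses run-length repetition. A minimal standalone sketch:

```go
package main

import "fmt"

// lz77Copy appends length bytes copied from dist bytes back, one byte at
// a time so the copy may legally overlap its own output.
func lz77Copy(out []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		out = append(out, out[len(out)-dist])
	}
	return out
}

func main() {
	out := []byte("ab")
	out = lz77Copy(out, 2, 6) // (dist=2, length=6) with only 2 bytes of history
	fmt.Println(string(out))  // abababab
}
```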
+func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 000000000..c8124b5c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,193 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type tableEntry struct { + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. 
+func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) +} + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastGen) Reset() { + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. + if e.cur <= bufferReset { + e.cur += maxMatchOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 000000000..f70594c34 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,1182 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. 
+ codegenCodeCount = 19 + badCode = 255 + + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 246 +) + +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = [32]uint8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = [32]uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + +// offset code word extra bits. +var offsetExtraBits = [32]int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, +} + +var offsetCombined = [32]uint32{} + +func init() { + var offsetBase = [32]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) + } +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint8 + nbytes uint8 + lastHuffMan bool + literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logNewTablePenalty uint + bytes [256 + 8]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 +} + +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. +// This also indicates that a Huffman table has been generated that can output all +// possible symbols. +// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated +// an EOB with the previous table must be written. +// +// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. 
+// +// An incoming block estimates the output size of a new table using a 'fresh' by calculating the +// optimal size and adding a penalty in 'logNewTablePenalty'. +// A Huffman table is not optimal, which is why we add a penalty, and generating a new table +// is slower both for compression and decompression. + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalEncoding: newHuffmanEncoder(literalCount), + tmpLitEncoding: newHuffmanEncoder(literalCount), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { + a := t.offHist[:offsetCodeCount] + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalEncoding.codes[256:literalCount] + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + return true +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) + w.nbits += nb + if w.nbits >= 48 { + w.writeOutBits() + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. 
+ codegen := w.codegen[:] // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = litEnc.codes[i].len() + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = offEnc.codes[i].len() + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { + size = litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + return size +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() + size = header + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + + extraBits + return size, numCodegens +} + +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + +// fixedSize returns the size of dynamically encoded data in bits. 
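The run-length scheme `generateCodegen` implements (RFC 1951, section 3.2.7) is compact enough to sketch standalone: lengths 0-15 are emitted directly, code 16 repeats the previous length 3-6 times, and codes 17/18 encode runs of zeros of 3-10 and 11-138. The sketch below is simplified: it ignores extra-bit packing and assumes each run is short enough for a single repeat code.

```go
package main

import "fmt"

// rlCodeLengths run-length encodes a code-length sequence the way
// DEFLATE's codegen does, in simplified form (one repeat code per run).
func rlCodeLengths(lens []uint8) (out []int) {
	for i := 0; i < len(lens); {
		v, run := lens[i], 1
		for i+run < len(lens) && lens[i+run] == v {
			run++
		}
		i += run
		switch {
		case v == 0 && run >= 11:
			out = append(out, 18) // + 7 extra bits holding run-11
		case v == 0 && run >= 3:
			out = append(out, 17) // + 3 extra bits holding run-3
		case run >= 4:
			out = append(out, int(v), 16) // emit once, then repeat 3-6 times
		default:
			for j := 0; j < run; j++ {
				out = append(out, int(v))
			}
		}
	}
	return out
}

func main() {
	fmt.Println(rlCodeLengths([]uint8{8, 8, 8, 8, 8, 0, 0, 0, 0, 9, 9}))
	// [8 16 17 9 9]
}
```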
+func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + + extraBits +} + +// storedSize calculates the stored size, including header. +// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() + if w.nbits >= 48 { + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + + // We over-write, but faster... + binary.LittleEndian.PutUint64(w.bytes[n:], bits) + n += 6 + + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + + w.nbytes = n +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord = uint32(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[codeWord]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + } + } +} + +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. + if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. 
+// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate() + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + extraBits = w.extraBitSize() + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + sync = sync || eof + if sync { + tokens.AddEOB() + } + + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... + if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } + + var size int + + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table. + // Use the previous header size as the best estimate. + newSize := w.lastHeader + tokens.EstimatedBits() + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. 
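+		// The penalty is newSize>>logNewTablePenalty added on top of the
+		// estimate, so a smaller logNewTablePenalty biases the choice
+		// toward reusing the previous table.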
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + + // We want a new block/table + if w.lastHeader == 0 { + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + + var numCodegens int + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + if !sync { + w.lastHeader, _ = w.headerSize() + } + w.lastHuffMan = false + } + + if sync { + w.lastHeader = 0 + } + // Write the tokens. + w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) +} + +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
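+// When filled is true, the histograms are assumed fully populated, so the
+// maximum table sizes are returned instead of scanning for the last non-zero
+// frequency.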
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist + + if t.n == 0 { + return + } + if filled { + return maxNumLit, maxNumDist + } + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + return +} + +func (w *huffmanBitWriter) generate() { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + if len(tokens) == 0 { + return + } + + // Only last token should be endBlockMarker. + var deferEOB bool + if tokens[len(tokens)-1] == endBlockMarker { + tokens = tokens[:len(tokens)-1] + deferEOB = true + } + + // Create slices up to the next power of two to avoid bounds checks. + lits := leCodes[:256] + offs := oeCodes[:32] + lengths := leCodes[lengthCodesStart:] + lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + for _, t := range tokens { + if t < 256 { + //w.writeCode(lits[t.literal()]) + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + continue + } + + // Write the length + length := t.length() + lengthCode := lengthCode(length) & 31 + if false { + w.writeCode(lengths[lengthCode]) + } else { + // inlined + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] + //w.writeBits(extraLength, extraLengthBits) + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + // Write the offset + offset := t.offset() + offsetCode := (offset >> 16) & 31 + if false { 
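+			// Dead branch kept for reference: the else arm below inlines the
+			// equivalent of writeCode so bits/nbits/nbytes stay in registers.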
+ w.writeCode(offs[offsetCode]) + } else { + // inlined + c := offs[offsetCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if deferEOB { + w.writeCode(leCodes[endBlockMarker]) + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq[:] { + w.literalFreq[i] = 0 + } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + // Add everything as literals + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty + } + + // Store bytes, if we don't get a reasonable improvement. 
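+	// ssize is the stored size in bits, including the 5-byte stored-block
+	// header, so it compares directly against the Huffman estimate.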
+ if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + if w.lastHeader > 0 { + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) + + if estBits < reuseSize { + if debugDeflate { + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") + } + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } + } + + count := 0 + if w.lastHeader == 0 { + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } + } + + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. + if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. 
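+	// writeOutBits drains 48 bits (6 bytes) from the accumulator, leaving at
+	// most 15 pending bits before the EOB code is written below.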
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+
+	if eof || sync {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 000000000..be7b58b47
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"math/bits"
+)
+
+const (
+	maxBitsLimit = 16
+	// number of valid literals
+	literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+	return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+	return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+	return h == 0
+}
+
+type huffmanEncoder struct {
+	codes    []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+	literal uint16
+	freq    uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level. for better printing
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Make capacity to next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table.
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint8
+		switch {
+		case ch < 144:
+			// size 8, 00110000  .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch - 144 + 400
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		codes[ch] = newhcode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len())
+		}
+	}
+	return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.zero() {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len())
+		}
+	}
+	return total
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+//
+//	and their associated frequencies. The array is in order of increasing
+//	frequency, and has as its last element a special element with frequency
+//	MaxInt32
+//
+// maxBits The maximum number of bits that should be used to encode any literal.
+//
+//	Must be less than 16.
+//
+// return An integer array in which array[i] indicates the number of literals
+//
+//	that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what.
This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := uint32(maxBits) + for level < 16 { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. 
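+		// The difference between adjacent cumulative leaf counts is therefore
+		// the number of codes of exactly this bit length.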
+ bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + sortByLiteral(chunk) + for _, node := range chunk { + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { + list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + codes[i] = 0 + } + } + list[count] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. + h.codes[node.literal].set(uint16(i), 1) + } + return + } + sortByFreq(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +// atLeastOne clamps the result between 1 and 15. +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 + } + if v > 15 { + return 15 + } + return v +} + +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ + } + } +} + +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. + h = h[:256] + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] + } + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 000000000..6c05ba8c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,159 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
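+// Ordering is by frequency, with ties broken by literal value, so the
+// resulting Huffman code is deterministic for a given histogram.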
+func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
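+	// protect enables the duplicate-handling pass below when many elements
+	// compare equal to the pivot.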
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 000000000..93f1aea10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
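+// Ordering is by literal value alone, matching RFC 1951 section 3.2.2, which
+// assigns code values in symbol order within each bit length.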
+func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
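+	// Same Bentley-McIlroy duplicate protection as doPivotByFreq above, but
+	// comparing on literal values only.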
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 000000000..2f410d64f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,829 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "compress/flate" + "fmt" + "io" + "math/bits" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false +) + +// Value of length - 3 and extra bits. 
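+// decCodeToLen below maps length codes 257..285 (indexes 0..28) to their base
+// length minus 3 and their extra-bit count, per RFC 1951 section 3.2.5.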
+type lengthExtra struct { + length, extra uint8 +} + +var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError = flate.CorruptInputError + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError = flate.ReadError + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError = flate.WriteError + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. 
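+// huffSym exploits this by retrying with more input bits whenever the
+// estimated length exceeds the bits currently buffered.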
+//
+// See the following:
+//	http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+	huffmanChunkBits  = 9
+	huffmanNumChunks  = 1 << huffmanChunkBits
+	huffmanCountMask  = 15
+	huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+	maxRead  int                       // the maximum number of bits we can read and not overread
+	chunks   *[huffmanNumChunks]uint16 // chunks as described above
+	links    [][]uint16                // overflow links
+	linkMask uint32                    // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+	// Sanity enables additional runtime tests during Huffman
+	// table construction. It's intended to be used during
+	// development to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if h.chunks == nil {
+		h.chunks = new([huffmanNumChunks]uint16)
+	}
+
+	if h.maxRead != 0 {
+		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
+	}
+
+	// Count number of codes of each length,
+	// compute maxRead and max length.
+	var count [maxCodeLen]int
+	var min, max int
+	for _, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		if min == 0 || n < min {
+			min = n
+		}
+		if n > max {
+			max = n
+		}
+		count[n&maxCodeLenMask]++
+	}
+
+	// Empty tree. The decompressor.huffSym function will fail later if the tree
+	// is used. Technically, an empty tree is only valid for the HDIST tree and
+	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+	// is guaranteed to fail since it will attempt to use the tree to decode the
+	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+	// guaranteed to fail later since the compressed data section must be
+	// composed of at least one symbol (the end-of-block marker).
+	if max == 0 {
+		return true
+	}
+
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i&maxCodeLenMask] = code
+		code += count[i&maxCodeLenMask]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings. See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		if debugDecode {
+			fmt.Println("coding failed, code, max:", code, max)
+		}
+		return false
+	}
+
+	h.maxRead = min
+
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint16, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint16, numLinks)
+		}
+	} else {
+		h.links = h.links[:0]
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+ if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. + if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// Reader is the actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +type step uint8 + +const ( + copyData step = iota + 1 + nextBlock + huffmanBytesBuffer + huffmanBytesReader + huffmanBufioReader + huffmanStringsReader + huffmanGenericReader +) + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + dict dictDecoder + + // Next step in the decompression, + // and decompression state. + step step + stepState int + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Input bits, in top of b. + b uint32 + + nb uint + final bool +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("predefinied huffman block") + } + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlockDecoder() + if debugDecode { + fmt.Println("dynamic huffman block") + } + default: + // 3 is reserved. 
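+		// BTYPE=11 is explicitly reserved by RFC 1951 section 3.2.3, so the
+		// stream is corrupt.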
+ if debugDecode { + fmt.Println("reserved data block encountered") + } + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + + f.doStep() + + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// WriteTo implements the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.doStep() + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. 
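+		// Codes 16, 17 and 18 are run-length instructions (RFC 1951 section
+		// 3.2.7): 16 repeats the previous length 3-6 times, 17 emits 3-10
+		// zeros, and 18 emits 11-138 zeros.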
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				if debugDecode {
+					fmt.Println("i==0")
+				}
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				if debugDecode {
+					fmt.Println("morebits:", err)
+				}
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+		f.b >>= nb & regSizeMaskUint32
+		f.nb -= nb
+		if i+rep > n {
+			if debugDecode {
+				fmt.Println("i+rep > n", i, rep, n)
+			}
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		if debugDecode {
+			fmt.Println("init2 failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize the maxRead bits to read at a time
+	// for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
+	if f.h1.maxRead < f.bits[endBlockMarker] {
+		f.h1.maxRead = f.bits[endBlockMarker]
+	}
+	if !f.final {
+		// If not the final block, the smallest block possible is
+		// a predefined table, BTYPE=01, with a single EOB marker.
+		// This will take up 3 + 7 bits.
+		f.h1.maxRead += 10
+	}
+
+	return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	left := (f.nb) & 7
+	f.nb -= left
+	f.b >>= left
+
+	offBytes := f.nb >> 3
+	// Unfilled values will be overwritten.
+	f.buf[0] = uint8(f.b)
+	f.buf[1] = uint8(f.b >> 8)
+	f.buf[2] = uint8(f.b >> 16)
+	f.buf[3] = uint8(f.b >> 24)
+
+	f.roffset += int64(offBytes)
+	f.nb, f.b = 0, 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+	if nn != ^n {
+		if debugDecode {
+			ncomp := ^n
+			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+		}
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		f.toRead = f.dict.readFlush()
+		f.finishBlock()
+		return
+	}
+
+	f.copyLen = int(n)
+	f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
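+// The sliding window is flushed to f.toRead each time it fills, so stored
+// blocks larger than the window stream through in window-sized chunks.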
+func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = nextBlock +} + +func (f *decompressor) doStep() { + switch f.step { + case copyData: + f.copyData() + case nextBlock: + f.nextBlock() + case huffmanBytesBuffer: + f.huffmanBytesBuffer() + case huffmanBytesReader: + f.huffmanBytesReader() + case huffmanBufioReader: + f.huffmanBufioReader() + case huffmanStringsReader: + f.huffmanStringsReader() + case huffmanGenericReader: + f.huffmanGenericReader() + default: + panic("BUG: unexpected step state") + } +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. 
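+		// Fixed literal/length code lengths: symbols 0-143 use 8 bits,
+		// 144-255 use 9 bits, 256-279 use 7 bits, and 280-287 use 8 bits.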
+ var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 000000000..2b2f993f7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,1283 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
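Before the inlined symbol loop that follows: it indexes h.chunks by the low bits of the bit buffer, and each chunk packs a decoded value together with its code length. The sketch below imitates that single-level lookup with a hand-built three-symbol code; the value<<4|length packing and the tiny table are assumptions for illustration, not the package's real constants:

package main

import "fmt"

const (
	valueShift = 4 // assumed packing: symbol<<valueShift | codeLength
	countMask  = 15
)

func main() {
	// Hand-built code: 'A' = 0 (1 bit), 'B' = 10, 'C' = 11 (2 bits each).
	// DEFLATE feeds a code into the buffer first-bit-lowest, so the table
	// is indexed by the low bits of the buffer.
	var chunks [4]uint16
	chunks[0b00] = 'A'<<valueShift | 1
	chunks[0b10] = 'A'<<valueShift | 1 // 1-bit code: both filler values decode to A
	chunks[0b01] = 'B'<<valueShift | 2
	chunks[0b11] = 'C'<<valueShift | 2
	b := uint32(0b1101) // bit stream 1,0,1,1: "B" then "C"
	for i := 0; i < 2; i++ {
		chunk := chunks[b&3]
		n := uint32(chunk & countMask) // code length consumed
		fmt.Printf("%c", rune(chunk>>valueShift))
		b >>= n
	}
	fmt.Println() // prints BC
}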
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesBuffer + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
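An aside before the extra-bits arithmetic below: it rebuilds the RFC 1951 distance table on the fly. A distance code of 4 or more carries nb = (code-2)>>1 extra bits, and the code's low bit picks the upper or lower half of the range those bits span. A standalone spot-check of that formula:

package main

import "fmt"

// distBase mirrors the arithmetic below: for a distance code >= 4 there are
// nb = (code-2)>>1 extra bits, and the low bit of the code selects which
// half of the covered range the base starts in.
func distBase(code uint32) (base, extraBits uint32) {
	if code < 4 {
		return code + 1, 0
	}
	nb := (code - 2) >> 1
	return 1<<(nb+1) + 1 + (code&1)<<nb, nb
}

func main() {
	// Spot-check against RFC 1951, section 3.2.5:
	// codes 4..7 have bases 5, 7, 9, 13; code 29 has base 24577 (13 extra bits).
	for _, c := range []uint32{4, 5, 6, 7, 29} {
		base, nb := distBase(c)
		fmt.Println(c, base, nb)
	}
}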
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesBuffer // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
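On the copyHistory block above: it performs the LZ77 backward copy, and a distance smaller than the remaining length is legal, in which case the copy overlaps bytes it has just produced. A minimal sketch of that rule (writeCopy and tryWriteCopy implement the same semantics against the ring buffer):

package main

import "fmt"

// lzCopy applies the backward-copy rule used by copyHistory: each new byte
// may come from output produced earlier in the same copy, so a distance
// smaller than the length repeats the most recent bytes.
func lzCopy(hist []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		hist = append(hist, hist[len(hist)-dist])
	}
	return hist
}

func main() {
	fmt.Println(string(lzCopy([]byte("ab"), 2, 6))) // abababab
}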
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBytesReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bufio.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanBufioReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBufioReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*strings.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanStringsReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = huffmanGenericReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanGenericReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+	switch f.r.(type) {
+	case *bytes.Buffer:
+		f.huffmanBytesBuffer()
+	case *bytes.Reader:
+		f.huffmanBytesReader()
+	case *bufio.Reader:
+		f.huffmanBufioReader()
+	case *strings.Reader:
+		f.huffmanStringsReader()
+	case Reader:
+		f.huffmanGenericReader()
+	default:
+		f.huffmanGenericReader()
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 000000000..703b9a89a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,241 @@
+package flate
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math/bits"
+)
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL1 struct {
+	fastGen
+	table [tableSize]tableEntry
+}
+
+// EncodeL1 uses a similar algorithm to level 1
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			candidate = e.table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, tableBits, hashBytes)
+
+			offset := s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			offset = s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+			cv = now
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset - e.cur
+			var l = int32(4)
+			if false {
+				l = e.matchlenLong(s+4, t+4, src) + 4
+			} else {
+				// inlined:
+				a := src[s+4:]
+				b := src[t+4:]
+				for len(a) >= 8 {
+					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+						l += int32(bits.TrailingZeros64(diff) >> 3)
+						break
+					}
+					l += 8
+					a = a[8:]
+					b = b[8:]
+				}
+				if len(a) < 8 {
+					b = b[:len(a)]
+					for i := range a {
+						if a[i] != b[i] {
+							break
+						}
+						l++
+					}
+				}
+			}
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			if false {
+				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			} else {
+				// Inlined...
+				xoffset := uint32(s - t - baseMatchOffset)
+				xlength := l
+				oc := offsetCode(xoffset)
+				xoffset |= oc << 16
+				for xlength > 0 {
+					xl := xlength
+					if xl > 258 {
+						if xl > 258+baseMatchLength {
+							xl = 258
+						} else {
+							xl = 258 - baseMatchLength
+						}
+					}
+					xlength -= xl
+					xl -= baseMatchLength
+					dst.extraHist[lengthCodes1[uint8(xl)]]++
+					dst.offHist[oc]++
+					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+					dst.n++
+				}
+			}
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
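On the `// inlined:` branch above: it measures the match length eight bytes at a time, because the XOR of two little-endian 64-bit loads is zero exactly when all eight bytes agree, and the lowest set bit of a nonzero XOR sits in the first differing byte. A standalone version of the same idea, not the package's implementation:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen8 counts matching leading bytes of a and b, 8 bytes at a time.
func matchLen8(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// With little-endian loads, the lowest set bit of the XOR
			// belongs to the first byte that differs.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Finish byte by byte on the short tail.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen8([]byte("deflate-stream!!"), []byte("deflate-strings!"))) // 11
}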
+ x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, tableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashLen(x, tableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + cv = x >> 8 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 000000000..876dfbe30 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,214 @@ +package flate + +import "fmt" + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, bTableBits, hashBytes) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, bTableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... 
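Stepping back to the candidate loop above: `nextS = s + doEvery + (s-nextEmit)>>skipLog` makes the scan accelerate through data that is not matching, since the hop grows with the distance from the last emit. A small standalone table of the resulting step sizes (skipLog = 5 and doEvery = 2, as at this level):

package main

import "fmt"

func main() {
	const skipLog, doEvery = 5, 2
	nextEmit := int32(0)
	// The farther s has run past the last emit, the bigger the hop:
	for _, s := range []int32{0, 32, 64, 128, 320, 640} {
		fmt.Println(s, doEvery+(s-nextEmit)>>skipLog) // 2, 3, 4, 6, 12, 22
	}
}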
+ cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, i) + nextHash := hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} + currHash := hashLen(x>>16, bTableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { + cv = x >> 24 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
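One aside on the lookups above: hashLen keeps only the low hashBytes bytes of the 64-bit load, multiplies by a large odd constant, and takes the top tableBits bits as the table index. The sketch below shows that shape with an assumed mixing constant; it is illustrative, not the package's implementation:

package main

import "fmt"

// hash5 sketches the shape of hashLen(cv, tableBits, 5): keep only the low
// 5 bytes, multiply by a large odd constant, and keep the top tableBits bits.
func hash5(u uint64, tableBits uint) uint32 {
	const prime = 0x9E3779B185EBCA87 // assumed mixing constant
	return uint32((u << (64 - 40)) * prime >> (64 - tableBits))
}

func main() {
	// Inputs differing in a single byte land in unrelated buckets of a 2^14 table.
	fmt.Println(hash5(0x1122334455, 14), hash5(0x1122334456, 14))
}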
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 000000000..7aa2b72a1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,241 @@ +package flate + +import "fmt" + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [1 << 16]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 7 + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load6432(src, nextS) + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} + + // Check both candidates + candidate = candidates.Cur + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { + break + } + // Both match and are valid, pick longest. + offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. 
Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t}, + } + } + goto emitRemainder + } + + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} + } + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2}, + } + x >>= 8 + prevHash = hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1}, + } + x >>= 8 + currHash := hashLen(x, tableBits, hashBytes) + candidates := e.table[currHash] + cv = x + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur}, + } + + // Check both candidates + candidate = candidates.Cur + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue + } + } + cv = x >> 8 + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
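On level 3's table discipline above: each bucket holds the two most recent positions (tableEntryPrev's Cur and Prev), an insert shifts Cur into Prev, and the search tries both candidates and keeps whichever extends furthest. A toy illustration, with a naive matchLen standing in for the package's:

package main

import "fmt"

// bucket keeps the two most recent positions for one hash value.
type bucket struct{ cur, prev int }

func (b *bucket) insert(pos int) { b.prev, b.cur = b.cur, pos }

func matchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	src := []byte("abcd_abcX_abcdefg")
	var b bucket
	b.insert(0) // "abcd_..." seen at position 0
	b.insert(5) // "abcX_..." seen at position 5
	s := 10     // now encoding "abcdefg"
	l1 := matchLen(src[s:], src[b.cur:])  // newest candidate: "abc" -> 3
	l2 := matchLen(src[s:], src[b.prev:]) // older candidate:  "abcd" -> 4
	fmt.Println(l1, l2) // 3 4: the older candidate wins here
}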
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 000000000..23c08b325 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,221 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 000000000..1f61ec182 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,708 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
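On the rebase rule just below: table offsets are stored relative to e.cur, so when e.cur nears bufferReset every live entry is shifted to a fresh origin of maxMatchOffset and anything that fell out of the window is zeroed, preserving all still-usable distances. A small numeric sketch (maxMatchOffset is the 32 KiB DEFLATE window in this package):

package main

import "fmt"

func main() {
	const maxMatchOffset = 1 << 15
	cur := int32(1 << 30) // pretend the running offset has grown huge
	histLen := int32(4096)
	minOff := cur + histLen - maxMatchOffset
	for _, v := range []int32{minOff - 1, minOff + 1, cur + 100} {
		if v <= minOff {
			v = 0 // too far back to ever match again
		} else {
			v = v - cur + maxMatchOffset // same distance from the new origin
		}
		fmt.Println(v) // 0, 4097, 32868
	}
}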
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... 
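+				// Preferring the later, longer match is a net win whenever
+				// ml > l: it costs one extra literal (the byte at s) but
+				// buys a longer back-reference. Both generations of the
+				// bucket are worth trying; Prev is simply the entry that
+				// Cur displaced earlier.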
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
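+		// One 8-byte load at s-1 feeds both updates: the low bytes hash
+		// position s-1 here, and x>>8 becomes cv, which the top of the
+		// next iteration hashes for position s. Two table updates, one
+		// memory load.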
+ x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// fastEncL5Window is a level 5 encoder, +// but with a custom window size. +type fastEncL5Window struct { + hist []byte + cur int32 + maxOffset int32 + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + maxMatchOffset := e.maxOffset + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
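+	// The scan below speeds up through incompressible data: each miss
+	// advances by doEvery + (s-nextEmit)>>skipLog, so with skipLog = 6
+	// the step grows by one for every 64 bytes emitted as literals
+	// since the last match.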
+ cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... + t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
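+			// The candidate was hashed at sAt, where the current match
+			// ends, so stepping its offset back by l lines its start up
+			// with ours. skipBeginning then nudges both sides forward,
+			// tolerating a couple of leading mismatches; if those bytes
+			// do match, the backwards-extension loop below wins them back.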
+ const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} + +// Reset the encoding table. +func (e *fastEncL5Window) Reset() { + // We keep the same allocs, since we are compressing the same block sizes. + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. 
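+	// Bumping e.cur by a full window plus the discarded history puts
+	// every stored offset more than maxOffset behind any future
+	// position, so stale entries simply fail the distance checks and
+	// the tables never need clearing here.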
+ if e.cur <= int32(bufferReset) { + e.cur += e.maxOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +func (e *fastEncL5Window) addBlock(src []byte) int32 { + // check if we have space already + maxMatchOffset := e.maxOffset + + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < int(maxMatchOffset*2) { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > e.maxOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go new file mode 100644 index 000000000..f1e9d98fa --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -0,0 +1,325 @@ +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
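+		// Each bTable bucket keeps two generations: Cur is the newest
+		// entry and Prev is whatever Cur displaced, so Prev is never
+		// newer than Cur. A Cur that fell out of the window therefore
+		// takes Prev with it, while a surviving Cur still needs Prev
+		// range-checked on its own below.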
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... 
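+			// The load3232 comparison above already proved four bytes,
+			// so the scan starts at s+4/t+4 and adds those four back in.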
+ l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end-of-match... + if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] + // Test current + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if off < maxMatchOffset { + if off > 0 && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + // Test next: + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 + if off > 0 && off < maxMatchOffset && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. + for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. 
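+		// The match jumped the encoder from nextS to s, so nothing in
+		// between was indexed. Walking that span in steps of two and
+		// writing long hashes for both i and i+1, but a short hash only
+		// for i, keeps the two-generation long table dense where it
+		// pays off most, at about half the indexing cost.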
+ if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go new file mode 100644 index 000000000..4bd388584 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package flate + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s new file mode 100644 index 000000000..0782b86e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go new file mode 100644 index 000000000..ad5cd814b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package flate + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 000000000..6ed28061b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. + + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 000000000..1b7a2cbd7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,40 @@ +//go:build !amd64 +// +build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
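+	// On these platforms the masks below are deliberate no-ops (all
+	// bits set): Go's shift semantics are fully defined, so nothing
+	// needs masking. The amd64 variants use register_size-1 instead,
+	// so that a pattern like
+	//
+	//	v >> (shift & regSizeMaskUint32)
+	//
+	// tells the compiler the shift amount is in range and lets it drop
+	// the extra bounds handling, matching x86's shift-mod-size behaviour.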
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go new file mode 100644 index 000000000..f3d4139ef --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -0,0 +1,318 @@ +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + // dictionary will be taken from maxStatelessBlock, so limit it. + maxStatelessDict = 8 << 10 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true, nil) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false, nil) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. +// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows compressing directly to a Writer without retaining state. +// When returning everything will be flushed. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. +// Longer dictionaries will be truncated and will still produce valid output. +// Sending nil dictionary is perfectly fine. +func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... + bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
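+	// Only the first block pays for a copy: the caller's dict and the
+	// first chunk are joined into a fresh buffer. Every later block
+	// slices its 8K dictionary straight out of the previous input
+	// (inOrg below), so window and data are already adjacent in memory.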
+ var inDict []byte + + for len(in) > 0 { + todo := in + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] + } + inOrg := in + in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } + // Compress + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(uncompressed), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) + } else { + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil + dst.Reset() + } + if bw.err != nil { + return bw.err + } + } + if !eof { + // Align, only a stored block can do that. + bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte, startAt int16) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src)-int(startAt) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = 0 + return + } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } + + s := startAt + 1 + nextEmit := startAt + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
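+	// Blocks are capped at maxStatelessBlock (math.MaxInt16), so every
+	// position fits in an int16 and the whole 8K-entry table can live
+	// in this call's frame rather than in retained state, which is the
+	// point of being stateless.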
+ cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset + l := int16(matchLen(src[s+4:], src[t+4:]) + 4) + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + // Save the match found + dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6416(src, s-2) + o := s - 2 + prevHash := hashSL(uint32(x)) + table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashSL(uint32(x)) + candidate = table[currHash] + table[currHash] = tableEntry{offset: o + 2} + + if uint32(x) != load3216(src, candidate.offset) { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 000000000..d818790c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,379 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits + lengthShift = 22 + offsetMask = 1<maxnumlit + offHist [32]uint16 // offset codes + litHist [256]uint16 // codes 0->255 + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize + tokens [maxStoreBlockSize + 1]token +} + +func (t *tokens) Reset() { + if t.n == 0 { + return + } + t.n = 0 + t.nFilled = 0 + for i := range t.litHist[:] { + t.litHist[i] = 0 + } + for i := range t.extraHist[:] { + t.extraHist[i] = 0 + } + for i := range t.offHist[:] { + t.offHist[i] = 0 + } +} + +func (t *tokens) Fill() { + if t.n == 0 { + return + } + for i, v := range t.litHist[:] { + if v == 0 { + t.litHist[i] = 1 + t.nFilled++ + } + } + for i, v := range t.extraHist[:literalCount-256] { + if v == 0 { + t.nFilled++ + t.extraHist[i] = 1 + } + } + for i, v := range t.offHist[:offsetCodeCount] { + if v == 0 { + t.offHist[i] = 1 + } + } +} + +func indexTokens(in []token) tokens { + var t tokens + t.indexTokens(in) + return t +} + +func (t *tokens) indexTokens(in []token) { + t.Reset() + for _, tok := range in { + if tok < matchType { + t.AddLiteral(tok.literal()) + continue + } + t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) + } +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + for _, v := range lit { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } +} + +func (t *tokens) AddLiteral(lit byte) { + t.tokens[t.n] = token(lit) + t.litHist[lit]++ + t.n++ +} + +// from https://stackoverflow.com/a/28730362 +func mFastLog2(val float32) float32 { + ux := int32(math.Float32bits(val)) + log2 := (float32)(((ux >> 23) & 255) - 128) + ux &= -0x7f800001 + ux += 127 << 23 + uval := math.Float32frombits(uint32(ux)) + log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 + return log2 +} + +// EstimatedBits will return an minimum size estimated by an *optimal* +// compression of the block. +// The size of the block +func (t *tokens) EstimatedBits() int { + shannon := float32(0) + bits := int(0) + nMatches := 0 + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) + for _, v := range t.litHist[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } + } + // Just add 15 for EOB + shannon += 15 + for i, v := range t.extraHist[1 : literalCount-256] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(lengthExtraBits[i&31]) * int(v) + nMatches += int(v) + } + } + } + if nMatches > 0 { + invTotal := 1.0 / float32(nMatches) + for i, v := range t.offHist[:offsetCodeCount] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(offsetExtraBits[i&31]) * int(v) + } + } + } + return int(shannon) + bits +} + +// AddMatch adds a match to the tokens. +// This function is very sensitive to inlining and right on the border. 
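+// A match token packs three fields into one uint32, per the layout at
+// the top of this file: xlength (length - MIN_MATCH_LENGTH) above bit
+// 22, the offset code in bits 16-21, and xoffset in the low bits, so
+// the code below folds the offset code in with xoffset |= oCode << 16
+// before shifting the length into place.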
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { + if debugDeflate { + if xlength >= maxMatchLength+baseMatchLength { + panic(fmt.Errorf("invalid length: %v", xlength)) + } + if xoffset >= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oCode := offsetCode(xoffset) + xoffset |= oCode << 16 + + t.extraHist[lengthCodes1[uint8(xlength)]]++ + t.offHist[oCode&31]++ + t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + // We need to have at least baseMatchLength left over for next loop. + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + t.extraHist[lengthCodes1[uint8(xl)]]++ + t.offHist[oc&31]++ + t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } + +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if false { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off&255] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[(off>>7)&255] + 14 + } else { + return offsetCodes[(off>>14)&255] + 28 + } + } + if off < uint32(len(offsetCodes)) { + return offsetCodes[uint8(off)] + } + return offsetCodes14[uint8(off>>7)] +} diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go new file mode 100644 index 000000000..00a0a2c38 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go @@ -0,0 +1,380 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gzip implements reading and writing of gzip format compressed files, +// as specified in RFC 1952. +package gzip + +import ( + "bufio" + "compress/gzip" + "encoding/binary" + "hash/crc32" + "io" + "time" + + "github.com/klauspost/compress/flate" +) + +const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + flagText = 1 << 0 + flagHdrCrc = 1 << 1 + flagExtra = 1 << 2 + flagName = 1 << 3 + flagComment = 1 << 4 +) + +var ( + // ErrChecksum is returned when reading GZIP data that has an invalid checksum. + ErrChecksum = gzip.ErrChecksum + // ErrHeader is returned when reading GZIP data that has an invalid header. + ErrHeader = gzip.ErrHeader +) + +var le = binary.LittleEndian + +// noEOF converts io.EOF to io.ErrUnexpectedEOF. +func noEOF(err error) error { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err +} + +// The gzip file stores a header giving metadata about the compressed file. +// That header is exposed as the fields of the Writer and Reader structs. +// +// Strings must be UTF-8 encoded and may only contain Unicode code points +// U+0001 through U+00FF, due to limitations of the GZIP file format. +type Header struct { + Comment string // comment + Extra []byte // "extra data" + ModTime time.Time // modification time + Name string // file name + OS byte // operating system type +} + +// A Reader is an io.Reader that can be read to retrieve +// uncompressed data from a gzip-format compressed file. +// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. 
Reads from the Reader +// return the concatenation of the uncompressed data of each. +// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header // valid after NewReader or Reader.Reset + r flate.Reader + br *bufio.Reader + decompressor io.ReadCloser + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + buf [512]byte + err error + multistream bool +} + +// NewReader creates a new Reader reading the given reader. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// +// It is the caller's responsibility to call Close on the Reader when done. +// +// The Reader.Header fields will be valid in the Reader returned. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + if err := z.Reset(r); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + *z = Reader{ + decompressor: z.decompressor, + multistream: true, + br: z.br, + } + if rr, ok := r.(flate.Reader); ok { + z.r = rr + } else { + // Reuse if we can. + if z.br != nil { + z.br.Reset(r) + } else { + z.br = bufio.NewReader(r) + } + z.r = z.br + } + z.Header, z.err = z.readHeader() + return z.err +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// readString reads a NUL-terminated string from z.r. +// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and +// will output a string encoded using UTF-8. +// This method always updates z.digest with the data read. +func (z *Reader) readString() (string, error) { + var err error + needConv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needConv = true + } + if z.buf[i] == 0 { + // Digest covers the NUL terminator. 
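+			// RFC 1952's optional header CRC (FHCRC) covers every
+			// header byte, terminators included, which is why the
+			// update below takes z.buf[:i+1] rather than z.buf[:i].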
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) + + // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). + if needConv { + s := make([]rune, 0, i) + for _, v := range z.buf[:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[:i]), nil + } + } +} + +// readHeader reads the GZIP header according to section 2.3.1. +// This method does not set z.err. +func (z *Reader) readHeader() (hdr Header, err error) { + if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { + // RFC 1952, section 2.2, says the following: + // A gzip file consists of a series of "members" (compressed data sets). + // + // Other than this, the specification does not clarify whether a + // "series" is defined as "one or more" or "zero or more". To err on the + // side of caution, Go interprets this to mean "zero or more". + // Thus, it is okay to return io.EOF here. + return hdr, err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return hdr, ErrHeader + } + flg := z.buf[3] + hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) + // z.buf[8] is XFL and is currently ignored. + hdr.OS = z.buf[9] + z.digest = crc32.ChecksumIEEE(z.buf[:10]) + + if flg&flagExtra != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) + data := make([]byte, le.Uint16(z.buf[:2])) + if _, err = io.ReadFull(z.r, data); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, data) + hdr.Extra = data + } + + var s string + if flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Name = s + } + + if flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Comment = s + } + + if flg&flagHdrCrc != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + digest := le.Uint16(z.buf[:2]) + if digest != uint16(z.digest) { + return hdr, ErrHeader + } + } + + // Reserved FLG bits must be zero. + if flg>>5 != 0 { + return hdr, ErrHeader + } + + z.digest = 0 + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return hdr, nil +} + +// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + + for n == 0 { + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. 
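+		// With multistream enabled, a clean trailer is not the end:
+		// digest and size were just reset, and readHeader gets a chance
+		// to find another member. io.EOF from readHeader here means the
+		// input stopped exactly on a member boundary, the one place EOF
+		// is legitimate.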
+ if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } + } + + return n, nil +} + +type crcer interface { + io.Writer + Sum32() uint32 + Reset() +} +type crcUpdater struct { + z *Reader +} + +func (c *crcUpdater) Write(p []byte) (int, error) { + c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p) + return len(p), nil +} + +func (c *crcUpdater) Sum32() uint32 { + return c.z.digest +} + +func (c *crcUpdater) Reset() { + c.z.digest = 0 +} + +// WriteTo support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + crcWriter := crcer(crc32.NewIEEE()) + if z.digest != 0 { + crcWriter = &crcUpdater{z: z} + } + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. + mw := io.MultiWriter(w, crcWriter) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return total, err + } + z.digest = crcWriter.Sum32() + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return total, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return total, nil + } + crcWriter.Reset() + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +// In order for the GZIP checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 000000000..5bc720593 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,290 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "hash/crc32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly + + // StatelessCompression will do compression but without maintaining any state + // between Write calls. + // There will be no memory kept between Write calls, + // but compression and speed will be suboptimal. + // Because of this, the size of actual Write calls will affect output size. + StatelessCompression = -3 +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. 
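+//
+// A minimal use, sketched (error handling elided):
+//
+//	var buf bytes.Buffer
+//	zw := NewWriter(&buf)
+//	zw.Name = "greeting.txt" // optional header metadata
+//	zw.Write([]byte("hello, gzip"))
+//	zw.Close() // flushes and writes the trailer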
+type Writer struct { + Header // written at first call to Write, Flush, or Close + w io.Writer + level int + err error + compressor *flate.Writer + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + wroteHeader bool + closed bool + buf [10]byte +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write, Flush, or Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < StatelessCompression || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow. +const MinCustomWindowSize = flate.MinCustomWindowSize + +// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow. +const MaxCustomWindowSize = flate.MaxCustomWindowSize + +// NewWriterWindow returns a new Writer compressing data with a custom window size. +// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize. +func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) { + if windowSize < MinCustomWindowSize { + return nil, errors.New("gzip: requested window size less than MinWindowSize") + } + if windowSize > MaxCustomWindowSize { + return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize") + } + + z := new(Writer) + z.init(w, -windowSize) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + compressor := z.compressor + if level != StatelessCompression { + if compressor != nil { + compressor.Reset(w) + } + } + + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + le.PutUint16(z.buf[:2], uint16(len(b))) + _, err := z.w.Write(z.buf[:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. 
+ needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. + if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + + if z.compressor == nil && z.level != StatelessCompression { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + if z.level == StatelessCompression { + return len(p), flate.StatelessDeflate(z.w, p, false, nil) + } + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if z.err != nil { + return z.err + } + if z.closed || z.level == StatelessCompression { + return nil + } + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. 
+func (z *Writer) Close() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + if z.level == StatelessCompression { + z.err = flate.StatelessDeflate(z.w, nil, true, nil) + } else { + z.err = z.compressor.Close() + } + if z.err != nil { + return z.err + } + le.PutUint32(z.buf[:4], z.digest) + le.PutUint32(z.buf[4:8], z.size) + _, z.err = z.w.Write(z.buf[:8]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go new file mode 100644 index 000000000..affbbbb59 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/norace.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package race + +func ReadSlice[T any](s []T) { +} + +func WriteSlice[T any](s []T) { +} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go new file mode 100644 index 000000000..f5e240dcd --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/race.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package race + +import ( + "runtime" + "unsafe" +) + +func ReadSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} + +func WriteSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore new file mode 100644 index 000000000..3a89c6e3e --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/.gitignore @@ -0,0 +1,15 @@ +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE new file mode 100644 index 000000000..1d2d645bd --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
new file mode 100644
index 000000000..8284bb081
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -0,0 +1,1120 @@
+# S2 Compression
+
+S2 is an extension of [Snappy](https://github.com/google/snappy).
+
+S2 is aimed at high throughput, which is why it features concurrent compression for bigger payloads.
+
+Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
+This means that S2 can seamlessly replace Snappy without converting compressed content.
+
+S2 can produce Snappy compatible output, faster and better than Snappy.
+If you want the full benefit of the changes, you should use S2 without Snappy compatibility.
+
+S2 is designed to have high throughput on content that cannot be compressed.
+This is important, so you don't have to worry about spending CPU cycles on already compressed data.
+
+## Benefits over Snappy
+
+* Better compression
+* Adjustable compression (3 levels)
+* Concurrent stream compression
+* Faster decompression, even for Snappy compatible content
+* Concurrent Snappy/S2 stream decompression
+* Skip forward in compressed stream
+* Random seeking with indexes
+* Compatible with reading Snappy compressed content
+* Smaller block size overhead on incompressible blocks
+* Block concatenation
+* Block Dictionary support
+* Uncompressed stream mode
+* Automatic stream size padding
+* Snappy compatible block compression
+
+## Drawbacks over Snappy
+
+* Not optimized for 32 bit systems
+* Streams use slightly more memory due to larger blocks and concurrency (configurable)
+
+# Usage
+
+Installation: `go get -u github.com/klauspost/compress/s2`
+
+Full package documentation:
+
+[![godoc][1]][2]
+
+[1]: https://godoc.org/github.com/klauspost/compress?status.svg
+[2]: https://godoc.org/github.com/klauspost/compress/s2
+
+## Compression
+
+```Go
+func EncodeStream(src io.Reader, dst io.Writer) error {
+	enc := s2.NewWriter(dst)
+	_, err := io.Copy(enc, src)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```
+
+You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete.
+
+For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
+
+The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
+It is possible to flush any buffered data using the `Flush()` method.
+This will block until all data sent to the encoder has been written to the output.
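+
+For illustration, a rough sketch of that reuse pattern; `encodeAll`, `inputs` and `outputs` are hypothetical names, not part of the package:
+
+```Go
+func encodeAll(inputs []io.Reader, outputs []io.Writer) error {
+	var enc *s2.Writer
+	for i, r := range inputs {
+		if enc == nil {
+			enc = s2.NewWriter(outputs[i])
+		} else {
+			// Rebind the encoder instead of allocating a new one.
+			enc.Reset(outputs[i])
+		}
+		if _, err := io.Copy(enc, r); err != nil {
+			enc.Close()
+			return err
+		}
+		// Close flushes buffered data and finishes the stream.
+		if err := enc.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```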
+
+S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
+
+Finally, if you have a single block of data you would like to have encoded as a stream,
+a slightly more efficient method is to use `EncodeBuffer`.
+This will take ownership of the buffer until the stream is closed.
+
+```Go
+func EncodeStream(src []byte, dst io.Writer) error {
+	enc := s2.NewWriter(dst)
+	// The encoder owns the buffer until Flush or Close is called.
+	err := enc.EncodeBuffer(src)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```
+
+Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
+so it should only be used a single time per stream.
+If you need to write several blocks, you should use the regular io.Writer interface.
+
+
+## Decompression
+
+```Go
+func DecodeStream(src io.Reader, dst io.Writer) error {
+	dec := s2.NewReader(src)
+	_, err := io.Copy(dst, dec)
+	return err
+}
+```
+
+Similar to the Writer, a Reader can be reused using the `Reset` method.
+
+For the best possible throughput when encoding, there is an `EncodeBuffer(buf []byte)` method available on the `Writer`.
+However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
+
+For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
+Do however note that these functions (similar to Snappy) do not provide validation of data,
+so data corruption may go undetected. Stream encoding provides CRC checks of data.
+
+It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
+For big skips, the decompressor is able to skip blocks without decompressing them.
+
+## Single Blocks
+
+Similar to Snappy, S2 offers single block compression.
+Blocks do not offer the same flexibility and safety as streams,
+but may be preferable for very small payloads, less than 100K.
+
+Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
+It is possible to provide a destination buffer.
+If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
+If not, a new one will be allocated.
+
+Alternatively, `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
+
+Similarly, to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
+Again, an optional destination buffer can be supplied.
+The `s2.DecodedLen(src)` can be used to get the minimum capacity needed.
+If that is not satisfied, a new buffer will be allocated.
+
+Block functions always operate on a single goroutine since they should only be used for small payloads.
+
+# Commandline tools
+
+Some very simple command-line tools are provided; `s2c` for compression and `s2d` for decompression.
+
+Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
+
+Installing them requires Go to be installed. To install them, use:
+
+`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest`
+
+To build binaries to the current folder, use:
+
+`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
+
+
+## s2c
+
+```
+Usage: s2c [options] file1 file2
+
+Compresses all files supplied as input separately.
+Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
+By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and compressed.
+Only http response code 200 is accepted.
+
+Options:
+  -bench int
+    	Run benchmark n times. No output will be written
+  -blocksize string
+    	Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+    	Compress using this amount of threads (default 32)
+  -faster
+    	Compress faster, but with a minor compression loss
+  -help
+    	Display help
+  -index
+    	Add seek index (default true)
+  -o string
+    	Write output to another file. Single input file only
+  -pad string
+    	Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful compression
+  -safe
+    	Do not overwrite output files
+  -slower
+    	Compress more, but a lot slower
+  -snappy
+    	Generate Snappy compatible output stream
+  -verify
+    	Verify written files
+
+```
+
+## s2d
+
+```
+Usage: s2d [options] file1 file2
+
+Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
+Output file names have the extension removed. By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will decompress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
+Extensions on downloaded files are ignored. Only http response code 200 is accepted.
+
+Options:
+  -bench int
+    	Run benchmark n times. No output will be written
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -help
+    	Display help
+  -o string
+    	Write output to another file. Single input file only
+  -offset string
+    	Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful decompression
+  -safe
+    	Do not overwrite output files
+  -tail string
+    	Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -verify
+    	Verify files, but do not write output
+```
+
+## s2sx: self-extracting archives
+
+s2sx allows creating self-extracting archives with no dependencies.
+
+By default, executables are created for the same platforms as the host OS,
+but this can be overridden with `-os` and `-arch` parameters.
+
+Extracted files have 0666 permissions, except when the untar option is used.
+
+```
+Usage: s2sx [options] file1 file2
+
+Compresses all files supplied as input separately.
+If files have '.s2' extension they are assumed to be compressed already.
+Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
+If output is big, an additional file with ".more" is written. This must be included as well.
+By default output files will be overwritten.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+Options:
+  -arch string
+    	Destination architecture (default "amd64")
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+    	Compress using this amount of threads (default 32)
+  -help
+    	Display help
+  -max string
+    	Maximum executable size. Rest will be written to another file. (default "1G")
+  -os string
+    	Destination operating system (default "windows")
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful compression
+  -safe
+    	Do not overwrite output files
+  -untar
+    	Untar on destination
+```
+
+Available platforms are:
+
+ * darwin-amd64
+ * darwin-arm64
+ * linux-amd64
+ * linux-arm
+ * linux-arm64
+ * linux-mips64
+ * linux-ppc64le
+ * windows-386
+ * windows-amd64
+
+By default, there is a size limit of 1GB for the output executable.
+
+When this is exceeded, the remaining file content is written to a file called
+output+`.more`. This file must be included and placed alongside the executable
+for a successful extraction.
+
+This file *must* have the same name as the executable, so if the executable is renamed,
+so must the `.more` file.
+
+This functionality is disabled with stdin/stdout.
+
+### Self-extracting TAR files
+
+If you wrap a TAR file, you can specify `-untar` to make it untar on the destination host.
+
+Files are extracted to the current folder with the path specified in the tar file.
+
+Note that tar files are not validated before they are wrapped.
+
+For security reasons, files that move below the root folder are not allowed.
+
+# Performance
+
+This section will focus on comparisons to Snappy.
+This package is solely aimed at replacing Snappy as a high speed compression package.
+If you are mainly looking for better compression, [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
+gives better compression, but typically at speeds slightly below "better" mode in this package.
+
+Compression is increased compared to Snappy, mostly around 5-20%, and throughput is typically 25-40% higher (single threaded) than the Snappy Go implementation.
+
+Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
+
+A "better" compression mode is also available. This allows trading a bit of speed for a minor compression gain.
+The content compressed in this mode is fully compatible with the standard decoder.
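+
+As a minimal sketch, enabling this mode is a one-line change; `w` and `src` are placeholders:
+
+```Go
+// Stream form: trade some speed for compression with a writer option.
+enc := s2.NewWriter(w, s2.WriterBetterCompression())
+
+// Block form: EncodeBetter is the "better" equivalent of Encode.
+compressed := s2.EncodeBetter(nil, src)
+```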
+
+Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
+
+| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
+| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
+| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
+| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
+| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
+| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
+| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
+| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
+| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
+| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
+| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
+| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
+| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
+| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
+| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
+| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
+
+### Legend
+
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How many percent smaller the S2 output is compared to Snappy.
+* `S2 "better"`: Speed of S2 "better" mode compared to Snappy.
+* `"better" throughput`: Throughput of S2 "better" mode in MB/s.
+* `"better" % smaller`: How many percent smaller the S2 "better" output is compared to Snappy.
+
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine generated data gets by far the biggest compression boost, with output size being reduced by up to 35% compared to Snappy.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single-threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode, where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on a single core:
+
+| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
+| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs. Snappy`: Decompression speed relative to Snappy, for the preceding throughput column.
+
+
+While the decompression code hasn't changed, decompression of S2-encoded content is significantly faster.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Decompression is also very fast without assembly. Single goroutine decompression speed, no assembly:
+
+| File | Speedup | Throughput |
+|--------------------------------|---------|--------------|
+| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
+| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
+| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
+| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
+| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
+| enwik9.s2 | 1.67x | 681.53 MB/s |
+| adresser.json.s2 | 3.41x | 4230.53 MB/s |
+| silesia.tar.s2 | 1.52x | 811.58 MB/s |
+
+Even though S2 typically compresses better than Snappy, decompression speed is always better.
+
+### Concurrent Stream Decompression
+
+For full stream decompression, S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
+method that will decode a full stream using multiple goroutines.
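+
+A minimal sketch of its use, decoding the whole stream into a single `io.Writer` (`compressedStream` and `output` are placeholders):
+
+```Go
+dec := s2.NewReader(compressedStream)
+// Use up to GOMAXPROCS goroutines for decompression.
+written, err := dec.DecodeConcurrent(output, runtime.GOMAXPROCS(0))
+```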
+
+Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3`, best of 3:
+
+| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
+|-------|----------|----------|----------|----------|-----------|
+| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
+| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
+| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
+| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
+| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
+
+Scaling can be expected to be pretty linear until memory bandwidth is saturated.
+
+For now, `DecodeConcurrent` can only be used for full streams, without seeking or combining with regular reads.
+
+## Block compression
+
+
+When compressing blocks, no concurrent compression is performed, just as with Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
+In rare, worst-case scenarios, Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable benchmark is a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
+| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
+| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
+| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
+
+S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
+"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
+
+When outputting Snappy compatible output, it still delivers better throughput (150MB/s more) and better compression.
+
+As can be seen from the other benchmarks, decompression should also be easier on the S2 generated output.
+
+Though they cannot be compared due to different decompression speeds, here are the speed/size comparisons for
+other Go compressors:
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
+| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
+| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
+| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
+
+### Standard block compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
+
+| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
+| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
+| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
+| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
+| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
+| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
+| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
+| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
+| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
+
+
+Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since payloads are very small, the variance in size is rather big, so they should only be seen as a general guideline.
+
+Size is on average around Snappy, but varies with content type.
+In cases where compression is worse, it is usually compensated for by a speed boost.
+
+
+### Better compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline and the overall picture is more important.
+
+| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec |
+|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
+| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s |
+| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s |
+| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s |
+| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s |
+| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s |
+| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s |
+| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s |
+| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s |
+| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s |
+| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s |
+| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s |
+| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s |
+| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s |
+| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s |
+
+
+Except for the mostly incompressible JPEG image, compression is better and usually in the
+double digits in terms of percentage reduction over Snappy.
+
+The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
+to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
+
+This mode aims to provide better compression at the expense of speed, and mostly achieves that
+without a huge performance penalty, except on very small blocks.
+
+Decompression speed suffers a little compared to the regular S2 mode,
+but still manages to be close to Snappy in spite of increased compression.
+
+# Best compression mode
+
+S2 offers a "best" compression mode.
+
+This will compress as much as possible with little regard to CPU usage.
+
+It is mainly intended for offline compression, where decompression speed should still
+be high and the output remains compatible with other S2 compressed data.
+
+Some examples compared on 16 core CPU, amd64 assembly used:
+
+```
+* enwik10
+Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
+Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
+Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
+
+* github-june-2days-2019.json
+Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
+
+* nyc-taxi-data-10M.csv
+Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
+
+* 10gb.tar
+Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
+Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
+
+* consensus.db.10gb
+Default... 10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
+
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+either encoding or decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup in the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
+the same usage scenario applies to S2 dictionaries.
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits the dictionary to only be enabled on the first 64KB of a block.
+This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
+
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard, the following sizes can be achieved.
+
+| | Default | Better | Best |
+|--------------------|------------------|------------------|-----------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
+
+So for highly repetitive content, this case provides an almost 3x reduction in size.
+
+For less uniform data we will use the Go source code tree.
+Compressing the first 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
+
+| | Default | Better | Best |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file | 362 bytes | 428 bytes | 472 bytes |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example, in the `github_users_sample_set` above, the average compression only goes up from
+10.49% to 11.48% by using the first file as the dictionary compared to using a dedicated dictionary.
+
+```Go
+	// Read a sample
+	sample, err := os.ReadFile("sample.json")
+
+	// Create a dictionary.
+	dict := s2.MakeDict(sample, nil)
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// To encode:
+	encoded := dict.Encode(nil, file)
+
+	// To decode:
+	decoded, err := dict.Decode(nil, encoded)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful to generate dictionaries for files that don't have a fixed structure.
+
+
+Example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a 64KB dictionary, which can be converted to an S2 dictionary like this:
+
+```Go
+	// Decode the Zstandard dictionary.
+	insp, err := zstd.InspectDictionary(zdict)
+	if err != nil {
+		panic(err)
+	}
+
+	// We are only interested in the contents.
+	// Assume that files start with "// Copyright (c) 2023".
+	// Search for the longest match for that.
+	// This may save a few bytes.
+	dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// We can now encode using this dictionary
+	encodedWithDict := dict.Encode(nil, payload)
+
+	// To decode content:
+	decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
+This can be omitted, at the expense of a few bytes.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy compatible blocks can be generated with the S2 encoder.
+Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, reducing memory usage. Replace:
+
+| Snappy | S2 replacement |
+|---------------------------|-----------------------|
+| snappy.Encode(...) | s2.EncodeSnappy(...) |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed snappy compatible output.
+
+`s2.ConcatBlocks` is compatible with snappy blocks.
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder | Size | MB/s | Reduction |
+|-----------------------|------------|------------|------------|
+| snappy.Encode | 1128706759 | 725.59 | 71.89% |
+| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
+| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
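+
+As a sketch, a drop-in stream encoder could look like this (the function name is illustrative):
+
+```Go
+func compressSnappyCompat(dst io.Writer, src io.Reader) error {
+	// Output is readable by Snappy framed-format decoders.
+	enc := s2.NewWriter(dst, s2.WriterSnappyCompat())
+	if _, err := io.Copy(enc, src); err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```
+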
+All other options are available, but note that the block size limit is different for Snappy.
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
+
+| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+# Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy | S2 replacement |
+|------------------------|--------------------|
+| snappy.Decode(...) | s2.Decode(...) |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...) | s2.NewReader(...) |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing Snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
+
+# Concatenating blocks and streams
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression, it might be usable in certain scenarios.
+The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream, it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add the `WriterAddIndex()` option to your writer.
+Then the index will be added to the stream when `Close()` is called.
+
+```
+	// Add Index to stream...
+	enc := s2.NewWriter(w, s2.WriterAddIndex())
+	io.Copy(enc, r)
+	enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+	// Get index for separate storage...
+	enc := s2.NewWriter(w)
+	io.Copy(enc, r)
+	index, err := enc.CloseIndex()
+```
+
+The `index` can then be used when needing to read from the stream.
+This means the index can be used without needing to seek to the end of the stream
+or for manually forwarding streams. See below.
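+
+As a sketch, both halves can be combined: store the index out-of-band, then use it to seek later (error handling omitted; `compressed` must implement io.Seeker for random seeking, and names are illustrative):
+
+```
+	// Compress and keep the index out-of-band.
+	enc := s2.NewWriter(w)
+	io.Copy(enc, r)
+	index, _ := enc.CloseIndex()
+	os.WriteFile("data.s2.idx", index, 0o644)
+
+	// Later: seek directly in the compressed stream using the stored index.
+	dec := s2.NewReader(compressed)
+	seeker, _ := dec.ReadSeeker(true, index)
+	seeker.Seek(wantOffset, io.SeekStart)
+	io.Copy(out, seeker)
+```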
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
+
+## Using Indexes
+
+To use indexes, there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
+
+Calling ReadSeeker will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified, the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, nil)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This gets a seeker to seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified, which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward-only) seeking, `r` does not have to be an io.Seeker.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(true, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking, `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one are reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either separate or in streams.
+
+In some cases, it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code, it is still possible to use indexes
+to forward to a specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+	var index s2.Index
+	_, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be the offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= wantOffset.
+
+When creating a decoder, it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`,
+we create the decoder like this:
+
+```
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream past the uncompressed bytes we didn't want.
+This is done using the regular "Skip" function:
+
+```
+	err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
+
+# Compact storage
+
+For compact storage, [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header, it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. It returns nil if the headers contain errors.
+
+## Index Format:
+
+Each block is structured as a snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed-size little-endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with an un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content | Format |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte` | Always 0x99. |
+| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
+| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
+| UncompressedSize, Varint | Total Uncompressed size. |
+| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
+| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
+| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
+| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
+| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
+| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
+| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams, the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact, there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries:
+
+```
+// Read Uncompressed entries.
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+	uOff = 0
+	if HasUncompressedOffsets == 1 {
+		uOff = ReadVarInt // Read value from stream
+	}
+
+	// Except for the first entry, use previous values.
+	if entryNum == 0 {
+		entry[entryNum].UncompressedOffset = uOff
+		continue
+	}
+
+	// Uncompressed uses previous offset and adds EstBlockSize
+	entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+	cOff = ReadVarInt // Read value from stream
+
+	// Except for the first entry, use previous values.
+	if entryNum == 0 {
+		entry[entryNum].CompressedOffset = cOff
+		continue
+	}
+
+	// Compressed uses previous and our estimate.
+	entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+	// Adjust compressed offset for next loop, integer truncating division must be used.
+	CompressGuess += cOff/2
+}
+```
+
+To decode from any given uncompressed offset (`wantOffset`):
+
+* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
+* Start decoding from `entry[n-1].CompressedOffset`.
+* Discard `wantOffset - entry[n-1].UncompressedOffset` bytes from the decoded stream.
+
+See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
+
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length in the tag and decoding it using this table:
+
+| Length | Actual Length |
+|--------|----------------------|
+| 0 | 4 |
+| 1 | 5 |
+| 2 | 6 |
+| 3 | 7 |
+| 4 | 8 |
+| 5 | 8 + read 1 byte |
+| 6 | 260 + read 2 bytes |
+| 7 | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies.
+
+Lengths are stored as little-endian values.
+
+The first copy of a block cannot be a repeat offset, and the offset is reset on every block in streams.
+
+The default streaming block size is 1MB.
+
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup in the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that, the dictionary contains values that can be used as back-references.
+
+Often used data should be placed at the *end* of the dictionary, since offsets < 2048 bytes will be smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
+
+The first copy of a block cannot be a repeat offset, and the offset is reset on every block in streams.
+
+Default streaming block size is 1MB.
+
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup in the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that, the dictionary contains values that can be used as back-references.
+
+Often used data should be placed at the *end* of the dictionary, since offsets < 2048 bytes will be encoded smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+The dictionary content is preceded by an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
+## Encoding
+
+From the decoder's point of view, the dictionary content is seen as preceding the encoded content.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary, the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+When 64KB (65536 bytes) has been en/decoded, it is no longer allowed to reference the dictionary,
+neither by copy nor by repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks whether the supplied dictionary is the correct one for a block.
+Because of this, there is no overhead to using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+Initial repeat offset is set at 10, which is the letter `2`.
+
+Encoded: `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams, each block can use the dictionary.
+
+The dictionary cannot currently be provided on the stream.
+
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 000000000..264ffd0a9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,443 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/klauspost/compress/internal/race"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("s2: corrupt input")
+	// ErrCRC reports that the input failed CRC validation (streams only).
+	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("s2: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
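+// It only reads the leading varint length header; the compressed payload itself is not validated.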
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + + race.WriteSlice(dst) + race.ReadSlice(src[s:]) + + if s2Decode(dst, src[s:]) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2DecodeDict(dst, src []byte, dict *Dict) int { + if dict == nil { + return s2Decode(dst, src) + } + const debug = false + const debugErrs = debug + + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := len(dict.dict) - dict.repeat + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. 
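+				// (in[0] is the tag byte itself; the final shift discards it.)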
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + startOff := len(dict.dict) - offset + d + if startOff < 0 || startOff+length > len(dict.dict) { + if debugErrs { + fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict)) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff) + } + copy(dst[d:d+length], dict.dict[startOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + rOff := len(dict.dict) - (offset - d) + if debug { + fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff) + } + if rOff+length > len(dict.dict) { + if debugErrs { + fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + if rOff < 0 { + if debugErrs { + fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + copy(dst[d:d+length], dict.dict[rOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + if debugErrs { + fmt.Println("wanted length", len(dst), "got", d) + } + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s new file mode 100644 index 000000000..9b105e03c --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s @@ -0,0 +1,568 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 AX +#define R_TMP1 BX +#define R_LEN CX +#define R_OFF DX +#define R_SRC SI +#define R_DST DI +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x (shared) +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $48-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DST + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SRC + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + XORQ R_OFF, R_OFF + +loop: + // for s < len(src) + CMPQ R_SRC, R_SEND + JEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (R_SRC), R_LEN + MOVL R_LEN, R_TMP1 + ANDL $3, R_TMP1 + CMPL R_TMP1, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, R_LEN + CMPL R_LEN, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + INCQ R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVQ R_DEND, R_TMP0 + SUBQ R_DST, R_TMP0 + MOVQ R_SEND, R_TMP1 + SUBQ R_SRC, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ R_LEN, $16 + JGT callMemmove + CMPQ R_TMP0, $16 + JLT callMemmove + CMPQ R_TMP1, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
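+	// (MOVOU is an unaligned 16-byte SSE load/store, so no alignment is required here.)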
+ MOVOU 0(R_SRC), X0 + MOVOU X0, 0(R_DST) + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ R_LEN, R_TMP0 + JGT errCorrupt + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ R_DST, 0(SP) + MOVQ R_SRC, 8(SP) + MOVQ R_LEN, 16(SP) + MOVQ R_DST, 24(SP) + MOVQ R_SRC, 32(SP) + MOVQ R_LEN, 40(SP) + MOVQ R_OFF, 48(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVQ 24(SP), R_DST + MOVQ 32(SP), R_SRC + MOVQ 40(SP), R_LEN + MOVQ 48(SP), R_OFF + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ R_LEN, R_SRC + SUBQ $58, R_SRC + CMPQ R_SRC, R_SEND + JA errCorrupt + + // case x == 60: + CMPL R_LEN, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(R_SRC), R_LEN + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(R_SRC), R_LEN + JMP doLit + +tagLit62Plus: + CMPL R_LEN, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + // We read one byte, safe to read one back, since we are just reading tag. + // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(R_SRC), R_LEN + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(R_SRC), R_OFF + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(R_SRC), R_OFF + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMPQ R_TMP1, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // length = 4 + int(src[s-2])>>2&0x7 + MOVBQZX -1(R_SRC), R_TMP1 + MOVQ R_LEN, R_TMP0 + SHRQ $2, R_LEN + ANDQ $0xe0, R_TMP0 + ANDQ $7, R_LEN + SHLQ $3, R_TMP0 + ADDQ $4, R_LEN + ORQ R_TMP1, R_TMP0 + + // check if repeat code, ZF set by ORQ. + JZ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (length) + MOVQ R_TMP0, R_OFF + JMP doCopy + +// This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMPQ R_LEN, $9 + JL doCopyRepeat + + // Read additional bytes for length. + JE repeatLen1 + + // Rare, so the extra branch shouldn't hurt too much. + CMPQ R_LEN, $10 + JE repeatLen2 + JMP repeatLen3 + +// Read repeat lengths. +repeatLen1: + // s ++ + ADDQ $1, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = src[s-1] + 8 + MOVBQZX -1(R_SRC), R_LEN + ADDL $8, R_LEN + JMP doCopyRepeat + +repeatLen2: + // s +=2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) + MOVWQZX -2(R_SRC), R_LEN + ADDL $260, R_LEN + JMP doCopyRepeat + +repeatLen3: + // s +=3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) + // Read one byte further back (just part of the tag, shifted out) + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + ADDL $65540, R_LEN + JMP doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset + + // if d < offset { etc } + MOVQ R_DST, R_TMP1 + SUBQ R_DBASE, R_TMP1 + CMPQ R_TMP1, R_OFF + JLT errCorrupt + + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + // if offset <= 0 { etc } + CMPQ R_OFF, $0 + JLE errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R_DEND, R_TMP1 + SUBQ R_DST, R_TMP1 + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVQ R_DEND, R_TMP2 + SUBQ R_DST, R_TMP2 + MOVQ R_DST, R_TMP3 + SUBQ R_OFF, R_TMP3 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ R_LEN, $16 + JGT slowForwardCopy + CMPQ R_OFF, $8 + JLT slowForwardCopy + CMPQ R_TMP2, $16 + JLT slowForwardCopy + MOVQ 0(R_TMP3), R_TMP0 + MOVQ R_TMP0, 0(R_DST) + MOVQ 8(R_TMP3), R_TMP1 + MOVQ R_TMP1, 8(R_DST) + ADDQ R_LEN, R_DST + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//	goto verySlowForwardCopy
+	// }
+	SUBQ $10, R_TMP2
+	CMPQ R_LEN, R_TMP2
+	JGT  verySlowForwardCopy
+
+	// We want to keep the offset, so we use R_TMP2 from here.
+	MOVQ R_OFF, R_TMP2
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//	copy 8 bytes from dst[d-offset:] to dst[d:]
+	//	length -= offset
+	//	d += offset
+	//	offset += offset
+	//	// The two previous lines together means that d-offset, and therefore
+	//	// R_TMP3, is unchanged.
+	// }
+	CMPQ R_TMP2, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R_TMP3), R_TMP1
+	MOVQ R_TMP1, (R_DST)
+	SUBQ R_TMP2, R_LEN
+	ADDQ R_TMP2, R_DST
+	ADDQ R_TMP2, R_TMP2
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by R_DST being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ R_DST, R_TMP0
+	ADDQ R_LEN, R_DST
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ R_LEN, $0
+	JLE  loop
+	MOVQ (R_TMP3), R_TMP1
+	MOVQ R_TMP1, (R_TMP0)
+	ADDQ $8, R_TMP3
+	ADDQ $8, R_TMP0
+	SUBQ $8, R_LEN
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0.
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + INCQ R_TMP3 + INCQ R_DST + DECQ R_LEN + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ R_DST, R_DEND + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s new file mode 100644 index 000000000..78e463f34 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -0,0 +1,574 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 R2 +#define R_TMP1 R3 +#define R_LEN R4 +#define R_OFF R5 +#define R_SRC R6 +#define R_DST R7 +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// TEST_SRC will check if R_SRC is <= SRC_END +#define TEST_SRC() \ + CMP R_SEND, R_SRC \ + BGT errCorrupt + +// MOVD R_SRC, R_TMP1 +// SUB R_SBASE, R_TMP1, R_TMP1 +// CMP R_SLEN, R_TMP1 +// BGT errCorrupt + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $56-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DST + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SRC + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + MOVD $0, R_OFF + +loop: + // for s < len(src) + CMP R_SEND, R_SRC + BEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R_SRC), R_LEN + MOVW R_LEN, R_TMP1 + ANDW $3, R_TMP1 + MOVW $1, R1 + CMPW R1, R_TMP1 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R_LEN, R_LEN + CMPW R_LEN, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R_SRC, R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. 
+ // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + ADD $1, R_LEN, R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVD R_DEND, R_TMP0 + SUB R_DST, R_TMP0, R_TMP0 + MOVD R_SEND, R_TMP1 + SUB R_SRC, R_TMP1, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R_LEN + BGT callMemmove + CMP $16, R_TMP0 + BLT callMemmove + CMP $16, R_TMP1 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R_SRC), (R_TMP2, R_TMP3) + STP (R_TMP2, R_TMP3), 0(R_DST) + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R_TMP0, R_LEN + BGT errCorrupt + CMP R_TMP1, R_LEN + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R_DST, 8(RSP) + MOVD R_SRC, 16(RSP) + MOVD R_LEN, 24(RSP) + MOVD R_DST, 32(RSP) + MOVD R_SRC, 40(RSP) + MOVD R_LEN, 48(RSP) + MOVD R_OFF, 56(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVD 32(RSP), R_DST + MOVD 40(RSP), R_SRC + MOVD 48(RSP), R_LEN + MOVD 56(RSP), R_OFF + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
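+	// (s advances by x-58 bytes in total: the tag byte plus 1 to 4 extra length bytes for x = 60..63.)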
+ ADD R_LEN, R_SRC, R_SRC + SUB $58, R_SRC, R_SRC + TEST_SRC() + + // case x == 60: + MOVW $61, R1 + CMPW R1, R_LEN + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R_SRC), R_LEN + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R_SRC), R_LEN + B doLit + +tagLit62Plus: + CMPW $62, R_LEN + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R_SRC), R_LEN + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP1<<16, R_LEN + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R_SRC), R_LEN + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + MOVD R_SRC, R_TMP1 + SUB R_SBASE, R_TMP1, R_TMP1 + CMP R_SLEN, R_TMP1 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R_SRC), R_OFF + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R_LEN>>2, R1, R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R_SRC), R_OFF + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMP $2, R_TMP1 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + // Calculate offset in R_TMP0 in case it is a repeat. + MOVD R_LEN, R_TMP0 + AND $0xe0, R_TMP0 + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP0<<3, R_TMP1, R_TMP0 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R_LEN>>2, R1, R_LEN + ADD $4, R_LEN, R_LEN + + // check if repeat code with offset 0. + CMP $0, R_TMP0 + BEQ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (offset) + MOVD R_TMP0, R_OFF + B doCopy + + // This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMP $9, R_LEN + BLT doCopyRepeat + BEQ repeatLen1 + CMP $10, R_LEN + BEQ repeatLen2 + +repeatLen3: + // s +=3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 + MOVBU -1(R_SRC), R_TMP0 + MOVHU -3(R_SRC), R_LEN + ORR R_TMP0<<16, R_LEN, R_LEN + ADD $65540, R_LEN, R_LEN + B doCopyRepeat + +repeatLen2: + // s +=2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 + MOVHU -2(R_SRC), R_LEN + ADD $260, R_LEN, R_LEN + B doCopyRepeat + +repeatLen1: + // s +=1 + ADD $1, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = src[s-1] + 8 + MOVBU -1(R_SRC), R_LEN + ADD $8, R_LEN, R_LEN + B doCopyRepeat + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
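+	// (Reached from the tagCopy1/2/4 cases; repeat codes branch directly to doCopyRepeat below.)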
+	//
+	// We assume that:
+	//	- R_LEN == length && R_LEN > 0
+	//	- R_OFF == offset
+
+	// if d < offset { etc }
+	MOVD R_DST, R_TMP1
+	SUB  R_DBASE, R_TMP1, R_TMP1
+	CMP  R_OFF, R_TMP1
+	BLT  errCorrupt
+
+	// Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+
+	// if offset <= 0 { etc }
+	CMP $0, R_OFF
+	BLE errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVD R_DEND, R_TMP1
+	SUB  R_DST, R_TMP1, R_TMP1
+	CMP  R_TMP1, R_LEN
+	BGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R_TMP2 = len(dst)-d
+	//	- R_TMP3 = &dst[d-offset]
+	MOVD R_DEND, R_TMP2
+	SUB  R_DST, R_TMP2, R_TMP2
+	MOVD R_DST, R_TMP3
+	SUB  R_OFF, R_TMP3, R_TMP3
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//	goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMP  $16, R_LEN
+	BGT  slowForwardCopy
+	CMP  $8, R_OFF
+	BLT  slowForwardCopy
+	CMP  $16, R_TMP2
+	BLT  slowForwardCopy
+	MOVD 0(R_TMP3), R_TMP0
+	MOVD R_TMP0, 0(R_DST)
+	MOVD 8(R_TMP3), R_TMP1
+	MOVD R_TMP1, 8(R_DST)
+	ADD  R_LEN, R_DST, R_DST
+	B    loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+ // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R_TMP2, R_TMP2 + CMP R_TMP2, R_LEN + BGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVD R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMP $8, R_TMP2 + BGE fixUpSlowForwardCopy + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_DST) + SUB R_TMP2, R_LEN, R_LEN + ADD R_TMP2, R_DST, R_DST + ADD R_TMP2, R_TMP2, R_TMP2 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R_DST, R_TMP0 + ADD R_LEN, R_DST, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R_LEN + BLE loop + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_TMP0) + ADD $8, R_TMP3, R_TMP3 + ADD $8, R_TMP0, R_TMP0 + SUB $8, R_LEN, R_LEN + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + ADD $1, R_TMP3, R_TMP3 + ADD $1, R_DST, R_DST + SUB $1, R_LEN, R_LEN + CBNZ R_LEN, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R_DEND, R_DST + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R_TMP0 + MOVD R_TMP0, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go new file mode 100644 index 000000000..cb3576edd --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64) && !appengine && gc && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !noasm + +package s2 + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func s2Decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go new file mode 100644 index 000000000..2cb55c2c7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -0,0 +1,292 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm64 appengine !gc noasm + +package s2 + +import ( + "fmt" + "strconv" +) + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2Decode(dst, src []byte) int { + const debug = false + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := 0 + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. + x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. 
+ a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. 
+ // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go new file mode 100644 index 000000000..f125ad096 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/dict.go @@ -0,0 +1,350 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "sync" +) + +const ( + // MinDictSize is the minimum dictionary size when repeat has been read. + MinDictSize = 16 + + // MaxDictSize is the maximum dictionary size when repeat has been read. + MaxDictSize = 65536 + + // MaxDictSrcOffset is the maximum offset where a dictionary entry can start. + MaxDictSrcOffset = 65535 +) + +// Dict contains a dictionary that can be used for encoding and decoding s2 +type Dict struct { + dict []byte + repeat int // Repeat as index of dict + + fast, better, best sync.Once + fastTable *[1 << 14]uint16 + + betterTableShort *[1 << 14]uint16 + betterTableLong *[1 << 17]uint16 + + bestTableShort *[1 << 16]uint32 + bestTableLong *[1 << 19]uint32 +} + +// NewDict will read a dictionary. +// It will return nil if the dictionary is invalid. +func NewDict(dict []byte) *Dict { + if len(dict) == 0 { + return nil + } + var d Dict + // Repeat is the first value of the dict + r, n := binary.Uvarint(dict) + if n <= 0 { + return nil + } + dict = dict[n:] + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize || len(dict) > MaxDictSize { + return nil + } + d.repeat = int(r) + if d.repeat > len(dict) { + return nil + } + return &d +} + +// Bytes will return a serialized version of the dictionary. +// The output can be sent to NewDict. +func (d *Dict) Bytes() []byte { + dst := make([]byte, binary.MaxVarintLen16+len(d.dict)) + return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...) +} + +// MakeDict will create a dictionary. +// 'data' must be at least MinDictSize. +// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used. +// If searchStart is set the start repeat value will be set to the last +// match of this content. +// If no matches are found, it will attempt to find shorter matches. +// This content should match the typical start of a block. +// If at least 4 bytes cannot be matched, repeat is set to start of block. +func MakeDict(data []byte, searchStart []byte) *Dict { + if len(data) == 0 { + return nil + } + if len(data) > MaxDictSize { + data = data[len(data)-MaxDictSize:] + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) 
+ } + if len(dict) < MinDictSize { + return nil + } + + // Find the longest match possible, last entry if multiple. + for s := len(searchStart); s > 4; s-- { + if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 { + d.repeat = idx + break + } + } + + return &d +} + +// MakeDictManual will create a dictionary. +// 'data' must be at least MinDictSize and less than or equal to MaxDictSize. +// A manual first repeat index into data must be provided. +// It must be less than len(data)-8. +func MakeDictManual(data []byte, firstIdx uint16) *Dict { + if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize { + return nil + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + + d.repeat = int(firstIdx) + return &d +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockDictGo(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
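+	// (Same framing as Encode; a block encoded with a dictionary must also be decoded with it.)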
+ dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBetterDict(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBest(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func (d *Dict) Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2DecodeDict(dst, src[s:], d) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +func (d *Dict) initFast() { + d.fast.Do(func() { + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8-2; i += 3 { + x0 := load64(d.dict, i) + h0 := hash6(x0, tableBits) + h1 := hash6(x0>>8, tableBits) + h2 := hash6(x0>>16, tableBits) + table[h0] = uint16(i) + table[h1] = uint16(i + 1) + table[h2] = uint16(i + 2) + } + d.fastTable = &table + }) +} + +func (d *Dict) initBetter() { + d.better.Do(func() { + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + lTable[hash7(cv, lTableBits)] = uint16(i) + sTable[hash4(cv, sTableBits)] = uint16(i) + } + d.betterTableShort = &sTable + d.betterTableLong = &lTable + }) +} + +func (d *Dict) initBest() { + d.best.Do(func() { + const ( + // Long hash matches. 
+ lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + lTable[hashL] = uint32(i) | candidateL<<16 + sTable[hashS] = uint32(i) | candidateS<<16 + } + d.bestTableShort = &sTable + d.bestTableLong = &lTable + }) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go new file mode 100644 index 000000000..0c9088adf --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -0,0 +1,393 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "math" + "math/bits" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlock(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EstimateBlockSize will perform a very fast compression +// without outputting the result and return the compressed output size. +// The function returns -1 if no improvement could be achieved. +// Using actual compression will most often produce better compression than the estimate. +func EstimateBlockSize(src []byte) (d int) { + if len(src) <= inputMargin || int64(len(src)) > 0xffffffff { + return -1 + } + if len(src) <= 1024 { + d = calcBlockSizeSmall(src) + } else { + d = calcBlockSize(src) + } + + if d == 0 { + return -1 + } + // Size of the varint encoded block size. + d += (bits.Len64(uint64(len(src))) + 7) / 7 + + if d >= len(src) { + return -1 + } + return d +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
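+//
+// Illustrative sketch (editorial, not upstream documentation): pre-sizing
+// dst lets repeated calls reuse one buffer, because the result re-slices
+// dst whenever it is large enough. Here maxSrc is an assumed upper bound
+// on the inputs:
+//
+//    buf := make([]byte, MaxEncodedLen(maxSrc))
+//    out := EncodeBetter(buf, src) // writes into buf when len(src) <= maxSrc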
+// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBetter(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBest(dst[d:], src, nil) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappy returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappy(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
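+ // Illustrative aside (editorial, not upstream code): the Snappy-framed
+ // output can be read back by any Snappy block decoder, for example with
+ // the github.com/golang/snappy package (an assumption; it is not a
+ // dependency of this file):
+ //
+ //    enc := EncodeSnappy(nil, data)
+ //    dec, err := snappy.Decode(nil, enc) // dec equals data on success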
+ d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBetterSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBestSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination. +// If the destination is nil or too small, a new will be allocated. +// The blocks are not validated, so garbage in = garbage out. +// dst may not overlap block data. +// Any data in dst is preserved as is, so it will not be considered a block. 
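+//
+// Illustrative sketch (editorial, not upstream documentation): blocks that
+// were encoded independently can be merged without recompression, and the
+// result decodes as one block:
+//
+//    a := Encode(nil, part1)
+//    b := Encode(nil, part2)
+//    joined, err := ConcatBlocks(nil, a, b)
+//    // on success, Decode(nil, joined) yields part1 followed by part2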
+func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) { + totalSize := uint64(0) + compSize := 0 + for _, b := range blocks { + l, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + totalSize += uint64(l) + compSize += len(b) - hdr + } + if totalSize == 0 { + dst = append(dst, 0) + return dst, nil + } + if totalSize > math.MaxUint32 { + return nil, ErrTooLarge + } + var tmp [binary.MaxVarintLen32]byte + hdrSize := binary.PutUvarint(tmp[:], totalSize) + wantSize := hdrSize + compSize + + if cap(dst)-len(dst) < wantSize { + dst = append(make([]byte, 0, wantSize+len(dst)), dst...) + } + dst = append(dst, tmp[:hdrSize]...) + for _, b := range blocks { + _, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + dst = append(dst, b[hdr:]...) + } + return dst, nil +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 8 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// will be accepted by the encoder. +const minNonLiteralBlockSize = 32 + +const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits) + +// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. +// Blocks this big are highly discouraged, though. +// Half the size on 32 bit systems. +const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5 + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +// 32 bit platforms will have lower thresholds for rejecting big content. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + // Also includes negative. + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + // Size of the varint encoded block size. + n = n + uint64((bits.Len64(n)+7)/7) + + // Add maximum size of encoding block as literals. + n += uint64(literalExtraSize(int64(srcLen))) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + return int(n) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go new file mode 100644 index 000000000..997704569 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -0,0 +1,1068 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
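+
+// Illustrative sketch (editorial, not upstream code): sizing a destination
+// once with the MaxEncodedLen bound from encode.go above, so Encode writes
+// into the caller's buffer instead of allocating a new one:
+//
+//    n := MaxEncodedLen(len(src))
+//    if n < 0 {
+//        // src exceeds MaxBlockSize; split it or use the stream format
+//    }
+//    out := Encode(make([]byte, n), src) // out re-slices that buffer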
+ +package s2 + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/bits" +) + +func load32(b []byte, i int) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + const prime6bytes = 227718039650203 + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +func encodeGo(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockGo(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. 
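+ // Illustrative aside (editorial, not upstream code): the extension
+ // loops above locate the first differing byte with XOR plus
+ // bits.TrailingZeros64: with little-endian loads, equal low bytes
+ // become trailing zero bits, so the zero-bit count divided by 8 is
+ // the count of matching bytes. The same idiom as a standalone helper
+ // (assumes len(b) >= len(a)):
+ //
+ //    func matchLen(a, b []byte) (n int) {
+ //        for len(a)-n >= 8 {
+ //            x := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
+ //            if x != 0 {
+ //                return n + bits.TrailingZeros64(x)>>3
+ //            }
+ //            n += 8
+ //        }
+ //        for n < len(a) && a[n] == b[n] {
+ //            n++
+ //        }
+ //        return n
+ //    }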
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockSnappyGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. 
The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. 
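+ // Illustrative aside (editorial, not upstream code): the step at the
+ // top of the search loop, nextS := s + (s-nextEmit)>>6 + 4, speeds up
+ // over incompressible data; the stride grows by one for every 64
+ // bytes scanned since the last emit:
+ //
+ //    bytes since last emit | stride
+ //    0..63                 | 4
+ //    64..127               | 5
+ //    128..191              | 6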
+ a := src[base:s]
+ b := src[base-repeat : base-repeat+(s-base)]
+ if !bytes.Equal(a, b) {
+ panic("mismatch")
+ }
+ }
+
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ if d > dstLimit {
+ // Do we have space for more, if not bail.
+ return 0
+ }
+ // Check for an immediate match, otherwise start search at s+1
+ x := load64(src, s-2)
+ m2Hash := hash6(x, tableBits)
+ currHash := hash6(x>>16, tableBits)
+ candidate = int(table[currHash])
+ table[m2Hash] = uint32(s - 2)
+ table[currHash] = uint32(s)
+ if uint32(x>>16) != load32(src, candidate) {
+ cv = load64(src, s+1)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ // Bail if we exceed the maximum size.
+ if d+len(src)-nextEmit > dstLimit {
+ return 0
+ }
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
+
+// encodeBlockDictGo encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) {
+ // Initialize the hash table.
+ const (
+ tableBits = 14
+ maxTableSize = 1 << tableBits
+ maxAhead = 8 // maximum bytes ahead without checking sLimit
+
+ debug = false
+ )
+ dict.initFast()
+
+ var table [maxTableSize]uint32
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+ if sLimit > MaxDictSrcOffset-maxAhead {
+ sLimit = MaxDictSrcOffset - maxAhead
+ }
+
+ // Bail if we can't compress to at least this.
+ dstLimit := len(src) - len(src)>>5 - 5
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form can start with a dict entry (copy or repeat).
+ s := 0
+
+ // Convert dict repeat to offset
+ repeat := len(dict.dict) - dict.repeat
+ cv := load64(src, 0)
+
+ // While in dict
+searchDict:
+ for {
+ // Next src position to check
+ nextS := s + (s-nextEmit)>>6 + 4
+ hash0 := hash6(cv, tableBits)
+ hash1 := hash6(cv>>8, tableBits)
+ if nextS > sLimit {
+ if debug {
+ fmt.Println("slimit reached", s, nextS)
+ }
+ break searchDict
+ }
+ candidateDict := int(dict.fastTable[hash0])
+ candidateDict2 := int(dict.fastTable[hash1])
+ candidate2 := int(table[hash1])
+ candidate := int(table[hash0])
+ table[hash0] = uint32(s)
+ table[hash1] = uint32(s + 1)
+ hash2 := hash6(cv>>16, tableBits)
+
+ // Check repeat at offset checkRep.
+ const checkRep = 1
+
+ if repeat > s {
+ candidate := len(dict.dict) - repeat + s
+ if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) {
+ // Extend back
+ base := s
+ for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; {
+ i--
+ base--
+ }
+ // Bail if we exceed the maximum size.
+ if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + cv = load64(src, s) + continue + } + } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + if debug { + fmt.Println("emitted reg repeat", s-base, "s:", s) + } + cv = load64(src, s) + continue searchDict + } + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue searchDict + } + // Start with table. These matches will always be closer. + if uint32(cv) == load32(src, candidate) { + goto emitMatch + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + goto emitMatch + } + + // Check dict. Dicts have longer offsets, so we want longer matches. + if cv == load64(dict.dict, candidateDict) { + table[hash2] = uint32(s + 2) + goto emitDict + } + + candidateDict = int(dict.fastTable[hash2]) + // Check if upper 7 bytes match + if candidateDict2 >= 1 { + if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) { + table[hash2] = uint32(s + 2) + candidateDict = candidateDict2 + s++ + goto emitDict + } + } + + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + goto emitMatch + } + if candidateDict >= 2 { + // Check if upper 6 bytes match + if cv^load64(dict.dict, candidateDict-2) < (1 << 16) { + s += 2 + goto emitDict + } + } + + cv = load64(src, nextS) + s = nextS + continue searchDict + + emitDict: + { + if debug { + if load32(dict.dict, candidateDict) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] { + candidateDict-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = s + (len(dict.dict)) - candidateDict + + // Extend the 4-byte match as long as possible. + s += 4 + candidateDict += 4 + for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateDict += 8 + } + + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], repeat, s-base) + } else { + // Split to ensure we don't start a copy within next block + d += emitCopy(dst[d:], repeat, 4) + d += emitRepeat(dst[d:], repeat, s-base-4) + } + if false { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index and continue loop to try new candidate. + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>8, tableBits) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s - 1) + cv = load64(src, s) + } + continue + } + emitMatch: + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. 
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + if debug { + fmt.Println("non-dict matching at", s, "repeat:", repeat) + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + if debug { + fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. 
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go new file mode 100644 index 000000000..4f45206a4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -0,0 +1,162 @@ +//go:build !appengine && !noasm && gc +// +build !appengine,!noasm,gc + +package s2 + +import "github.com/klauspost/compress/internal/race" + +const hasAmd64Asm = true + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... 
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+
+ if len(src) >= 4<<20 {
+ return encodeBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeBlockAsm4MB(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetter(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+
+ if len(src) > 4<<20 {
+ return encodeBetterBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeBetterBlockAsm4MB(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeBetterBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeBetterBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeBetterBlockAsm8B(dst, src)
+}
+
+// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockSnappy(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+ if len(src) >= 64<<10 {
+ return encodeSnappyBlockAsm(dst, src)
+ }
+ if len(src) >= limit12B {
+ return encodeSnappyBlockAsm64K(dst, src)
+ }
+ if len(src) >= limit10B {
+ return encodeSnappyBlockAsm12B(dst, src)
+ }
+ if len(src) >= limit8B {
+ return encodeSnappyBlockAsm10B(dst, src)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ return encodeSnappyBlockAsm8B(dst, src)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit8B = 512 + ) + if len(src) >= 64<<10 { + return encodeSnappyBetterBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeSnappyBetterBlockAsm64K(dst, src) + } + if len(src) >= limit10B { + return encodeSnappyBetterBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeSnappyBetterBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeSnappyBetterBlockAsm8B(dst, src) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go new file mode 100644 index 000000000..47bac7423 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -0,0 +1,796 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "fmt" + "math" + "math/bits" +) + +// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + + debug = false + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + sLimitDict := len(src) - inputMargin + if sLimitDict > MaxDictSrcOffset-inputMargin { + sLimitDict = MaxDictSrcOffset - inputMargin + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + repeat := 1 + if dict != nil { + dict.initBest() + s = 0 + repeat = len(dict.dict) - dict.repeat + } + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + rep, dict bool + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + if dict != nil && s >= MaxDictSrcOffset { + dict = nil + if repeat > s { + repeat = math.MinInt32 + } + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. 
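+ // Illustrative worked example (editorial, with assumed values): given
+ // nextEmit == 10 and a match m{s: 12, offset: 2, length: 8}, the score
+ // starts at 8 - 12 = -4, gets no +1 bonus because src[10:12] must
+ // still be emitted as literals, and is then reduced by the cost of
+ // encoding a copy with offset 12-2 = 10 and length 8. Matches that
+ // start sooner, run longer, and encode cheaply score highest.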
+ score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + if m.rep { + return score - emitRepeatSize(offset, m.length) + } + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32, rep bool) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset, rep: rep} + s += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + matchDict := func(candidate, s int, first uint32, rep bool) match { + if s >= MaxDictSrcOffset { + return match{offset: candidate, s: s} + } + // Calculate offset as if in continuous array with s + offset := -len(dict.dict) + candidate + if best.length != 0 && best.s-best.offset == s-offset && !rep { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + + if load32(dict.dict, candidate) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true} + s += 4 + if !rep { + for s < sLimitDict && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } else { + for s < len(src) && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } + m.length -= candidate + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
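+ // Illustrative note (editorial): m.score <= -m.s rearranges to
+ // "match length (plus any bonus) <= encoded copy size", that is, the
+ // copy would cost at least as many output bytes as the raw bytes it
+ // covers, so the candidate is dropped by zeroing its length below.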
+ m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + if s > 0 { + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + } + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false)) + } + { + if (dict == nil || repeat <= s) && repeat > 0 { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + } else if s-repeat < -4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + candidate++ + best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true)) + } + + if best.length > 0 { + hashS := hash4(cv>>8, sTableBits) + // s+1 + nextShort := sTable[hashS] + s := s + 1 + cv := load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong := lTable[hashL] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at + 1 + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + + // s+2 + if true { + hashS := hash4(cv>>8, sTableBits) + + nextShort = sTable[hashS] + s++ + cv = load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong = lTable[hashL] + + if (dict == nil || repeat <= s) && repeat > 0 { + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true)) + } else if repeat-s > 4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + } + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at +2 + // Very small gain + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + } + // Search for a match at best match end, see if that is better. + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning + // Load initial values + cv = load64(src, sBack) + + // Grab candidates... + next := lTable[hash8(load64(src, sAt), lTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if !best.rep && !best.dict { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + s += best.length + + if offset > 65535 && s-base <= 5 && !best.rep { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + if debug && nextEmit != base { + fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base) + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if best.rep { + if nextEmit > 0 || best.dict { + if debug { + fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], offset, best.length) + } else { + // First match without dict cannot be a repeat. + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + } else { + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. 
It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBestSnappy(dst, src []byte) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + + return score - emitCopyNoRepeatSize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
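+				// (score <= -m.s means the copy encoding would cost roughly at
+				// least as many bytes as the match covers, so keeping it
+				// cannot beat plain literals.)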
+ m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + } + // Search for a match at best match end, see if that is better. + if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if true { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, best.length) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. 
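+		// Returning 0 signals an incompressible block; the caller is then
+		// expected to store the input uncompressed instead.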
+ if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// emitCopySize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopySize(offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeatSize(offset, length) + } + i = 5 + } + if length == 0 { + return i + } + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + if offset < 2048 { + // Emit 8 bytes, then rest as repeats... + return 2 + emitRepeatSize(offset, length-8) + } + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeatSize(offset, length-60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitCopyNoRepeatSize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeatSize(offset, length int) int { + if offset >= 65536 { + return 5 + 5*(length/64) + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + 3*(length/60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitRepeatSize returns the number of bytes required to encode a repeat. +// Length must be at least 4 and < 1<<24 +func emitRepeatSize(offset, length int) int { + // Repeat offset, make length cheaper + if length <= 4+4 || (length < 8+4 && offset < 2048) { + return 2 + } + if length < (1<<8)+4+4 { + return 3 + } + if length < (1<<16)+(1<<8)+4 { + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= (1 << 16) - 4 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + } + if left > 0 { + return 5 + emitRepeatSize(offset, left) + } + return 5 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go new file mode 100644 index 000000000..544cb1e17 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -0,0 +1,1106 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "fmt" + "math/bits" +) + +// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint64, h uint8) uint32 { + const prime4bytes = 2654435761 + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + const prime5bytes = 889523592379 + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. 
+func hash7(u uint64, h uint8) uint32 { + const prime7bytes = 58295818150454627 + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + const prime8bytes = 0xcf1bbcdcb7a56463 + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. 
+ d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + // lTable could be postponed, but very minor difference. + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = (s-nextEmit)>>7 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, s-base) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + dict.initBetter() + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 0 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := len(dict.dict) - dict.repeat + + // While in dict +searchDict: + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + break searchDict + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + dictL := int(dict.betterTableLong[hashL]) + dictS := int(dict.betterTableShort[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if s != 0 { + if cv == valLong { + goto emitMatch + } + if cv == valShort { + candidateL = candidateS + goto emitMatch + } + } + + // Check dict repeat. 
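+			// While repeat >= s+4 the repeat offset still reaches back into
+			// the dictionary: the source of a repeat at s is dict position
+			// len(dict.dict)-(repeat-s), and the +4 keeps the 4-byte probe
+			// below inside the dictionary bounds.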
+ if repeat >= s+4 { + candidate := len(dict.dict) - repeat + s + if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + continue + } + } + // Don't try to find match at s==0 + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + goto emitMatch + } + + // Long dict... + if uint32(cv) == load32(dict.dict, dictL) { + candidateL = dictL + goto emitDict + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + // Use our short candidate. + candidateL = candidateS + goto emitMatch + } + if uint32(cv) == load32(dict.dict, dictS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + candidateL = dictS + goto emitDict + } + cv = load64(src, nextS) + s = nextS + } + emitDict: + { + if debug { + if load32(dict.dict, candidateL) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + offset := s + (len(dict.dict)) - candidateL + + // Extend the 4-byte match as long as possible. 
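+				// Here candidateL indexes dict.dict rather than src, so the
+				// loop below must stay within both the source and the
+				// dictionary.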
+ s += 4 + candidateL += 4 + for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if repeat == offset { + if debug { + fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], offset, s-base) + } else { + // Split to ensure we don't start a copy within next block. + d += emitCopy(dst[d:], offset, 4) + d += emitRepeat(dst[d:], offset, s-base-4) + } + repeat = offset + } + if false { + // Validate match. + if s <= candidateL { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + continue + } + emitMatch: + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + if repeat == offset { + if debug { + fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s) + } + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. 
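+			// (candidateL was found via a 7-byte hash, so a confirmed 4-byte
+			// prefix is likely to extend well past 4 bytes; prefer it over
+			// the short table.)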
+ if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go new file mode 100644 index 000000000..6b393c34d --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -0,0 +1,729 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package s2 + +import ( + "bytes" + "math/bits" +) + +const hasAmd64Asm = false + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlock(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetter(dst, src []byte) (d int) { + return encodeBlockBetterGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetterSnappy(dst, src []byte) (d int) { + return encodeBlockBetterSnappyGo(dst, src) +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockSnappy(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockSnappyGo(dst, src) +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteral(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + const num = 63<<2 | tagLiteral + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat(dst []byte, offset, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopy(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. 
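+			// tagCopy4 layout: byte 0 is (length-1)<<2|tag, bytes 1-4 are
+			// the offset as a little-endian uint32, so 63<<2 encodes
+			// length 64.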
+ dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeat(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitCopyNoRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitCopyNoRepeat(dst[3:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +func matchLen(a []byte, b []byte) int { + b = b[:len(a)] + var checked int + if len(a) > 4 { + // Try 4 bytes first + if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { + return bits.TrailingZeros32(diff) >> 3 + } + // Switch to 8 byte matching. 
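+		// XOR of equal bytes is zero; with little-endian loads, the index of
+		// the first differing byte is TrailingZeros of the XOR divided by 8.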
+ checked = 4 + a = a[4:] + b = b[4:] + for len(a) >= 8 { + b = b[:len(a)] + if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return int(i) + checked + } + } + return len(a) + checked +} + +// input must be > inputMargin +func calcBlockSize(src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 13 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. 
We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// length must be > inputMargin. +func calcBlockSizeSmall(src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 9 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. 
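+			// The probe compares 4 bytes starting one byte past s; if it
+			// hits, the extend-back loop below recovers the skipped leading
+			// byte when it also matches.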
+ const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
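+// Note: unlike emitLiteral, the emitLiteralSize variant below only returns
+// the encoded size; nothing is written.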
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralSize(lit []byte) int { + if len(lit) == 0 { + return 0 + } + switch { + case len(lit) <= 60: + return len(lit) + 1 + case len(lit) <= 1<<8: + return len(lit) + 2 + case len(lit) <= 1<<16: + return len(lit) + 3 + case len(lit) <= 1<<24: + return len(lit) + 4 + default: + return len(lit) + 5 + } +} + +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockAsm should be unreachable") +} + +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockSnappyAsm should be unreachable") +} + +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockAsm should be unreachable") +} + +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockSnappyAsm should be unreachable") +} diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go new file mode 100644 index 000000000..297e41501 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go @@ -0,0 +1,228 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +package s2 + +func _dummy_() + +// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm(dst []byte, src []byte) int + +// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm4MB(dst []byte, src []byte) int + +// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm12B(dst []byte, src []byte) int + +// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm10B(dst []byte, src []byte) int + +// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm8B(dst []byte, src []byte) int + +// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm(dst []byte, src []byte) int + +// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
+// +//go:noescape +func encodeBetterBlockAsm4MB(dst []byte, src []byte) int + +// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. 
+// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int + +// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSize(src []byte) int + +// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 1024 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSizeSmall(src []byte) int + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +//go:noescape +func emitLiteral(dst []byte, lit []byte) int + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<32 +// +//go:noescape +func emitRepeat(dst []byte, offset int, length int) int + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopy(dst []byte, offset int, length int) int + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopyNoRepeat(dst []byte, offset int, length int) int + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +// +//go:noescape +func matchLen(a []byte, b []byte) int + +// cvtLZ4Block converts an LZ4 block to S2 +// +//go:noescape +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to S2 +// +//go:noescape +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4Block converts an LZ4 block to Snappy +// +//go:noescape +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to Snappy +// +//go:noescape +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s new file mode 100644 index 000000000..2ff5b3340 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -0,0 +1,21277 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. 
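+//
+// The tag layout these stubs produce can be read off emitLiteralSize earlier in
+// this diff and the constants in the assembly below (the CMPL ..., $0x3c checks
+// and the 0xf0/0xf4/0xf8/0xfc tag bytes): a literal stores len(lit)-1 in the
+// upper six bits of a tag byte whose low two bits are 00, spilling into one to
+// four little-endian extra length bytes once it no longer fits. A hedged
+// pure-Go sketch of emitLiteral (emitLiteralRef is an illustrative name, not a
+// function in this package):
+//
+//	func emitLiteralRef(dst, lit []byte) int {
+//		if len(lit) == 0 {
+//			return 0
+//		}
+//		i, n := 0, uint32(len(lit)-1)
+//		switch {
+//		case n < 60: // length fits in the tag byte
+//			dst[0] = uint8(n) << 2
+//			i = 1
+//		case n < 1<<8: // tag 0xf0 + 1 length byte
+//			dst[0], dst[1] = 0xf0, uint8(n)
+//			i = 2
+//		case n < 1<<16: // tag 0xf4 + 2 length bytes
+//			dst[0], dst[1], dst[2] = 0xf4, uint8(n), uint8(n>>8)
+//			i = 3
+//		case n < 1<<24: // tag 0xf8 + 3 length bytes
+//			dst[0], dst[1], dst[2], dst[3] = 0xf8, uint8(n), uint8(n>>8), uint8(n>>16)
+//			i = 4
+//		default: // tag 0xfc + 4 length bytes
+//			dst[0], dst[1], dst[2], dst[3], dst[4] = 0xfc, uint8(n), uint8(n>>8), uint8(n>>16), uint8(n>>24)
+//			i = 5
+//		}
+//		return i + copy(dst[i:], lit)
+//	}
+//
+// matchLen, per its stub comment, is the byte-wise common prefix of a and b
+// (assuming len(a) <= len(b)); the assembly batches the compare 16 bytes at a
+// time and resolves the first mismatch with TZCNT/BSF. The scalar equivalent:
+//
+//	func matchLenRef(a, b []byte) int {
+//		for i := range a {
+//			if a[i] != b[i] {
+//				return i
+//			}
+//		}
+//		return len(a)
+//	}
+//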
+ +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func _dummy_() +TEXT ·_dummy_(SB), $0 +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + RET + +// func encodeBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm + +repeat_extend_back_loop_encodeBlockAsm: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm + +repeat_extend_back_end_encodeBlockAsm: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 5(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_repeat_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +four_bytes_repeat_emit_encodeBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +three_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +two_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm + JMP memmove_long_repeat_emit_encodeBlockAsm + +one_byte_repeat_emit_encodeBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE 
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm + +memmove_long_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm + XORQ 8(BX)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm + +matchlen_bsf_16repeat_extend_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_match8_repeat_extend_encodeBlockAsm: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeBlockAsm + +matchlen_bsf_8_repeat_extend_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, 
R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_match4_repeat_extend_encodeBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm + JB repeat_extend_forward_end_encodeBlockAsm + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm + +matchlen_match1_repeat_extend_encodeBlockAsm: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_repeat_encodeBlockAsm: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm + +cant_repeat_two_offset_match_repeat_encodeBlockAsm: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm + CMPL BX, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm + CMPL BX, $0x0100ffff + JB repeat_five_match_repeat_encodeBlockAsm + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_repeat_encodeBlockAsm + +repeat_five_match_repeat_encodeBlockAsm: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_match_repeat_encodeBlockAsm: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_match_repeat_encodeBlockAsm: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_match_repeat_encodeBlockAsm: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_match_repeat_encodeBlockAsm: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_as_copy_encodeBlockAsm: + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL BX, $0x0100ffff + JB 
repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +four_bytes_remain_repeat_as_copy_encodeBlockAsm: + TESTL BX, BX + JZ repeat_end_emit_encodeBlockAsm + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_repeat_as_copy_encodeBlockAsm: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + MOVL SI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL BX, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + 
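+	// Two-byte repeat with the offset folded in: taken only when the offset is
+	// below 0x800, so its high three bits fit in tag bits 5-7 beside the length
+	// field, with the low offset byte stored at 1(AX).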
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +long_offset_short_repeat_as_copy_encodeBlockAsm: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL BX, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +emit_copy_three_repeat_as_copy_encodeBlockAsm: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm + +no_repeat_found_encodeBlockAsm: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm + +candidate3_match_encodeBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm + +candidate2_match_encodeBlockAsm: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm + +match_extend_back_loop_encodeBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm + MOVB 
-1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm + JMP match_extend_back_loop_encodeBlockAsm + +match_extend_back_end_encodeBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm + CMPL DI, $0x00010000 + JB three_bytes_match_emit_encodeBlockAsm + CMPL DI, $0x01000000 + JB four_bytes_match_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBlockAsm + +four_bytes_match_emit_encodeBlockAsm: + MOVL DI, R9 + SHRL $0x10, R9 + MOVB $0xf8, (AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBlockAsm + +three_bytes_match_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm + +two_bytes_match_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm + JMP memmove_long_match_emit_encodeBlockAsm + +one_byte_match_emit_encodeBlockAsm: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm + +memmove_long_match_emit_encodeBlockAsm: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA 
emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm: +match_nolit_loop_encodeBlockAsm: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeBlockAsm: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm + +matchlen_bsf_16match_nolit_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeBlockAsm + +matchlen_match8_match_nolit_encodeBlockAsm: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeBlockAsm + +matchlen_bsf_8_match_nolit_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm + +matchlen_match4_match_nolit_encodeBlockAsm: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm + JB match_nolit_end_encodeBlockAsm + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm + +matchlen_match1_match_nolit_encodeBlockAsm: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JB two_byte_offset_match_nolit_encodeBlockAsm + CMPL R9, $0x40 + JBE four_bytes_remain_match_nolit_encodeBlockAsm + MOVB $0xff, (AX) + MOVL BX, 1(AX) + LEAL -64(R9), R9 + ADDQ $0x05, AX + CMPL R9, $0x04 + JB four_bytes_remain_match_nolit_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy + CMPL R9, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy + CMPL R9, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy + LEAL 
-16842747(R9), R9 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBlockAsm_emit_copy: + LEAL -65536(R9), R9 + MOVL R9, BX + MOVW $0x001d, (AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy: + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +four_bytes_remain_match_nolit_encodeBlockAsm: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_encodeBlockAsm + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_match_nolit_encodeBlockAsm: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + MOVL BX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b: + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL R9, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL R9, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(R9), R9 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(R9), R9 + MOVL R9, BX + MOVW $0x001d, (AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: + XORQ SI, SI + LEAL 
1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +long_offset_short_match_nolit_encodeBlockAsm: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R9, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R9, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short + LEAL -16842747(R9), R9 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -65536(R9), R9 + MOVL R9, BX + MOVW $0x001d, (AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_short_match_nolit_encodeBlockAsm: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +emit_copy_three_match_nolit_encodeBlockAsm: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm + INCL CX + JMP search_loop_encodeBlockAsm + +emit_remainder_encodeBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ 
emit_literal_done_emit_remainder_encodeBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +four_bytes_emit_remainder_encodeBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +three_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm + +two_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm + JMP memmove_long_emit_remainder_encodeBlockAsm + +one_byte_emit_remainder_encodeBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm + +memmove_long_emit_remainder_encodeBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ 
-32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm4MB(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm4MB(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm4MB: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm4MB + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm4MB + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm4MB + +repeat_extend_back_loop_encodeBlockAsm4MB: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm4MB + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm4MB + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm4MB + +repeat_extend_back_end_encodeBlockAsm4MB: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 4(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm4MB: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm4MB + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm4MB + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm4MB + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +three_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP 
memmove_long_repeat_emit_encodeBlockAsm4MB + +two_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm4MB + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +one_byte_repeat_emit_encodeBlockAsm4MB: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm4MB: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB + +memmove_long_repeat_emit_encodeBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm4MB: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm4MB + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB + XORQ 8(BX)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB + 
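+	// Mismatch in the second 8-byte word of a 16-byte compare: TZCNT (on
+	// GOAMD64_v3 targets, BSF otherwise) on the XOR result, shifted right by 3,
+	// gives the byte offset of the first difference; the 8 bytes already
+	// matched from the first word are added back in.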
+matchlen_bsf_16repeat_extend_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match8_repeat_extend_encodeBlockAsm4MB: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm4MB + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB + +matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match4_repeat_extend_encodeBlockAsm4MB: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm4MB + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm4MB: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm4MB + JB repeat_extend_forward_end_encodeBlockAsm4MB + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match1_repeat_extend_encodeBlockAsm4MB: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm4MB + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm4MB: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm4MB + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm4MB + +cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm4MB + CMPL BX, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm4MB + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_match_repeat_encodeBlockAsm4MB: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_match_repeat_encodeBlockAsm4MB: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_match_repeat_encodeBlockAsm4MB: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_match_repeat_encodeBlockAsm4MB: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_as_copy_encodeBlockAsm4MB: + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x0c + JAE 
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: + TESTL BX, BX + JZ repeat_end_emit_encodeBlockAsm4MB + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP 
repeat_end_emit_encodeBlockAsm4MB + +long_offset_short_repeat_as_copy_encodeBlockAsm4MB: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm4MB + +emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm4MB: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm4MB + +no_repeat_found_encodeBlockAsm4MB: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm4MB + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm4MB + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm4MB + +candidate3_match_encodeBlockAsm4MB: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm4MB + +candidate2_match_encodeBlockAsm4MB: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm4MB: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm4MB + +match_extend_back_loop_encodeBlockAsm4MB: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm4MB + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm4MB + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm4MB + JMP match_extend_back_loop_encodeBlockAsm4MB + +match_extend_back_end_encodeBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 4(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm4MB: + MOVL CX, SI + 
MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm4MB + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm4MB + CMPL DI, $0x00010000 + JB three_bytes_match_emit_encodeBlockAsm4MB + MOVL DI, R9 + SHRL $0x10, R9 + MOVB $0xf8, (AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +three_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +two_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm4MB + JMP memmove_long_match_emit_encodeBlockAsm4MB + +one_byte_match_emit_encodeBlockAsm4MB: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm4MB: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm4MB: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm4MB + +memmove_long_match_emit_encodeBlockAsm4MB: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm4MB: +match_nolit_loop_encodeBlockAsm4MB: + MOVL CX, SI + 
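+	// Current offset = s (CX) minus the candidate position (BX); it is saved
+	// at 16(SP) so the next search iteration can detect repeats.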
SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeBlockAsm4MB: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm4MB + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB + +matchlen_bsf_16match_nolit_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_match8_match_nolit_encodeBlockAsm4MB: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm4MB + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeBlockAsm4MB + +matchlen_bsf_8_match_nolit_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_match4_match_nolit_encodeBlockAsm4MB: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm4MB + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm4MB + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm4MB: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm4MB + JB match_nolit_end_encodeBlockAsm4MB + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm4MB + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm4MB + +matchlen_match1_match_nolit_encodeBlockAsm4MB: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm4MB + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm4MB: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JB two_byte_offset_match_nolit_encodeBlockAsm4MB + CMPL R9, $0x40 + JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB + MOVB $0xff, (AX) + MOVL BX, 1(AX) + LEAL -64(R9), R9 + ADDQ $0x05, AX + CMPL R9, $0x04 + JB four_bytes_remain_match_nolit_encodeBlockAsm4MB + + // emitRepeat + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL R9, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy + LEAL -65536(R9), R9 + MOVL R9, BX + MOVW $0x001d, (AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, R9 + 
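+	// (len-4)<<2 is OR'd with tagCopy1 (0x01) below; the MOVW then also
+	// writes a zero offset byte, which by the s2 repeat convention marks
+	// this copy as a repeat of the previous offset.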
ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBlockAsm4MB: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_encodeBlockAsm4MB + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_match_nolit_encodeBlockAsm4MB: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm4MB + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL R9, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + LEAL -65536(R9), R9 + MOVL R9, BX + MOVW $0x001d, (AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +long_offset_short_match_nolit_encodeBlockAsm4MB: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL R9, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(R9), R9 + MOVL R9, BX + MOVW $0x001d, (AX) + MOVW R9, 2(AX) + SARL $0x10, BX + MOVB BL, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(R9), R9 + MOVW 
$0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBlockAsm4MB: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm4MB + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm4MB + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +emit_copy_three_match_nolit_encodeBlockAsm4MB: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm4MB: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm4MB + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm4MB: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm4MB + INCL CX + JMP search_loop_encodeBlockAsm4MB + +emit_remainder_encodeBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +three_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +two_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +one_byte_emit_remainder_encodeBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE 
emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB + +memmove_long_emit_remainder_encodeBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + 
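+	// DX now holds a conservative dst limit: dst + (len(src)-9) - len(src)/32.
+	// It is saved at (SP) below; whenever the output pointer would cross it
+	// the function returns 0, signalling that the block should be stored
+	// uncompressed instead. Locals: 8(SP) = sLimit, 12(SP) = nextEmit,
+	// 16(SP) = repeat offset (initialised to 1), 24(SP) = the 4096-entry
+	// hash table (16 KiB, which with 24 bytes of locals accounts for the
+	// $16408 frame size of encodeBlockAsm12B).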
MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x18, R10 + IMULQ R8, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm12B + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm12B + +repeat_extend_back_loop_encodeBlockAsm12B: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm12B + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm12B + +repeat_extend_back_end_encodeBlockAsm12B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm12B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm12B + JB three_bytes_repeat_emit_encodeBlockAsm12B + +three_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +two_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm12B + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +one_byte_repeat_emit_encodeBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + 
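+	// genMemMoveShort copies literal runs of up to 64 bytes with a small
+	// set of overlapping load/store pairs picked by length class (up to 8,
+	// 9-16, 17-32, 33-64 bytes). Longer runs take genMemMoveLong below,
+	// which streams 32-byte SSE chunks and switches to aligned stores
+	// (MOVOA) once the destination reaches a 32-byte boundary.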
+memmove_end_copy_repeat_emit_encodeBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm12B + +memmove_long_repeat_emit_encodeBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm12B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm12B: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm12B + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B + XORQ 8(BX)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B + +matchlen_bsf_16repeat_extend_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match8_repeat_extend_encodeBlockAsm12B: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm12B + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeBlockAsm12B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match4_repeat_extend_encodeBlockAsm12B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm12B + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm12B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm12B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm12B + JB repeat_extend_forward_end_encodeBlockAsm12B + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm12B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match1_repeat_extend_encodeBlockAsm12B: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm12B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm12B: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL 
DI, DI + JZ repeat_as_copy_encodeBlockAsm12B + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm12B + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm12B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm12B + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_match_repeat_encodeBlockAsm12B: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_match_repeat_encodeBlockAsm12B: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_match_repeat_encodeBlockAsm12B: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_as_copy_encodeBlockAsm12B: + // emitCopy + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +long_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + 
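+	// emitRepeat encodings used here, smallest first:
+	//   2 bytes: (len-4)<<2|0x01 with a zero offset byte   for len <= 8
+	//   2 bytes: tagCopy1 carrying the real offset         for len < 12 and offset < 2048
+	//   3 bytes: tag 0x15 + one length byte                for len-4 < 0x104
+	//   4 bytes: tag 0x19 + two length bytes               otherwise
+	// The 4MB variant additionally has a 5-byte 0x1d form for lengths past
+	// 64K; the 12B/10B variants drop it, since blocks this small cannot
+	// produce such match lengths.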
+repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm12B + +no_repeat_found_encodeBlockAsm12B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm12B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm12B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm12B + +candidate3_match_encodeBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm12B + +candidate2_match_encodeBlockAsm12B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm12B + +match_extend_back_loop_encodeBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm12B + JMP match_extend_back_loop_encodeBlockAsm12B + +match_extend_back_end_encodeBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm12B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm12B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm12B + JB three_bytes_match_emit_encodeBlockAsm12B + +three_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm12B + +two_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm12B + JMP memmove_long_match_emit_encodeBlockAsm12B + +one_byte_match_emit_encodeBlockAsm12B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE 
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm12B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm12B + +memmove_long_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm12B: +match_nolit_loop_encodeBlockAsm12B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeBlockAsm12B: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm12B + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B + +matchlen_bsf_16match_nolit_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_match8_match_nolit_encodeBlockAsm12B: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm12B + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL 
(R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_match4_match_nolit_encodeBlockAsm12B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm12B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm12B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm12B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm12B + JB match_nolit_end_encodeBlockAsm12B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm12B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm12B + +matchlen_match1_match_nolit_encodeBlockAsm12B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm12B + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm12B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm12B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +long_offset_short_match_nolit_encodeBlockAsm12B: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, 
(AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBlockAsm12B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +emit_copy_three_match_nolit_encodeBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x18, DI + IMULQ R8, DI + SHRQ $0x34, DI + SHLQ $0x18, BX + IMULQ R8, BX + SHRQ $0x34, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm12B + INCL CX + JMP search_loop_encodeBlockAsm12B + +emit_remainder_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm12B + JB three_bytes_emit_remainder_encodeBlockAsm12B + +three_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +two_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm12B + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +one_byte_emit_remainder_encodeBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP 
memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm12B + +memmove_long_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + 
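+	// The 10B variant hashes with the 32-bit golden-ratio constant
+	// 0x9e3779b1: SHLQ $0x20 keeps four input bytes, IMULQ mixes them,
+	// and SHRQ $0x36 leaves a 10-bit index into the 1024-entry table at
+	// 24(SP) (4 KiB, hence the $4120 frame). The 12B and 4MB variants use
+	// the same scheme with a different multiplier and wider shifts.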
SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm10B + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm10B + +repeat_extend_back_loop_encodeBlockAsm10B: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm10B + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm10B + +repeat_extend_back_end_encodeBlockAsm10B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm10B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm10B + JB three_bytes_repeat_emit_encodeBlockAsm10B + +three_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +two_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm10B + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +one_byte_repeat_emit_encodeBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm10B + +memmove_long_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + 
MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm10B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm10B: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm10B + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B + XORQ 8(BX)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B + +matchlen_bsf_16repeat_extend_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match8_repeat_extend_encodeBlockAsm10B: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm10B + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeBlockAsm10B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match4_repeat_extend_encodeBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm10B + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm10B + JB repeat_extend_forward_end_encodeBlockAsm10B + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match1_repeat_extend_encodeBlockAsm10B: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm10B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm10B: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm10B + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm10B + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm10B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm10B + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_match_repeat_encodeBlockAsm10B: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + 
MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_match_repeat_encodeBlockAsm10B: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_match_repeat_encodeBlockAsm10B: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_as_copy_encodeBlockAsm10B: + // emitCopy + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +long_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVL BX, DI + SHLL $0x02, DI + 
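+	// Copy encoding selection: lengths 4..11 with offset < 2048 fit the
+	// 2-byte tagCopy1 form (LEAL -15 turns len<<2 into (len-4)<<2|0x01,
+	// with the top 3 offset bits packed into bits 5-7 of the tag byte);
+	// anything else takes the 3-byte tagCopy2 form, (len-1)<<2|0x02
+	// followed by a 16-bit little-endian offset.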
CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm10B + +no_repeat_found_encodeBlockAsm10B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm10B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm10B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm10B + +candidate3_match_encodeBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm10B + +candidate2_match_encodeBlockAsm10B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm10B + +match_extend_back_loop_encodeBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm10B + JMP match_extend_back_loop_encodeBlockAsm10B + +match_extend_back_end_encodeBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm10B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm10B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm10B + JB three_bytes_match_emit_encodeBlockAsm10B + +three_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm10B + +two_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm10B + JMP memmove_long_match_emit_encodeBlockAsm10B + +one_byte_match_emit_encodeBlockAsm10B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP 
memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm10B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm10B + +memmove_long_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm10B: +match_nolit_loop_encodeBlockAsm10B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeBlockAsm10B: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm10B + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B + +matchlen_bsf_16match_nolit_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_match8_match_nolit_encodeBlockAsm10B: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm10B + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_match4_match_nolit_encodeBlockAsm10B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm10B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm10B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm10B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm10B + JB match_nolit_end_encodeBlockAsm10B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm10B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm10B + 
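+	// matchLen compares 16 bytes per iteration with two 8-byte XORs; on a
+	// mismatch TZCNT (or BSF when GOAMD64_v3 is not set) locates the first
+	// differing bit and SARQ $3 converts it to a byte count. The 4-, 2-
+	// and 1-byte tail steps below finish remainders shorter than 8 bytes.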
+matchlen_match1_match_nolit_encodeBlockAsm10B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm10B + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm10B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm10B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +long_offset_short_match_nolit_encodeBlockAsm10B: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBlockAsm10B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI 
+ MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +emit_copy_three_match_nolit_encodeBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm10B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x36, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x36, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm10B + INCL CX + JMP search_loop_encodeBlockAsm10B + +emit_remainder_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm10B + JB three_bytes_emit_remainder_encodeBlockAsm10B + +three_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +two_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm10B + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +one_byte_emit_remainder_encodeBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + 
JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm10B + +memmove_long_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm8B + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm8B + +repeat_extend_back_loop_encodeBlockAsm8B: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm8B + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm8B + +repeat_extend_back_end_encodeBlockAsm8B: + MOVL SI, BX + 
SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeBlockAsm8B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm8B + JB three_bytes_repeat_emit_encodeBlockAsm8B + +three_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +two_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm8B + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +one_byte_repeat_emit_encodeBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm8B + +memmove_long_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm8B: + ADDL $0x05, CX + MOVL CX, BX + 
SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm8B: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm8B + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B + XORQ 8(BX)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B + +matchlen_bsf_16repeat_extend_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match8_repeat_extend_encodeBlockAsm8B: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm8B + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeBlockAsm8B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match4_repeat_extend_encodeBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm8B + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm8B + JB repeat_extend_forward_end_encodeBlockAsm8B + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match1_repeat_extend_encodeBlockAsm8B: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm8B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm8B: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm8B + + // emitRepeat + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm8B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm8B + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_match_repeat_encodeBlockAsm8B: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_match_repeat_encodeBlockAsm8B: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_as_copy_encodeBlockAsm8B: + // emitCopy + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP 
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +long_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm8B + +no_repeat_found_encodeBlockAsm8B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm8B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm8B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm8B + +candidate3_match_encodeBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm8B + +candidate2_match_encodeBlockAsm8B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm8B + +match_extend_back_loop_encodeBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm8B + MOVB 
-1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm8B + JMP match_extend_back_loop_encodeBlockAsm8B + +match_extend_back_end_encodeBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm8B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm8B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm8B + JB three_bytes_match_emit_encodeBlockAsm8B + +three_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm8B + +two_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm8B + JMP memmove_long_match_emit_encodeBlockAsm8B + +one_byte_match_emit_encodeBlockAsm8B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm8B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm8B + +memmove_long_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE 
emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm8B: +match_nolit_loop_encodeBlockAsm8B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeBlockAsm8B: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm8B + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B + +matchlen_bsf_16match_nolit_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_match8_match_nolit_encodeBlockAsm8B: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm8B + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_match4_match_nolit_encodeBlockAsm8B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm8B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm8B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm8B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm8B + JB match_nolit_end_encodeBlockAsm8B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm8B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm8B + +matchlen_match1_match_nolit_encodeBlockAsm8B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm8B + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm8B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm8B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + MOVL R9, BX + LEAL -4(R9), R9 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + 
SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +long_offset_short_match_nolit_encodeBlockAsm8B: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, BX + LEAL -4(R9), R9 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBlockAsm8B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm8B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +emit_copy_three_match_nolit_encodeBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm8B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x38, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x38, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm8B + INCL CX + JMP search_loop_encodeBlockAsm8B + +emit_remainder_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm8B + JB three_bytes_emit_remainder_encodeBlockAsm8B + +three_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +two_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm8B + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +one_byte_emit_remainder_encodeBlockAsm8B: + SHLB 
$0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm8B + +memmove_long_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm(SB), $589848-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00001200, CX + LEAQ 24(SP), DX + PXOR X0, X0 
+ +zero_loop_encodeBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 + JBE check_maxskip_ok_encodeBetterBlockAsm + LEAL 100(CX), BX + JMP check_maxskip_cont_encodeBetterBlockAsm + +check_maxskip_ok_encodeBetterBlockAsm: + LEAL 1(CX)(BX*1), BX + +check_maxskip_cont_encodeBetterBlockAsm: + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm + +no_short_found_encodeBetterBlockAsm: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm + +candidateS_match_encodeBetterBlockAsm: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm + +match_extend_back_loop_encodeBetterBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm + JMP match_extend_back_loop_encodeBetterBlockAsm + +match_extend_back_end_encodeBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_match8_match_nolit_encodeBetterBlockAsm: + CMPL DI, $0x08 + JB 
matchlen_match4_match_nolit_encodeBetterBlockAsm + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_match4_match_nolit_encodeBetterBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm + JB match_nolit_end_encodeBetterBlockAsm + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm + +matchlen_match1_match_nolit_encodeBetterBlockAsm: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm + CMPL R11, $0x01 + JA match_length_ok_encodeBetterBlockAsm + CMPL DI, $0x0000ffff + JBE match_length_ok_encodeBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm + +match_length_ok_encodeBetterBlockAsm: + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_match_emit_encodeBetterBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_match_emit_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +four_bytes_match_emit_encodeBetterBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +three_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +two_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm + JMP memmove_long_match_emit_encodeBetterBlockAsm + +one_byte_match_emit_encodeBetterBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + 
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm + +memmove_long_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBetterBlockAsm + CMPL R11, $0x40 + JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(R11), R11 + ADDQ $0x05, AX + CMPL R11, $0x04 + JB four_bytes_remain_match_nolit_encodeBetterBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW 
$0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +four_bytes_remain_match_nolit_encodeBetterBlockAsm: + TESTL R11, R11 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_match_nolit_encodeBetterBlockAsm: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + MOVL DI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + XORQ BX, BX + LEAL 
1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +long_offset_short_match_nolit_encodeBetterBlockAsm: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +emit_copy_three_match_nolit_encodeBetterBlockAsm: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +match_is_repeat_encodeBetterBlockAsm: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_match_emit_repeat_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + 
+four_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +three_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +two_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +one_byte_match_emit_repeat_encodeBetterBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + +memmove_long_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat +emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm + +repeat_five_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x2f, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm + +emit_remainder_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + 
+emit_remainder_ok_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +four_bytes_emit_remainder_encodeBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +three_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +two_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +one_byte_emit_remainder_encodeBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm + +memmove_long_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // 
genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00001200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm4MB: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 + JBE check_maxskip_ok_encodeBetterBlockAsm4MB + LEAL 100(CX), BX + JMP check_maxskip_cont_encodeBetterBlockAsm4MB + +check_maxskip_ok_encodeBetterBlockAsm4MB: + LEAL 1(CX)(BX*1), BX + +check_maxskip_cont_encodeBetterBlockAsm4MB: + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm4MB + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm4MB + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm4MB + +no_short_found_encodeBetterBlockAsm4MB: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB + +candidateS_match_encodeBetterBlockAsm4MB: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm4MB + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm4MB: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm4MB + +match_extend_back_loop_encodeBetterBlockAsm4MB: + CMPL 
CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm4MB + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm4MB + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm4MB + JMP match_extend_back_loop_encodeBetterBlockAsm4MB + +match_extend_back_end_encodeBetterBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 4(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm4MB: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match8_match_nolit_encodeBetterBlockAsm4MB: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match4_match_nolit_encodeBetterBlockAsm4MB: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm4MB: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + JB match_nolit_end_encodeBetterBlockAsm4MB + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match1_match_nolit_encodeBetterBlockAsm4MB: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm4MB + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm4MB: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm4MB + CMPL R11, $0x01 + JA match_length_ok_encodeBetterBlockAsm4MB + CMPL DI, $0x0000ffff + JBE match_length_ok_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm4MB + +match_length_ok_encodeBetterBlockAsm4MB: + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm4MB + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm4MB + CMPL BX, $0x00010000 + JB 
three_bytes_match_emit_encodeBetterBlockAsm4MB + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +three_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +two_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +one_byte_match_emit_encodeBetterBlockAsm4MB: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB + +memmove_long_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 
16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm4MB: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + CMPL R11, $0x40 + JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(R11), R11 + ADDQ $0x05, AX + CMPL R11, $0x04 + JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: + TESTL R11, R11 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL 
$0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +long_offset_short_match_nolit_encodeBetterBlockAsm4MB: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +match_is_repeat_encodeBetterBlockAsm4MB: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, 
R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL BX, $0x00010000 + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + +memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA 
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm4MB + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm4MB: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm4MB: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm4MB + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x2f, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm4MB + +emit_remainder_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, 
ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +three_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +two_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +one_byte_emit_remainder_encodeBetterBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + +memmove_long_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI 
+ SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 65560(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 65560(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm12B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm12B + +no_short_found_encodeBetterBlockAsm12B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B + +candidateS_match_encodeBetterBlockAsm12B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm12B + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm12B + +match_extend_back_loop_encodeBetterBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm12B + JMP match_extend_back_loop_encodeBetterBlockAsm12B + 
+match_extend_back_end_encodeBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm12B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match8_match_nolit_encodeBetterBlockAsm12B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeBetterBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + JB match_nolit_end_encodeBetterBlockAsm12B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeBetterBlockAsm12B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm12B + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm12B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm12B + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm12B + JB three_bytes_match_emit_encodeBetterBlockAsm12B + +three_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +two_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +one_byte_match_emit_encodeBetterBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + 
CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B + +memmove_long_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm12B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL BX, 
$0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +long_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeBetterBlockAsm12B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +match_is_repeat_encodeBetterBlockAsm12B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B + CMPL BX, $0x00000100 + JB 
two_bytes_match_emit_repeat_encodeBetterBlockAsm12B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +one_byte_match_emit_repeat_encodeBetterBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE 
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm12B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x34, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 65560(SP)(R10*4) + MOVL R13, 65560(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm12B: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm12B + +emit_remainder_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeBetterBlockAsm12B + +three_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + 
+two_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +one_byte_emit_remainder_encodeBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + +memmove_long_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE 
emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 16408(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 16408(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B + +candidateS_match_encodeBetterBlockAsm10B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm10B + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm10B + +match_extend_back_loop_encodeBetterBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm10B + JMP match_extend_back_loop_encodeBetterBlockAsm10B + +match_extend_back_end_encodeBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm10B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B + LEAL -16(DI), DI + LEAL 
16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match8_match_nolit_encodeBetterBlockAsm10B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeBetterBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + JB match_nolit_end_encodeBetterBlockAsm10B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeBetterBlockAsm10B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm10B + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm10B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm10B + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm10B + JB three_bytes_match_emit_encodeBetterBlockAsm10B + +three_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +two_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +one_byte_match_emit_encodeBetterBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + 
JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B + +memmove_long_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm10B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R11 + 
ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +long_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeBetterBlockAsm10B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +match_is_repeat_encodeBetterBlockAsm10B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +one_byte_match_emit_repeat_encodeBetterBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + CMPL R11, $0x00000104 + JB 
repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x36, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 16408(SP)(R10*4) + MOVL R13, 16408(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm10B: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm10B + +emit_remainder_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeBetterBlockAsm10B + +three_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +two_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +one_byte_emit_remainder_encodeBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ 
BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + +memmove_long_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ 
zero_loop_encodeBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 4120(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 4120(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B + +candidateS_match_encodeBetterBlockAsm8B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm8B + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm8B + +match_extend_back_loop_encodeBetterBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm8B + JMP match_extend_back_loop_encodeBetterBlockAsm8B + +match_extend_back_end_encodeBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm8B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match8_match_nolit_encodeBetterBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 
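+ // TZCNT/BSF above found the first differing bit of the XOR-ed quadwords; SARQ by 3 converted bits to whole matched bytes, which were just added to the match length in R11.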
+ JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeBetterBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + JB match_nolit_end_encodeBetterBlockAsm8B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeBetterBlockAsm8B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm8B + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm8B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm8B + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm8B + JB three_bytes_match_emit_encodeBetterBlockAsm8B + +three_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +two_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +one_byte_match_emit_encodeBetterBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm8B: + MOVQ BX, AX + JMP 
emit_literal_done_match_emit_encodeBetterBlockAsm8B + +memmove_long_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm8B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +long_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeBetterBlockAsm8B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +match_is_repeat_encodeBetterBlockAsm8B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +one_byte_match_emit_repeat_encodeBetterBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ DI, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ DI, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R8), R9 + MOVL R9, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R8), R9 + MOVL -4(R8)(DI*1), R8 + MOVL R9, (AX) + MOVL R8, -4(AX)(DI*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R12 + SUBQ R9, R12 + DECQ R10 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R12*1), R9 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R9 + ADDQ $0x20, R12 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R12*1), X4 + MOVOU -16(R8)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ DI, R12 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x38, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x38, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 4120(SP)(R10*4) + MOVL R13, 
4120(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm8B: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm8B + +emit_remainder_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeBetterBlockAsm8B + +three_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +two_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +one_byte_emit_remainder_encodeBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU 
X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + +memmove_long_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm + +repeat_extend_back_loop_encodeSnappyBlockAsm: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm + +repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 5(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, 
ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_repeat_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +four_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVL BX, R9 + SHRL $0x10, R9 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R9, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +three_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +two_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +one_byte_repeat_emit_encodeSnappyBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm + +memmove_long_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU 
-32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + JB repeat_extend_forward_end_encodeSnappyBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: + TESTL BX, BX + JZ repeat_end_emit_encodeSnappyBlockAsm + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + 
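+ // 0xee is a three-byte Snappy copy-2 tag encoding a 60-byte copy; copies longer than 64 bytes loop here, peeling off 60 bytes per chunk.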
ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm + +no_repeat_found_encodeSnappyBlockAsm: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm + +candidate3_match_encodeSnappyBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm + +candidate2_match_encodeSnappyBlockAsm: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm + +match_extend_back_loop_encodeSnappyBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm + JMP match_extend_back_loop_encodeSnappyBlockAsm + +match_extend_back_end_encodeSnappyBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm + CMPL DI, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBlockAsm + CMPL DI, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +four_bytes_match_emit_encodeSnappyBlockAsm: + MOVL DI, R9 + SHRL $0x10, R9 + MOVB $0xf8, (AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +three_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +two_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +one_byte_match_emit_encodeSnappyBlockAsm: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm + +memmove_long_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm: +match_nolit_loop_encodeSnappyBlockAsm: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_match8_match_nolit_encodeSnappyBlockAsm: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm: +#ifdef 
GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBlockAsm: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm + JB match_nolit_end_encodeSnappyBlockAsm + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBlockAsm: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: + CMPL R9, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL BX, 1(AX) + LEAL -64(R9), R9 + ADDQ $0x05, AX + CMPL R9, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBlockAsm: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBlockAsm: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBlockAsm: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm + INCL CX + JMP search_loop_encodeSnappyBlockAsm + +emit_remainder_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ 
(DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +four_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +three_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +two_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +one_byte_emit_remainder_encodeSnappyBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm + +memmove_long_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + 
SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm64K: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm64K + +repeat_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K + +repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm64K: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm64K + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K + JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K + 
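+ // Snappy literal tags: a literal run of length n is stored as n-1. Values
+ // below 60 fit in the tag byte itself (one_byte case), values below 256 use
+ // tag 60 (0xf0) plus one length byte, and this 64K variant never needs more
+ // than tag 61 (0xf4) plus two length bytes, handled just below.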
+three_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +two_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm64K + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +one_byte_repeat_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + +memmove_long_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ 
matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + JB repeat_extend_forward_end_encodeSnappyBlockAsm64K + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm64K: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm64K + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm64K: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm64K + +no_repeat_found_encodeSnappyBlockAsm64K: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm64K + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm64K + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm64K + +candidate3_match_encodeSnappyBlockAsm64K: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm64K + +candidate2_match_encodeSnappyBlockAsm64K: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + 
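+ // candidate_match: a hash-table candidate compared equal; first extend the
+ // match backwards over any equal preceding bytes, then bounds-check dst and
+ // emit the pending literals before encoding the copy itself.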
+candidate_match_encodeSnappyBlockAsm64K: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm64K + +match_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBlockAsm64K + +match_extend_back_end_encodeSnappyBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm64K: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm64K + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBlockAsm64K + +three_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +two_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +one_byte_match_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K + +memmove_long_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + 
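+ // genMemMoveLong: X0-X3 already hold the first and last 32 bytes of the
+ // source; the loops below copy the interior in 32-byte SSE blocks, and the
+ // four registers are stored unconditionally at the end to cover both edges.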
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm64K: +match_nolit_loop_encodeSnappyBlockAsm64K: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match8_match_nolit_encodeSnappyBlockAsm64K: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + JB match_nolit_end_encodeSnappyBlockAsm64K + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm64K + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm64K: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + CMPL BX, 
$0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm64K: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm64K + INCL CX + JMP search_loop_encodeSnappyBlockAsm64K + +emit_remainder_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + 
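+ // Each short-move bucket copies an overlapping head and tail, so one pair of
+ // loads and stores covers every length in the bucket without a byte loop.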
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x18, R10 + IMULQ R8, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + 
CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm12B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm12B + +repeat_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B + +repeat_extend_back_end_encodeSnappyBlockAsm12B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm12B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B + +three_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +two_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm12B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +one_byte_repeat_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + +memmove_long_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + 
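+ // R11 holds the byte count up to the next 32-byte destination boundary
+ // (derived from the misalignment of AX above), so the MOVOA stores in the
+ // loops below are always 32-byte aligned.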
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + JB repeat_extend_forward_end_encodeSnappyBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm12B: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE 
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm12B + +no_repeat_found_encodeSnappyBlockAsm12B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm12B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm12B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm12B + +candidate3_match_encodeSnappyBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm12B + +candidate2_match_encodeSnappyBlockAsm12B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm12B + +match_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBlockAsm12B + +match_extend_back_end_encodeSnappyBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm12B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm12B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBlockAsm12B + +three_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +two_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +one_byte_match_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP 
memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B + +memmove_long_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm12B: +match_nolit_loop_encodeSnappyBlockAsm12B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm12B: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm12B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + LEAL -4(SI), 
SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm12B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + JB match_nolit_end_encodeSnappyBlockAsm12B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm12B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm12B + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm12B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm12B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x18, DI + IMULQ R8, DI + SHRQ $0x34, DI + SHLQ $0x18, BX + IMULQ R8, BX + SHRQ $0x34, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm12B + INCL CX + JMP search_loop_encodeSnappyBlockAsm12B + +emit_remainder_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2 + JE 
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm10B: + MOVOU X0, (DX) + 
MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm10B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm10B + +repeat_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B + +repeat_extend_back_end_encodeSnappyBlockAsm10B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm10B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B + +three_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +two_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm10B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +one_byte_repeat_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP 
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + +memmove_long_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + LEAL 
-4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + JB repeat_extend_forward_end_encodeSnappyBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm10B: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm10B + +no_repeat_found_encodeSnappyBlockAsm10B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm10B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm10B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm10B + +candidate3_match_encodeSnappyBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm10B + +candidate2_match_encodeSnappyBlockAsm10B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm10B + +match_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBlockAsm10B + +match_extend_back_end_encodeSnappyBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm10B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm10B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBlockAsm10B + +three_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + 
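
The one_byte/two_bytes/three_bytes branches here encode the Snappy literal header: a literal of length n stores n-1 directly in the tag byte when n-1 < 60, and otherwise spills n-1 into one or two trailing bytes behind the 0xf0 (60<<2) or 0xf4 (61<<2) tags. A minimal Go sketch of that header logic (the function name is illustrative, not from this file; the capped 8B/10B encoders never need more than the 3-byte form):

    // emitLiteralHeader writes the Snappy literal tag for n literal bytes
    // (n >= 1) and returns the header size, mirroring the branch structure
    // of the assembly above.
    func emitLiteralHeader(dst []byte, n int) int {
        switch v := n - 1; {
        case v < 60:
            dst[0] = uint8(v) << 2 // length fits in the tag byte
            return 1
        case v < 1<<8:
            dst[0] = 0xf0 // 60<<2: one extra length byte
            dst[1] = uint8(v)
            return 2
        default: // v < 1<<16 for these capped block sizes
            dst[0] = 0xf4 // 61<<2: two extra length bytes
            binary.LittleEndian.PutUint16(dst[1:], uint16(v))
            return 3
        }
    }
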
+two_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +one_byte_match_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B + +memmove_long_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm10B: +match_nolit_loop_encodeSnappyBlockAsm10B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B + XORQ 8(BX)(R9*1), R10 + JNZ 
matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm10B: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm10B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm10B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + JB match_nolit_end_encodeSnappyBlockAsm10B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm10B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm10B + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm10B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm10B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm10B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x36, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x36, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm10B + INCL CX + JMP search_loop_encodeSnappyBlockAsm10B + +emit_remainder_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm10B: + MOVQ 
src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + 
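
This genMemMoveLong block (like the genMemMoveShort blocks elsewhere) copies literals without a scalar remainder loop: it captures the first and last 32 bytes of the source in XMM registers up front, streams the middle in 32-byte chunks, and stores the captured edges last. A rough Go equivalent of the idea, assuming non-overlapping buffers and an illustrative name:

    // moveLong copies len(src) > 64 bytes: head and tail are saved first,
    // the body is streamed in 32-byte chunks, and the unaligned edges are
    // written at the end, as the MOVOU/MOVOA sequence here does.
    func moveLong(dst, src []byte) {
        n := len(src)
        var head, tail [32]byte
        copy(head[:], src[:32])
        copy(tail[:], src[n-32:])
        for i := 32; i < n-32; i += 32 {
            copy(dst[i:], src[i:i+32]) // 32-byte body loop
        }
        copy(dst[:32], head[:])
        copy(dst[n-32:], tail[:])
    }
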
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm8B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm8B + +repeat_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B + +repeat_extend_back_end_encodeSnappyBlockAsm8B: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm8B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B + +three_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +two_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB 
$0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm8B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +one_byte_repeat_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + +memmove_long_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B + 
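
The matchlen_* blocks extend a candidate match 16 and 8 bytes at a time: XOR the two words and, on a mismatch, convert the trailing-zero count of the difference (TZCNT on GOAMD64_v3, BSF otherwise) into a byte count. A compact Go sketch of the same kernel (illustrative name; assumes len(b) >= len(a)):

    import (
        "encoding/binary"
        "math/bits"
    )

    // matchLen returns the number of leading bytes a and b share.
    func matchLen(a, b []byte) int {
        n := 0
        for len(a)-n >= 8 {
            x := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
            if x != 0 {
                // first differing bit / 8 = first differing byte
                return n + bits.TrailingZeros64(x)>>3
            }
            n += 8
        }
        for n < len(a) && a[n] == b[n] {
            n++
        }
        return n
    }
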
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + JB repeat_extend_forward_end_encodeSnappyBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm8B: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm8B + +no_repeat_found_encodeSnappyBlockAsm8B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm8B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm8B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm8B + +candidate3_match_encodeSnappyBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm8B + +candidate2_match_encodeSnappyBlockAsm8B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm8B + +match_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(CX), CX + DECL BX 
+ JZ match_extend_back_end_encodeSnappyBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBlockAsm8B + +match_extend_back_end_encodeSnappyBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm8B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm8B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBlockAsm8B + +three_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +two_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +one_byte_match_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm8B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B + +memmove_long_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + 
MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm8B: +match_nolit_loop_encodeSnappyBlockAsm8B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm8B: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm8B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm8B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + JB match_nolit_end_encodeSnappyBlockAsm8B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm8B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm8B + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm8B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm8B + MOVQ 
$0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm8B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x38, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x38, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm8B + INCL CX + JMP search_loop_encodeSnappyBlockAsm8B + +emit_remainder_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + 
MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00001200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 + JBE check_maxskip_ok_encodeSnappyBetterBlockAsm + LEAL 100(CX), BX + JMP check_maxskip_cont_encodeSnappyBetterBlockAsm + +check_maxskip_ok_encodeSnappyBetterBlockAsm: + LEAL 1(CX)(BX*1), BX + +check_maxskip_cont_encodeSnappyBetterBlockAsm: + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm + +candidateS_match_encodeSnappyBetterBlockAsm: + SHRQ $0x08, SI + MOVQ SI, R9 
+ SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + +match_extend_back_loop_encodeSnappyBetterBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm + +match_extend_back_end_encodeSnappyBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + JB match_nolit_end_encodeSnappyBetterBlockAsm + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL R11, $0x01 + JA match_length_ok_encodeSnappyBetterBlockAsm + CMPL DI, $0x0000ffff + JBE match_length_ok_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeSnappyBetterBlockAsm + 
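
The emitCopy blocks that follow serialize a match as Snappy copy elements: lengths above 64 are peeled off as 3-byte, length-60 copies (tag byte 0xee), and the remainder uses the 2-byte form only when the length is below 12 and the offset below 2048. This full-size Better variant additionally emits the 5-byte form (tag 11, 0xff for length-64 chunks, 32-bit offset) once the offset reaches 64K, which the 64K/10B/8B variants never need. A hedged Go sketch of the 2/3-byte paths (illustrative name):

    // emitCopy writes Snappy copy elements for a match at `offset`
    // spanning `length` bytes (length >= 4, offset < 65536) and returns
    // the number of bytes written.
    func emitCopy(dst []byte, offset, length int) int {
        i := 0
        for length > 64 { // same loop shape as two_byte_offset_*: peel off 60
            dst[i] = 59<<2 | 2 // 0xee: 3-byte copy of length 60
            binary.LittleEndian.PutUint16(dst[i+1:], uint16(offset))
            i += 3
            length -= 60
        }
        if length >= 12 || offset >= 2048 {
            dst[i] = uint8(length-1)<<2 | 2 // 3-byte copy, 16-bit offset
            binary.LittleEndian.PutUint16(dst[i+1:], uint16(offset))
            return i + 3
        }
        // 2-byte copy: 3 high offset bits, length-4 in 3 bits, tag 01
        dst[i] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | 1
        dst[i+1] = uint8(offset)
        return i + 2
    }
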
+match_length_ok_encodeSnappyBetterBlockAsm: + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +four_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +three_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +two_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +one_byte_match_emit_encodeSnappyBetterBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + +memmove_long_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA 
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R11, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(R11), R11 + ADDQ $0x05, AX + CMPL R11, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R11, R11 + JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x2f, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm + +emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 
5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +four_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), 
X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm64K: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x30, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 262168(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 262168(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K + +candidateS_match_encodeSnappyBetterBlockAsm64K: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x30, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ 
candidate_match_encodeSnappyBetterBlockAsm64K + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm64K: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + +match_extend_back_loop_encodeSnappyBetterBlockAsm64K: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K + +match_extend_back_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm64K: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + JB match_nolit_end_encodeSnappyBetterBlockAsm64K + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm64K + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB 
one_byte_match_emit_encodeSnappyBetterBlockAsm64K + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K + +three_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +one_byte_match_emit_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + +memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 
12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x30, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x30, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 262168(SP)(R10*4) + MOVL R13, 262168(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x30, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x30, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm64K + +emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB 
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func 
encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 65560(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 65560(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm12B + +no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B + +candidateS_match_encodeSnappyBetterBlockAsm12B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + +match_extend_back_loop_encodeSnappyBetterBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B + +match_extend_back_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm12B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B: +#ifdef 
GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + JB match_nolit_end_encodeSnappyBetterBlockAsm12B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm12B + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +one_byte_match_emit_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + 
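+ // Short literal copies (1-64 bytes) are dispatched by size class above and below, using overlapping unaligned loads/stores instead of calling runtime.memmove.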
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x34, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 
24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 65560(SP)(R10*4) + MOVL R13, 65560(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm12B + +emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU 
-16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 16408(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 16408(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm10B + +no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R10, SI + JEQ 
candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B + +candidateS_match_encodeSnappyBetterBlockAsm10B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + +match_extend_back_loop_encodeSnappyBetterBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B + +match_extend_back_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm10B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + JB match_nolit_end_encodeSnappyBetterBlockAsm10B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm10B + LEAL 1(R11), R11 + 
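+ // matchLen above XORs 16- and 8-byte chunks of the two windows; the first non-zero result is resolved with TZCNT (BSF before GOAMD64_v3), and SARQ $0x03 turns the index of the first differing bit into a byte count. 4/2/1-byte tails are compared directly.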
+match_nolit_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +one_byte_match_emit_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, 
R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x36, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 16408(SP)(R10*4) + MOVL R13, 16408(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm10B + +emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B + 
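+ // Literal tag encoding: lengths below 60 are inlined in the tag byte (n<<2); tag 0xf0 adds one length byte and 0xf4 two. These bounded-input variants never need the wider 3- or 4-byte length forms.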
JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE 
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 4120(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 4120(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B + +candidateS_match_encodeSnappyBetterBlockAsm8B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + +match_extend_back_loop_encodeSnappyBetterBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B + +match_extend_back_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm8B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B + MOVQ (R8)(R11*1), R10 + MOVQ 8(R8)(R11*1), R12 + XORQ (R9)(R11*1), R10 + JNZ 
matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B + XORQ 8(R9)(R11*1), R12 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R11), R11 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R11), R11 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + JB match_nolit_end_encodeSnappyBetterBlockAsm8B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm8B + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +one_byte_match_emit_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP 
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + 
IMULQ DI, R10 + SHRQ $0x38, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x38, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 4120(SP)(R10*4) + MOVL R13, 4120(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm8B + +emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP 
memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func calcBlockSize(src []byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSize(SB), $32792-32 + XORQ AX, AX + MOVQ $0x00000100, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_calcBlockSize: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_calcBlockSize + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+0(FP), DX + +search_loop_calcBlockSize: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x33, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x33, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x33, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_calcBlockSize + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_calcBlockSize + 
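+ // calcBlockSize runs the same search and repeat/copy state machine as the encoders but only accumulates the encoded size in AX (ADDQ immediates in place of stores), so the emit bodies below reduce to pointer arithmetic.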
+repeat_extend_back_loop_calcBlockSize: + CMPL SI, BX + JBE repeat_extend_back_end_calcBlockSize + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_calcBlockSize + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_calcBlockSize + +repeat_extend_back_end_calcBlockSize: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 5(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +repeat_dst_size_check_calcBlockSize: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_calcBlockSize + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_calcBlockSize + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSize + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_calcBlockSize + CMPL BX, $0x01000000 + JB four_bytes_repeat_emit_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_calcBlockSize + +four_bytes_repeat_emit_calcBlockSize: + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_calcBlockSize + +three_bytes_repeat_emit_calcBlockSize: + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_calcBlockSize + +two_bytes_repeat_emit_calcBlockSize: + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_calcBlockSize + JMP memmove_long_repeat_emit_calcBlockSize + +one_byte_repeat_emit_calcBlockSize: + ADDQ $0x01, AX + +memmove_repeat_emit_calcBlockSize: + LEAQ (AX)(DI*1), AX + JMP emit_literal_done_repeat_emit_calcBlockSize + +memmove_long_repeat_emit_calcBlockSize: + LEAQ (AX)(DI*1), AX + +emit_literal_done_repeat_emit_calcBlockSize: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+8(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_calcBlockSize: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_calcBlockSize + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSize + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_calcBlockSize + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_calcBlockSize + +matchlen_bsf_16repeat_extend_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_match8_repeat_extend_calcBlockSize: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSize + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSize + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_calcBlockSize + +matchlen_bsf_8_repeat_extend_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_match4_repeat_extend_calcBlockSize: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSize + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_calcBlockSize + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_calcBlockSize: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSize + JB repeat_extend_forward_end_calcBlockSize + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_calcBlockSize + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_calcBlockSize + +matchlen_match1_repeat_extend_calcBlockSize: + MOVB 
(R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_calcBlockSize + LEAL 1(R10), R10 + +repeat_extend_forward_end_calcBlockSize: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_calcBlockSize + +four_bytes_loop_back_repeat_as_copy_calcBlockSize: + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_calcBlockSize + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_calcBlockSize + JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize + +four_bytes_remain_repeat_as_copy_calcBlockSize: + TESTL BX, BX + JZ repeat_end_emit_calcBlockSize + XORL BX, BX + ADDQ $0x05, AX + JMP repeat_end_emit_calcBlockSize + +two_byte_offset_repeat_as_copy_calcBlockSize: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSize + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_calcBlockSize + +two_byte_offset_short_repeat_as_copy_calcBlockSize: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSize + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_calcBlockSize + ADDQ $0x02, AX + JMP repeat_end_emit_calcBlockSize + +emit_copy_three_repeat_as_copy_calcBlockSize: + ADDQ $0x03, AX + +repeat_end_emit_calcBlockSize: + MOVL CX, 12(SP) + JMP search_loop_calcBlockSize + +no_repeat_found_calcBlockSize: + CMPL (DX)(BX*1), SI + JEQ candidate_match_calcBlockSize + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_calcBlockSize + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_calcBlockSize + MOVL 20(SP), CX + JMP search_loop_calcBlockSize + +candidate3_match_calcBlockSize: + ADDL $0x02, CX + JMP candidate_match_calcBlockSize + +candidate2_match_calcBlockSize: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_calcBlockSize: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_calcBlockSize + +match_extend_back_loop_calcBlockSize: + CMPL CX, SI + JBE match_extend_back_end_calcBlockSize + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_calcBlockSize + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_calcBlockSize + JMP match_extend_back_loop_calcBlockSize + +match_extend_back_end_calcBlockSize: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +match_dst_size_check_calcBlockSize: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_calcBlockSize + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_match_emit_calcBlockSize + CMPL SI, $0x00000100 + JB two_bytes_match_emit_calcBlockSize + CMPL SI, $0x00010000 + JB three_bytes_match_emit_calcBlockSize + CMPL SI, $0x01000000 + JB four_bytes_match_emit_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_match_emit_calcBlockSize + +four_bytes_match_emit_calcBlockSize: + ADDQ $0x04, AX + JMP memmove_long_match_emit_calcBlockSize + +three_bytes_match_emit_calcBlockSize: + ADDQ $0x03, AX + JMP memmove_long_match_emit_calcBlockSize + +two_bytes_match_emit_calcBlockSize: + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_match_emit_calcBlockSize + JMP memmove_long_match_emit_calcBlockSize + +one_byte_match_emit_calcBlockSize: + ADDQ $0x01, AX + +memmove_match_emit_calcBlockSize: + LEAQ (AX)(R8*1), AX + JMP 
emit_literal_done_match_emit_calcBlockSize + +memmove_long_match_emit_calcBlockSize: + LEAQ (AX)(R8*1), AX + +emit_literal_done_match_emit_calcBlockSize: +match_nolit_loop_calcBlockSize: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+8(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_calcBlockSize: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_calcBlockSize + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_calcBlockSize + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_calcBlockSize + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_calcBlockSize + +matchlen_bsf_16match_nolit_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_calcBlockSize + +matchlen_match8_match_nolit_calcBlockSize: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSize + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_calcBlockSize + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_calcBlockSize + +matchlen_bsf_8_match_nolit_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_calcBlockSize + +matchlen_match4_match_nolit_calcBlockSize: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSize + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_calcBlockSize + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_calcBlockSize: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSize + JB match_nolit_end_calcBlockSize + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_calcBlockSize + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_calcBlockSize + +matchlen_match1_match_nolit_calcBlockSize: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_calcBlockSize + LEAL 1(R9), R9 + +match_nolit_end_calcBlockSize: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JB two_byte_offset_match_nolit_calcBlockSize + +four_bytes_loop_back_match_nolit_calcBlockSize: + CMPL R9, $0x40 + JBE four_bytes_remain_match_nolit_calcBlockSize + LEAL -64(R9), R9 + ADDQ $0x05, AX + CMPL R9, $0x04 + JB four_bytes_remain_match_nolit_calcBlockSize + JMP four_bytes_loop_back_match_nolit_calcBlockSize + +four_bytes_remain_match_nolit_calcBlockSize: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_calcBlockSize + XORL BX, BX + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_calcBlockSize + +two_byte_offset_match_nolit_calcBlockSize: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSize + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_calcBlockSize + +two_byte_offset_short_match_nolit_calcBlockSize: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSize + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_calcBlockSize + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_calcBlockSize + +emit_copy_three_match_nolit_calcBlockSize: + ADDQ $0x03, AX + +match_nolit_emitcopy_end_calcBlockSize: + CMPL CX, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + 
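+ // Re-index positions cur-2 and cur in the hash table, then probe the candidate for an immediately chained match before resuming the search loop.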
+match_nolit_dst_ok_calcBlockSize: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x33, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x33, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_calcBlockSize + INCL CX + JMP search_loop_calcBlockSize + +emit_remainder_calcBlockSize: + MOVQ src_len+8(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +emit_remainder_ok_calcBlockSize: + MOVQ src_len+8(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_calcBlockSize + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), CX + CMPL CX, $0x3c + JB one_byte_emit_remainder_calcBlockSize + CMPL CX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSize + CMPL CX, $0x00010000 + JB three_bytes_emit_remainder_calcBlockSize + CMPL CX, $0x01000000 + JB four_bytes_emit_remainder_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_calcBlockSize + +four_bytes_emit_remainder_calcBlockSize: + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_calcBlockSize + +three_bytes_emit_remainder_calcBlockSize: + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_calcBlockSize + +two_bytes_emit_remainder_calcBlockSize: + ADDQ $0x02, AX + CMPL CX, $0x40 + JB memmove_emit_remainder_calcBlockSize + JMP memmove_long_emit_remainder_calcBlockSize + +one_byte_emit_remainder_calcBlockSize: + ADDQ $0x01, AX + +memmove_emit_remainder_calcBlockSize: + LEAQ (AX)(SI*1), AX + JMP emit_literal_done_emit_remainder_calcBlockSize + +memmove_long_emit_remainder_calcBlockSize: + LEAQ (AX)(SI*1), AX + +emit_literal_done_emit_remainder_calcBlockSize: + MOVQ AX, ret+24(FP) + RET + +// func calcBlockSizeSmall(src []byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSizeSmall(SB), $2072-32 + XORQ AX, AX + MOVQ $0x00000010, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_calcBlockSizeSmall: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_calcBlockSizeSmall + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+0(FP), DX + +search_loop_calcBlockSizeSmall: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x37, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x37, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x37, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_calcBlockSizeSmall + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_calcBlockSizeSmall + +repeat_extend_back_loop_calcBlockSizeSmall: + CMPL SI, BX + JBE repeat_extend_back_end_calcBlockSizeSmall + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE 
repeat_extend_back_end_calcBlockSizeSmall + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_calcBlockSizeSmall + +repeat_extend_back_end_calcBlockSizeSmall: + MOVL SI, BX + SUBL 12(SP), BX + LEAQ 3(AX)(BX*1), BX + CMPQ BX, (SP) + JB repeat_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +repeat_dst_size_check_calcBlockSizeSmall: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_calcBlockSizeSmall + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSizeSmall + JB three_bytes_repeat_emit_calcBlockSizeSmall + +three_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +two_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_calcBlockSizeSmall + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +one_byte_repeat_emit_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_repeat_emit_calcBlockSizeSmall: + LEAQ (AX)(DI*1), AX + JMP emit_literal_done_repeat_emit_calcBlockSizeSmall + +memmove_long_repeat_emit_calcBlockSizeSmall: + LEAQ (AX)(DI*1), AX + +emit_literal_done_repeat_emit_calcBlockSizeSmall: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+8(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x10 + JB matchlen_match8_repeat_extend_calcBlockSizeSmall + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall + XORQ 8(BX)(R10*1), R11 + JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall + +matchlen_bsf_16repeat_extend_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match8_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSizeSmall + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_repeat_extend_calcBlockSizeSmall + +matchlen_bsf_8_repeat_extend_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match4_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSizeSmall + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_calcBlockSizeSmall + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSizeSmall + JB repeat_extend_forward_end_calcBlockSizeSmall + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_calcBlockSizeSmall + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match1_repeat_extend_calcBlockSizeSmall: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_calcBlockSizeSmall + LEAL 1(R10), R10 + +repeat_extend_forward_end_calcBlockSizeSmall: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + 
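+ // Offsets in calcBlockSizeSmall always fit in two bytes, so only 2- and 3-byte copy encodings are sized here.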
+ // emitCopy +two_byte_offset_repeat_as_copy_calcBlockSizeSmall: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall + +two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall: + MOVL BX, SI + SHLL $0x02, SI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall + ADDQ $0x02, AX + JMP repeat_end_emit_calcBlockSizeSmall + +emit_copy_three_repeat_as_copy_calcBlockSizeSmall: + ADDQ $0x03, AX + +repeat_end_emit_calcBlockSizeSmall: + MOVL CX, 12(SP) + JMP search_loop_calcBlockSizeSmall + +no_repeat_found_calcBlockSizeSmall: + CMPL (DX)(BX*1), SI + JEQ candidate_match_calcBlockSizeSmall + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_calcBlockSizeSmall + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_calcBlockSizeSmall + MOVL 20(SP), CX + JMP search_loop_calcBlockSizeSmall + +candidate3_match_calcBlockSizeSmall: + ADDL $0x02, CX + JMP candidate_match_calcBlockSizeSmall + +candidate2_match_calcBlockSizeSmall: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_calcBlockSizeSmall: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_calcBlockSizeSmall + +match_extend_back_loop_calcBlockSizeSmall: + CMPL CX, SI + JBE match_extend_back_end_calcBlockSizeSmall + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_calcBlockSizeSmall + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_calcBlockSizeSmall + JMP match_extend_back_loop_calcBlockSizeSmall + +match_extend_back_end_calcBlockSizeSmall: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +match_dst_size_check_calcBlockSizeSmall: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_calcBlockSizeSmall + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_match_emit_calcBlockSizeSmall + CMPL SI, $0x00000100 + JB two_bytes_match_emit_calcBlockSizeSmall + JB three_bytes_match_emit_calcBlockSizeSmall + +three_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x03, AX + JMP memmove_long_match_emit_calcBlockSizeSmall + +two_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_match_emit_calcBlockSizeSmall + JMP memmove_long_match_emit_calcBlockSizeSmall + +one_byte_match_emit_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_match_emit_calcBlockSizeSmall: + LEAQ (AX)(R8*1), AX + JMP emit_literal_done_match_emit_calcBlockSizeSmall + +memmove_long_match_emit_calcBlockSizeSmall: + LEAQ (AX)(R8*1), AX + +emit_literal_done_match_emit_calcBlockSizeSmall: +match_nolit_loop_calcBlockSizeSmall: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+8(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + +matchlen_loopback_16_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x10 + JB matchlen_match8_match_nolit_calcBlockSizeSmall + MOVQ (DI)(R9*1), R8 + MOVQ 8(DI)(R9*1), R10 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall + XORQ 8(BX)(R9*1), R10 + JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall + LEAL -16(SI), SI + LEAL 16(R9), R9 + JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall + +matchlen_bsf_16match_nolit_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + 
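+ // GOAMD64 v3 guarantees BMI, so TZCNT is used; the BSF fallback below covers older targets.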
TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL 8(R9)(R10*1), R9 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_match8_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSizeSmall + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall + LEAL -8(SI), SI + LEAL 8(R9), R9 + JMP matchlen_match4_match_nolit_calcBlockSizeSmall + +matchlen_bsf_8_match_nolit_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_match4_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSizeSmall + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_calcBlockSizeSmall + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSizeSmall + JB match_nolit_end_calcBlockSizeSmall + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_calcBlockSizeSmall + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_calcBlockSizeSmall + +matchlen_match1_match_nolit_calcBlockSizeSmall: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_calcBlockSizeSmall + LEAL 1(R9), R9 + +match_nolit_end_calcBlockSizeSmall: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_calcBlockSizeSmall: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_calcBlockSizeSmall + +two_byte_offset_short_match_nolit_calcBlockSizeSmall: + MOVL R9, BX + SHLL $0x02, BX + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSizeSmall + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_calcBlockSizeSmall + +emit_copy_three_match_nolit_calcBlockSizeSmall: + ADDQ $0x03, AX + +match_nolit_emitcopy_end_calcBlockSizeSmall: + CMPL CX, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +match_nolit_dst_ok_calcBlockSizeSmall: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x37, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x37, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_calcBlockSizeSmall + INCL CX + JMP search_loop_calcBlockSizeSmall + +emit_remainder_calcBlockSizeSmall: + MOVQ src_len+8(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +emit_remainder_ok_calcBlockSizeSmall: + MOVQ src_len+8(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), CX + CMPL CX, $0x3c + JB one_byte_emit_remainder_calcBlockSizeSmall + CMPL CX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSizeSmall + JB three_bytes_emit_remainder_calcBlockSizeSmall + +three_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +two_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL CX, $0x40 + JB memmove_emit_remainder_calcBlockSizeSmall + JMP 
memmove_long_emit_remainder_calcBlockSizeSmall + +one_byte_emit_remainder_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_emit_remainder_calcBlockSizeSmall: + LEAQ (AX)(SI*1), AX + JMP emit_literal_done_emit_remainder_calcBlockSizeSmall + +memmove_long_emit_remainder_calcBlockSizeSmall: + LEAQ (AX)(SI*1), AX + +emit_literal_done_emit_remainder_calcBlockSizeSmall: + MOVQ AX, ret+24(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JB one_byte_standalone + CMPL SI, $0x00000100 + JB two_bytes_standalone + CMPL SI, $0x00010000 + JB three_bytes_standalone + CMPL SI, $0x01000000 + JB four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + +four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA 
X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone + +cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JB repeat_three_standalone + CMPL DX, $0x00010100 + JB repeat_four_standalone + CMPL DX, $0x0100ffff + JB repeat_five_standalone + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone + CMPL DX, $0x40 + JBE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ 
$0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone + CMPL CX, $0x00000800 + JAE long_offset_short_standalone + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB CL, 1(AX) + MOVL CX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + SUBL $0x08, DX + + // emitRepeat + LEAL -4(DX), DX + JMP cant_repeat_two_offset_standalone_emit_copy_short_2b + +emit_repeat_again_standalone_emit_copy_short_2b: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short_2b + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short_2b + +cant_repeat_two_offset_standalone_emit_copy_short_2b: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short_2b + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short_2b + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short_2b + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short_2b + +repeat_five_standalone_emit_copy_short_2b: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short_2b: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short_2b: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short_2b: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +long_offset_short_standalone: + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 
+ JB repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +two_byte_offset_short_standalone: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JBE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone_snappy + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + +matchlen_loopback_16_standalone: + CMPL DX, $0x10 + JB matchlen_match8_standalone + MOVQ (AX)(SI*1), BX + MOVQ 8(AX)(SI*1), 
DI + XORQ (CX)(SI*1), BX + JNZ matchlen_bsf_8_standalone + XORQ 8(CX)(SI*1), DI + JNZ matchlen_bsf_16standalone + LEAL -16(DX), DX + LEAL 16(SI), SI + JMP matchlen_loopback_16_standalone + +matchlen_bsf_16standalone: +#ifdef GOAMD64_v3 + TZCNTQ DI, DI + +#else + BSFQ DI, DI + +#endif + SARQ $0x03, DI + LEAL 8(SI)(DI*1), SI + JMP gen_match_len_end + +matchlen_match8_standalone: + CMPL DX, $0x08 + JB matchlen_match4_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JNZ matchlen_bsf_8_standalone + LEAL -8(DX), DX + LEAL 8(SI), SI + JMP matchlen_match4_standalone + +matchlen_bsf_8_standalone: +#ifdef GOAMD64_v3 + TZCNTQ BX, BX + +#else + BSFQ BX, BX + +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x01 + JE matchlen_match1_standalone + JB gen_match_len_end + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL 2(SI), SI + SUBL $0x02, DX + JZ gen_match_len_end + +matchlen_match1_standalone: + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET + +// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + XORQ DI, DI + +lz4_s2_loop: + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ AX, CX + JAE lz4_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4_s2_ll_end + +lz4_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4_s2_ll_loop + +lz4_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x04, R10 + CMPQ R8, BX + JAE lz4_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_s2 + +four_bytes_lz4_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_s2 + +three_bytes_lz4_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_s2 + +two_bytes_lz4_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4_s2 + JMP memmove_long_lz4_s2 + +one_byte_lz4_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4_s2_memmove_move_33through64 + +emit_lit_memmove_lz4_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP 
memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4_s2: + MOVQ R11, AX + JMP lz4_s2_lits_emit_done + +memmove_long_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4_s2large_big_loop_back + +emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4_s2_lits_emit_done: + MOVQ R8, DX + +lz4_s2_lits_done: + CMPQ DX, BX + JNE lz4_s2_match + CMPQ R10, $0x04 + JEQ lz4_s2_done + JMP lz4_s2_corrupt + +lz4_s2_match: + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4_s2_corrupt + CMPQ R9, SI + JA lz4_s2_corrupt + CMPQ R10, $0x13 + JNE lz4_s2_ml_done + +lz4_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ R8, $0xff + JEQ lz4_s2_ml_loop + +lz4_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +lz4_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + 
ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW 
R9, 1(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +lz4_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + XORQ DI, DI + +lz4s_s2_loop: + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ AX, CX + JAE lz4s_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4s_s2_ll_end + +lz4s_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4s_s2_ll_loop + +lz4s_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x03, R10 + CMPQ R8, BX + JAE lz4s_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4s_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4s_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4s_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4s_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4s_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4s_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_s2 + +four_bytes_lz4s_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_s2 + +three_bytes_lz4s_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_s2 + +two_bytes_lz4s_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4s_s2 + JMP memmove_long_lz4s_s2 + +one_byte_lz4s_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64 + +emit_lit_memmove_lz4s_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4s_s2: + MOVQ R11, AX + JMP lz4s_s2_lits_emit_done + +memmove_long_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4s_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 
16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back + +emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4s_s2_lits_emit_done: + MOVQ R8, DX + +lz4s_s2_lits_done: + CMPQ DX, BX + JNE lz4s_s2_match + CMPQ R10, $0x03 + JEQ lz4s_s2_done + JMP lz4s_s2_corrupt + +lz4s_s2_match: + CMPQ R10, $0x03 + JEQ lz4s_s2_loop + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4s_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4s_s2_corrupt + CMPQ R9, SI + JA lz4s_s2_corrupt + CMPQ R10, $0x12 + JNE lz4s_s2_ml_done + +lz4s_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ R8, $0xff + JEQ lz4s_s2_ml_loop + +lz4s_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4s_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +lz4s_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + 
MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +lz4s_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + +lz4_snappy_loop: + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ AX, CX + JAE lz4_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4_snappy_ll_end + 
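+ // Literal-length nibble is 15: accumulate 0xff continuation bytes per the LZ4 block format.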
+lz4_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4_snappy_ll_loop + +lz4_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x04, R9 + CMPQ DI, BX + JAE lz4_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_snappy + +four_bytes_lz4_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_snappy + +three_bytes_lz4_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_snappy + +two_bytes_lz4_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4_snappy + JMP memmove_long_lz4_snappy + +one_byte_lz4_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4_snappy: + MOVQ R10, AX + JMP lz4_snappy_lits_emit_done + +memmove_long_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4_snappy_lits_emit_done: + MOVQ DI, DX + +lz4_snappy_lits_done: + CMPQ DX, BX + JNE lz4_snappy_match + CMPQ R9, $0x04 + JEQ lz4_snappy_done + JMP lz4_snappy_corrupt + +lz4_snappy_match: + LEAQ 2(DX), DI + CMPQ DI, BX + 
JAE lz4_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4_snappy_corrupt + CMPQ R8, SI + JA lz4_snappy_corrupt + CMPQ R9, $0x13 + JNE lz4_snappy_ml_done + +lz4_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4_snappy_ml_loop + +lz4_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4_snappy_loop + +lz4_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + +lz4s_snappy_loop: + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ AX, CX + JAE lz4s_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4s_snappy_ll_end + +lz4s_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4s_snappy_ll_loop + +lz4s_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x03, R9 + CMPQ DI, BX + JAE lz4s_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4s_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4s_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4s_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4s_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4s_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4s_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_snappy + +four_bytes_lz4s_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_snappy + +three_bytes_lz4s_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_snappy + +two_bytes_lz4s_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4s_snappy + JMP memmove_long_lz4s_snappy + +one_byte_lz4s_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4s_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4s_snappy + 
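+	// Lengths 9-16: copy the first and last 8 bytes; the two stores may overlap.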
+emit_lit_memmove_lz4s_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4s_snappy: + MOVQ R10, AX + JMP lz4s_snappy_lits_emit_done + +memmove_long_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4s_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4s_snappy_lits_emit_done: + MOVQ DI, DX + +lz4s_snappy_lits_done: + CMPQ DX, BX + JNE lz4s_snappy_match + CMPQ R9, $0x03 + JEQ lz4s_snappy_done + JMP lz4s_snappy_corrupt + +lz4s_snappy_match: + CMPQ R9, $0x03 + JEQ lz4s_snappy_loop + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4s_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4s_snappy_corrupt + CMPQ R8, SI + JA lz4s_snappy_corrupt + CMPQ R9, $0x12 + JNE lz4s_snappy_ml_done + +lz4s_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4s_snappy_ml_loop + +lz4s_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4s_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4s_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4s_snappy_loop + +lz4s_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go new file mode 100644 index 000000000..4229957b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -0,0 +1,602 @@ +// 
Copyright (c) 2022+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+)
+
+const (
+	S2IndexHeader   = "s2idx\x00"
+	S2IndexTrailer  = "\x00xdi2s"
+	maxIndexEntries = 1 << 16
+	// If distance is less than this, we do not add the entry.
+	minIndexDist = 1 << 20
+)
+
+// Index represents an S2/Snappy index.
+type Index struct {
+	TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown.
+	TotalCompressed   int64 // Total Compressed size if known. Will be -1 if unknown.
+	info              []struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}
+	estBlockUncomp int64
+}
+
+func (i *Index) reset(maxBlock int) {
+	i.estBlockUncomp = int64(maxBlock)
+	i.TotalCompressed = -1
+	i.TotalUncompressed = -1
+	if len(i.info) > 0 {
+		i.info = i.info[:0]
+	}
+}
+
+// allocInfos will allocate an empty slice of infos.
+func (i *Index) allocInfos(n int) {
+	if n > maxIndexEntries {
+		panic("n > maxIndexEntries")
+	}
+	i.info = make([]struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}, 0, n)
+}
+
+// add an uncompressed and compressed pair.
+// Entries must be sent in order.
+func (i *Index) add(compressedOffset, uncompressedOffset int64) error {
+	if i == nil {
+		return nil
+	}
+	lastIdx := len(i.info) - 1
+	if lastIdx >= 0 {
+		latest := i.info[lastIdx]
+		if latest.uncompressedOffset == uncompressedOffset {
+			// Uncompressed didn't change, don't add entry,
+			// but update start index.
+			latest.compressedOffset = compressedOffset
+			i.info[lastIdx] = latest
+			return nil
+		}
+		if latest.uncompressedOffset > uncompressedOffset {
+			return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset)
+		}
+		if latest.compressedOffset > compressedOffset {
+			return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.compressedOffset, compressedOffset)
+		}
+		if latest.uncompressedOffset+minIndexDist > uncompressedOffset {
+			// Only add entry if distance is large enough.
+			return nil
+		}
+	}
+	i.info = append(i.info, struct {
+		compressedOffset   int64
+		uncompressedOffset int64
+	}{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset})
+	return nil
+}
+
+// Find the offset at or before the wanted (uncompressed) offset.
+// If offset is 0 or positive it is the offset from the beginning of the file.
+// If the uncompressed size is known, the offset must be within the file.
+// If an offset outside the file is requested io.ErrUnexpectedEOF is returned.
+// If the offset is negative, it is interpreted as the distance from the end of the file,
+// where -1 represents the last byte.
+// If offset from the end of the file is requested, but size is unknown,
+// ErrUnsupported will be returned.
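+//
+// A minimal usage sketch (illustrative; names and error handling elided):
+//
+//	compOff, uncompOff, _ := idx.Find(want)
+//	// Seek the compressed stream to compOff, then decompress and discard
+//	// want-uncompOff bytes to reach the exact uncompressed position.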
+func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) { + if i.TotalUncompressed < 0 { + return 0, 0, ErrCorrupt + } + if offset < 0 { + offset = i.TotalUncompressed + offset + if offset < 0 { + return 0, 0, io.ErrUnexpectedEOF + } + } + if offset > i.TotalUncompressed { + return 0, 0, io.ErrUnexpectedEOF + } + if len(i.info) > 200 { + n := sort.Search(len(i.info), func(n int) bool { + return i.info[n].uncompressedOffset > offset + }) + if n == 0 { + n = 1 + } + return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil + } + for _, info := range i.info { + if info.uncompressedOffset > offset { + break + } + compressedOff = info.compressedOffset + uncompressedOff = info.uncompressedOffset + } + return compressedOff, uncompressedOff, nil +} + +// reduce to stay below maxIndexEntries +func (i *Index) reduce() { + if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist { + return + } + + // Algorithm, keep 1, remove removeN entries... + removeN := (len(i.info) + 1) / maxIndexEntries + src := i.info + j := 0 + + // Each block should be at least 1MB, but don't reduce below 1000 entries. + for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 { + removeN++ + } + for idx := 0; idx < len(src); idx++ { + i.info[j] = src[idx] + j++ + idx += removeN + } + i.info = i.info[:j] + // Update maxblock estimate. + i.estBlockUncomp += i.estBlockUncomp * int64(removeN) +} + +func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte { + i.reduce() + var tmp [binary.MaxVarintLen64]byte + + initSize := len(b) + // We make the start a skippable header+size. + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + // Total Uncompressed size + n := binary.PutVarint(tmp[:], uncompTotal) + b = append(b, tmp[:n]...) + // Total Compressed size + n = binary.PutVarint(tmp[:], compTotal) + b = append(b, tmp[:n]...) + // Put EstBlockUncomp size + n = binary.PutVarint(tmp[:], i.estBlockUncomp) + b = append(b, tmp[:n]...) + // Put length + n = binary.PutVarint(tmp[:], int64(len(i.info))) + b = append(b, tmp[:n]...) + + // Check if we should add uncompressed offsets + var hasUncompressed byte + for idx, info := range i.info { + if idx == 0 { + if info.uncompressedOffset != 0 { + hasUncompressed = 1 + break + } + continue + } + if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { + hasUncompressed = 1 + break + } + } + b = append(b, hasUncompressed) + + // Add each entry + if hasUncompressed == 1 { + for idx, info := range i.info { + uOff := info.uncompressedOffset + if idx > 0 { + prev := i.info[idx-1] + uOff -= prev.uncompressedOffset + (i.estBlockUncomp) + } + n = binary.PutVarint(tmp[:], uOff) + b = append(b, tmp[:n]...) + } + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + for idx, info := range i.info { + cOff := info.compressedOffset + if idx > 0 { + prev := i.info[idx-1] + cOff -= prev.compressedOffset + cPredict + // Update compressed size prediction, with half the error. + cPredict += cOff / 2 + } + n = binary.PutVarint(tmp[:], cOff) + b = append(b, tmp[:n]...) + } + + // Add Total Size. + // Stored as fixed size for easier reading. + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) 
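+	// The index is framed as a skippable chunk (ChunkTypeIndex), so stream
+	// decoders that do not understand it can skip it like padding.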
+ + // Update size + chunkLen := len(b) - initSize - skippableFrameHeader + b[initSize+1] = uint8(chunkLen >> 0) + b[initSize+2] = uint8(chunkLen >> 8) + b[initSize+3] = uint8(chunkLen >> 16) + //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) + return b +} + +// Load a binary index. +// A zero value Index can be used or a previous one can be reused. +func (i *Index) Load(b []byte) ([]byte, error) { + if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + if b[0] != ChunkTypeIndex { + return b, ErrCorrupt + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return b, io.ErrUnexpectedEOF + } + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return b, ErrUnsupported + } + b = b[len(S2IndexHeader):] + + // Total Uncompressed + if v, n := binary.Varint(b); n <= 0 || v < 0 { + return b, ErrCorrupt + } else { + i.TotalUncompressed = v + b = b[n:] + } + + // Total Compressed + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + i.TotalCompressed = v + b = b[n:] + } + + // Read EstBlockUncomp + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 { + return b, ErrCorrupt + } + i.estBlockUncomp = v + b = b[n:] + } + + var entries int + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 || v > maxIndexEntries { + return b, ErrCorrupt + } + entries = int(v) + b = b[n:] + } + if cap(i.info) < entries { + i.allocInfos(entries) + } + i.info = i.info[:entries] + + if len(b) < 1 { + return b, io.ErrUnexpectedEOF + } + hasUncompressed := b[0] + b = b[1:] + if hasUncompressed&1 != hasUncompressed { + return b, ErrCorrupt + } + + // Add each uncompressed entry + for idx := range i.info { + var uOff int64 + if hasUncompressed != 0 { + // Load delta + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + uOff = v + b = b[n:] + } + } + + if idx > 0 { + prev := i.info[idx-1].uncompressedOffset + uOff += prev + (i.estBlockUncomp) + if uOff <= prev { + return b, ErrCorrupt + } + } + if uOff < 0 { + return b, ErrCorrupt + } + i.info[idx].uncompressedOffset = uOff + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + // Add each compressed entry + for idx := range i.info { + var cOff int64 + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + cOff = v + b = b[n:] + } + + if idx > 0 { + // Update compressed size prediction, with half the error. + cPredictNew := cPredict + cOff/2 + + prev := i.info[idx-1].compressedOffset + cOff += prev + cPredict + if cOff <= prev { + return b, ErrCorrupt + } + cPredict = cPredictNew + } + if cOff < 0 { + return b, ErrCorrupt + } + i.info[idx].compressedOffset = cOff + } + if len(b) < 4+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + // Skip size... + b = b[4:] + + // Check trailer... + if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return b, ErrCorrupt + } + return b[len(S2IndexTrailer):], nil +} + +// LoadStream will load an index from the end of the supplied stream. +// ErrUnsupported will be returned if the signature cannot be found. +// ErrCorrupt will be returned if unexpected values are found. +// io.ErrUnexpectedEOF is returned if there are too few bytes. +// IO errors are returned as-is. +func (i *Index) LoadStream(rs io.ReadSeeker) error { + // Go to end. 
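+	// The stream ends with a 4-byte little-endian size followed by the
+	// 6-byte trailer; reading the last 10 bytes is enough to locate the index.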
+ _, err := rs.Seek(-10, io.SeekEnd) + if err != nil { + return err + } + var tmp [10]byte + _, err = io.ReadFull(rs, tmp[:]) + if err != nil { + return err + } + // Check trailer... + if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return ErrUnsupported + } + sz := binary.LittleEndian.Uint32(tmp[:4]) + if sz > maxChunkSize+skippableFrameHeader { + return ErrCorrupt + } + _, err = rs.Seek(-int64(sz), io.SeekEnd) + if err != nil { + return err + } + + // Read index. + buf := make([]byte, sz) + _, err = io.ReadFull(rs, buf) + if err != nil { + return err + } + _, err = i.Load(buf) + return err +} + +// IndexStream will return an index for a stream. +// The stream structure will be checked, but +// data within blocks is not verified. +// The returned index can either be appended to the end of the stream +// or stored separately. +func IndexStream(r io.Reader) ([]byte, error) { + var i Index + var buf [maxChunkSize]byte + var readHeader bool + for { + _, err := io.ReadFull(r, buf[:4]) + if err != nil { + if err == io.EOF { + return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil + } + return nil, err + } + // Start of this chunk. + startChunk := i.TotalCompressed + i.TotalCompressed += 4 + + chunkType := buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + return nil, ErrCorrupt + } + readHeader = true + } + chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 + if chunkLen < checksumSize { + return nil, ErrCorrupt + } + + i.TotalCompressed += int64(chunkLen) + _, err = io.ReadFull(r, buf[:chunkLen]) + if err != nil { + return nil, io.ErrUnexpectedEOF + } + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + // Skip checksum. + dLen, err := DecodedLen(buf[checksumSize:]) + if err != nil { + return nil, err + } + if dLen > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(dLen) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(dLen) + continue + case chunkTypeUncompressedData: + n2 := chunkLen - checksumSize + if n2 > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(n2) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(n2) + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + return nil, ErrCorrupt + } + + if string(buf[:len(magicBody)]) != magicBody { + if string(buf[:len(magicBody)]) != magicBodySnappy { + return nil, ErrCorrupt + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + return nil, ErrUnsupported + } + if chunkLen > maxChunkSize { + return nil, ErrUnsupported + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + } +} + +// JSON returns the index as JSON text. +func (i *Index) JSON() []byte { + type offset struct { + CompressedOffset int64 `json:"compressed"` + UncompressedOffset int64 `json:"uncompressed"` + } + x := struct { + TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. 
Will be -1 if unknown. + TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. + Offsets []offset `json:"offsets"` + EstBlockUncomp int64 `json:"est_block_uncompressed"` + }{ + TotalUncompressed: i.TotalUncompressed, + TotalCompressed: i.TotalCompressed, + EstBlockUncomp: i.estBlockUncomp, + } + for _, v := range i.info { + x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset}) + } + b, _ := json.MarshalIndent(x, "", " ") + return b +} + +// RemoveIndexHeaders will trim all headers and trailers from a given index. +// This is expected to save 20 bytes. +// These can be restored using RestoreIndexHeaders. +// This removes a layer of security, but is the most compact representation. +// Returns nil if headers contains errors. +// The returned slice references the provided slice. +func RemoveIndexHeaders(b []byte) []byte { + const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4 + if len(b) <= save { + return nil + } + if b[0] != ChunkTypeIndex { + return nil + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return nil + } + b = b[:chunkLen] + + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return nil + } + b = b[len(S2IndexHeader):] + if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) { + return nil + } + b = bytes.TrimSuffix(b, []byte(S2IndexTrailer)) + + if len(b) < 4 { + return nil + } + return b[:len(b)-4] +} + +// RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders. +// No error checking is performed on the input. +// If a 0 length slice is sent, it is returned without modification. +func RestoreIndexHeaders(in []byte) []byte { + if len(in) == 0 { + return in + } + b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4) + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + b = append(b, in...) + + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + chunkLen := len(b) - skippableFrameHeader + b[1] = uint8(chunkLen >> 0) + b[2] = uint8(chunkLen >> 8) + b[3] = uint8(chunkLen >> 16) + return b +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go new file mode 100644 index 000000000..46ed908e3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go @@ -0,0 +1,585 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// LZ4Converter provides conversion from LZ4 blocks as defined here: +// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md +type LZ4Converter struct { +} + +// ErrDstTooSmall is returned when provided destination is too small. +var ErrDstTooSmall = errors.New("s2: destination too small") + +// ConvertBlock will convert an LZ4 block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
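+//
+// A minimal sketch (illustrative; assumes the decompressed size is known
+// so MaxEncodedLen can bound the destination capacity):
+//
+//	var c LZ4Converter
+//	dst, n, err := c.ConvertBlock(make([]byte, 0, MaxEncodedLen(decompSize)), lz4Block)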
+func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const 
maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4 block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
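+//
+// Unlike ConvertBlock, no repeat codes are emitted, since Snappy has no
+// repeat extension; repeated offsets are re-encoded as plain copies.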
+func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
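+			// tagCopy1 layout: byte 0 holds 3 high offset bits, 3 bits of
+			// (length-4) and the 2-bit tag; byte 1 holds the low offset byte.
+			// Valid only for offset < 2048 and length 4..11.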
+ dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat16(dst []byte, offset uint16, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat16(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint16 +// 4 <= length && length <= math.MaxUint32 +func emitCopy16(dst []byte, offset uint16, length int) int { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat16(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
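+// It is the pure-Go fallback used when the amd64 assembly is unavailable.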
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralGo(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go new file mode 100644 index 000000000..000f39719 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go @@ -0,0 +1,467 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "fmt" +) + +// LZ4sConverter provides conversion from LZ4s. +// (Intel modified LZ4 Blocks) +// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf +// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format. +// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData. +// The LZ4s block returned by the Intel® QAT hardware can be used by an external +// software post-processing to generate other compressed data formats. +// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses +// the same high-level formatting as LZ4 block format with the following encoding changes: +// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte. +// ONLY "Min match of 4 bytes" is supported. +type LZ4sConverter struct { +} + +// ConvertBlock will convert an LZ4s block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
+func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. 
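+			// LZ4s: a zero match length is allowed mid-stream; no offset
+			// follows, so continue with the next sequence token.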
+ continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4s block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. + continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. 
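+				// tagCopy2 layout: byte 0 holds (length-1) in the high 6 bits
+				// plus the 2-bit tag; bytes 1-2 hold the offset, little-endian.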
+ dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go new file mode 100644 index 000000000..8372d752f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -0,0 +1,1075 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "runtime" + "sync" +) + +// ErrCantSeek is returned if the stream cannot be seeked. +type ErrCantSeek struct { + Reason string +} + +// Error returns the error as string. +func (e ErrCantSeek) Error() string { + return fmt.Sprintf("s2: Can't seek because %s", e.Reason) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes. +func NewReader(r io.Reader, opts ...ReaderOption) *Reader { + nr := Reader{ + r: r, + maxBlock: maxBlockSize, + } + for _, opt := range opts { + if err := opt(&nr); err != nil { + nr.err = err + return &nr + } + } + nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize + if nr.lazyBuf > 0 { + nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize) + } else { + nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize) + } + nr.readHeader = nr.ignoreStreamID + nr.paramsOK = true + return &nr +} + +// ReaderOption is an option for creating a decoder. +type ReaderOption func(*Reader) error + +// ReaderMaxBlockSize allows to control allocations if the stream +// has been compressed with a smaller WriterBlockSize, or with the default 1MB. +// Blocks must be this size or smaller to decompress, +// otherwise the decoder will return ErrUnsupported. +// +// For streams compressed with Snappy this can safely be set to 64KB (64 << 10). +// +// Default is the maximum limit of 4MB. +func ReaderMaxBlockSize(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize <= 0 { + return errors.New("s2: block size too large. Must be <= 4MB and > 0") + } + if r.lazyBuf == 0 && blockSize < defaultBlockSize { + r.lazyBuf = blockSize + } + r.maxBlock = blockSize + return nil + } +} + +// ReaderAllocBlock allows to control upfront stream allocations +// and not allocate for frames bigger than this initially. +// If frames bigger than this is seen a bigger buffer will be allocated. +// +// Default is 1MB, which is default output size. +func ReaderAllocBlock(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize < 1024 { + return errors.New("s2: invalid ReaderAllocBlock. 
Must be <= 4MB and >= 1024") + } + r.lazyBuf = blockSize + return nil + } +} + +// ReaderIgnoreStreamIdentifier will make the reader skip the expected +// stream identifier at the beginning of the stream. +// This can be used when serving a stream that has been forwarded to a specific point. +func ReaderIgnoreStreamIdentifier() ReaderOption { + return func(r *Reader) error { + r.ignoreStreamID = true + return nil + } +} + +// ReaderSkippableCB will register a callback for chuncks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). +// For each chunk with the ID, the callback is called with the content. +// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. +func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { + return func(r *Reader) error { + if id < 0x80 || id > 0xfd { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") + } + r.skippableCB[id-0x80] = fn + return nil + } +} + +// ReaderIgnoreCRC will make the reader skip CRC calculation and checks. +func ReaderIgnoreCRC() ReaderOption { + return func(r *Reader) error { + r.ignoreCRC = true + return nil + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + skippableCB [0xff - 0x80]func(r io.Reader) error + blockStart int64 // Uncompressed offset at start of current. + index *Index + + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + // maximum block size allowed. + maxBlock int + // maximum expected buffer size. + maxBufSize int + // alloc a buffer this size if > 0. + lazyBuf int + readHeader bool + paramsOK bool + snappyFrame bool + ignoreStreamID bool + ignoreCRC bool +} + +// GetBufferCapacity returns the capacity of the internal buffer. +// This might be useful to know when reusing the same reader in combination +// with the lazy buffer option. +func (r *Reader) GetBufferCapacity() int { + return cap(r.buf) +} + +// ensureBufferSize will ensure that the buffer can take at least n bytes. +// If false is returned the buffer exceeds maximum allowed size. +func (r *Reader) ensureBufferSize(n int) bool { + if n > r.maxBufSize { + r.err = ErrCorrupt + return false + } + if cap(r.buf) >= n { + return true + } + // Realloc buffer. + r.buf = make([]byte, n) + return true +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + if !r.paramsOK { + return + } + r.index = nil + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.blockStart = 0 + r.readHeader = r.ignoreStreamID +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// skippable will skip n bytes. +// If the supplied reader supports seeking that is used. +// tmp is used as a temporary buffer for reading. +// The supplied slice does not need to be the size of the read. 
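+// If a skippable-chunk callback is registered for id, it is invoked with the
+// chunk body before any remaining bytes are discarded.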
+func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { + if id < 0x80 { + r.err = fmt.Errorf("internal error: skippable id < 0x80") + return false + } + if fn := r.skippableCB[id-0x80]; fn != nil { + rd := io.LimitReader(r.r, int64(n)) + r.err = fn(rd) + if r.err != nil { + return false + } + _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp) + return r.err == nil + } + if rs, ok := r.r.(io.ReadSeeker); ok { + _, err := rs.Seek(int64(n), io.SeekCurrent) + if err == nil { + return true + } + if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + return false + } + } + for n > 0 { + if n < len(tmp) { + tmp = tmp[:n] + } + if _, r.err = io.ReadFull(r.r, tmp); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + n -= len(tmp) + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if !r.ignoreCRC && crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
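+			// The chunk body is the 4-byte CRC (already consumed above)
+			// followed by n bytes of raw, uncompressed data.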
+			n := chunkLen - checksumSize
+			if r.snappyFrame && n > maxSnappyBlockSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				if n > r.maxBlock {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+				r.decoded = make([]byte, n)
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+				r.err = ErrCRC
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			if string(r.buf[:len(magicBody)]) != magicBody {
+				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+					r.err = ErrCorrupt
+					return 0, r.err
+				} else {
+					r.snappyFrame = true
+				}
+			} else {
+				r.snappyFrame = false
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if chunkLen > maxChunkSize {
+			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+		if !r.skippable(r.buf, chunkLen, false, chunkType) {
+			return 0, r.err
+		}
+	}
+}
+
+// DecodeConcurrent will decode the full stream to w.
+// This function should not be combined with reading, seeking or other operations.
+// Up to 'concurrent' goroutines will be used.
+// If <= 0, runtime.NumCPU will be used.
+// On success, the number of bytes decompressed and a nil error is returned.
+// This is mainly intended for bigger streams.
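+//
+// A minimal usage sketch (in and out are an assumed io.Reader and io.Writer):
+//
+//	dec := NewReader(in)
+//	n, err := dec.DecodeConcurrent(out, 0) // 0 means runtime.NumCPU()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("decompressed", n, "bytes")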
+func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) { + if r.i > 0 || r.j > 0 || r.blockStart > 0 { + return 0, errors.New("DecodeConcurrent called after ") + } + if concurrent <= 0 { + concurrent = runtime.NumCPU() + } + + // Write to output + var errMu sync.Mutex + var aErr error + setErr := func(e error) (ok bool) { + errMu.Lock() + defer errMu.Unlock() + if e == nil { + return aErr == nil + } + if aErr == nil { + aErr = e + } + return false + } + hasErr := func() (ok bool) { + errMu.Lock() + v := aErr != nil + errMu.Unlock() + return v + } + + var aWritten int64 + toRead := make(chan []byte, concurrent) + writtenBlocks := make(chan []byte, concurrent) + queue := make(chan chan []byte, concurrent) + reUse := make(chan chan []byte, concurrent) + for i := 0; i < concurrent; i++ { + toRead <- make([]byte, 0, r.maxBufSize) + writtenBlocks <- make([]byte, 0, r.maxBufSize) + reUse <- make(chan []byte, 1) + } + // Writer + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for toWrite := range queue { + entry := <-toWrite + reUse <- toWrite + if hasErr() || entry == nil { + if entry != nil { + writtenBlocks <- entry + } + continue + } + if hasErr() { + writtenBlocks <- entry + continue + } + n, err := w.Write(entry) + want := len(entry) + writtenBlocks <- entry + if err != nil { + setErr(err) + continue + } + if n != want { + setErr(io.ErrShortWrite) + continue + } + aWritten += int64(n) + } + }() + + defer func() { + if r.err != nil { + setErr(r.err) + } else if err != nil { + setErr(err) + } + close(queue) + wg.Wait() + if err == nil { + err = aErr + } + written = aWritten + }() + + // Reader + for !hasErr() { + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = nil + } + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + orgBuf := <-toRead + buf := orgBuf[:chunkLen] + + if !r.readFull(buf, false) { + return 0, r.err + } + + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + wg.Add(1) + + decoded := <-writtenBlocks + entry := <-reUse + queue <- entry + go func() { + defer wg.Done() + decoded = decoded[:n] + _, err := Decode(decoded, buf) + toRead <- orgBuf + if err != nil { + writtenBlocks <- decoded + setErr(err) + entry <- nil + return + } + if !r.ignoreCRC && crc(decoded) != checksum { + writtenBlocks <- decoded + setErr(ErrCRC) + entry <- nil + return + } + entry <- decoded + }() + continue + + case chunkTypeUncompressedData: + + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + // Grab write buffer + orgBuf := <-writtenBlocks + buf := orgBuf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read content. + n := chunkLen - checksumSize + + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + // Read uncompressed + buf = orgBuf[:n] + if !r.readFull(buf, false) { + return 0, r.err + } + + if !r.ignoreCRC && crc(buf) != checksum { + r.err = ErrCRC + return 0, r.err + } + entry := <-reUse + queue <- entry + entry <- buf + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } + return 0, r.err +} + +// Skip will skip n bytes forward in the decompressed output. +// For larger skips this consumes less CPU and is faster than reading output and discarding it. +// CRC is not checked on skipped blocks. +// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. +// If a decoding error is encountered subsequent calls to Read will also fail. +func (r *Reader) Skip(n int64) error { + if n < 0 { + return errors.New("attempted negative skip") + } + if r.err != nil { + return r.err + } + + for n > 0 { + if r.i < r.j { + // Skip in buffer. + // decoded[i:j] contains decoded bytes that have not yet been passed on. + left := int64(r.j - r.i) + if left >= n { + tmp := int64(r.i) + n + if tmp > math.MaxInt32 { + return errors.New("s2: internal overflow in skip") + } + r.i = int(tmp) + return nil + } + n -= int64(r.j - r.i) + r.i = r.j + } + + // Buffer empty; read blocks until we have content. + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = io.ErrUnexpectedEOF + } + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). 
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.ensureBufferSize(chunkLen) {
+				if r.err == nil {
+					r.err = ErrUnsupported
+				}
+				return r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			dLen, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return r.err
+			}
+			if dLen > r.maxBlock {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			// Check if destination is within this block
+			if int64(dLen) > n {
+				if len(r.decoded) < dLen {
+					r.decoded = make([]byte, dLen)
+				}
+				if _, err := Decode(r.decoded, buf); err != nil {
+					r.err = err
+					return r.err
+				}
+				if crc(r.decoded[:dLen]) != checksum {
+					r.err = ErrCorrupt
+					return r.err
+				}
+			} else {
+				// Skip block completely
+				n -= int64(dLen)
+				r.blockStart += int64(dLen)
+				dLen = 0
+			}
+			r.i, r.j = 0, dLen
+			continue
+		case chunkTypeUncompressedData:
+			r.blockStart += int64(r.j)
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.ensureBufferSize(chunkLen) {
+				if r.err == nil {
+					r.err = ErrUnsupported
+				}
+				return r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n2 := chunkLen - checksumSize
+			if n2 > len(r.decoded) {
+				if n2 > r.maxBlock {
+					r.err = ErrCorrupt
+					return r.err
+				}
+				r.decoded = make([]byte, n2)
+			}
+			if !r.readFull(r.decoded[:n2], false) {
+				return r.err
+			}
+			if int64(n2) < n {
+				if crc(r.decoded[:n2]) != checksum {
+					r.err = ErrCorrupt
+					return r.err
+				}
+			}
+			r.i, r.j = 0, n2
+			continue
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return r.err
+			}
+			if string(r.buf[:len(magicBody)]) != magicBody {
+				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+					r.err = ErrCorrupt
+					return r.err
+				}
+			}
+
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return r.err
+		}
+		if chunkLen > maxChunkSize {
+			r.err = ErrUnsupported
+			return r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.skippable(r.buf, chunkLen, false, chunkType) {
+			return r.err
+		}
+	}
+	return nil
+}
+
+// ReadSeeker provides random or forward seeking in compressed content.
+// See Reader.ReadSeeker.
+type ReadSeeker struct {
+	*Reader
+	readAtMu sync.Mutex
+}
+
+// ReadSeeker will return an io.ReadSeeker and io.ReaderAt
+// compatible version of the reader.
+// If 'random' is specified the returned io.Seeker can be used for
+// random seeking, otherwise only forward seeking is supported.
+// Enabling random seeking requires the original input to support
+// the io.Seeker interface.
+// A custom index can be specified which will be used if supplied.
+// When using a custom index, it will not be read from the input stream.
+// The ReadAt position will affect regular reads and the current position of Seek.
+// So using Read after ReadAt will continue from where the ReadAt stopped.
+// No functions should be used concurrently.
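+// A minimal seeking sketch (assumes dec wraps a seekable input that carries an index):
+//
+//	rs, err := dec.ReadSeeker(true, nil)
+//	if err != nil {
+//		return err
+//	}
+//	_, err = rs.Seek(1<<20, io.SeekStart) // jump 1MiB into the decompressed output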
+// The returned ReadSeeker contains a shallow reference to the existing Reader, +// meaning changes performed to one is reflected in the other. +func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { + // Read index if provided. + if len(index) != 0 { + if r.index == nil { + r.index = &Index{} + } + if _, err := r.index.Load(index); err != nil { + return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} + } + } + + // Check if input is seekable + rs, ok := r.r.(io.ReadSeeker) + if !ok { + if !random { + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream isn't seekable"} + } + + if r.index != nil { + // Seekable and index, ok... + return &ReadSeeker{Reader: r}, nil + } + + // Load from stream. + r.index = &Index{} + + // Read current position. + pos, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + err = r.index.LoadStream(rs) + if err != nil { + if err == ErrUnsupported { + // If we don't require random seeking, reset input and return. + if !random { + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()} + } + r.index = nil + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream does not contain an index"} + } + return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} + } + + // reset position. + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + return &ReadSeeker{Reader: r}, nil +} + +// Seek allows seeking in compressed data. +func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { + if r.err != nil { + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil + } + + // Calculate absolute offset. + absOffset := offset + + switch whence { + case io.SeekStart: + case io.SeekCurrent: + absOffset = r.blockStart + int64(r.i) + offset + case io.SeekEnd: + if r.index == nil { + return 0, ErrUnsupported + } + absOffset = r.index.TotalUncompressed + offset + default: + r.err = ErrUnsupported + return 0, r.err + } + + if absOffset < 0 { + return 0, errors.New("seek before start of file") + } + + if !r.readHeader { + // Make sure we read the header. + _, r.err = r.Read([]byte{}) + if r.err != nil { + return 0, r.err + } + } + + // If we are inside current block no need to seek. + // This includes no offset changes. + if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) { + r.i = int(absOffset - r.blockStart) + return r.blockStart + int64(r.i), nil + } + + rs, ok := r.r.(io.ReadSeeker) + if r.index == nil || !ok { + currOffset := r.blockStart + int64(r.i) + if absOffset >= currOffset { + err := r.Skip(absOffset - currOffset) + return r.blockStart + int64(r.i), err + } + return 0, ErrUnsupported + } + + // We can seek and we have an index. + c, u, err := r.index.Find(absOffset) + if err != nil { + return r.blockStart + int64(r.i), err + } + + // Seek to next block + _, err = rs.Seek(c, io.SeekStart) + if err != nil { + return 0, err + } + + r.i = r.j // Remove rest of current block. + r.blockStart = u - int64(r.j) // Adjust current block start for accounting. 
+	if u < absOffset {
+		// Forward inside block
+		return absOffset, r.Skip(absOffset - u)
+	}
+	if u > absOffset {
+		return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
+	}
+	return absOffset, nil
+}
+
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source. This is however not recommended.
+func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
+	r.readAtMu.Lock()
+	defer r.readAtMu.Unlock()
+	_, err := r.Seek(offset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	n := 0
+	for n < len(p) {
+		n2, err := r.Read(p[n:])
+		if err != nil {
+			// This will include io.EOF
+			return n + n2, err
+		}
+		n += n2
+	}
+	return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.i < r.j {
+		c := r.decoded[r.i]
+		r.i++
+		return c, nil
+	}
+	var tmp [1]byte
+	for i := 0; i < 10; i++ {
+		n, err := r.Read(tmp[:])
+		if err != nil {
+			return 0, err
+		}
+		if n == 1 {
+			return tmp[0], nil
+		}
+	}
+	return 0, io.ErrNoProgress
+}
+
+// SkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported; the latest one provided is used.
+// Sending a nil function will disable previous callbacks.
+// You can peek the stream, triggering the callback, by doing a Read with a 0
+// byte buffer.
+func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
+	if id < 0x80 || id >= chunkTypePadding {
+		return fmt.Errorf("SkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
+	}
+	r.skippableCB[id-0x80] = fn
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 000000000..cbd1ed64d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,151 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2 implements the S2 compression format.
+//
+// S2 is an extension of Snappy. Similar to Snappy, S2 is aimed at high throughput,
+// which is why it features concurrent compression for bigger payloads.
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
+//
+// For more information on Snappy/S2 differences, see the README at: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as an S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed for an improved compression ratio.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression will also be performed on multiple CPU cores concurrently,
+// significantly improving throughput.
package s2
+
+import (
+	"bytes"
+	"hash/crc32"
+
+	"github.com/klauspost/compress/internal/race"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize     = 4
+	chunkHeaderSize  = 4
+	magicChunk       = "\xff\x06\x00\x00" + magicBody
+	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
+	magicBodySnappy  = "sNaPpY"
+	magicBody        = "S2sTwO"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock.
+	//
+	// For the framing format (Writer type instead of Encode function),
+	// this is the maximum uncompressed size of a block.
+	maxBlockSize = 4 << 20
+
+	// minBlockSize is the minimum size of block setting when creating a writer.
+	minBlockSize = 4 << 10
+
+	skippableFrameHeader = 4
+	maxChunkSize         = 1<<24 - 1 // 16777215
+
+	// Default block size
+	defaultBlockSize = 1 << 20
+
+	// maxSnappyBlockSize is the maximum snappy block size.
+ maxSnappyBlockSize = 1 << 16 + + obufHeaderLen = checksumSize + chunkHeaderSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + ChunkTypeIndex = 0x99 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + crcTable = crc32.MakeTable(crc32.Castagnoli) + magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes. + magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes. +) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + race.ReadSlice(b) + + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// literalExtraSize returns the extra size of encoding n literals. +// n should be >= 0 and <= math.MaxUint32. +func literalExtraSize(n int64) int64 { + if n == 0 { + return 0 + } + switch { + case n < 60: + return 1 + case n < 1<<8: + return 2 + case n < 1<<16: + return 3 + case n < 1<<24: + return 4 + default: + return 5 + } +} + +type byter interface { + Bytes() []byte +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go new file mode 100644 index 000000000..0a46f2b98 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -0,0 +1,1039 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/internal/race" +) + +const ( + levelUncompressed = iota + 1 + levelFast + levelBetter + levelBest +) + +// NewWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// Users must call Close to guarantee all data has been forwarded to +// the underlying io.Writer and that resources are released. +// They may also call Flush zero or more times before calling Close. +func NewWriter(w io.Writer, opts ...WriterOption) *Writer { + w2 := Writer{ + blockSize: defaultBlockSize, + concurrency: runtime.GOMAXPROCS(0), + randSrc: rand.Reader, + level: levelFast, + } + for _, opt := range opts { + if err := opt(&w2); err != nil { + w2.errState = err + return &w2 + } + } + w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) + w2.paramsOK = true + w2.ibuf = make([]byte, 0, w2.blockSize) + w2.buffers.New = func() interface{} { + return make([]byte, w2.obufLen) + } + w2.Reset(w) + return &w2 +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + errMu sync.Mutex + errState error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + ibuf []byte + + blockSize int + obufLen int + concurrency int + written int64 + uncompWritten int64 // Bytes sent to compression + output chan chan result + buffers sync.Pool + pad int + + writer io.Writer + randSrc io.Reader + writerWg sync.WaitGroup + index Index + customEnc func(dst, src []byte) int + + // wroteStreamHeader is whether we have written the stream header. 
+	wroteStreamHeader bool
+	paramsOK          bool
+	snappy            bool
+	flushOnWrite      bool
+	appendIndex       bool
+	level             uint8
+}
+
+type result struct {
+	b []byte
+	// Uncompressed start offset
+	startOffset int64
+}
+
+// err returns the previously set error.
+// If no error has been set yet, the supplied err (if non-nil) is stored and returned.
+func (w *Writer) err(err error) error {
+	w.errMu.Lock()
+	errSet := w.errState
+	if errSet == nil && err != nil {
+		w.errState = err
+		errSet = err
+	}
+	w.errMu.Unlock()
+	return errSet
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to w.
+// This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	if !w.paramsOK {
+		return
+	}
+	// Close previous writer, if any.
+	if w.output != nil {
+		close(w.output)
+		w.writerWg.Wait()
+		w.output = nil
+	}
+	w.errState = nil
+	w.ibuf = w.ibuf[:0]
+	w.wroteStreamHeader = false
+	w.written = 0
+	w.writer = writer
+	w.uncompWritten = 0
+	w.index.reset(w.blockSize)
+
+	// If we didn't get a writer, stop here.
+	if writer == nil {
+		return
+	}
+	// If no concurrency requested, don't spin up writer goroutine.
+	if w.concurrency == 1 {
+		return
+	}
+
+	toWrite := make(chan chan result, w.concurrency)
+	w.output = toWrite
+	w.writerWg.Add(1)
+
+	// Start a writer goroutine that will write all output in order.
+	go func() {
+		defer w.writerWg.Done()
+
+		// Get a queued write.
+		for write := range toWrite {
+			// Wait for the data to be available.
+			input := <-write
+			in := input.b
+			if len(in) > 0 {
+				if w.err(nil) == nil {
+					// Don't expose data from previous buffers.
+					toWrite := in[:len(in):len(in)]
+					// Write to output.
+					n, err := writer.Write(toWrite)
+					if err == nil && n != len(toWrite) {
+						err = io.ErrShortBuffer
+					}
+					_ = w.err(err)
+					w.err(w.index.add(w.written, input.startOffset))
+					w.written += int64(n)
+				}
+			}
+			if cap(in) >= w.obufLen {
+				w.buffers.Put(in)
+			}
+			// close the incoming write request.
+			// This can be used for synchronizing flushes.
+			close(write)
+		}
+	}()
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if w.flushOnWrite {
+		return w.write(p)
+	}
+	// If we exceed the input buffer size, start writing
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.write(w.ibuf)
+			w.ibuf = w.ibuf[:0]
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if err := w.err(nil); err != nil {
+		return nRet, err
+	}
+	// p should always be able to fit into w.ibuf now.
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+// ReadFrom implements the io.ReaderFrom interface.
+// Using this is typically more efficient since it avoids a memory copy.
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
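+//
+// A minimal sketch (dst and src are an assumed io.Writer and io.Reader):
+//
+//	enc := NewWriter(dst)
+//	if _, err := enc.ReadFrom(src); err != nil {
+//		return err
+//	}
+//	return enc.Close()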
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if len(w.ibuf) > 0 {
+		err := w.AsyncFlush()
+		if err != nil {
+			return 0, err
+		}
+	}
+	if br, ok := r.(byter); ok {
+		buf := br.Bytes()
+		if err := w.EncodeBuffer(buf); err != nil {
+			return 0, err
+		}
+		return int64(len(buf)), w.AsyncFlush()
+	}
+	for {
+		inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
+		n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
+		if err != nil {
+			if err == io.ErrUnexpectedEOF {
+				err = io.EOF
+			}
+			if err != io.EOF {
+				return n, w.err(err)
+			}
+		}
+		if n2 == 0 {
+			if cap(inbuf) >= w.obufLen {
+				w.buffers.Put(inbuf)
+			}
+			break
+		}
+		n += int64(n2)
+		err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
+		if w.err(err2) != nil {
+			break
+		}
+
+		if err != nil {
+			// We got EOF and wrote everything
+			break
+		}
+	}
+
+	return n, w.err(nil)
+}
+
+// AddSkippableBlock will add a skippable block to the stream.
+// The ID must be 0x80-0xfe (inclusive).
+// Length of the skippable block must be <= 16777215 bytes.
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+	if len(data) == 0 {
+		return nil
+	}
+	if id < 0x80 || id > chunkTypePadding {
+		return fmt.Errorf("invalid skippable block id %x", id)
+	}
+	if len(data) > maxChunkSize {
+		return fmt.Errorf("skippable block exceeds maximum size")
+	}
+	var header [4]byte
+	chunkLen := len(data)
+	header[0] = id
+	header[1] = uint8(chunkLen >> 0)
+	header[2] = uint8(chunkLen >> 8)
+	header[3] = uint8(chunkLen >> 16)
+	if w.concurrency == 1 {
+		write := func(b []byte) error {
+			n, err := w.writer.Write(b)
+			if err = w.err(err); err != nil {
+				return err
+			}
+			if n != len(b) {
+				return w.err(io.ErrShortWrite)
+			}
+			w.written += int64(n)
+			return w.err(nil)
+		}
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			if w.snappy {
+				if err := write([]byte(magicChunkSnappy)); err != nil {
+					return err
+				}
+			} else {
+				if err := write([]byte(magicChunk)); err != nil {
+					return err
+				}
+			}
+		}
+		if err := write(header[:]); err != nil {
+			return err
+		}
+		return write(data)
+	}
+
+	// Create output...
+	if !w.wroteStreamHeader {
+		w.wroteStreamHeader = true
+		hWriter := make(chan result)
+		w.output <- hWriter
+		if w.snappy {
+			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
+		} else {
+			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
+		}
+	}
+
+	// Copy input.
+	inbuf := w.buffers.Get().([]byte)[:4]
+	copy(inbuf, header[:])
+	inbuf = append(inbuf, data...)
+
+	output := make(chan result, 1)
+	// Queue output.
+	w.output <- output
+	output <- result{startOffset: w.uncompWritten, b: inbuf}
+
+	return nil
+}
+
+// EncodeBuffer will add a buffer to the stream.
+// This is the fastest way to encode a stream,
+// but the input buffer cannot be written to by the caller
+// until Flush or Close has been called when concurrency != 1.
+//
+// If you cannot control that, use the regular Write function.
+//
+// Note that input is not buffered.
+// This means that each write will result in discrete blocks being created.
+// For buffered writes, use the regular Write function.
+func (w *Writer) EncodeBuffer(buf []byte) (err error) {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+
+	if w.flushOnWrite {
+		_, err := w.write(buf)
+		return err
+	}
+	// Flush queued data first.
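+	// EncodeBuffer bypasses the internal input buffer, so anything already
+	// queued through Write must be emitted first to preserve block order.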
+ if len(w.ibuf) > 0 { + err := w.AsyncFlush() + if err != nil { + return err + } + } + if w.concurrency == 1 { + _, err := w.writeSync(buf) + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + for len(buf) > 0 { + // Cut input. + uncompressed := buf + if len(uncompressed) > w.blockSize { + uncompressed = uncompressed[:w.blockSize] + } + buf = buf[len(uncompressed):] + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + race.WriteSlice(obuf) + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + go func() { + race.ReadSlice(uncompressed) + + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // copy uncompressed + copy(obuf[obufHeaderLen:], uncompressed) + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + }() + } + return nil +} + +func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { + if w.customEnc != nil { + if ret := w.customEnc(obuf, uncompressed); ret >= 0 { + return ret + } + } + if w.snappy { + switch w.level { + case levelFast: + return encodeBlockSnappy(obuf, uncompressed) + case levelBetter: + return encodeBlockBetterSnappy(obuf, uncompressed) + case levelBest: + return encodeBlockBestSnappy(obuf, uncompressed) + } + return 0 + } + switch w.level { + case levelFast: + return encodeBlock(obuf, uncompressed) + case levelBetter: + return encodeBlockBetter(obuf, uncompressed) + case levelBest: + return encodeBlockBest(obuf, uncompressed, nil) + } + return 0 +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.concurrency == 1 { + return w.writeSync(p) + } + + // Spawn goroutine and write block to output channel. + for len(p) > 0 { + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + // Copy input. + // If the block is incompressible, this is used for the result. 
+ inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + obuf := w.buffers.Get().([]byte)[:w.obufLen] + copy(inbuf[obufHeaderLen:], uncompressed) + uncompressed = inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + nRet += len(uncompressed) + } + return nRet, nil +} + +// writeFull is a special version of write that will always write the full buffer. +// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. +// The data will be written as a single block. +// The caller is not allowed to use inbuf after this function has been called. +func (w *Writer) writeFull(inbuf []byte) (errRet error) { + if err := w.err(nil); err != nil { + return err + } + + if w.concurrency == 1 { + _, err := w.writeSync(inbuf[obufHeaderLen:]) + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:w.obufLen] + uncompressed := inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. 
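+		// The 8-byte header is: 1-byte chunk type, 3-byte little-endian chunk
+		// length (the CRC counts toward the length), and the 4-byte
+		// little-endian CRC of the uncompressed data.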
+		obuf[0] = chunkType
+		obuf[1] = uint8(chunkLen >> 0)
+		obuf[2] = uint8(chunkLen >> 8)
+		obuf[3] = uint8(chunkLen >> 16)
+		obuf[4] = uint8(checksum >> 0)
+		obuf[5] = uint8(checksum >> 8)
+		obuf[6] = uint8(checksum >> 16)
+		obuf[7] = uint8(checksum >> 24)
+
+		// Queue final output.
+		res.b = obuf
+		output <- res
+
+		// Put unused buffer back in pool.
+		w.buffers.Put(inbuf)
+	}()
+	return nil
+}
+
+func (w *Writer) writeSync(p []byte) (nRet int, errRet error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if !w.wroteStreamHeader {
+		w.wroteStreamHeader = true
+		var n int
+		var err error
+		if w.snappy {
+			n, err = w.writer.Write(magicChunkSnappyBytes)
+		} else {
+			n, err = w.writer.Write(magicChunkBytes)
+		}
+		if err != nil {
+			return 0, w.err(err)
+		}
+		if n != len(magicChunk) {
+			return 0, w.err(io.ErrShortWrite)
+		}
+		w.written += int64(n)
+	}
+
+	for len(p) > 0 {
+		var uncompressed []byte
+		if len(p) > w.blockSize {
+			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+
+		obuf := w.buffers.Get().([]byte)[:w.obufLen]
+		checksum := crc(uncompressed)
+
+		// Set to uncompressed.
+		chunkType := uint8(chunkTypeUncompressedData)
+		chunkLen := 4 + len(uncompressed)
+
+		// Attempt compressing.
+		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
+		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)
+
+		if n2 > 0 {
+			chunkType = uint8(chunkTypeCompressedData)
+			chunkLen = 4 + n + n2
+			obuf = obuf[:obufHeaderLen+n+n2]
+		} else {
+			obuf = obuf[:8]
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		obuf[0] = chunkType
+		obuf[1] = uint8(chunkLen >> 0)
+		obuf[2] = uint8(chunkLen >> 8)
+		obuf[3] = uint8(chunkLen >> 16)
+		obuf[4] = uint8(checksum >> 0)
+		obuf[5] = uint8(checksum >> 8)
+		obuf[6] = uint8(checksum >> 16)
+		obuf[7] = uint8(checksum >> 24)
+
+		n, err := w.writer.Write(obuf)
+		if err != nil {
+			return 0, w.err(err)
+		}
+		if n != len(obuf) {
+			return 0, w.err(io.ErrShortWrite)
+		}
+		w.err(w.index.add(w.written, w.uncompWritten))
+		w.written += int64(n)
+		w.uncompWritten += int64(len(uncompressed))
+
+		if chunkType == chunkTypeUncompressedData {
+			// Write uncompressed data.
+			n, err := w.writer.Write(uncompressed)
+			if err != nil {
+				return 0, w.err(err)
+			}
+			if n != len(uncompressed) {
+				return 0, w.err(io.ErrShortWrite)
+			}
+			w.written += int64(n)
+		}
+		w.buffers.Put(obuf)
+		// Queue final output.
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// AsyncFlush writes any buffered bytes to a block and starts compressing it.
+// Unlike Flush, it does not wait until the output has been written.
+func (w *Writer) AsyncFlush() error {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+
+	// Queue any data still in input buffer.
+	if len(w.ibuf) != 0 {
+		if !w.wroteStreamHeader {
+			_, err := w.writeSync(w.ibuf)
+			w.ibuf = w.ibuf[:0]
+			return w.err(err)
+		} else {
+			_, err := w.write(w.ibuf)
+			w.ibuf = w.ibuf[:0]
+			err = w.err(err)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return w.err(nil)
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+// This does not apply padding.
+func (w *Writer) Flush() error {
+	if err := w.AsyncFlush(); err != nil {
+		return err
+	}
+	if w.output == nil {
+		return w.err(nil)
+	}
+
+	// Send empty buffer
+	res := make(chan result)
+	w.output <- res
+	// Block until this has been picked up.
+	res <- result{b: nil, startOffset: w.uncompWritten}
+	// When it is closed, we have flushed.
+	<-res
+	return w.err(nil)
+}
+
+// Close calls Flush and then closes the Writer.
+// Calling Close multiple times is ok,
+// but calling CloseIndex afterwards will not return the index.
+func (w *Writer) Close() error {
+	_, err := w.closeIndex(w.appendIndex)
+	return err
+}
+
+// CloseIndex calls Close and returns an index on the first call.
+// This is not required if you are only adding an index to a stream.
+func (w *Writer) CloseIndex() ([]byte, error) {
+	return w.closeIndex(true)
+}
+
+func (w *Writer) closeIndex(idx bool) ([]byte, error) {
+	err := w.Flush()
+	if w.output != nil {
+		close(w.output)
+		w.writerWg.Wait()
+		w.output = nil
+	}
+
+	var index []byte
+	if w.err(err) == nil && w.writer != nil {
+		// Create index.
+		if idx {
+			compSize := int64(-1)
+			if w.pad <= 1 {
+				compSize = w.written
+			}
+			index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
+			// Count as written for padding.
+			if w.appendIndex {
+				w.written += int64(len(index))
+			}
+		}
+
+		if w.pad > 1 {
+			tmp := w.ibuf[:0]
+			if len(index) > 0 {
+				// Allocate another buffer.
+				tmp = w.buffers.Get().([]byte)[:0]
+				defer w.buffers.Put(tmp)
+			}
+			add := calcSkippableFrame(w.written, int64(w.pad))
+			frame, err := skippableFrame(tmp, add, w.randSrc)
+			if err = w.err(err); err != nil {
+				return nil, err
+			}
+			n, err2 := w.writer.Write(frame)
+			if err2 == nil && n != len(frame) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+		if len(index) > 0 && w.appendIndex {
+			n, err2 := w.writer.Write(index)
+			if err2 == nil && n != len(index) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+	}
+	err = w.err(errClosed)
+	if err == errClosed {
+		return index, nil
+	}
+	return nil, err
+}
+
+// calcSkippableFrame will return a total size to be added for written
+// to be divisible by wantMultiple.
+// The value will always be > skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+	}
+	if int64(total) >= maxBlockSize+skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+	}
+	// Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+	dst = append(dst, chunkTypePadding)
+	f := uint32(total - skippableFrameHeader)
+	// Add chunk length.
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+	// Add data
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
+
+var errClosed = errors.New("s2: Writer is closed")
+
+// WriterOption is an option for creating an encoder.
+type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
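+//
+// A minimal sketch combining writer options (the values are illustrative only):
+//
+//	enc := NewWriter(dst,
+//		WriterConcurrency(4),
+//		WriterBlockSize(1<<20),
+//	)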
+func WriterConcurrency(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		w.concurrency = n
+		return nil
+	}
+}
+
+// WriterAddIndex will append an index to the end of a stream
+// when it is closed.
+func WriterAddIndex() WriterOption {
+	return func(w *Writer) error {
+		w.appendIndex = true
+		return nil
+	}
+}
+
+// WriterBetterCompression will enable better compression.
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+func WriterBetterCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBetter
+		return nil
+	}
+}
+
+// WriterBestCompression will enable the best compression.
+// EncodeBest compresses better than EncodeBetter but typically with a
+// big speed decrease on compression.
+func WriterBestCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBest
+		return nil
+	}
+}
+
+// WriterUncompressed will bypass compression.
+// The stream will be written as uncompressed blocks only.
+// If concurrency is > 1, CRC and output will still be done async.
+func WriterUncompressed() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelUncompressed
+		return nil
+	}
+}
+
+// WriterBlockSize allows overriding the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+	return func(w *Writer) error {
+		if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
+			return errors.New("s2: invalid block size. Must be <= 64K and >= 4KB for snappy compatible output")
+		}
+		if n > maxBlockSize || n < minBlockSize {
+			return errors.New("s2: invalid block size. Must be <= 4MB and >= 4KB")
+		}
+		w.blockSize = n
+		return nil
+	}
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return fmt.Errorf("s2: padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			w.pad = 0
+			return nil
+		}
+		if n > maxBlockSize {
+			return fmt.Errorf("s2: padding must be less than 4MB")
+		}
+		w.pad = n
+		return nil
+	}
+}
+
+// WriterPaddingSrc will get random data for padding from the supplied source.
+// By default crypto/rand is used.
+func WriterPaddingSrc(reader io.Reader) WriterOption {
+	return func(w *Writer) error {
+		w.randSrc = reader
+		return nil
+	}
+}
+
+// WriterSnappyCompat will write snappy compatible output.
+// The output can be decompressed using either snappy or s2.
+// If the block size is larger than 64KB, it is reduced to fit within 64KB.
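+//
+// A minimal sketch producing output that Snappy decoders can read:
+//
+//	enc := NewWriter(dst, WriterSnappyCompat())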
+func WriterSnappyCompat() WriterOption {
+	return func(w *Writer) error {
+		w.snappy = true
+		if w.blockSize > 64<<10 {
+			// We choose 8 bytes less than 64K, since that will make literal emits
+			// slightly more effective, and allows us to skip some size checks.
+			w.blockSize = (64 << 10) - 8
+		}
+		return nil
+	}
+}
+
+// WriterFlushOnWrite will compress blocks on each call to the Write function.
+//
+// This is quite inefficient as block sizes will depend on the write size.
+//
+// Use WriterConcurrency(1) to also make sure that output is flushed when
+// Write calls return; otherwise blocks will be written when compression is done.
+func WriterFlushOnWrite() WriterOption {
+	return func(w *Writer) error {
+		w.flushOnWrite = true
+		return nil
+	}
+}
+
+// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
+// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
+// Block size (initial varint) should not be added by the encoder.
+// Returning value 0 indicates the block could not be compressed.
+// Returning a negative value indicates that the default compression should be attempted instead.
+// The function should expect to be called concurrently.
+func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
+	return func(w *Writer) error {
+		w.customEnc = fn
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
new file mode 100644
index 000000000..042091d9b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
new file mode 100644
index 000000000..52ccb5a93
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Amazon.com, Inc
+Damian Gryski
+Eric Buth
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Klaus Post
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 000000000..ea6524ddd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,41 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Alex Legg +Damian Gryski +Eric Buth +Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney +Kai Backman +Klaus Post +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README.md b/vendor/github.com/klauspost/compress/snappy/README.md new file mode 100644 index 000000000..8271bbd09 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README.md @@ -0,0 +1,17 @@ +# snappy + +The Snappy compression format in the Go programming language. + +This is a drop-in replacement for `github.com/golang/snappy`. + +It provides a full, compatible replacement of the Snappy package by simply changing imports. + +See [Snappy Compatibility](https://github.com/klauspost/compress/tree/master/s2#snappy-compatibility) in the S2 documentation. + +"Better" compression mode is used. For buffered streams concurrent compression is used. + +For more options use the [s2 package](https://pkg.go.dev/github.com/klauspost/compress/s2). + +# usage + +Replace imports `github.com/golang/snappy` with `github.com/klauspost/compress/snappy`. 
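+
+A minimal sketch of the swap (function and variable names are illustrative):
+
+```go
+import "github.com/klauspost/compress/snappy"
+
+func roundTrip(src []byte) ([]byte, error) {
+	compressed := snappy.Encode(nil, src)
+	return snappy.Decode(nil, compressed)
+}
+```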
diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 000000000..89f1fa234 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,60 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = s2.ErrCorrupt + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = s2.ErrTooLarge + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = s2.ErrUnsupported +) + +const ( + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + return s2.DecodedLen(src) +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + return s2.Decode(dst, src) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize)) +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader = s2.Reader diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 000000000..e8bd72c18 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + return s2.EncodeSnappyBetter(dst, src) +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + return s2.MaxEncodedLen(srcLen) +} + +// NewWriter returns a new Writer that compresses to w. 
+// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression(), s2.WriterFlushOnWrite(), s2.WriterConcurrency(1)) +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression()) +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer = s2.Writer diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 000000000..398cdc95a --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,46 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). 
+ The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ diff --git a/vendor/github.com/klauspost/cpuid/v2/.gitignore b/vendor/github.com/klauspost/cpuid/v2/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml new file mode 100644 index 000000000..944cc0007 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/.goreleaser.yml @@ -0,0 +1,74 @@ +# This is an example goreleaser.yaml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com + +builds: + - + id: "cpuid" + binary: cpuid + main: ./cmd/cpuid/main.go + env: + - CGO_ENABLED=0 + flags: + - -ldflags=-s -w + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm64 + goarm: + - 7 + +archives: + - + id: cpuid + name_template: "cpuid-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - LICENSE +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "cpuid_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/cpuid + maintainer: Klaus Post + description: CPUID Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt new file mode 100644 index 000000000..2ef4714f7 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt @@ -0,0 +1,35 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/klauspost/cpuid/v2/LICENSE b/vendor/github.com/klauspost/cpuid/v2/LICENSE new file mode 100644 index 000000000..5cec7ee94 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md new file mode 100644 index 000000000..21508edbd --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -0,0 +1,498 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64/i386) and ARM (ARM64) are supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. + +Package home: https://github.com/klauspost/cpuid + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2) +[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml) + +## installing + +`go get -u github.com/klauspost/cpuid/v2` using modules. +Drop `v2` for others.
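+ +For reference, the matching import statements (the `cpuid` alias shown here is illustrative): + +```Go +import ( + cpuid "github.com/klauspost/cpuid/v2" // with Go modules + // cpuid "github.com/klauspost/cpuid" // without modules (v1 API) +) +```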
+ +Installing binary: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +Or download binaries from release page: https://github.com/klauspost/cpuid/releases + +### Homebrew + +For macOS/Linux users, you can install via [brew](https://brew.sh/) + +```sh +$ brew install cpuid +``` + +## example + +```Go +package main + +import ( + "fmt" + "strings" + + . "github.com/klauspost/cpuid/v2" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model, "Vendor ID:", CPU.VendorID) + fmt.Println("Features:", strings.Join(CPU.FeatureSet(), ",")) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + fmt.Println("L1 Data Cache:", CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + fmt.Println("L2 Cache:", CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", CPU.Cache.L3, "bytes") + fmt.Println("Frequency", CPU.Hz, "hz") + + // Test if we have these specific features: + if CPU.Supports(SSE, SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: AMD Ryzen 9 3950X 16-Core Processor +PhysicalCores: 16 +ThreadsPerCore: 2 +LogicalCores: 32 +Family 23 Model: 113 Vendor ID: AMD +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CMOV,CX16,F16C,FMA3,HTT,HYPERVISOR,LZCNT,MMX,MMXEXT,NX,POPCNT,RDRAND,RDSEED,RDTSCP,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3 +Cacheline bytes: 64 +L1 Data Cache: 32768 bytes +L1 Instruction Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes +Frequency 0 hz +We have Streaming SIMD 2 Extensions +``` + +# usage + +The `cpuid.CPU` variable provides access to CPU features. Use `cpuid.CPU.Supports()` to check for CPU features. +A faster `cpuid.CPU.Has()` is provided which will usually be inlined by the gc compiler. + +To test a larger number of features, they can be combined using `f := CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SYSCALL, SSE, SSE2)`, etc. +This can be used with `cpuid.CPU.HasAll(f)` to quickly test if all features are supported. + +Note that for some cpu/os combinations some features will not be detected. +`amd64` has rather good support and should work reliably on all platforms. + +Note that hypervisors may not pass all CPU features through to the guest OS, +so even if your host supports a feature it may not be visible on guests. + +## arm64 feature detection + +Not all operating systems provide ARM features directly, +and there is no safe way to detect them on the rest. + +Currently `arm64/linux` and `arm64/freebsd` should be quite reliable. +`arm64/darwin` adds features expected from the M1 processor, but a lot remains undetected. + +`DetectARM()` can be used if you are able to control your deployment; +it will detect CPU features, but may crash if the OS doesn't intercept the calls. +A `-cpu.arm` flag for detecting unsafe ARM features can be added. See below. + +Note that currently only features are detected on ARM; +no additional information is available. + +## flags + +It is possible to add flags that affect cpu detection. + +For this, the `Flags()` function is provided. + +This must be called *before* `flag.Parse()`, AND `Detect()` must be called after the flags have been parsed. + +This means that any detection used in `init()` functions will not contain these flags.
+ +Example: + +```Go +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/klauspost/cpuid/v2" +) + +func main() { + cpuid.Flags() + flag.Parse() + cpuid.Detect() + + // Test if we have these specific features: + if cpuid.CPU.Supports(cpuid.SSE, cpuid.SSE2) { + fmt.Println("We have Streaming SIMD 2 Extensions") + } +} +``` + +## commandline + +Download as binary from: https://github.com/klauspost/cpuid/releases + +Install from source: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +### Example + +``` +λ cpuid +Name: AMD Ryzen 9 3950X 16-Core Processor +Vendor String: AuthenticAMD +Vendor ID: AMD +PhysicalCores: 16 +Threads Per Core: 2 +Logical Cores: 32 +CPU Family 23 Model: 113 +Features: ADX,AESNI,AVX,AVX2,BMI1,BMI2,CLMUL,CLZERO,CMOV,CMPXCHG8,CPBOOST,CX16,F16C,FMA3,FXSR,FXSROPT,HTT,HYPERVISOR,LAHF,LZCNT,MCAOVERFLOW,MMX,MMXEXT,MOVBE,NX,OSXSAVE,POPCNT,RDRAND,RDSEED,RDTSCP,SCE,SHA,SSE,SSE2,SSE3,SSE4,SSE42,SSE4A,SSSE3,SUCCOR,X87,XSAVE +Microarchitecture level: 3 +Cacheline bytes: 64 +L1 Instruction Cache: 32768 bytes +L1 Data Cache: 32768 bytes +L2 Cache: 524288 bytes +L3 Cache: 16777216 bytes + +``` +### JSON Output: + +``` +λ cpuid --json +{ + "BrandName": "AMD Ryzen 9 3950X 16-Core Processor", + "VendorID": 2, + "VendorString": "AuthenticAMD", + "PhysicalCores": 16, + "ThreadsPerCore": 2, + "LogicalCores": 32, + "Family": 23, + "Model": 113, + "CacheLine": 64, + "Hz": 0, + "BoostFreq": 0, + "Cache": { + "L1I": 32768, + "L1D": 32768, + "L2": 524288, + "L3": 16777216 + }, + "SGX": { + "Available": false, + "LaunchControl": false, + "SGX1Supported": false, + "SGX2Supported": false, + "MaxEnclaveSizeNot64": 0, + "MaxEnclaveSize64": 0, + "EPCSections": null + }, + "Features": [ + "ADX", + "AESNI", + "AVX", + "AVX2", + "BMI1", + "BMI2", + "CLMUL", + "CLZERO", + "CMOV", + "CMPXCHG8", + "CPBOOST", + "CX16", + "F16C", + "FMA3", + "FXSR", + "FXSROPT", + "HTT", + "HYPERVISOR", + "LAHF", + "LZCNT", + "MCAOVERFLOW", + "MMX", + "MMXEXT", + "MOVBE", + "NX", + "OSXSAVE", + "POPCNT", + "RDRAND", + "RDSEED", + "RDTSCP", + "SCE", + "SHA", + "SSE", + "SSE2", + "SSE3", + "SSE4", + "SSE42", + "SSE4A", + "SSSE3", + "SUCCOR", + "X87", + "XSAVE" + ], + "X64Level": 3 +} +``` + +### Check CPU microarch level + +``` +λ cpuid --check-level=3 +2022/03/18 17:04:40 AMD Ryzen 9 3950X 16-Core Processor +2022/03/18 17:04:40 Microarchitecture level 3 is supported. Max level is 3. +Exit Code 0 + +λ cpuid --check-level=4 +2022/03/18 17:06:18 AMD Ryzen 9 3950X 16-Core Processor +2022/03/18 17:06:18 Microarchitecture level 4 not supported. Max level is 3. 
+Exit Code 1 +``` + + +## Available flags + +### x86 & amd64 + +| Feature Flag | Description | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ADX | Intel ADX (Multi-Precision Add-Carry Instruction Extensions) | +| AESNI | Advanced Encryption Standard New Instructions | +| AMD3DNOW | AMD 3DNOW | +| AMD3DNOWEXT | AMD 3DNowExt | +| AMXBF16 | Tile computational operations on BFLOAT16 numbers | +| AMXINT8 | Tile computational operations on 8-bit integers | +| AMXFP16 | Tile computational operations on FP16 numbers | +| AMXTILE | Tile architecture | +| APX_F | Intel APX | +| AVX | AVX functions | +| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported | +| AVX10_128 | If set indicates that AVX10 128-bit vector support is present | +| AVX10_256 | If set indicates that AVX10 256-bit vector support is present | +| AVX10_512 | If set indicates that AVX10 512-bit vector support is present | +| AVX2 | AVX2 functions | +| AVX512BF16 | AVX-512 BFLOAT16 Instructions | +| AVX512BITALG | AVX-512 Bit Algorithms | +| AVX512BW | AVX-512 Byte and Word Instructions | +| AVX512CD | AVX-512 Conflict Detection Instructions | +| AVX512DQ | AVX-512 Doubleword and Quadword Instructions | +| AVX512ER | AVX-512 Exponential and Reciprocal Instructions | +| AVX512F | AVX-512 Foundation | +| AVX512FP16 | AVX-512 FP16 Instructions | +| AVX512IFMA | AVX-512 Integer Fused Multiply-Add Instructions | +| AVX512PF | AVX-512 Prefetch Instructions | +| AVX512VBMI | AVX-512 Vector Bit Manipulation Instructions | +| AVX512VBMI2 | AVX-512 Vector Bit Manipulation Instructions, Version 2 | +| AVX512VL | AVX-512 Vector Length Extensions | +| AVX512VNNI | AVX-512 Vector Neural Network Instructions | +| AVX512VP2INTERSECT | AVX-512 Intersect for D/Q | +| AVX512VPOPCNTDQ | AVX-512 Vector Population Count Doubleword and Quadword | +| AVXIFMA | AVX-IFMA instructions | +| AVXNECONVERT | AVX-NE-CONVERT instructions | +| AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | +| AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | +| AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| AVXVNNIINT16 | AVX-VNNI-INT16 instructions | +| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | +| BMI1 | Bit Manipulation Instruction Set 1 | +| BMI2 | Bit Manipulation Instruction Set 2 | +| CETIBT | Intel CET Indirect Branch Tracking | +| CETSS | Intel CET Shadow Stack | +| CLDEMOTE | Cache Line Demote | +| CLMUL | Carry-less Multiplication | +| CLZERO | CLZERO instruction supported | +| CMOV | i686 CMOV | +| CMPCCXADD | CMPCCXADD instructions | +| CMPSB_SCADBS_SHORT | Fast short CMPSB and SCASB | +| CMPXCHG8 | CMPXCHG8 instruction | +| CPBOOST | Core Performance Boost | +| CPPC | AMD: Collaborative Processor Performance Control | +| CX16 | CMPXCHG16B Instruction | +| EFER_LMSLE_UNS | AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ | +| ENQCMD | Enqueue Command | +| ERMS | Enhanced REP MOVSB/STOSB | +| F16C | Half-precision floating-point conversion | +| FLUSH_L1D | Flush L1D cache | +| FMA3 | Intel FMA 3. Does not imply AVX. 
 | +| FMA4 | Bulldozer FMA4 functions | +| FP128 | AMD: When set, the internal FP/SIMD execution datapath is 128-bits wide | +| FP256 | AMD: When set, the internal FP/SIMD execution datapath is 256-bits wide | +| FSRM | Fast Short Rep Mov | +| FXSR | FXSAVE, FXRESTOR instructions, CR4 bit 9 | +| FXSROPT | FXSAVE/FXRSTOR optimizations | +| GFNI | Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. | +| HLE | Hardware Lock Elision | +| HRESET | If set CPU supports history reset and the IA32_HRESET_ENABLE MSR | +| HTT | Hyperthreading (enabled) | +| HWA | Hardware assert supported. Indicates support for MSRC001_10 | +| HYBRID_CPU | This part has CPUs of more than one type. | +| HYPERVISOR | This bit has been reserved by Intel & AMD for use by hypervisors | +| IA32_ARCH_CAP | IA32_ARCH_CAPABILITIES MSR (Intel) | +| IA32_CORE_CAP | IA32_CORE_CAPABILITIES MSR | +| IBPB | Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) | +| IBRS | AMD: Indirect Branch Restricted Speculation | +| IBRS_PREFERRED | AMD: IBRS is preferred over software solution | +| IBRS_PROVIDES_SMP | AMD: IBRS provides Same Mode Protection | +| IBS | Instruction Based Sampling (AMD) | +| IBSBRNTRGT | Instruction Based Sampling Feature (AMD) | +| IBSFETCHSAM | Instruction Based Sampling Feature (AMD) | +| IBSFFV | Instruction Based Sampling Feature (AMD) | +| IBSOPCNT | Instruction Based Sampling Feature (AMD) | +| IBSOPCNTEXT | Instruction Based Sampling Feature (AMD) | +| IBSOPSAM | Instruction Based Sampling Feature (AMD) | +| IBSRDWROPCNT | Instruction Based Sampling Feature (AMD) | +| IBSRIPINVALIDCHK | Instruction Based Sampling Feature (AMD) | +| IBS_FETCH_CTLX | AMD: IBS fetch control extended MSR supported | +| IBS_OPDATA4 | AMD: IBS op data 4 MSR supported | +| IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | +| IBS_PREVENTHOST | Disallowing IBS use by the host supported | +| IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| IDPRED_CTRL | IPRED_DIS | +| INT_WBINVD | WBINVD/WBNOINVD are interruptible. | +| INVLPGB | INVLPGB and TLBSYNC instructions supported | +| KEYLOCKER | Key locker | +| KEYLOCKERW | Key locker wide | +| LAHF | LAHF/SAHF in long mode | +| LAM | If set, CPU supports Linear Address Masking | +| LBRVIRT | LBR virtualization | +| LZCNT | LZCNT instruction | +| MCAOVERFLOW | MCA overflow recovery support. | +| MCDT_NO | Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it. | +| MCOMMIT | MCOMMIT instruction supported | +| MD_CLEAR | VERW clears CPU buffers | +| MMX | standard MMX | +| MMXEXT | SSE integer functions or AMD MMX ext | +| MOVBE | MOVBE instruction (big-endian) | +| MOVDIR64B | Move 64 Bytes as Direct Store | +| MOVDIRI | Move Doubleword as Direct Store | +| MOVSB_ZL | Fast Zero-Length MOVSB | +| MPX | Intel MPX (Memory Protection Extensions) | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS.
MOVUPD is more efficient than MOVLPD/MOVHPD | +| MSRIRC | Instruction Retired Counter MSR available | +| MSRLIST | Read/Write List of Model Specific Registers | +| MSR_PAGEFLUSH | Page Flush MSR available | +| NRIPS | Indicates support for NRIP save on VMEXIT | +| NX | NX (No-Execute) bit | +| OSXSAVE | XSAVE enabled by OS | +| PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | +| POPCNT | POPCNT instruction | +| PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | Predictive Store Forward Disable | +| RDPRU | RDPRU instruction supported | +| RDRAND | RDRAND instruction is available | +| RDSEED | RDSEED instruction is available | +| RDTSCP | RDTSCP Instruction | +| RRSBA_CTRL | Restricted RSB Alternate | +| RTM | Restricted Transactional Memory | +| RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. | +| SERIALIZE | Serialize Instruction Execution | +| SEV | AMD Secure Encrypted Virtualization supported | +| SEV_64BIT | AMD SEV guest execution only allowed from a 64-bit host | +| SEV_ALTERNATIVE | AMD SEV Alternate Injection supported | +| SEV_DEBUGSWAP | Full debug state swap supported for SEV-ES guests | +| SEV_ES | AMD SEV Encrypted State supported | +| SEV_RESTRICTED | AMD SEV Restricted Injection supported | +| SEV_SNP | AMD SEV Secure Nested Paging supported | +| SGX | Software Guard Extensions | +| SGXLC | Software Guard Extensions Launch Control | +| SHA | Intel SHA Extensions | +| SME | AMD Secure Memory Encryption supported | +| SME_COHERENT | AMD Hardware cache coherency across encryption domains enforced | +| SPEC_CTRL_SSBD | Speculative Store Bypass Disable | +| SRBDS_CTRL | SRBDS mitigation MSR available | +| SSE | SSE functions | +| SSE2 | P4 SSE functions | +| SSE3 | Prescott SSE3 functions | +| SSE4 | Penryn SSE4.1 functions | +| SSE42 | Nehalem SSE4.2 functions | +| SSE4A | AMD Barcelona microarchitecture SSE4a instructions | +| SSSE3 | Conroe SSSE3 functions | +| STIBP | Single Thread Indirect Branch Predictors | +| STIBP_ALWAYSON | AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On | +| STOSB_SHORT | Fast short STOSB | +| SUCCOR | Software uncorrectable error containment and recovery capability. | +| SVM | AMD Secure Virtual Machine | +| SVMDA | Indicates support for the SVM decode assists. | +| SVMFBASID | SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control | +| SVML | AMD SVM lock. Indicates support for SVM-Lock. | +| SVMNP | AMD SVM nested paging | +| SVMPF | SVM pause intercept filter. Indicates support for the pause intercept filter | +| SVMPFT | SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold | +| SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. | +| SYSEE | SYSENTER and SYSEXIT instructions | +| TBM | AMD Trailing Bit Manipulation | +| TDX_GUEST | Intel Trust Domain Extensions Guest | +| TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations | +| TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. | +| TOPEXT | TopologyExtensions: topology extensions support. 
Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. | +| TSCRATEMSR | MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 | +| TSXLDTRK | Intel TSX Suspend Load Address Tracking | +| VAES | Vector AES. AVX(512) versions require additional checks. | +| VMCBCLEAN | VMCB clean bits. Indicates support for VMCB clean bits. | +| VMPL | AMD VM Permission Levels supported | +| VMSA_REGPROT | AMD VMSA Register Protection supported | +| VMX | Virtual Machine Extensions | +| VPCLMULQDQ | Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. | +| VTE | AMD Virtual Transparent Encryption supported | +| WAITPKG | TPAUSE, UMONITOR, UMWAIT | +| WBNOINVD | Write Back and Do Not Invalidate Cache | +| WRMSRNS | Non-Serializing Write to Model Specific Register | +| X87 | FPU | +| XGETBV1 | Supports XGETBV with ECX = 1 | +| XOP | Bulldozer XOP functions | +| XSAVE | XSAVE, XRSTOR, XSETBV, XGETBV | +| XSAVEC | Supports XSAVEC and the compacted form of XRSTOR. | +| XSAVEOPT | XSAVEOPT available | +| XSAVES | Supports XSAVES/XRSTORS and IA32_XSS | + +# ARM features: + +| Feature Flag | Description | +|--------------|------------------------------------------------------------------| +| AESARM | AES instructions | +| ARMCPUID | Some CPU ID registers readable at user-level | +| ASIMD | Advanced SIMD | +| ASIMDDP | SIMD Dot Product | +| ASIMDHP | Advanced SIMD half-precision floating point | +| ASIMDRDM | Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) | +| ATOMICS | Large System Extensions (LSE) | +| CRC32 | CRC32/CRC32C instructions | +| DCPOP | Data cache clean to Point of Persistence (DC CVAP) | +| EVTSTRM | Generic timer | +| FCMA | Floating point complex number addition and multiplication | +| FP | Single-precision and double-precision floating point | +| FPHP | Half-precision floating point | +| GPA | Generic Pointer Authentication | +| JSCVT | Javascript-style double->int convert (FJCVTZS) | +| LRCPC | Weaker release consistency (LDAPR, etc) | +| PMULL | Polynomial Multiply instructions (PMULL/PMULL2) | +| SHA1 | SHA-1 instructions (SHA1C, etc) | +| SHA2 | SHA-2 instructions (SHA256H, etc) | +| SHA3 | SHA-3 instructions (EOR3, RAXI, XAR, BCAX) | +| SHA512 | SHA512 instructions | +| SM3 | SM3 instructions | +| SM4 | SM4 instructions | +| SVE | Scalable Vector Extension | + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go new file mode 100644 index 000000000..53bc18ca7 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -0,0 +1,1516 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. +// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) as well as arm64 are supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import ( + "flag" + "fmt" + "math" + "math/bits" + "os" + "runtime" + "strings" +) + +// AMD reference: https://www.amd.com/system/files/TechDocs/25481.pdf +// and Processor Programming Reference (PPR) + +// Vendor is a representation of a CPU vendor.
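+// Vendor values can be compared directly, for example CPU.VendorID == AMD, or via the CPUInfo.IsVendor method defined later in this file.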
+type Vendor int + +const ( + VendorUnknown Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM + Bhyve + Hygon + SiS + RDC + + Ampere + ARM + Broadcom + Cavium + DEC + Fujitsu + Infineon + Motorola + NVIDIA + AMCC + Qualcomm + Marvell + + lastVendor +) + +//go:generate stringer -type=FeatureID,Vendor + +// FeatureID is the ID of a specific cpu feature. +type FeatureID int + +const ( + // Keep index -1 as unknown + UNKNOWN = -1 + + // x86 features + ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + AESNI // Advanced Encryption Standard New Instructions + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + AMXBF16 // Tile computational operations on BFLOAT16 numbers + AMXFP16 // Tile computational operations on FP16 numbers + AMXINT8 // Tile computational operations on 8-bit integers + AMXTILE // Tile architecture + APX_F // Intel APX + AVX // AVX functions + AVX10 // If set the Intel AVX10 Converged Vector ISA is supported + AVX10_128 // If set indicates that AVX10 128-bit vector support is present + AVX10_256 // If set indicates that AVX10 256-bit vector support is present + AVX10_512 // If set indicates that AVX10 512-bit vector support is present + AVX2 // AVX2 functions + AVX512BF16 // AVX-512 BFLOAT16 Instructions + AVX512BITALG // AVX-512 Bit Algorithms + AVX512BW // AVX-512 Byte and Word Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512F // AVX-512 Foundation + AVX512FP16 // AVX-512 FP16 Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 + AVX512VL // AVX-512 Vector Length Extensions + AVX512VNNI // AVX-512 Vector Neural Network Instructions + AVX512VP2INTERSECT // AVX-512 Intersect for D/Q + AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword + AVXIFMA // AVX-IFMA instructions + AVXNECONVERT // AVX-NE-CONVERT instructions + AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one + AVXVNNI // AVX (VEX encoded) VNNI neural network instructions + AVXVNNIINT8 // AVX-VNNI-INT8 instructions + AVXVNNIINT16 // AVX-VNNI-INT16 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + CETIBT // Intel CET Indirect Branch Tracking + CETSS // Intel CET Shadow Stack + CLDEMOTE // Cache Line Demote + CLMUL // Carry-less Multiplication + CLZERO // CLZERO instruction supported + CMOV // i686 CMOV + CMPCCXADD // CMPCCXADD instructions + CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB + CMPXCHG8 // CMPXCHG8 instruction + CPBOOST // Core Performance Boost + CPPC // AMD: Collaborative Processor Performance Control + CX16 // CMPXCHG16B Instruction + EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ + ENQCMD // Enqueue Command + ERMS // Enhanced REP MOVSB/STOSB + F16C // Half-precision floating-point conversion + FLUSH_L1D // Flush L1D cache + FMA3 // Intel FMA 3. Does not imply AVX. 
+	FMA4                // Bulldozer FMA4 functions + FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide + FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide + FSRM // Fast Short Rep Mov + FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 + FXSROPT // FXSAVE/FXRSTOR optimizations + GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. + HLE // Hardware Lock Elision + HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR + HTT // Hyperthreading (enabled) + HWA // Hardware assert supported. Indicates support for MSRC001_10 + HYBRID_CPU // This part has CPUs of more than one type. + HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors + IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) + IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR + IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) + IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor + IBRS // AMD: Indirect Branch Restricted Speculation + IBRS_PREFERRED // AMD: IBRS is preferred over software solution + IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection + IBS // Instruction Based Sampling (AMD) + IBSBRNTRGT // Instruction Based Sampling Feature (AMD) + IBSFETCHSAM // Instruction Based Sampling Feature (AMD) + IBSFFV // Instruction Based Sampling Feature (AMD) + IBSOPCNT // Instruction Based Sampling Feature (AMD) + IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) + IBSOPSAM // Instruction Based Sampling Feature (AMD) + IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) + IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) + IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported + IBS_OPDATA4 // AMD: IBS op data 4 MSR supported + IBS_OPFUSE // AMD: Indicates support for IbsOpFuse + IBS_PREVENTHOST // Disallowing IBS use by the host supported + IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS + INT_WBINVD // WBINVD/WBNOINVD are interruptible. + INVLPGB // INVLPGB and TLBSYNC instructions supported + KEYLOCKER // Key locker + KEYLOCKERW // Key locker wide + LAHF // LAHF/SAHF in long mode + LAM // If set, CPU supports Linear Address Masking + LBRVIRT // LBR virtualization + LZCNT // LZCNT instruction + MCAOVERFLOW // MCA overflow recovery support. + MCDT_NO // Processor does not exhibit MXCSR Configuration Dependent Timing behavior and does not need to mitigate it. + MCOMMIT // MCOMMIT instruction supported + MD_CLEAR // VERW clears CPU buffers + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + MOVBE // MOVBE instruction (big-endian) + MOVDIR64B // Move 64 Bytes as Direct Store + MOVDIRI // Move Doubleword as Direct Store + MOVSB_ZL // Fast Zero-Length MOVSB + MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS.
MOVUPD is more efficient than MOVLPD/MOVHPD + MPX // Intel MPX (Memory Protection Extensions) + MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers + MSR_PAGEFLUSH // Page Flush MSR available + NRIPS // Indicates support for NRIP save on VMEXIT + NX // NX (No-Execute) bit + OSXSAVE // XSAVE enabled by OS + PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption + POPCNT // POPCNT instruction + PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled + PREFETCHI // PREFETCHIT0/1 instructions + PSFD // Predictive Store Forward Disable + RDPRU // RDPRU instruction supported + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate + RTM // Restricted Transactional Memory + RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. + SBPB // Indicates support for the Selective Branch Predictor Barrier + SERIALIZE // Serialize Instruction Execution + SEV // AMD Secure Encrypted Virtualization supported + SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host + SEV_ALTERNATIVE // AMD SEV Alternate Injection supported + SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests + SEV_ES // AMD SEV Encrypted State supported + SEV_RESTRICTED // AMD SEV Restricted Injection supported + SEV_SNP // AMD SEV Secure Nested Paging supported + SGX // Software Guard Extensions + SGXLC // Software Guard Extensions Launch Control + SHA // Intel SHA Extensions + SME // AMD Secure Memory Encryption supported + SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced + SPEC_CTRL_SSBD // Speculative Store Bypass Disable + SRBDS_CTRL // SRBDS mitigation MSR available + SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. + SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability + SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE42 // Nehalem SSE4.2 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSSE3 // Conroe SSSE3 functions + STIBP // Single Thread Indirect Branch Predictors + STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On + STOSB_SHORT // Fast short STOSB + SUCCOR // Software uncorrectable error containment and recovery capability. + SVM // AMD Secure Virtual Machine + SVMDA // Indicates support for the SVM decode assists. + SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control + SVML // AMD SVM lock. Indicates support for SVM-Lock. + SVMNP // AMD SVM nested paging + SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter + SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold + SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. 
+	SYSEE               // SYSENTER and SYSEXIT instructions + TBM // AMD Trailing Bit Manipulation + TDX_GUEST // Intel Trust Domain Extensions Guest + TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations + TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. + TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. + TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 + TSXLDTRK // Intel TSX Suspend Load Address Tracking + VAES // Vector AES. AVX(512) versions require additional checks. + VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. + VMPL // AMD VM Permission Levels supported + VMSA_REGPROT // AMD VMSA Register Protection supported + VMX // Virtual Machine Extensions + VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. + VTE // AMD Virtual Transparent Encryption supported + WAITPKG // TPAUSE, UMONITOR, UMWAIT + WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register + X87 // FPU + XGETBV1 // Supports XGETBV with ECX = 1 + XOP // Bulldozer XOP functions + XSAVE // XSAVE, XRSTOR, XSETBV, XGETBV + XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. + XSAVEOPT // XSAVEOPT available + XSAVES // Supports XSAVES/XRSTORS and IA32_XSS + + // ARM features: + AESARM // AES instructions + ARMCPUID // Some CPU ID registers readable at user-level + ASIMD // Advanced SIMD + ASIMDDP // SIMD Dot Product + ASIMDHP // Advanced SIMD half-precision floating point + ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) + ATOMICS // Large System Extensions (LSE) + CRC32 // CRC32/CRC32C instructions + DCPOP // Data cache clean to Point of Persistence (DC CVAP) + EVTSTRM // Generic timer + FCMA // Floating point complex number addition and multiplication + FP // Single-precision and double-precision floating point + FPHP // Half-precision floating point + GPA // Generic Pointer Authentication + JSCVT // Javascript-style double->int convert (FJCVTZS) + LRCPC // Weaker release consistency (LDAPR, etc) + PMULL // Polynomial Multiply instructions (PMULL/PMULL2) + SHA1 // SHA-1 instructions (SHA1C, etc) + SHA2 // SHA-2 instructions (SHA256H, etc) + SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) + SHA512 // SHA512 instructions + SM3 // SM3 instructions + SM4 // SM4 instructions + SVE // Scalable Vector Extension + // Keep it last. It automatically defines the size of []flagSet + lastID + + firstID FeatureID = UNKNOWN + 1 +) + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + VendorString string // Raw vendor string. + featureSet flagSet // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + Stepping int // CPU stepping info + CacheLine int // Cache line size in bytes. Will be 0 if undetectable.
+	Hz             int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected + } + SGX SGXSupport + AMDMemEncryption AMDMemEncryptionSupport + AVX10Level uint8 + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) +var darwinHasAVX512 = func() bool { return false } + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to your data. +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. +func Detect() { + // Set defaults + CPU.ThreadsPerCore = 1 + CPU.Cache.L1I = -1 + CPU.Cache.L1D = -1 + CPU.Cache.L2 = -1 + CPU.Cache.L3 = -1 + safe := true + if detectArmFlag != nil { + safe = !*detectArmFlag + } + addInfo(&CPU, safe) + if displayFeats != nil && *displayFeats { + fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ",")) + // Exit with non-zero so tests will print value. + os.Exit(1) + } + if disableFlag != nil { + s := strings.Split(*disableFlag, ",") + for _, feat := range s { + feat := ParseFeature(strings.TrimSpace(feat)) + if feat != UNKNOWN { + CPU.featureSet.unset(feat) + } + } + } +} + +// DetectARM will detect ARM64 features. +// This is NOT done automatically since it can potentially crash +// if the OS does not handle the command. +// If in the future this can be done safely this function may not +// do anything. +func DetectARM() { + addInfo(&CPU, false) +} + +var detectArmFlag *bool +var displayFeats *bool +var disableFlag *string + +// Flags will register command-line flags that affect CPU detection. +// This must be called *before* flag.Parse AND +// Detect must be called after the flags have been parsed. +// Note that this means that any detection used in init() functions +// will not contain these flags. +func Flags() { + disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list") + displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits") + detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash") +} + +// Supports returns whether the CPU supports all of the requested features. +func (c CPUInfo) Supports(ids ...FeatureID) bool { + for _, id := range ids { + if !c.featureSet.inSet(id) { + return false + } + } + return true +} + +// Has allows for checking a single feature. +// Should be inlined by the compiler. +func (c *CPUInfo) Has(id FeatureID) bool { + return c.featureSet.inSet(id) +} + +// AnyOf returns whether the CPU supports one or more of the requested features.
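+// For example, CPU.AnyOf(SSE42, AVX) reports true if at least one of the two features was detected.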
+func (c CPUInfo) AnyOf(ids ...FeatureID) bool { + for _, id := range ids { + if c.featureSet.inSet(id) { + return true + } + } + return false +} + +// Features contains several features combined for a fast check using +// CPUInfo.HasAll +type Features *flagSet + +// CombineFeatures combines several features for a close to constant time lookup. +func CombineFeatures(ids ...FeatureID) Features { + var v flagSet + for _, id := range ids { + v.set(id) + } + return &v +} + +func (c *CPUInfo) HasAll(f Features) bool { + return c.featureSet.hasSetP(f) +} + +// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels +var oneOfLevel = CombineFeatures(SYSEE, SYSCALL) +var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2) +var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3) +var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE) +var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL) + +// X64Level returns the microarchitecture level detected on the CPU. +// If features are lacking or in non-x64 mode, 0 is returned. +// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels +func (c CPUInfo) X64Level() int { + if !c.featureSet.hasOneOf(oneOfLevel) { + return 0 + } + if c.featureSet.hasSetP(level4Features) { + return 4 + } + if c.featureSet.hasSetP(level3Features) { + return 3 + } + if c.featureSet.hasSetP(level2Features) { + return 2 + } + if c.featureSet.hasSetP(level1Features) { + return 1 + } + return 0 +} + +// Disable will disable one or several features. +func (c *CPUInfo) Disable(ids ...FeatureID) bool { + for _, id := range ids { + c.featureSet.unset(id) + } + return true +} + +// Enable will enable one or several features even if they were undetected. +// This is of course not recommended for obvious reasons. +func (c *CPUInfo) Enable(ids ...FeatureID) bool { + for _, id := range ids { + c.featureSet.set(id) + } + return true +} + +// IsVendor returns true if the CPU vendor matches v. +func (c CPUInfo) IsVendor(v Vendor) bool { + return c.VendorID == v +} + +// FeatureSet returns all available features as strings. +func (c CPUInfo) FeatureSet() []string { + s := make([]string, 0, c.featureSet.nEnabled()) + s = append(s, c.featureSet.Strings()...) + return s +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.Supports(RDTSCP) { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c CPUInfo) Ia32TscAux() uint32 { + if !c.Supports(RDTSCP) { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU.
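+// (The value is derived from the initial APIC ID in CPUID leaf 1, EBX bits 31:24.)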
+// If the current core cannot be detected, -1 will be returned. +func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// frequencies tries to compute the clock speed of the CPU. If leaf 15 is +// supported, use it, otherwise parse the brand string. Yes, really. +func (c *CPUInfo) frequencies() { + c.Hz, c.BoostFreq = 0, 0 + mfi := maxFunctionID() + if mfi >= 0x15 { + eax, ebx, ecx, _ := cpuid(0x15) + if eax != 0 && ebx != 0 && ecx != 0 { + c.Hz = (int64(ecx) * int64(ebx)) / int64(eax) + } + } + if mfi >= 0x16 { + a, b, _, _ := cpuid(0x16) + // Base... + if a&0xffff > 0 { + c.Hz = int64(a&0xffff) * 1_000_000 + } + // Boost... + if b&0xffff > 0 { + c.BoostFreq = int64(b&0xffff) * 1_000_000 + } + } + if c.Hz > 0 { + return + } + + // computeHz determines the official rated speed of a CPU from its brand + // string. This insanity is *actually the official documented way to do + // this according to Intel*, prior to leaf 0x15 existing. The official + // documentation only shows this working for exactly `x.xx` or `xxxx` + // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other + // sizes. + model := c.BrandName + hz := strings.LastIndex(model, "Hz") + if hz < 3 { + return + } + var multiplier int64 + switch model[hz-1] { + case 'M': + multiplier = 1000 * 1000 + case 'G': + multiplier = 1000 * 1000 * 1000 + case 'T': + multiplier = 1000 * 1000 * 1000 * 1000 + } + if multiplier == 0 { + return + } + freq := int64(0) + divisor := int64(0) + decimalShift := int64(1) + var i int + for i = hz - 2; i >= 0 && model[i] != ' '; i-- { + if model[i] >= '0' && model[i] <= '9' { + freq += int64(model[i]-'0') * decimalShift + decimalShift *= 10 + } else if model[i] == '.' { + if divisor != 0 { + return + } + divisor = decimalShift + } else { + return + } + } + // we didn't find a space + if i < 0 { + return + } + if divisor != 0 { + c.Hz = (freq * multiplier) / divisor + return + } + c.Hz = freq * multiplier +} + +// VM will return true if the cpu id indicates we are in +// a virtual machine. +func (c CPUInfo) VM() bool { + return CPU.featureSet.inSet(HYPERVISOR) +} + +// flags contains detected cpu features and characteristics +type flags uint64 + +// log2(bits_in_uint64) +const flagBitsLog2 = 6 +const flagBits = 1 << flagBitsLog2 +const flagMask = flagBits - 1 + +// flagSet contains detected cpu features and characteristics in an array of flags +type flagSet [(lastID + flagMask) / flagBits]flags + +func (s *flagSet) inSet(feat FeatureID) bool { + return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0 +} + +func (s *flagSet) set(feat FeatureID) { + s[feat>>flagBitsLog2] |= 1 << (feat & flagMask) +} + +// setIf will set a feature if boolean is true. +func (s *flagSet) setIf(cond bool, features ...FeatureID) { + if cond { + for _, offset := range features { + s[offset>>flagBitsLog2] |= 1 << (offset & flagMask) + } + } +} + +func (s *flagSet) unset(offset FeatureID) { + bit := flags(1 << (offset & flagMask)) + s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit +} + +// or with another flagset. +func (s *flagSet) or(other flagSet) { + for i, v := range other[:] { + s[i] |= v + } +} + +// hasSet returns whether all features are present. +func (s *flagSet) hasSet(other flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasSetP returns whether all features in the referenced set are present.
+func (s *flagSet) hasSetP(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != v { + return false + } + } + return true +} + +// hasOneOf returns whether one or more features are present. +func (s *flagSet) hasOneOf(other *flagSet) bool { + for i, v := range other[:] { + if s[i]&v != 0 { + return true + } + } + return false +} + +// nEnabled will return the number of enabled flags. +func (s *flagSet) nEnabled() (n int) { + for _, v := range s[:] { + n += bits.OnesCount64(uint64(v)) + } + return n +} + +func flagSetWith(feat ...FeatureID) flagSet { + var res flagSet + for _, f := range feat { + res.set(f) + } + return res +} + +// ParseFeature will parse the string and return the ID of the matching feature. +// Will return UNKNOWN if not found. +func ParseFeature(s string) FeatureID { + s = strings.ToUpper(s) + for i := firstID; i < lastID; i++ { + if i.String() == s { + return i + } + } + return UNKNOWN +} + +// Strings returns an array of the detected features for the flagSet. +func (s flagSet) Strings() []string { + if len(s) == 0 { + return []string{""} + } + r := make([]string, 0) + for i := firstID; i < lastID; i++ { + if s.inSet(i) { + r = append(r, i.String()) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + vend, _ := vendorID() + + if mfi < 0x4 || (vend != Intel && vend != AMD) { + return 1 + } + + if mfi < 0xb { + if vend != Intel { + return 1 + } + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + if vend == AMD { + // Workaround for AMD returning 0, assume 2 if >= Zen 2 + // It will be more correct than not. + fam, _, _ := familyModel() + _, _, _, d := cpuid(1) + if (d&(1<<28)) != 0 && fam >= 23 { + return 2 + } + } + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + v, _ := vendorID() + switch v { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD, Hygon: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (family, model, stepping int) { + if maxFunctionID() < 0x1 { + return 0, 0, 0 + } + eax, _, _, _ := cpuid(1) + // If BaseFamily[3:0] is less than Fh then ExtendedFamily[7:0] is reserved and Family is equal to BaseFamily[3:0]. + family = int((eax >> 8) & 0xf) + extFam := family == 0x6 // Intel is 0x6, needs extended model.
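+	// CPUID leaf 1 EAX layout: stepping in bits 3:0, base model in bits 7:4, base family in bits 11:8, extended model in bits 19:16 and extended family in bits 27:20; the extended fields are folded in below.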
+	if family == 0xf { + // Add ExtFamily + family += int((eax >> 20) & 0xff) + extFam = true + } + // If BaseFamily[3:0] is less than 0Fh then ExtendedModel[3:0] is reserved and Model is equal to BaseModel[3:0]. + model = int((eax >> 4) & 0xf) + if extFam { + // Add ExtModel + model += int((eax >> 12) & 0xf0) + } + stepping = int(eax & 0xf) + return family, model, stepping +} + +func physicalCores() int { + v, _ := vendorID() + switch v { + case Intel: + return logicalCores() / threadsPerCore() + case AMD, Hygon: + lc := logicalCores() + tpc := threadsPerCore() + if lc > 0 && tpc > 0 { + return lc / tpc + } + + // The following is inaccurate on AMD EPYC 7742 64-Core Processor + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + if c&0xff > 0 { + return int(c&0xff) + 1 + } + } + } + return 0 +} + +// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVMKVM": KVM, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, + "bhyve bhyve ": Bhyve, + "HygonGenuine": Hygon, + "Vortex86 SoC": SiS, + "SiS SiS SiS ": SiS, + "RiseRiseRise": SiS, + "Genuine RDC": RDC, +} + +func vendorID() (Vendor, string) { + _, b, c, d := cpuid(0) + v := string(valAsString(b, d, c)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor, _ := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + } + c.Cache.L1I, c.Cache.L1D, c.Cache.L2, c.Cache.L3 = 0, 0, 0, 0 + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + // Unified cache: fill whichever L1 entries are still unset. + if c.Cache.L1D < 0 { + c.Cache.L1D = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD, Hygon: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + + // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties + if maxExtendedFunction() < 0x8000001D || !c.Has(TOPEXT) { + return + } + + // Xen Hypervisor is buggy and returns the same entry no matter ECX value. + // Hack: When we encounter the same entry 100 times we break.
+ nSame := 0 + var last uint32 + for i := uint32(0); i < math.MaxUint32; i++ { + eax, ebx, ecx, _ := cpuidex(0x8000001D, i) + + level := (eax >> 5) & 7 + cacheNumSets := ecx + 1 + cacheLineSize := 1 + (ebx & 2047) + cachePhysPartitions := 1 + ((ebx >> 12) & 511) + cacheNumWays := 1 + ((ebx >> 22) & 511) + + typ := eax & 15 + size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays) + if typ == 0 { + return + } + + // Check for the same value repeated. + comb := eax ^ ebx ^ ecx + if comb == last { + nSame++ + if nSame == 100 { + return + } + } + last = comb + + switch level { + case 1: + switch typ { + case 1: + // Data cache + c.Cache.L1D = size + case 2: + // Inst cache + c.Cache.L1I = size + default: + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + } +} + +type SGXEPCSection struct { + BaseAddress uint64 + EPCSize uint64 +} + +type SGXSupport struct { + Available bool + LaunchControl bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 + EPCSections []SGXEPCSection +} + +func hasSGX(available, lc bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + rval.LaunchControl = lc + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + rval.EPCSections = make([]SGXEPCSection, 0) + + for subleaf := uint32(2); subleaf < 2+8; subleaf++ { + eax, ebx, ecx, edx := cpuidex(0x12, subleaf) + leafType := eax & 0xf + + if leafType == 0 { + // Invalid subleaf, stop iterating + break + } else if leafType == 1 { + // EPC Section subleaf + baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32) + size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32) + + section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size} + rval.EPCSections = append(rval.EPCSections, section) + } + } + + return +} + +type AMDMemEncryptionSupport struct { + Available bool + CBitPossition uint32 + NumVMPL uint32 + PhysAddrReduction uint32 + NumEntryptedGuests uint32 + MinSevNoEsAsid uint32 +} + +func hasAMDMemEncryption(available bool) (rval AMDMemEncryptionSupport) { + rval.Available = available + if !available { + return + } + + _, b, c, d := cpuidex(0x8000001f, 0) + + rval.CBitPossition = b & 0x3f + rval.PhysAddrReduction = (b >> 6) & 0x3F + rval.NumVMPL = (b >> 12) & 0xf + rval.NumEntryptedGuests = c + rval.MinSevNoEsAsid = d + + return +} + +func support() flagSet { + var fs flagSet + mfi := maxFunctionID() + vend, _ := vendorID() + if mfi < 0x1 { + return fs + } + family, model, _ := familyModel() + + _, _, c, d := cpuid(1) + fs.setIf((d&(1<<0)) != 0, X87) + fs.setIf((d&(1<<8)) != 0, CMPXCHG8) + fs.setIf((d&(1<<11)) != 0, SYSEE) + fs.setIf((d&(1<<15)) != 0, CMOV) + fs.setIf((d&(1<<23)) != 0, MMX) + fs.setIf((d&(1<<24)) != 0, FXSR) + fs.setIf((d&(1<<25)) != 0, FXSROPT) + fs.setIf((d&(1<<25)) != 0, SSE) + fs.setIf((d&(1<<26)) != 0, SSE2) + fs.setIf((c&1) != 0, SSE3) + fs.setIf((c&(1<<5)) != 0, VMX) + fs.setIf((c&(1<<9)) != 0, SSSE3) + fs.setIf((c&(1<<19)) != 0, SSE4) + fs.setIf((c&(1<<20)) != 0, SSE42) + fs.setIf((c&(1<<25)) != 0, AESNI) + fs.setIf((c&(1<<1)) != 0, CLMUL) + fs.setIf(c&(1<<22) != 0, MOVBE) + fs.setIf(c&(1<<23) != 0, POPCNT) + fs.setIf(c&(1<<30) != 0, RDRAND) + + // This bit has been reserved by Intel & AMD 
for use by hypervisors, + // and indicates the presence of a hypervisor. + fs.setIf(c&(1<<31) != 0, HYPERVISOR) + fs.setIf(c&(1<<29) != 0, F16C) + fs.setIf(c&(1<<13) != 0, CX16) + + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 { + fs.setIf(threadsPerCore() > 1, HTT) + } + fs.setIf(c&1<<26 != 0, XSAVE) + fs.setIf(c&1<<27 != 0, OSXSAVE) + // Check XGETBV/XSAVE (26), OXSAVE (27) and AVX (28) bits + const avxCheck = 1<<26 | 1<<27 | 1<<28 + if c&avxCheck == avxCheck { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + fs.set(AVX) + switch vend { + case Intel: + // Older than Haswell. + fs.setIf(family == 6 && model < 60, AVXSLOW) + case AMD: + // Older than Zen 2 + fs.setIf(family < 23 || (family == 23 && model < 49), AVXSLOW) + } + } + } + // FMA3 can be used with SSE registers, so no OS support is strictly needed. + // fma3 and OSXSAVE needed. + const fma3Check = 1<<12 | 1<<27 + fs.setIf(c&fma3Check == fma3Check, FMA3) + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, edx := cpuidex(7, 0) + if fs.inSet(AVX) && (ebx&0x00000020) != 0 { + fs.set(AVX2) + } + // CPUID.(EAX=7, ECX=0).EBX + if (ebx & 0x00000008) != 0 { + fs.set(BMI1) + fs.setIf((ebx&0x00000100) != 0, BMI2) + } + fs.setIf(ebx&(1<<2) != 0, SGX) + fs.setIf(ebx&(1<<4) != 0, HLE) + fs.setIf(ebx&(1<<9) != 0, ERMS) + fs.setIf(ebx&(1<<11) != 0, RTM) + fs.setIf(ebx&(1<<14) != 0, MPX) + fs.setIf(ebx&(1<<18) != 0, RDSEED) + fs.setIf(ebx&(1<<19) != 0, ADX) + fs.setIf(ebx&(1<<29) != 0, SHA) + + // CPUID.(EAX=7, ECX=0).ECX + fs.setIf(ecx&(1<<5) != 0, WAITPKG) + fs.setIf(ecx&(1<<7) != 0, CETSS) + fs.setIf(ecx&(1<<8) != 0, GFNI) + fs.setIf(ecx&(1<<9) != 0, VAES) + fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) + fs.setIf(ecx&(1<<13) != 0, TME) + fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) + fs.setIf(ecx&(1<<23) != 0, KEYLOCKER) + fs.setIf(ecx&(1<<27) != 0, MOVDIRI) + fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) + fs.setIf(ecx&(1<<29) != 0, ENQCMD) + fs.setIf(ecx&(1<<30) != 0, SGXLC) + + // CPUID.(EAX=7, ECX=0).EDX + fs.setIf(edx&(1<<4) != 0, FSRM) + fs.setIf(edx&(1<<9) != 0, SRBDS_CTRL) + fs.setIf(edx&(1<<10) != 0, MD_CLEAR) + fs.setIf(edx&(1<<11) != 0, RTM_ALWAYS_ABORT) + fs.setIf(edx&(1<<14) != 0, SERIALIZE) + fs.setIf(edx&(1<<15) != 0, HYBRID_CPU) + fs.setIf(edx&(1<<16) != 0, TSXLDTRK) + fs.setIf(edx&(1<<18) != 0, PCONFIG) + fs.setIf(edx&(1<<20) != 0, CETIBT) + fs.setIf(edx&(1<<26) != 0, IBPB) + fs.setIf(edx&(1<<27) != 0, STIBP) + fs.setIf(edx&(1<<28) != 0, FLUSH_L1D) + fs.setIf(edx&(1<<29) != 0, IA32_ARCH_CAP) + fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP) + fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD) + + // CPUID.(EAX=7, ECX=1).EAX + eax1, _, _, edx1 := cpuidex(7, 1) + fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI) + fs.setIf(eax1&(1<<7) != 0, CMPCCXADD) + fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) + fs.setIf(eax1&(1<<11) != 0, STOSB_SHORT) + fs.setIf(eax1&(1<<12) != 0, CMPSB_SCADBS_SHORT) + fs.setIf(eax1&(1<<22) != 0, HRESET) + fs.setIf(eax1&(1<<23) != 0, AVXIFMA) + fs.setIf(eax1&(1<<26) != 0, LAM) + + // CPUID.(EAX=7, ECX=1).EDX + fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) + fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<10) != 0, AVXVNNIINT16) + fs.setIf(edx1&(1<<14) != 0, PREFETCHI) + fs.setIf(edx1&(1<<19) != 0, AVX10) + fs.setIf(edx1&(1<<21) != 0, APX_F) + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, 
_ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + hasAVX512 := (eax>>5)&7 == 7 && (eax>>1)&3 == 3 + if runtime.GOOS == "darwin" { + hasAVX512 = fs.inSet(AVX) && darwinHasAVX512() + } + if hasAVX512 { + fs.setIf(ebx&(1<<16) != 0, AVX512F) + fs.setIf(ebx&(1<<17) != 0, AVX512DQ) + fs.setIf(ebx&(1<<21) != 0, AVX512IFMA) + fs.setIf(ebx&(1<<26) != 0, AVX512PF) + fs.setIf(ebx&(1<<27) != 0, AVX512ER) + fs.setIf(ebx&(1<<28) != 0, AVX512CD) + fs.setIf(ebx&(1<<30) != 0, AVX512BW) + fs.setIf(ebx&(1<<31) != 0, AVX512VL) + // ecx + fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) + fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) + fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) + fs.setIf(ecx&(1<<14) != 0, AVX512VPOPCNTDQ) + // edx + fs.setIf(edx&(1<<8) != 0, AVX512VP2INTERSECT) + fs.setIf(edx&(1<<22) != 0, AMXBF16) + fs.setIf(edx&(1<<23) != 0, AVX512FP16) + fs.setIf(edx&(1<<24) != 0, AMXTILE) + fs.setIf(edx&(1<<25) != 0, AMXINT8) + // eax1 = CPUID.(EAX=7, ECX=1).EAX + fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<19) != 0, WRMSRNS) + fs.setIf(eax1&(1<<21) != 0, AMXFP16) + fs.setIf(eax1&(1<<27) != 0, MSRLIST) + } + } + + // CPUID.(EAX=7, ECX=2) + _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<0) != 0, PSFD) + fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL) + fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL) + fs.setIf(edx&(1<<4) != 0, BHI_CTRL) + fs.setIf(edx&(1<<5) != 0, MCDT_NO) + + // Add keylocker features. + if fs.inSet(KEYLOCKER) && mfi >= 0x19 { + _, ebx, _, _ := cpuidex(0x19, 0) + fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4) + } + + // Add AVX10 features. + if fs.inSet(AVX10) && mfi >= 0x24 { + _, ebx, _, _ := cpuidex(0x24, 0) + fs.setIf(ebx&(1<<16) != 0, AVX10_128) + fs.setIf(ebx&(1<<17) != 0, AVX10_256) + fs.setIf(ebx&(1<<18) != 0, AVX10_512) + } + } + + // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) + // EAX + // Bit 00: XSAVEOPT is available. + // Bit 01: Supports XSAVEC and the compacted form of XRSTOR if set. + // Bit 02: Supports XGETBV with ECX = 1 if set. + // Bit 03: Supports XSAVES/XRSTORS and IA32_XSS if set. + // Bits 31 - 04: Reserved. + // EBX + // Bits 31 - 00: The size in bytes of the XSAVE area containing all states enabled by XCRO | IA32_XSS. + // ECX + // Bits 31 - 00: Reports the supported bits of the lower 32 bits of the IA32_XSS MSR. IA32_XSS[n] can be set to 1 only if ECX[n] is 1. + // EDX? + // Bits 07 - 00: Used for XCR0. Bit 08: PT state. Bit 09: Used for XCR0. Bits 12 - 10: Reserved. Bit 13: HWP state. Bits 31 - 14: Reserved. 
+ if mfi >= 0xd { + if fs.inSet(XSAVE) { + eax, _, _, _ := cpuidex(0xd, 1) + fs.setIf(eax&(1<<0) != 0, XSAVEOPT) + fs.setIf(eax&(1<<1) != 0, XSAVEC) + fs.setIf(eax&(1<<2) != 0, XGETBV1) + fs.setIf(eax&(1<<3) != 0, XSAVES) + } + } + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + fs.set(LZCNT) + fs.set(POPCNT) + } + // ECX + fs.setIf((c&(1<<0)) != 0, LAHF) + fs.setIf((c&(1<<2)) != 0, SVM) + fs.setIf((c&(1<<6)) != 0, SSE4A) + fs.setIf((c&(1<<10)) != 0, IBS) + fs.setIf((c&(1<<22)) != 0, TOPEXT) + + // EDX + fs.setIf(d&(1<<11) != 0, SYSCALL) + fs.setIf(d&(1<<20) != 0, NX) + fs.setIf(d&(1<<22) != 0, MMXEXT) + fs.setIf(d&(1<<23) != 0, MMX) + fs.setIf(d&(1<<24) != 0, FXSR) + fs.setIf(d&(1<<25) != 0, FXSROPT) + fs.setIf(d&(1<<27) != 0, RDTSCP) + fs.setIf(d&(1<<30) != 0, AMD3DNOWEXT) + fs.setIf(d&(1<<31) != 0, AMD3DNOW) + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if fs.inSet(AVX) { + fs.setIf((c&(1<<11)) != 0, XOP) + fs.setIf((c&(1<<16)) != 0, FMA4) + } + + } + if maxExtendedFunction() >= 0x80000007 { + _, b, _, d := cpuid(0x80000007) + fs.setIf((b&(1<<0)) != 0, MCAOVERFLOW) + fs.setIf((b&(1<<1)) != 0, SUCCOR) + fs.setIf((b&(1<<2)) != 0, HWA) + fs.setIf((d&(1<<9)) != 0, CPBOOST) + } + + if maxExtendedFunction() >= 0x80000008 { + _, b, _, _ := cpuid(0x80000008) + fs.setIf(b&(1<<28) != 0, PSFD) + fs.setIf(b&(1<<27) != 0, CPPC) + fs.setIf(b&(1<<24) != 0, SPEC_CTRL_SSBD) + fs.setIf(b&(1<<23) != 0, PPIN) + fs.setIf(b&(1<<21) != 0, TLB_FLUSH_NESTED) + fs.setIf(b&(1<<20) != 0, EFER_LMSLE_UNS) + fs.setIf(b&(1<<19) != 0, IBRS_PROVIDES_SMP) + fs.setIf(b&(1<<18) != 0, IBRS_PREFERRED) + fs.setIf(b&(1<<17) != 0, STIBP_ALWAYSON) + fs.setIf(b&(1<<15) != 0, STIBP) + fs.setIf(b&(1<<14) != 0, IBRS) + fs.setIf((b&(1<<13)) != 0, INT_WBINVD) + fs.setIf(b&(1<<12) != 0, IBPB) + fs.setIf((b&(1<<9)) != 0, WBNOINVD) + fs.setIf((b&(1<<8)) != 0, MCOMMIT) + fs.setIf((b&(1<<4)) != 0, RDPRU) + fs.setIf((b&(1<<3)) != 0, INVLPGB) + fs.setIf((b&(1<<1)) != 0, MSRIRC) + fs.setIf((b&(1<<0)) != 0, CLZERO) + } + + if fs.inSet(SVM) && maxExtendedFunction() >= 0x8000000A { + _, _, _, edx := cpuid(0x8000000A) + fs.setIf((edx>>0)&1 == 1, SVMNP) + fs.setIf((edx>>1)&1 == 1, LBRVIRT) + fs.setIf((edx>>2)&1 == 1, SVML) + fs.setIf((edx>>3)&1 == 1, NRIPS) + fs.setIf((edx>>4)&1 == 1, TSCRATEMSR) + fs.setIf((edx>>5)&1 == 1, VMCBCLEAN) + fs.setIf((edx>>6)&1 == 1, SVMFBASID) + fs.setIf((edx>>7)&1 == 1, SVMDA) + fs.setIf((edx>>10)&1 == 1, SVMPF) + fs.setIf((edx>>12)&1 == 1, SVMPFT) + } + + if maxExtendedFunction() >= 0x8000001a { + eax, _, _, _ := cpuid(0x8000001a) + fs.setIf((eax>>0)&1 == 1, FP128) + fs.setIf((eax>>1)&1 == 1, MOVU) + fs.setIf((eax>>2)&1 == 1, FP256) + } + + if maxExtendedFunction() >= 0x8000001b && fs.inSet(IBS) { + eax, _, _, _ := cpuid(0x8000001b) + fs.setIf((eax>>0)&1 == 1, IBSFFV) + fs.setIf((eax>>1)&1 == 1, IBSFETCHSAM) + fs.setIf((eax>>2)&1 == 1, IBSOPSAM) + fs.setIf((eax>>3)&1 == 1, IBSRDWROPCNT) + fs.setIf((eax>>4)&1 == 1, IBSOPCNT) + fs.setIf((eax>>5)&1 == 1, IBSBRNTRGT) + fs.setIf((eax>>6)&1 == 1, IBSOPCNTEXT) + fs.setIf((eax>>7)&1 == 1, IBSRIPINVALIDCHK) + fs.setIf((eax>>8)&1 == 1, IBS_OPFUSE) + fs.setIf((eax>>9)&1 == 1, IBS_FETCH_CTLX) + fs.setIf((eax>>10)&1 == 1, IBS_OPDATA4) // Doc says "Fixed,0. IBS op data 4 MSR supported", but assuming they mean 1. 
+ fs.setIf((eax>>11)&1 == 1, IBS_ZEN4) + } + + if maxExtendedFunction() >= 0x8000001f && vend == AMD { + a, _, _, _ := cpuid(0x8000001f) + fs.setIf((a>>0)&1 == 1, SME) + fs.setIf((a>>1)&1 == 1, SEV) + fs.setIf((a>>2)&1 == 1, MSR_PAGEFLUSH) + fs.setIf((a>>3)&1 == 1, SEV_ES) + fs.setIf((a>>4)&1 == 1, SEV_SNP) + fs.setIf((a>>5)&1 == 1, VMPL) + fs.setIf((a>>10)&1 == 1, SME_COHERENT) + fs.setIf((a>>11)&1 == 1, SEV_64BIT) + fs.setIf((a>>12)&1 == 1, SEV_RESTRICTED) + fs.setIf((a>>13)&1 == 1, SEV_ALTERNATIVE) + fs.setIf((a>>14)&1 == 1, SEV_DEBUGSWAP) + fs.setIf((a>>15)&1 == 1, IBS_PREVENTHOST) + fs.setIf((a>>16)&1 == 1, VTE) + fs.setIf((a>>24)&1 == 1, VMSA_REGPROT) + } + + if maxExtendedFunction() >= 0x80000021 && vend == AMD { + a, _, _, _ := cpuid(0x80000021) + fs.setIf((a>>31)&1 == 1, SRSO_MSR_FIX) + fs.setIf((a>>30)&1 == 1, SRSO_USER_KERNEL_NO) + fs.setIf((a>>29)&1 == 1, SRSO_NO) + fs.setIf((a>>28)&1 == 1, IBPB_BRTYPE) + fs.setIf((a>>27)&1 == 1, SBPB) + } + + if mfi >= 0x20 { + // Microsoft has decided to purposefully hide the information + // of the guest TEE when VMs are being created using Hyper-V. + // + // This leads us to check for the Hyper-V cpuid features + // (0x4000000C), and then for the `ebx` value set. + // + // For Intel TDX, `ebx` is set as `0xbe3`, being 3 the part + // we're mostly interested about,according to: + // https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174 + _, ebx, _, _ := cpuid(0x4000000C) + fs.setIf(ebx == 0xbe3, TDX_GUEST) + } + + if mfi >= 0x21 { + // Intel Trusted Domain Extensions Guests have their own cpuid leaf (0x21). + _, ebx, ecx, edx := cpuid(0x21) + identity := string(valAsString(ebx, edx, ecx)) + fs.setIf(identity == "IntelTDX ", TDX_GUEST) + } + + return fs +} + +func (c *CPUInfo) supportAVX10() uint8 { + if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) { + _, ebx, _, _ := cpuidex(0x24, 0) + return uint8(ebx) + } + return 0 +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s new file mode 100644 index 000000000..8587c3a1f --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_386.s @@ -0,0 +1,47 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
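A note on how the feature set assembled by `support()` above is consumed: everything funnels into the package-level `cpuid.CPU` value populated at init time, so callers never touch `flagSet` directly. A minimal usage sketch, assuming the package's documented public API (`cpuid.CPU`, `Supports`, `BrandName`):

```Go
package main

import (
	"fmt"

	"github.com/klauspost/cpuid/v2"
)

func main() {
	// cpuid.CPU is filled in by the detection code above at package init.
	fmt.Println("Brand:", cpuid.CPU.BrandName)
	fmt.Println("Cores (physical/logical):", cpuid.CPU.PhysicalCores, cpuid.CPU.LogicalCores)
	if cpuid.CPU.Supports(cpuid.AVX2, cpuid.BMI2) {
		fmt.Println("AVX2+BMI2 fast path available")
	}
}
```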
+ +//+build 386,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0 + MOVL $0, eax+0(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s new file mode 100644 index 000000000..bc11f8942 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_amd64.s @@ -0,0 +1,72 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo,!noasm,!appengine + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET + +// From https://go-review.googlesource.com/c/sys/+/285572/ +// func asmDarwinHasAVX512() bool +TEXT ·asmDarwinHasAVX512(SB), 7, $0-1 + MOVB $0, ret+0(FP) // default to false + +#ifdef GOOS_darwin // return if not darwin +#ifdef GOARCH_amd64 // return if not amd64 +// These values from: +// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h +#define commpage64_base_address 0x00007fffffe00000 +#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) +#define commpage64_version (commpage64_base_address+0x01E) +#define hasAVX512F 0x0000004000000000 + MOVQ $commpage64_version, BX + MOVW (BX), AX + CMPW AX, $13 // versions < 13 do not support AVX512 + JL no_avx512 + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ (BX), AX + MOVQ $hasAVX512F, CX + ANDQ CX, AX + JZ no_avx512 + MOVB $1, ret+0(FP) + +no_avx512: +#endif +#endif + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s new file mode 100644 index 000000000..b31d6aec4 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -0,0 +1,26 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
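Before the arm64 shim below, one aside on the x86 path above: the AVX and AVX-512 gates in `support()` both reduce to mask checks against XCR0 as returned by XGETBV. A standalone sketch of that check under stated assumptions — `xgetbv` here stands in for the package-internal `asmXgetbv` wrapper and is not part of any public API:

```Go
package cpuidsketch

// osVectorStateEnabled reports whether the OS saves the vector register
// state that AVX and AVX-512 require, mirroring the checks in support().
func osVectorStateEnabled(xgetbv func(index uint32) (eax, edx uint32)) (avx, avx512 bool) {
	xcr0, _ := xgetbv(0)
	avx = xcr0&0x6 == 0x6                // XCR0 bits 1-2: XMM and YMM state enabled
	avx512 = avx && (xcr0>>5)&0x7 == 0x7 // XCR0 bits 5-7: opmask, ZMM_Hi256, Hi16_ZMM state
	return avx, avx512
}
```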
+ +//+build arm64,!gccgo,!noasm,!appengine + +// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt + +// func getMidr +TEXT ·getMidr(SB), 7, $0 + WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ + MOVD R0, midr+0(FP) + RET + +// func getProcFeatures +TEXT ·getProcFeatures(SB), 7, $0 + WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ + MOVD R0, procFeatures+0(FP) + RET + +// func getInstAttributes +TEXT ·getInstAttributes(SB), 7, $0 + WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ + WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ + MOVD R0, instAttrReg0+0(FP) + MOVD R1, instAttrReg1+8(FP) + RET + diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go new file mode 100644 index 000000000..9a53504a0 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -0,0 +1,247 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !gccgo && !noasm && !appengine +// +build arm64,!gccgo,!noasm,!appengine + +package cpuid + +import "runtime" + +func getMidr() (midr uint64) +func getProcFeatures() (procFeatures uint64) +func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(c *CPUInfo, safe bool) { + // Seems to be safe to assume on ARM64 + c.CacheLine = 64 + detectOS(c) + + // ARM64 disabled since it may crash if interrupt is not intercepted by OS. 
+ if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { + return + } + midr := getMidr() + + // MIDR_EL1 - Main ID Register + // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | Implementer | [31-24] | y | + // |--------------------------------------------------| + // | Variant | [23-20] | y | + // |--------------------------------------------------| + // | Architecture | [19-16] | y | + // |--------------------------------------------------| + // | PartNum | [15-4] | y | + // |--------------------------------------------------| + // | Revision | [3-0] | y | + // x--------------------------------------------------x + + switch (midr >> 24) & 0xff { + case 0xC0: + c.VendorString = "Ampere Computing" + c.VendorID = Ampere + case 0x41: + c.VendorString = "Arm Limited" + c.VendorID = ARM + case 0x42: + c.VendorString = "Broadcom Corporation" + c.VendorID = Broadcom + case 0x43: + c.VendorString = "Cavium Inc" + c.VendorID = Cavium + case 0x44: + c.VendorString = "Digital Equipment Corporation" + c.VendorID = DEC + case 0x46: + c.VendorString = "Fujitsu Ltd" + c.VendorID = Fujitsu + case 0x49: + c.VendorString = "Infineon Technologies AG" + c.VendorID = Infineon + case 0x4D: + c.VendorString = "Motorola or Freescale Semiconductor Inc" + c.VendorID = Motorola + case 0x4E: + c.VendorString = "NVIDIA Corporation" + c.VendorID = NVIDIA + case 0x50: + c.VendorString = "Applied Micro Circuits Corporation" + c.VendorID = AMCC + case 0x51: + c.VendorString = "Qualcomm Inc" + c.VendorID = Qualcomm + case 0x56: + c.VendorString = "Marvell International Ltd" + c.VendorID = Marvell + case 0x69: + c.VendorString = "Intel Corporation" + c.VendorID = Intel + } + + // Lower 4 bits: Architecture + // Architecture Meaning + // 0b0001 Armv4. + // 0b0010 Armv4T. + // 0b0011 Armv5 (obsolete). + // 0b0100 Armv5T. + // 0b0101 Armv5TE. + // 0b0110 Armv5TEJ. + // 0b0111 Armv6. + // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. + // Upper 4 bit: Variant + // An IMPLEMENTATION DEFINED variant number. + // Typically, this field is used to distinguish between different product variants, or major revisions of a product. + c.Family = int(midr>>16) & 0xff + + // PartNum, bits [15:4] + // An IMPLEMENTATION DEFINED primary part number for the device. + // On processors implemented by Arm, if the top four bits of the primary + // part number are 0x0 or 0x7, the variant and architecture are encoded differently. + // Revision, bits [3:0] + // An IMPLEMENTATION DEFINED revision number for the device. 
+ c.Model = int(midr) & 0xffff + + procFeatures := getProcFeatures() + + // ID_AA64PFR0_EL1 - Processor Feature Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | DIT | [51-48] | y | + // |--------------------------------------------------| + // | SVE | [35-32] | y | + // |--------------------------------------------------| + // | GIC | [27-24] | n | + // |--------------------------------------------------| + // | AdvSIMD | [23-20] | y | + // |--------------------------------------------------| + // | FP | [19-16] | y | + // |--------------------------------------------------| + // | EL3 | [15-12] | n | + // |--------------------------------------------------| + // | EL2 | [11-8] | n | + // |--------------------------------------------------| + // | EL1 | [7-4] | n | + // |--------------------------------------------------| + // | EL0 | [3-0] | n | + // x--------------------------------------------------x + + var f flagSet + // if procFeatures&(0xf<<48) != 0 { + // fmt.Println("DIT") + // } + f.setIf(procFeatures&(0xf<<32) != 0, SVE) + if procFeatures&(0xf<<20) != 15<<20 { + f.set(ASIMD) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 + // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. + f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) + } + f.setIf(procFeatures&(0xf<<16) != 0, FP) + + instAttrReg0, instAttrReg1 := getInstAttributes() + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // + // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | TS | [55-52] | y | + // |--------------------------------------------------| + // | FHM | [51-48] | y | + // |--------------------------------------------------| + // | DP | [47-44] | y | + // |--------------------------------------------------| + // | SM4 | [43-40] | y | + // |--------------------------------------------------| + // | SM3 | [39-36] | y | + // |--------------------------------------------------| + // | SHA3 | [35-32] | y | + // |--------------------------------------------------| + // | RDM | [31-28] | y | + // |--------------------------------------------------| + // | ATOMICS | [23-20] | y | + // |--------------------------------------------------| + // | CRC32 | [19-16] | y | + // |--------------------------------------------------| + // | SHA2 | [15-12] | y | + // |--------------------------------------------------| + // | SHA1 | [11-8] | y | + // |--------------------------------------------------| + // | AES | [7-4] | y | + // x--------------------------------------------------x + + // if instAttrReg0&(0xf<<52) != 0 { + // fmt.Println("TS") + // } + // if instAttrReg0&(0xf<<48) != 0 { + // fmt.Println("FHM") + // } + f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) + f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) + f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) + f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) + f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) + f.setIf(instAttrReg0&(0xf<<20) != 0, ATOMICS) + f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32) + f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, 
and SHA512SU1 instructions implemented. + f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512) + f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1) + f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM) + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 + // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. + f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL) + + // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 + // + // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 + // x--------------------------------------------------x + // | Name | bits | visible | + // |--------------------------------------------------| + // | GPI | [31-28] | y | + // |--------------------------------------------------| + // | GPA | [27-24] | y | + // |--------------------------------------------------| + // | LRCPC | [23-20] | y | + // |--------------------------------------------------| + // | FCMA | [19-16] | y | + // |--------------------------------------------------| + // | JSCVT | [15-12] | y | + // |--------------------------------------------------| + // | API | [11-8] | y | + // |--------------------------------------------------| + // | APA | [7-4] | y | + // |--------------------------------------------------| + // | DPB | [3-0] | y | + // x--------------------------------------------------x + + // if instAttrReg1&(0xf<<28) != 0 { + // fmt.Println("GPI") + // } + f.setIf(instAttrReg1&(0xf<<28) != 24, GPA) + f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC) + f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA) + f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT) + // if instAttrReg1&(0xf<<8) != 0 { + // fmt.Println("API") + // } + // if instAttrReg1&(0xf<<4) != 0 { + // fmt.Println("APA") + // } + f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP) + + // Store + c.featureSet.or(f) +} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go new file mode 100644 index 000000000..9636c2bc1 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -0,0 +1,15 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine +// +build !amd64,!386,!arm64 gccgo noasm appengine + +package cpuid + +func initCPU() { + cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } + xgetbv = func(uint32) (a, b uint32) { return 0, 0 } + rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } +} + +func addInfo(info *CPUInfo, safe bool) {} diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go new file mode 100644 index 000000000..799b400c2 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
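As a concrete reading of the MIDR_EL1 layout documented in `detect_arm64.go` above, here is a worked decode of a hypothetical register value (`0x410FD083`, which would describe a Cortex-A72 r0p3; the value is illustrative, not taken from the source):

```Go
package main

import "fmt"

func main() {
	midr := uint64(0x410FD083) // hypothetical MIDR_EL1 value
	fmt.Printf("implementer=%#x variant=%d arch=%#x part=%#x rev=%d\n",
		(midr>>24)&0xff, // 0x41 -> Arm Limited
		(midr>>20)&0xf,  // 0   -> r0
		(midr>>16)&0xf,  // 0xf -> features described by the ID_* registers
		(midr>>4)&0xfff, // 0xd08 -> Cortex-A72
		midr&0xf)        // 3   -> p3
}
```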
+ +//go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine) +// +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +func asmDarwinHasAVX512() bool + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm + darwinHasAVX512 = asmDarwinHasAVX512 +} + +func addInfo(c *CPUInfo, safe bool) { + c.maxFunc = maxFunctionID() + c.maxExFunc = maxExtendedFunction() + c.BrandName = brandName() + c.CacheLine = cacheLine() + c.Family, c.Model, c.Stepping = familyModel() + c.featureSet = support() + c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) + c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) + c.ThreadsPerCore = threadsPerCore() + c.LogicalCores = logicalCores() + c.PhysicalCores = physicalCores() + c.VendorID, c.VendorString = vendorID() + c.AVX10Level = c.supportAVX10() + c.cacheSize() + c.frequencies() +} diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go new file mode 100644 index 000000000..3a2560310 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -0,0 +1,285 @@ +// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. + +package cpuid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ADX-1] + _ = x[AESNI-2] + _ = x[AMD3DNOW-3] + _ = x[AMD3DNOWEXT-4] + _ = x[AMXBF16-5] + _ = x[AMXFP16-6] + _ = x[AMXINT8-7] + _ = x[AMXTILE-8] + _ = x[APX_F-9] + _ = x[AVX-10] + _ = x[AVX10-11] + _ = x[AVX10_128-12] + _ = x[AVX10_256-13] + _ = x[AVX10_512-14] + _ = x[AVX2-15] + _ = x[AVX512BF16-16] + _ = x[AVX512BITALG-17] + _ = x[AVX512BW-18] + _ = x[AVX512CD-19] + _ = x[AVX512DQ-20] + _ = x[AVX512ER-21] + _ = x[AVX512F-22] + _ = x[AVX512FP16-23] + _ = x[AVX512IFMA-24] + _ = x[AVX512PF-25] + _ = x[AVX512VBMI-26] + _ = x[AVX512VBMI2-27] + _ = x[AVX512VL-28] + _ = x[AVX512VNNI-29] + _ = x[AVX512VP2INTERSECT-30] + _ = x[AVX512VPOPCNTDQ-31] + _ = x[AVXIFMA-32] + _ = x[AVXNECONVERT-33] + _ = x[AVXSLOW-34] + _ = x[AVXVNNI-35] + _ = x[AVXVNNIINT8-36] + _ = x[AVXVNNIINT16-37] + _ = x[BHI_CTRL-38] + _ = x[BMI1-39] + _ = x[BMI2-40] + _ = x[CETIBT-41] + _ = x[CETSS-42] + _ = x[CLDEMOTE-43] + _ = x[CLMUL-44] + _ = x[CLZERO-45] + _ = x[CMOV-46] + _ = x[CMPCCXADD-47] + _ = x[CMPSB_SCADBS_SHORT-48] + _ = x[CMPXCHG8-49] + _ = x[CPBOOST-50] + _ = x[CPPC-51] + _ = x[CX16-52] + _ = x[EFER_LMSLE_UNS-53] + _ = x[ENQCMD-54] + _ = x[ERMS-55] + _ = x[F16C-56] + _ = x[FLUSH_L1D-57] + _ = x[FMA3-58] + _ = x[FMA4-59] + _ = x[FP128-60] + _ = x[FP256-61] + _ = x[FSRM-62] + _ = x[FXSR-63] + _ = x[FXSROPT-64] + _ = x[GFNI-65] + _ = x[HLE-66] + _ = x[HRESET-67] + _ = x[HTT-68] + _ = x[HWA-69] + _ = x[HYBRID_CPU-70] + _ = x[HYPERVISOR-71] + _ = x[IA32_ARCH_CAP-72] + _ = x[IA32_CORE_CAP-73] + _ = x[IBPB-74] + _ = x[IBPB_BRTYPE-75] + _ = x[IBRS-76] + _ = x[IBRS_PREFERRED-77] + _ = x[IBRS_PROVIDES_SMP-78] + _ = x[IBS-79] + _ = x[IBSBRNTRGT-80] + _ = x[IBSFETCHSAM-81] + _ = x[IBSFFV-82] + _ = x[IBSOPCNT-83] + _ = x[IBSOPCNTEXT-84] + _ = x[IBSOPSAM-85] + _ = x[IBSRDWROPCNT-86] + 
_ = x[IBSRIPINVALIDCHK-87] + _ = x[IBS_FETCH_CTLX-88] + _ = x[IBS_OPDATA4-89] + _ = x[IBS_OPFUSE-90] + _ = x[IBS_PREVENTHOST-91] + _ = x[IBS_ZEN4-92] + _ = x[IDPRED_CTRL-93] + _ = x[INT_WBINVD-94] + _ = x[INVLPGB-95] + _ = x[KEYLOCKER-96] + _ = x[KEYLOCKERW-97] + _ = x[LAHF-98] + _ = x[LAM-99] + _ = x[LBRVIRT-100] + _ = x[LZCNT-101] + _ = x[MCAOVERFLOW-102] + _ = x[MCDT_NO-103] + _ = x[MCOMMIT-104] + _ = x[MD_CLEAR-105] + _ = x[MMX-106] + _ = x[MMXEXT-107] + _ = x[MOVBE-108] + _ = x[MOVDIR64B-109] + _ = x[MOVDIRI-110] + _ = x[MOVSB_ZL-111] + _ = x[MOVU-112] + _ = x[MPX-113] + _ = x[MSRIRC-114] + _ = x[MSRLIST-115] + _ = x[MSR_PAGEFLUSH-116] + _ = x[NRIPS-117] + _ = x[NX-118] + _ = x[OSXSAVE-119] + _ = x[PCONFIG-120] + _ = x[POPCNT-121] + _ = x[PPIN-122] + _ = x[PREFETCHI-123] + _ = x[PSFD-124] + _ = x[RDPRU-125] + _ = x[RDRAND-126] + _ = x[RDSEED-127] + _ = x[RDTSCP-128] + _ = x[RRSBA_CTRL-129] + _ = x[RTM-130] + _ = x[RTM_ALWAYS_ABORT-131] + _ = x[SBPB-132] + _ = x[SERIALIZE-133] + _ = x[SEV-134] + _ = x[SEV_64BIT-135] + _ = x[SEV_ALTERNATIVE-136] + _ = x[SEV_DEBUGSWAP-137] + _ = x[SEV_ES-138] + _ = x[SEV_RESTRICTED-139] + _ = x[SEV_SNP-140] + _ = x[SGX-141] + _ = x[SGXLC-142] + _ = x[SHA-143] + _ = x[SME-144] + _ = x[SME_COHERENT-145] + _ = x[SPEC_CTRL_SSBD-146] + _ = x[SRBDS_CTRL-147] + _ = x[SRSO_MSR_FIX-148] + _ = x[SRSO_NO-149] + _ = x[SRSO_USER_KERNEL_NO-150] + _ = x[SSE-151] + _ = x[SSE2-152] + _ = x[SSE3-153] + _ = x[SSE4-154] + _ = x[SSE42-155] + _ = x[SSE4A-156] + _ = x[SSSE3-157] + _ = x[STIBP-158] + _ = x[STIBP_ALWAYSON-159] + _ = x[STOSB_SHORT-160] + _ = x[SUCCOR-161] + _ = x[SVM-162] + _ = x[SVMDA-163] + _ = x[SVMFBASID-164] + _ = x[SVML-165] + _ = x[SVMNP-166] + _ = x[SVMPF-167] + _ = x[SVMPFT-168] + _ = x[SYSCALL-169] + _ = x[SYSEE-170] + _ = x[TBM-171] + _ = x[TDX_GUEST-172] + _ = x[TLB_FLUSH_NESTED-173] + _ = x[TME-174] + _ = x[TOPEXT-175] + _ = x[TSCRATEMSR-176] + _ = x[TSXLDTRK-177] + _ = x[VAES-178] + _ = x[VMCBCLEAN-179] + _ = x[VMPL-180] + _ = x[VMSA_REGPROT-181] + _ = x[VMX-182] + _ = x[VPCLMULQDQ-183] + _ = x[VTE-184] + _ = x[WAITPKG-185] + _ = x[WBNOINVD-186] + _ = x[WRMSRNS-187] + _ = x[X87-188] + _ = x[XGETBV1-189] + _ = x[XOP-190] + _ = x[XSAVE-191] + _ = x[XSAVEC-192] + _ = x[XSAVEOPT-193] + _ = x[XSAVES-194] + _ = x[AESARM-195] + _ = x[ARMCPUID-196] + _ = x[ASIMD-197] + _ = x[ASIMDDP-198] + _ = x[ASIMDHP-199] + _ = x[ASIMDRDM-200] + _ = x[ATOMICS-201] + _ = x[CRC32-202] + _ = x[DCPOP-203] + _ = x[EVTSTRM-204] + _ = x[FCMA-205] + _ = x[FP-206] + _ = x[FPHP-207] + _ = x[GPA-208] + _ = x[JSCVT-209] + _ = x[LRCPC-210] + _ = x[PMULL-211] + _ = x[SHA1-212] + _ = x[SHA2-213] + _ = x[SHA3-214] + _ = x[SHA512-215] + _ = x[SM3-216] + _ = x[SM4-217] + _ = x[SVE-218] + _ = x[lastID-219] + _ = x[firstID-0] +} + +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" + +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} + +func (i FeatureID) String() string { + if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { + return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[VendorUnknown-0] + _ = x[Intel-1] + _ = x[AMD-2] + _ = x[VIA-3] + _ = x[Transmeta-4] + _ = x[NSC-5] + _ = x[KVM-6] + _ = x[MSVM-7] + _ = x[VMware-8] + _ = x[XenHVM-9] + _ = x[Bhyve-10] + _ = x[Hygon-11] + _ = x[SiS-12] + _ = x[RDC-13] + _ = x[Ampere-14] + _ = x[ARM-15] + _ = x[Broadcom-16] + _ = x[Cavium-17] + _ = x[DEC-18] + _ = x[Fujitsu-19] + _ = x[Infineon-20] + _ = x[Motorola-21] + _ = x[NVIDIA-22] + _ = x[AMCC-23] + _ = x[Qualcomm-24] + _ = x[Marvell-25] + _ = x[lastVendor-26] +} + +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" + +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} + +func (i Vendor) String() string { + if i < 0 || i >= Vendor(len(_Vendor_index)-1) { + return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go new file mode 100644 index 000000000..84b1acd21 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go @@ -0,0 +1,121 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import ( + "runtime" + "strings" + + "golang.org/x/sys/unix" +) + +func detectOS(c *CPUInfo) bool { + if runtime.GOOS != "ios" { + tryToFillCPUInfoFomSysctl(c) + } + // There are no hw.optional sysctl values for the below features on Mac OS 11.0 + // to detect their supported state dynamically. Assume the CPU features that + // Apple Silicon M1 supports to be available as a minimal set of features + // to all Go programs running on darwin/arm64. + // TODO: Add more if we know them. 
+ c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) + + return true +} + +func sysctlGetBool(name string) bool { + value, err := unix.SysctlUint32(name) + if err != nil { + return false + } + return value != 0 +} + +func sysctlGetString(name string) string { + value, err := unix.Sysctl(name) + if err != nil { + return "" + } + return value +} + +func sysctlGetInt(unknown int, names ...string) int { + for _, name := range names { + value, err := unix.SysctlUint32(name) + if err != nil { + continue + } + if value != 0 { + return int(value) + } + } + return unknown +} + +func sysctlGetInt64(unknown int, names ...string) int { + for _, name := range names { + value64, err := unix.SysctlUint64(name) + if err != nil { + continue + } + if int(value64) != unknown { + return int(value64) + } + } + return unknown +} + +func setFeature(c *CPUInfo, name string, feature FeatureID) { + c.featureSet.setIf(sysctlGetBool(name), feature) +} +func tryToFillCPUInfoFomSysctl(c *CPUInfo) { + c.BrandName = sysctlGetString("machdep.cpu.brand_string") + + if len(c.BrandName) != 0 { + c.VendorString = strings.Fields(c.BrandName)[0] + } + + c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu") + c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") / + sysctlGetInt(1, "hw.physicalcpu") + c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count") + c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily") + c.Model = sysctlGetInt(0, "machdep.cpu.model") + c.CacheLine = sysctlGetInt64(0, "hw.cachelinesize") + c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") + c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize") + c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") + c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") + + // from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile + setFeature(c, "hw.optional.arm.FEAT_AES", AESARM) + setFeature(c, "hw.optional.AdvSIMD", ASIMD) + setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP) + setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM) + setFeature(c, "hw.optional.FEAT_CRC32", CRC32) + setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP) + // setFeature(c, "", EVTSTRM) + setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA) + setFeature(c, "hw.optional.arm.FEAT_FP", FP) + setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP) + setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA) + setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT) + setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC) + setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL) + setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1) + setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2) + setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3) + setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512) + // setFeature(c, "", SM3) + // setFeature(c, "", SM4) + setFeature(c, "hw.optional.arm.FEAT_SVE", SVE) + + // from empirical observation + setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP) + setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS) + setFeature(c, "hw.optional.floatingpoint", FP) + setFeature(c, "hw.optional.armv8_2_sha3", SHA3) + setFeature(c, "hw.optional.armv8_2_sha512", SHA512) + setFeature(c, "hw.optional.armv8_3_compnum", FCMA) + setFeature(c, "hw.optional.armv8_crc32", CRC32) +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go new file mode 100644 index 000000000..ee278b9e4 --- /dev/null +++ 
b/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go @@ -0,0 +1,130 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file located +// here https://github.com/golang/sys/blob/master/LICENSE + +package cpuid + +import ( + "encoding/binary" + "io/ioutil" + "runtime" +) + +// HWCAP bits. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func detectOS(c *CPUInfo) bool { + // For now assuming no hyperthreading is reasonable. + c.LogicalCores = runtime.NumCPU() + c.PhysicalCores = c.LogicalCores + c.ThreadsPerCore = 1 + if hwcap == 0 { + // We did not get values from the runtime. + // Try reading /proc/self/auxv + + // From https://github.com/golang/sys + const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + uintSize = int(32 << (^uint(0) >> 63)) + ) + + buf, err := ioutil.ReadFile("/proc/self/auxv") + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return false + } + bo := binary.LittleEndian + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwcap = val + case _AT_HWCAP2: + // Not used + } + } + if hwcap == 0 { + return false + } + } + + // HWCap was populated by the runtime from the auxiliary vector. + // Use HWCap information since reading aarch64 system registers + // is not supported in user space on older linux kernels. 
+ c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP) + c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM) + c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID) + c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32) + c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP) + c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM) + c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA) + c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP) + c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP) + c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT) + c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC) + c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3) + c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512) + c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3) + c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4) + c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE) + + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets on android. + c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS) + + return true +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go new file mode 100644 index 000000000..8733ba343 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. + +//go:build arm64 && !linux && !darwin +// +build arm64,!linux,!darwin + +package cpuid + +import "runtime" + +func detectOS(c *CPUInfo) bool { + c.PhysicalCores = runtime.NumCPU() + // For now assuming 1 thread per core... + c.ThreadsPerCore = 1 + c.LogicalCores = c.PhysicalCores + return false +} diff --git a/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go new file mode 100644 index 000000000..f8f201b5f --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go @@ -0,0 +1,8 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. + +//go:build nounsafe +// +build nounsafe + +package cpuid + +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go new file mode 100644 index 000000000..92af622eb --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go @@ -0,0 +1,11 @@ +// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. 
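The `/proc/self/auxv` fallback in `os_linux_arm64.go` above walks (tag, value) pairs of native word size. A minimal sketch of that decoding for a single fabricated 64-bit little-endian entry (the buffer contents are made up for illustration):

```Go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One fabricated auxv entry: tag 16 (AT_HWCAP) with value 0x0fff.
	entry := []byte{16, 0, 0, 0, 0, 0, 0, 0, 0xff, 0x0f, 0, 0, 0, 0, 0, 0}
	tag := binary.LittleEndian.Uint64(entry[0:8])
	val := binary.LittleEndian.Uint64(entry[8:16])
	fmt.Println(tag, val) // 16 4095
}
```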
+ +//go:build !nounsafe +// +build !nounsafe + +package cpuid + +import _ "unsafe" // needed for go:linkname + +//go:linkname hwcap internal/cpu.HWCap +var hwcap uint diff --git a/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh new file mode 100644 index 000000000..471d986d2 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/v2/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." + echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null . + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null . + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null . + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null . +done diff --git a/vendor/github.com/libp2p/go-reuseport/LICENSE b/vendor/github.com/libp2p/go-reuseport/LICENSE new file mode 100644 index 000000000..0d760cbb4 --- /dev/null +++ b/vendor/github.com/libp2p/go-reuseport/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2013 Conformal Systems LLC. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-reuseport/README.md b/vendor/github.com/libp2p/go-reuseport/README.md new file mode 100644 index 000000000..d511adebc --- /dev/null +++ b/vendor/github.com/libp2p/go-reuseport/README.md @@ -0,0 +1,48 @@ +# go-reuseport + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai) +[![GoDoc](https://godoc.org/github.com/libp2p/go-reuseport?status.svg)](https://godoc.org/github.com/libp2p/go-reuseport) +[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/) +[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p) +[![codecov](https://codecov.io/gh/libp2p/go-reuseport/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-reuseport) +[![Travis CI](https://travis-ci.org/libp2p/go-reuseport.svg?branch=master)](https://travis-ci.org/libp2p/go-reuseport) +[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) + +This package enables listening and dialing from _the same_ TCP or UDP port. +This means that the following sockopts may be set: + +``` +SO_REUSEADDR +SO_REUSEPORT +``` + +This is a simple package to help with address reuse. This is particularly +important when attempting to do TCP NAT holepunching, which requires a process +to both Listen and Dial on the same TCP port. This package provides some +utilities around enabling this behaviour on various OS. + +## Examples + + +```Go +// listen on the same port. oh yeah. 
+l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+```
+
+```Go
+// dial from the same port. oh yeah.
+l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
+c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
+```
+
+**Note: can't dial self, because TCP/IP stacks use 4-tuples to identify connections, and doing so would clash.**
+
+## Tested
+
+Tested on `darwin`, `linux`, and `windows`.
+
+---
+
+The last gx published version of this module was: 0.2.2: Qme8kdM7thoCqLqd7GYCRqipoZJS64rhJo5MBcTpyWfsL9
diff --git a/vendor/github.com/libp2p/go-reuseport/addr.go b/vendor/github.com/libp2p/go-reuseport/addr.go
new file mode 100644
index 000000000..cfffc7c8c
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/addr.go
@@ -0,0 +1,20 @@
+package reuseport
+
+import (
+	"net"
+)
+
+func ResolveAddr(network, address string) (net.Addr, error) {
+	switch network {
+	default:
+		return nil, net.UnknownNetworkError(network)
+	case "ip", "ip4", "ip6":
+		return net.ResolveIPAddr(network, address)
+	case "tcp", "tcp4", "tcp6":
+		return net.ResolveTCPAddr(network, address)
+	case "udp", "udp4", "udp6":
+		return net.ResolveUDPAddr(network, address)
+	case "unix", "unixgram", "unixpacket":
+		return net.ResolveUnixAddr(network, address)
+	}
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/codecov.yml b/vendor/github.com/libp2p/go-reuseport/codecov.yml
new file mode 100644
index 000000000..5f88a9ea2
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/codecov.yml
@@ -0,0 +1,3 @@
+coverage:
+  range: "50...100"
+comment: off
diff --git a/vendor/github.com/libp2p/go-reuseport/control_plan9.go b/vendor/github.com/libp2p/go-reuseport/control_plan9.go
new file mode 100644
index 000000000..a8f7f3456
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_plan9.go
@@ -0,0 +1,9 @@
+package reuseport
+
+import (
+	"syscall"
+)
+
+func Control(network, address string, c syscall.RawConn) error {
+	return nil
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_unix.go b/vendor/github.com/libp2p/go-reuseport/control_unix.go
new file mode 100644
index 000000000..4197d1f74
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_unix.go
@@ -0,0 +1,23 @@
+//go:build !plan9 && !windows && !wasm
+
+package reuseport
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func Control(network, address string, c syscall.RawConn) (err error) {
+	controlErr := c.Control(func(fd uintptr) {
+		err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+		if err != nil {
+			return
+		}
+		err = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+	})
+	if controlErr != nil {
+		err = controlErr
+	}
+	return
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_wasm.go b/vendor/github.com/libp2p/go-reuseport/control_wasm.go
new file mode 100644
index 000000000..8b22fade5
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_wasm.go
@@ -0,0 +1,11 @@
+//go:build wasm
+
+package reuseport
+
+import (
+	"syscall"
+)
+
+func Control(network, address string, c syscall.RawConn) error {
+	return nil
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/control_windows.go b/vendor/github.com/libp2p/go-reuseport/control_windows.go
new file mode 100644
index 000000000..c45e43f4b
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/control_windows.go
@@ -0,0 +1,17 @@
+package reuseport
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/windows"
+)
+
+func Control(network, address string, c syscall.RawConn) (err error) {
+	controlErr := c.Control(func(fd uintptr) {
+		err = windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_REUSEADDR, 1)
+	})
+	if controlErr != nil {
+		err = controlErr
+	}
+	return
+}
diff --git a/vendor/github.com/libp2p/go-reuseport/interface.go b/vendor/github.com/libp2p/go-reuseport/interface.go
new file mode 100644
index 000000000..b864da8c5
--- /dev/null
+++ b/vendor/github.com/libp2p/go-reuseport/interface.go
@@ -0,0 +1,72 @@
+// Package reuseport provides Listen and Dial functions that set socket
+// options in order to be able to reuse ports. You should only use this
+// package if you know what SO_REUSEADDR and SO_REUSEPORT are.
+//
+// For example:
+//
+//  // listen on the same port. oh yeah.
+//  l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+//  l2, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+//
+//  // dial from the same port. oh yeah.
+//  l1, _ := reuse.Listen("tcp", "127.0.0.1:1234")
+//  l2, _ := reuse.Listen("tcp", "127.0.0.1:1235")
+//  c, _ := reuse.Dial("tcp", "127.0.0.1:1234", "127.0.0.1:1235")
+//
+// Note: can't dial self, because TCP/IP stacks use 4-tuples to identify connections,
+// and doing so would clash.
+package reuseport
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+)
+
+// Available returns whether or not SO_REUSEPORT or equivalent behaviour is
+// available in the OS.
+func Available() bool {
+	return true
+}
+
+var listenConfig = net.ListenConfig{
+	Control: Control,
+}
+
+// Listen listens at the given network and address. See net.Listen.
+// Returns a net.Listener created from a file descriptor for a socket
+// with the SO_REUSEPORT and SO_REUSEADDR options set.
+func Listen(network, address string) (net.Listener, error) {
+	return listenConfig.Listen(context.Background(), network, address)
+}
+
+// ListenPacket listens at the given network and address. See net.ListenPacket.
+// Returns a net.PacketConn created from a file descriptor for a socket
+// with the SO_REUSEPORT and SO_REUSEADDR options set.
+func ListenPacket(network, address string) (net.PacketConn, error) {
+	return listenConfig.ListenPacket(context.Background(), network, address)
+}
+
+// Dial dials the given network and address. See net.Dial.
+// Returns a net.Conn created from a file descriptor for a socket
+// with the SO_REUSEPORT and SO_REUSEADDR options set.
+func Dial(network, laddr, raddr string) (net.Conn, error) {
+	return DialTimeout(network, laddr, raddr, time.Duration(0))
+}
+
+// DialTimeout dials the given network and address, with the given timeout. See
+// net.DialTimeout. Returns a net.Conn created from a file descriptor for
+// a socket with the SO_REUSEPORT and SO_REUSEADDR options set.
+func DialTimeout(network, laddr, raddr string, timeout time.Duration) (net.Conn, error) { + nla, err := ResolveAddr(network, laddr) + if err != nil { + return nil, fmt.Errorf("failed to resolve local addr: %w", err) + } + d := net.Dialer{ + Control: Control, + LocalAddr: nla, + Timeout: timeout, + } + return d.Dial(network, raddr) +} diff --git a/vendor/github.com/libp2p/go-reuseport/version.json b/vendor/github.com/libp2p/go-reuseport/version.json new file mode 100644 index 000000000..a654d65ab --- /dev/null +++ b/vendor/github.com/libp2p/go-reuseport/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.3.0" +} diff --git a/vendor/github.com/mdlayher/ethernet/.travis.yml b/vendor/github.com/mdlayher/ethernet/.travis.yml new file mode 100644 index 000000000..cc215992a --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.x +os: + - linux +before_install: + - go get golang.org/x/lint/golint + - go get honnef.co/go/tools/cmd/staticcheck + - go get -d ./... +script: + - go build -tags=gofuzz ./... + - go vet ./... + - staticcheck ./... + - golint -set_exit_status ./... + - go test -v -race ./... diff --git a/vendor/github.com/mdlayher/ethernet/LICENSE.md b/vendor/github.com/mdlayher/ethernet/LICENSE.md new file mode 100644 index 000000000..75ed9de17 --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/LICENSE.md @@ -0,0 +1,10 @@ +MIT License +=========== + +Copyright (C) 2015 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mdlayher/ethernet/README.md b/vendor/github.com/mdlayher/ethernet/README.md new file mode 100644 index 000000000..ec6f4fe4b --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/README.md @@ -0,0 +1,8 @@ +ethernet [![Build Status](https://travis-ci.org/mdlayher/ethernet.svg?branch=master)](https://travis-ci.org/mdlayher/ethernet) [![GoDoc](https://godoc.org/github.com/mdlayher/ethernet?status.svg)](https://godoc.org/github.com/mdlayher/ethernet) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/ethernet)](https://goreportcard.com/report/github.com/mdlayher/ethernet) +======== + +Package `ethernet` implements marshaling and unmarshaling of IEEE 802.3 +Ethernet II frames and IEEE 802.1Q VLAN tags. MIT Licensed. + +For more information about using Ethernet frames in Go, check out my blog +post: [Network Protocol Breakdown: Ethernet and Go](https://medium.com/@mdlayher/network-protocol-breakdown-ethernet-and-go-de985d726cc1). 
\ No newline at end of file diff --git a/vendor/github.com/mdlayher/ethernet/ethernet.go b/vendor/github.com/mdlayher/ethernet/ethernet.go new file mode 100644 index 000000000..a462413f3 --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/ethernet.go @@ -0,0 +1,297 @@ +// Package ethernet implements marshaling and unmarshaling of IEEE 802.3 +// Ethernet II frames and IEEE 802.1Q VLAN tags. +package ethernet + +import ( + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "net" +) + +//go:generate stringer -output=string.go -type=EtherType + +const ( + // minPayload is the minimum payload size for an Ethernet frame, assuming + // that no 802.1Q VLAN tags are present. + minPayload = 46 +) + +// Broadcast is a special hardware address which indicates a Frame should +// be sent to every device on a given LAN segment. +var Broadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +// ErrInvalidFCS is returned when Frame.UnmarshalFCS detects an incorrect +// Ethernet frame check sequence in a byte slice for a Frame. +var ErrInvalidFCS = errors.New("invalid frame check sequence") + +// An EtherType is a value used to identify an upper layer protocol +// encapsulated in a Frame. +// +// A list of IANA-assigned EtherType values may be found here: +// http://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml. +type EtherType uint16 + +// Common EtherType values frequently used in a Frame. +const ( + EtherTypeIPv4 EtherType = 0x0800 + EtherTypeARP EtherType = 0x0806 + EtherTypeIPv6 EtherType = 0x86DD + + // EtherTypeVLAN and EtherTypeServiceVLAN are used as 802.1Q Tag Protocol + // Identifiers (TPIDs). + EtherTypeVLAN EtherType = 0x8100 + EtherTypeServiceVLAN EtherType = 0x88a8 +) + +// A Frame is an IEEE 802.3 Ethernet II frame. A Frame contains information +// such as source and destination hardware addresses, zero or more optional +// 802.1Q VLAN tags, an EtherType, and payload data. +type Frame struct { + // Destination specifies the destination hardware address for this Frame. + // + // If this address is set to Broadcast, the Frame will be sent to every + // device on a given LAN segment. + Destination net.HardwareAddr + + // Source specifies the source hardware address for this Frame. + // + // Typically, this is the hardware address of the network interface used to + // send this Frame. + Source net.HardwareAddr + + // ServiceVLAN specifies an optional 802.1Q service VLAN tag, for use with + // 802.1ad double tagging, or "Q-in-Q". If ServiceVLAN is not nil, VLAN must + // not be nil as well. + // + // Most users should leave this field set to nil and use VLAN instead. + ServiceVLAN *VLAN + + // VLAN specifies an optional 802.1Q customer VLAN tag, which may or may + // not be present in a Frame. It is important to note that the operating + // system may automatically strip VLAN tags before they can be parsed. + VLAN *VLAN + + // EtherType is a value used to identify an upper layer protocol + // encapsulated in this Frame. + EtherType EtherType + + // Payload is a variable length data payload encapsulated by this Frame. + Payload []byte +} + +// MarshalBinary allocates a byte slice and marshals a Frame into binary form. +func (f *Frame) MarshalBinary() ([]byte, error) { + b := make([]byte, f.length()) + _, err := f.read(b) + return b, err +} + +// MarshalFCS allocates a byte slice, marshals a Frame into binary form, and +// finally calculates and places a 4-byte IEEE CRC32 frame check sequence at +// the end of the slice. 
+// +// Most users should use MarshalBinary instead. MarshalFCS is provided as a +// convenience for rare occasions when the operating system cannot +// automatically generate a frame check sequence for an Ethernet frame. +func (f *Frame) MarshalFCS() ([]byte, error) { + // Frame length with 4 extra bytes for frame check sequence + b := make([]byte, f.length()+4) + if _, err := f.read(b); err != nil { + return nil, err + } + + // Compute IEEE CRC32 checksum of frame bytes and place it directly + // in the last four bytes of the slice + binary.BigEndian.PutUint32(b[len(b)-4:], crc32.ChecksumIEEE(b[0:len(b)-4])) + return b, nil +} + +// read reads data from a Frame into b. read is used to marshal a Frame +// into binary form, but does not allocate on its own. +func (f *Frame) read(b []byte) (int, error) { + // S-VLAN must also have accompanying C-VLAN. + if f.ServiceVLAN != nil && f.VLAN == nil { + return 0, ErrInvalidVLAN + } + + copy(b[0:6], f.Destination) + copy(b[6:12], f.Source) + + // Marshal each non-nil VLAN tag into bytes, inserting the appropriate + // EtherType/TPID before each, so devices know that one or more VLANs + // are present. + vlans := []struct { + vlan *VLAN + tpid EtherType + }{ + {vlan: f.ServiceVLAN, tpid: EtherTypeServiceVLAN}, + {vlan: f.VLAN, tpid: EtherTypeVLAN}, + } + + n := 12 + for _, vt := range vlans { + if vt.vlan == nil { + continue + } + + // Add VLAN EtherType and VLAN bytes. + binary.BigEndian.PutUint16(b[n:n+2], uint16(vt.tpid)) + if _, err := vt.vlan.read(b[n+2 : n+4]); err != nil { + return 0, err + } + n += 4 + } + + // Marshal actual EtherType after any VLANs, copy payload into + // output bytes. + binary.BigEndian.PutUint16(b[n:n+2], uint16(f.EtherType)) + copy(b[n+2:], f.Payload) + + return len(b), nil +} + +// UnmarshalBinary unmarshals a byte slice into a Frame. +func (f *Frame) UnmarshalBinary(b []byte) error { + // Verify that both hardware addresses and a single EtherType are present + if len(b) < 14 { + return io.ErrUnexpectedEOF + } + + // Track offset in packet for reading data + n := 14 + + // Continue looping and parsing VLAN tags until no more VLAN EtherType + // values are detected + et := EtherType(binary.BigEndian.Uint16(b[n-2 : n])) + switch et { + case EtherTypeServiceVLAN, EtherTypeVLAN: + // VLAN type is hinted for further parsing. An index is returned which + // indicates how many bytes were consumed by VLAN tags. + nn, err := f.unmarshalVLANs(et, b[n:]) + if err != nil { + return err + } + + n += nn + default: + // No VLANs detected. + f.EtherType = et + } + + // Allocate single byte slice to store destination and source hardware + // addresses, and payload + bb := make([]byte, 6+6+len(b[n:])) + copy(bb[0:6], b[0:6]) + f.Destination = bb[0:6] + copy(bb[6:12], b[6:12]) + f.Source = bb[6:12] + + // There used to be a minimum payload length restriction here, but as + // long as two hardware addresses and an EtherType are present, it + // doesn't really matter what is contained in the payload. We will + // follow the "robustness principle". + copy(bb[12:], b[n:]) + f.Payload = bb[12:] + + return nil +} + +// UnmarshalFCS computes the IEEE CRC32 frame check sequence of a Frame, +// verifies it against the checksum present in the byte slice, and finally, +// unmarshals a byte slice into a Frame. +// +// Most users should use UnmarshalBinary instead. UnmarshalFCS is provided as +// a convenience for rare occasions when the operating system cannot +// automatically verify a frame check sequence for an Ethernet frame. 
+func (f *Frame) UnmarshalFCS(b []byte) error { + // Must contain enough data for FCS, to avoid panics + if len(b) < 4 { + return io.ErrUnexpectedEOF + } + + // Verify checksum in slice versus newly computed checksum + want := binary.BigEndian.Uint32(b[len(b)-4:]) + got := crc32.ChecksumIEEE(b[0 : len(b)-4]) + if want != got { + return ErrInvalidFCS + } + + return f.UnmarshalBinary(b[0 : len(b)-4]) +} + +// length calculates the number of bytes required to store a Frame. +func (f *Frame) length() int { + // If payload is less than the required minimum length, we zero-pad up to + // the required minimum length + pl := len(f.Payload) + if pl < minPayload { + pl = minPayload + } + + // Add additional length if VLAN tags are needed. + var vlanLen int + switch { + case f.ServiceVLAN != nil && f.VLAN != nil: + vlanLen = 8 + case f.VLAN != nil: + vlanLen = 4 + } + + // 6 bytes: destination hardware address + // 6 bytes: source hardware address + // N bytes: VLAN tags (if present) + // 2 bytes: EtherType + // N bytes: payload length (may be padded) + return 6 + 6 + vlanLen + 2 + pl +} + +// unmarshalVLANs unmarshals S/C-VLAN tags. It is assumed that tpid +// is a valid S/C-VLAN TPID. +func (f *Frame) unmarshalVLANs(tpid EtherType, b []byte) (int, error) { + // 4 or more bytes must remain for valid S/C-VLAN tag and EtherType. + if len(b) < 4 { + return 0, io.ErrUnexpectedEOF + } + + // Track how many bytes are consumed by VLAN tags. + var n int + + switch tpid { + case EtherTypeServiceVLAN: + vlan := new(VLAN) + if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil { + return 0, err + } + f.ServiceVLAN = vlan + + // Assume that a C-VLAN immediately trails an S-VLAN. + if EtherType(binary.BigEndian.Uint16(b[n+2:n+4])) != EtherTypeVLAN { + return 0, ErrInvalidVLAN + } + + // 4 or more bytes must remain for valid C-VLAN tag and EtherType. + n += 4 + if len(b[n:]) < 4 { + return 0, io.ErrUnexpectedEOF + } + + // Continue to parse the C-VLAN. + fallthrough + case EtherTypeVLAN: + vlan := new(VLAN) + if err := vlan.UnmarshalBinary(b[n : n+2]); err != nil { + return 0, err + } + + f.VLAN = vlan + f.EtherType = EtherType(binary.BigEndian.Uint16(b[n+2 : n+4])) + n += 4 + default: + panic(fmt.Sprintf("unknown VLAN TPID: %04x", tpid)) + } + + return n, nil +} diff --git a/vendor/github.com/mdlayher/ethernet/fuzz.go b/vendor/github.com/mdlayher/ethernet/fuzz.go new file mode 100644 index 000000000..2698b3bd9 --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/fuzz.go @@ -0,0 +1,25 @@ +//go:build gofuzz +// +build gofuzz + +package ethernet + +func Fuzz(data []byte) int { + f := new(Frame) + if err := f.UnmarshalBinary(data); err != nil { + return 0 + } + + if _, err := f.MarshalBinary(); err != nil { + panic(err) + } + + if err := f.UnmarshalFCS(data); err != nil { + return 0 + } + + if _, err := f.MarshalFCS(); err != nil { + panic(err) + } + + return 1 +} diff --git a/vendor/github.com/mdlayher/ethernet/string.go b/vendor/github.com/mdlayher/ethernet/string.go new file mode 100644 index 000000000..89a3e010a --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/string.go @@ -0,0 +1,38 @@ +// Code generated by "stringer -output=string.go -type=EtherType"; DO NOT EDIT. 
+ +package ethernet + +import "fmt" + +const ( + _EtherType_name_0 = "EtherTypeIPv4" + _EtherType_name_1 = "EtherTypeARP" + _EtherType_name_2 = "EtherTypeVLAN" + _EtherType_name_3 = "EtherTypeIPv6" + _EtherType_name_4 = "EtherTypeServiceVLAN" +) + +var ( + _EtherType_index_0 = [...]uint8{0, 13} + _EtherType_index_1 = [...]uint8{0, 12} + _EtherType_index_2 = [...]uint8{0, 13} + _EtherType_index_3 = [...]uint8{0, 13} + _EtherType_index_4 = [...]uint8{0, 20} +) + +func (i EtherType) String() string { + switch { + case i == 2048: + return _EtherType_name_0 + case i == 2054: + return _EtherType_name_1 + case i == 33024: + return _EtherType_name_2 + case i == 34525: + return _EtherType_name_3 + case i == 34984: + return _EtherType_name_4 + default: + return fmt.Sprintf("EtherType(%d)", i) + } +} diff --git a/vendor/github.com/mdlayher/ethernet/vlan.go b/vendor/github.com/mdlayher/ethernet/vlan.go new file mode 100644 index 000000000..87b7ca2da --- /dev/null +++ b/vendor/github.com/mdlayher/ethernet/vlan.go @@ -0,0 +1,127 @@ +package ethernet + +import ( + "encoding/binary" + "errors" + "io" +) + +const ( + // VLANNone is a special VLAN ID which indicates that no VLAN is being + // used in a Frame. In this case, the VLAN's other fields may be used + // to indicate a Frame's priority. + VLANNone = 0x000 + + // VLANMax is a reserved VLAN ID which may indicate a wildcard in some + // management systems, but may not be configured or transmitted in a + // VLAN tag. + VLANMax = 0xfff +) + +// ErrInvalidVLAN is returned when a VLAN tag is invalid due to one of the +// following reasons: +// - Priority of greater than 7 is detected +// - ID of greater than 4094 (0xffe) is detected +// - A customer VLAN does not follow a service VLAN (when using Q-in-Q) +var ErrInvalidVLAN = errors.New("invalid VLAN") + +// Priority is an IEEE P802.1p priority level. Priority can be any value from +// 0 to 7. +// +// It is important to note that priority 1 (PriorityBackground) actually has +// a lower priority than 0 (PriorityBestEffort). All other Priority constants +// indicate higher priority as the integer values increase. +type Priority uint8 + +// IEEE P802.1p recommended priority levels. Note that PriorityBackground has +// a lower priority than PriorityBestEffort. +const ( + PriorityBackground Priority = 1 + PriorityBestEffort Priority = 0 + PriorityExcellentEffort Priority = 2 + PriorityCriticalApplications Priority = 3 + PriorityVideo Priority = 4 + PriorityVoice Priority = 5 + PriorityInternetworkControl Priority = 6 + PriorityNetworkControl Priority = 7 +) + +// A VLAN is an IEEE 802.1Q Virtual LAN (VLAN) tag. A VLAN contains +// information regarding traffic priority and a VLAN identifier for +// a given Frame. +type VLAN struct { + // Priority specifies a IEEE P802.1p priority level. Priority can be any + // value from 0 to 7. + Priority Priority + + // DropEligible indicates if a Frame is eligible to be dropped in the + // presence of network congestion. + DropEligible bool + + // ID specifies the VLAN ID for a Frame. ID can be any value from 0 to + // 4094 (0x000 to 0xffe), allowing up to 4094 VLANs. + // + // If ID is 0 (0x000, VLANNone), no VLAN is specified, and the other fields + // simply indicate a Frame's priority. + ID uint16 +} + +// MarshalBinary allocates a byte slice and marshals a VLAN into binary form. +func (v *VLAN) MarshalBinary() ([]byte, error) { + b := make([]byte, 2) + _, err := v.read(b) + return b, err +} + +// read reads data from a VLAN into b. 
read is used to marshal a VLAN into +// binary form, but does not allocate on its own. +func (v *VLAN) read(b []byte) (int, error) { + // Check for VLAN priority in valid range + if v.Priority > PriorityNetworkControl { + return 0, ErrInvalidVLAN + } + + // Check for VLAN ID in valid range + if v.ID >= VLANMax { + return 0, ErrInvalidVLAN + } + + // 3 bits: priority + ub := uint16(v.Priority) << 13 + + // 1 bit: drop eligible + var drop uint16 + if v.DropEligible { + drop = 1 + } + ub |= drop << 12 + + // 12 bits: VLAN ID + ub |= v.ID + + binary.BigEndian.PutUint16(b, ub) + return 2, nil +} + +// UnmarshalBinary unmarshals a byte slice into a VLAN. +func (v *VLAN) UnmarshalBinary(b []byte) error { + // VLAN tag is always 2 bytes + if len(b) != 2 { + return io.ErrUnexpectedEOF + } + + // 3 bits: priority + // 1 bit : drop eligible + // 12 bits: VLAN ID + ub := binary.BigEndian.Uint16(b[0:2]) + v.Priority = Priority(uint8(ub >> 13)) + v.DropEligible = ub&0x1000 != 0 + v.ID = ub & 0x0fff + + // Check for VLAN ID in valid range + if v.ID >= VLANMax { + return ErrInvalidVLAN + } + + return nil +} diff --git a/vendor/github.com/minio/md5-simd/LICENSE b/vendor/github.com/minio/md5-simd/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/minio/md5-simd/LICENSE.Golang b/vendor/github.com/minio/md5-simd/LICENSE.Golang new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/minio/md5-simd/LICENSE.Golang @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/minio/md5-simd/README.md b/vendor/github.com/minio/md5-simd/README.md
new file mode 100644
index 000000000..fa6fce1a4
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/README.md
@@ -0,0 +1,198 @@
+
+# md5-simd
+
+This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core.
+
+It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by, among other things, supporting different message sizes per lane and adding AVX512.
+
+`md5-simd` integrates a similar mechanism as described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantage of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load.
+
+It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum.
+Rather, it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core,
+thereby making more efficient usage of the computing resources.
+
+## Usage
+
+[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc)
+
+
+In order to use `md5-simd`, you must first create a `Server` which can be
+used to instantiate one or more objects for MD5 hashing.
+
+These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface
+and as such the normal Write/Reset/Sum functionality works as expected.
+
+As an example:
+```
+	// Create server
+	server := md5simd.NewServer()
+	defer server.Close()
+
+	// Create hashing object (conforming to hash.Hash)
+	md5Hash := server.NewHash()
+	defer md5Hash.Close()
+
+	// Write one (or more) blocks
+	md5Hash.Write(block)
+
+	// Return digest
+	digest := md5Hash.Sum([]byte{})
+```
+
+To keep performance up, both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server)
+and the individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) objects should
+be closed using the `Close()` function when no longer needed.
+
+A Hasher can efficiently be re-used via the [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality.
+
+In case your system does not support the required instructions, it will fall back to using `crypto/md5` for hashing.
+
+## Limitations
+
+As explained above, `md5-simd` does not speed up an individual MD5 hash sum computation,
+unless some hierarchical tree construct is used, but this will result in different outcomes.
+Running a single hash on a server results in approximately half the throughput.
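+
+The win comes only when several hashes are in flight at once, as in this minimal,
+self-contained sketch (it assumes nothing beyond the `NewServer`/`NewHash` API shown
+in the Usage section above; the sixteen 1 MB inputs are placeholders):
+
+```
+package main
+
+import (
+	"encoding/hex"
+	"fmt"
+	"sync"
+
+	md5simd "github.com/minio/md5-simd"
+)
+
+func main() {
+	server := md5simd.NewServer()
+	defer server.Close()
+
+	// 16 independent inputs can keep all AVX512 lanes busy (8 per core on AVX2).
+	inputs := make([][]byte, 16)
+	for i := range inputs {
+		inputs[i] = make([]byte, 1<<20) // placeholder data
+	}
+
+	digests := make([]string, len(inputs))
+	var wg sync.WaitGroup
+	for i := range inputs {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			h := server.NewHash() // conforms to hash.Hash, with an extra Close()
+			defer h.Close()
+			h.Write(inputs[i])
+			digests[i] = hex.EncodeToString(h.Sum(nil))
+		}(i)
+	}
+	wg.Wait()
+	fmt.Println(digests[0])
+}
+```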
+
+Instead, it allows running multiple MD5 calculations in parallel on a single CPU core.
+This can be beneficial in e.g. multi-threaded server applications where many goroutines
+are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core.
+
+This will result in a lower overall CPU usage as compared to using the standard `crypto/md5`
+functionality where each MD5 hash computation will consume a single thread (core).
+
+It is best to test and measure the overall CPU usage in a representative usage scenario in your application
+to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load.
+
+Also note that `md5-simd` is meant to work best with large objects,
+so if your application only hashes small objects of a few kilobytes
+you may be better off using `crypto/md5`.
+
+## Performance
+
+For the best performance, writes should be a multiple of 64 bytes, ideally a multiple of 32KB.
+To help with that, a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize)
+can be inserted if you are unsure of the sizes of the writes.
+Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash.
+
+A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2).
+In situations where it is likely that more than 16 streams are fully loaded, it may be beneficial
+to use multiple servers.
+
+The following chart compares the multi-core performance of `crypto/md5` versus the AVX2 and the AVX512 code:
+
+![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png)
+
+Compared to `crypto/md5`, the AVX2 version is up to 4x faster:
+
+```
+$ benchcmp crypto-md5.txt avx2.txt
+benchmark                     old MB/s     new MB/s     speedup
+BenchmarkParallel/32KB-4      2229.22      7370.50      3.31x
+BenchmarkParallel/64KB-4      2233.61      8248.46      3.69x
+BenchmarkParallel/128KB-4     2235.43      8660.74      3.87x
+BenchmarkParallel/256KB-4     2236.39      8863.87      3.96x
+BenchmarkParallel/512KB-4     2238.05      8985.39      4.01x
+BenchmarkParallel/1MB-4       2233.56      9042.62      4.05x
+BenchmarkParallel/2MB-4       2224.11      9014.46      4.05x
+BenchmarkParallel/4MB-4       2199.78      8993.61      4.09x
+BenchmarkParallel/8MB-4       2182.48      8748.22      4.01x
+```
+
+Compared to `crypto/md5`, the AVX512 is up to 8x faster (for larger block sizes):
+
+```
+$ benchcmp crypto-md5.txt avx512.txt
+benchmark                     old MB/s     new MB/s     speedup
+BenchmarkParallel/32KB-4      2229.22      11605.78     5.21x
+BenchmarkParallel/64KB-4      2233.61      14329.65     6.42x
+BenchmarkParallel/128KB-4     2235.43      16166.39     7.23x
+BenchmarkParallel/256KB-4     2236.39      15570.09     6.96x
+BenchmarkParallel/512KB-4     2238.05      16705.83     7.46x
+BenchmarkParallel/1MB-4       2233.56      16941.95     7.59x
+BenchmarkParallel/2MB-4       2224.11      17136.01     7.70x
+BenchmarkParallel/4MB-4       2199.78      17218.61     7.83x
+BenchmarkParallel/8MB-4       2182.48      17252.88     7.91x
+```
+
+These measurements were performed on an AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz.
+
+If only one or two inputs are available, the scalar calculation method will be used for
+optimal speed in these cases.
+
+## Operation
+
+To make operation as easy as possible, there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows:
+
+![server-architecture](chart/server-architecture.png)
+
+The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results.
+
+Whenever there is data available, the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available, all the lanes will be filled. However, since that may not be the case, the server will fill fewer lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written.
+
+![server-lanes-example](chart/server-lanes-example.png)
+
+In this example, 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance.
+
+For AVX512, all 16 calculations will be done on a single core; on AVX2, on 2 cores if there is data for more than 8 lanes.
+So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes.
+
+
+## Design & Tech
+
+md5-simd has both an AVX2 (8-lane parallel) and an AVX512 (16-lane parallel) version of the algorithm to accelerate the computation, with the following function definitions:
+```
+//go:noescape
+func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int)
+
+//go:noescape
+func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int)
+```
+
+The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes.
+
+The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications.
+
+### Caching in upper ZMM registers
+
+The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1`, which are subsequently used during `ROUND2` through to `ROUND4`.
+
+Since AVX512 has double the number of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` into the AVX512 block function.
+
+### Direct loading using 64-bit pointers
+
+The AVX2 version uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offsets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out into memory, it is not possible to derive a "base" address and corresponding offsets (all within 32 bits) for all 8 slices.
+
+As such, the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 input slices and passes this buffer, along with (fixed) 32-bit offsets, into the assembly code.
+
+For the AVX512 version, this interim buffer is not needed since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero).
+
+Note that two load (gather) instructions are needed because the AVX512 version processes 16 lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS).
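+
+The offset derivation for the AVX2 path can be sketched in plain Go. The snippet below
+is a schematic illustration only (it is not the package's internal code): it shows why
+staging all 8 lanes inside one shared buffer guarantees that each lane's position fits
+in a 32-bit offset from a single base pointer:
+
+```
+package main
+
+import (
+	"fmt"
+	"math"
+	"unsafe"
+)
+
+func main() {
+	const laneSize = 32 << 10 // one 32KB staging area per lane
+	base := make([]byte, 8*laneSize)
+	baseMin := uintptr(unsafe.Pointer(&base[0]))
+
+	var offsets [8]int32
+	for i := range offsets {
+		// Every lane lives inside `base`, so its distance from the base
+		// pointer is small and is guaranteed to fit in 32 bits.
+		off := uintptr(unsafe.Pointer(&base[i*laneSize])) - baseMin
+		if off > math.MaxUint32 {
+			panic("offset does not fit in 32 bits")
+		}
+		offsets[i] = int32(off)
+	}
+	fmt.Println(offsets) // passed to the gather together with the base address
+}
+```
+
+The AVX512 path can skip this staging step, since its `VPGATHERQD` pair dereferences
+full 64-bit pointers into the original slices.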
+
+### Masking support
+
+Because pointers are passed directly from the Golang slices, we need to protect against NULL pointers.
+For this, a 16-bit mask is passed to the AVX512 assembly code, which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations.
+
+### Minor optimizations
+
+The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction.
+
+Also, several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERNLOGD` instruction), resulting in a further simplification and speed-up.
+
+## Low level block function performance
+
+The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison.
+
+```
+BenchmarkCryptoMd5-4    687.66 MB/s     0 B/op     0 allocs/op
+BenchmarkBlock8-4      4144.80 MB/s     0 B/op     0 allocs/op
+BenchmarkBlock16-4     8228.88 MB/s     0 B/op     0 allocs/op
+```
+
+## License
+
+`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE.
+
+## Contributing
+
+Contributions are welcome; please send PRs for any enhancements.
\ No newline at end of file
diff --git a/vendor/github.com/minio/md5-simd/block16_amd64.s b/vendor/github.com/minio/md5-simd/block16_amd64.s
new file mode 100644
index 000000000..be0a43a3b
--- /dev/null
+++ b/vendor/github.com/minio/md5-simd/block16_amd64.s
@@ -0,0 +1,228 @@
+// Copyright (c) 2020 MinIO Inc. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+ +//+build !noasm,!appengine,gc + +// This is the AVX512 implementation of the MD5 block function (16-way parallel) + +#define prep(index) \ + KMOVQ kmask, ktmp \ + VPGATHERDD index*4(base)(ptrs*1), ktmp, mem + +#define ROUND1(a, b, c, d, index, const, shift) \ + VPXORQ c, tmp, tmp \ + VPADDD 64*const(consts), a, a \ + VPADDD mem, a, a \ + VPTERNLOGD $0x6C, b, d, tmp \ + prep(index) \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND1noload(a, b, c, d, const, shift) \ + VPXORQ c, tmp, tmp \ + VPADDD 64*const(consts), a, a \ + VPADDD mem, a, a \ + VPTERNLOGD $0x6C, b, d, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND2(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VANDNPD c, tmp, tmp \ + VPTERNLOGD $0xEC, b, tmp, tmp2 \ + VMOVAPD c, tmp \ + VPADDD tmp2, a, a \ + VMOVAPD c, tmp2 \ + VPROLD $shift, a, a \ + VPADDD b, a, a + +#define ROUND3(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VPTERNLOGD $0x96, b, d, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VMOVAPD b, tmp \ + VPADDD b, a, a + +#define ROUND4(a, b, c, d, zreg, const, shift) \ + VPADDD 64*const(consts), a, a \ + VPADDD zreg, a, a \ + VPTERNLOGD $0x36, b, c, tmp \ + VPADDD tmp, a, a \ + VPROLD $shift, a, a \ + VPXORQ c, ones, tmp \ + VPADDD b, a, a + +TEXT ·block16(SB), 4, $0-40 + + MOVQ state+0(FP), BX + MOVQ base+8(FP), SI + MOVQ ptrs+16(FP), AX + KMOVQ mask+24(FP), K1 + MOVQ n+32(FP), DX + MOVQ ·avx512md5consts+0(SB), DI + +#define a Z0 +#define b Z1 +#define c Z2 +#define d Z3 + +#define sa Z4 +#define sb Z5 +#define sc Z6 +#define sd Z7 + +#define tmp Z8 +#define tmp2 Z9 +#define ptrs Z10 +#define ones Z12 +#define mem Z15 + +#define kmask K1 +#define ktmp K3 + +// ---------------------------------------------------------- +// Registers Z16 through to Z31 are used for caching purposes +// ---------------------------------------------------------- + +#define dig BX +#define count DX +#define base SI +#define consts DI + + // load digest into state registers + VMOVUPD (dig), a + VMOVUPD 0x40(dig), b + VMOVUPD 0x80(dig), c + VMOVUPD 0xc0(dig), d + + // load source pointers + VMOVUPD 0x00(AX), ptrs + + MOVQ $-1, AX + VPBROADCASTQ AX, ones + +loop: + VMOVAPD a, sa + VMOVAPD b, sb + VMOVAPD c, sc + VMOVAPD d, sd + + prep(0) + VMOVAPD d, tmp + VMOVAPD mem, Z16 + + ROUND1(a,b,c,d, 1,0x00, 7) + VMOVAPD mem, Z17 + ROUND1(d,a,b,c, 2,0x01,12) + VMOVAPD mem, Z18 + ROUND1(c,d,a,b, 3,0x02,17) + VMOVAPD mem, Z19 + ROUND1(b,c,d,a, 4,0x03,22) + VMOVAPD mem, Z20 + ROUND1(a,b,c,d, 5,0x04, 7) + VMOVAPD mem, Z21 + ROUND1(d,a,b,c, 6,0x05,12) + VMOVAPD mem, Z22 + ROUND1(c,d,a,b, 7,0x06,17) + VMOVAPD mem, Z23 + ROUND1(b,c,d,a, 8,0x07,22) + VMOVAPD mem, Z24 + ROUND1(a,b,c,d, 9,0x08, 7) + VMOVAPD mem, Z25 + ROUND1(d,a,b,c,10,0x09,12) + VMOVAPD mem, Z26 + ROUND1(c,d,a,b,11,0x0a,17) + VMOVAPD mem, Z27 + ROUND1(b,c,d,a,12,0x0b,22) + VMOVAPD mem, Z28 + ROUND1(a,b,c,d,13,0x0c, 7) + VMOVAPD mem, Z29 + ROUND1(d,a,b,c,14,0x0d,12) + VMOVAPD mem, Z30 + ROUND1(c,d,a,b,15,0x0e,17) + VMOVAPD mem, Z31 + + ROUND1noload(b,c,d,a, 0x0f,22) + + VMOVAPD d, tmp + VMOVAPD d, tmp2 + + ROUND2(a,b,c,d, Z17,0x10, 5) + ROUND2(d,a,b,c, Z22,0x11, 9) + ROUND2(c,d,a,b, Z27,0x12,14) + ROUND2(b,c,d,a, Z16,0x13,20) + ROUND2(a,b,c,d, Z21,0x14, 5) + ROUND2(d,a,b,c, Z26,0x15, 9) + ROUND2(c,d,a,b, Z31,0x16,14) + ROUND2(b,c,d,a, Z20,0x17,20) + ROUND2(a,b,c,d, Z25,0x18, 5) + ROUND2(d,a,b,c, 
Z30,0x19, 9) + ROUND2(c,d,a,b, Z19,0x1a,14) + ROUND2(b,c,d,a, Z24,0x1b,20) + ROUND2(a,b,c,d, Z29,0x1c, 5) + ROUND2(d,a,b,c, Z18,0x1d, 9) + ROUND2(c,d,a,b, Z23,0x1e,14) + ROUND2(b,c,d,a, Z28,0x1f,20) + + VMOVAPD c, tmp + + ROUND3(a,b,c,d, Z21,0x20, 4) + ROUND3(d,a,b,c, Z24,0x21,11) + ROUND3(c,d,a,b, Z27,0x22,16) + ROUND3(b,c,d,a, Z30,0x23,23) + ROUND3(a,b,c,d, Z17,0x24, 4) + ROUND3(d,a,b,c, Z20,0x25,11) + ROUND3(c,d,a,b, Z23,0x26,16) + ROUND3(b,c,d,a, Z26,0x27,23) + ROUND3(a,b,c,d, Z29,0x28, 4) + ROUND3(d,a,b,c, Z16,0x29,11) + ROUND3(c,d,a,b, Z19,0x2a,16) + ROUND3(b,c,d,a, Z22,0x2b,23) + ROUND3(a,b,c,d, Z25,0x2c, 4) + ROUND3(d,a,b,c, Z28,0x2d,11) + ROUND3(c,d,a,b, Z31,0x2e,16) + ROUND3(b,c,d,a, Z18,0x2f,23) + + VPXORQ d, ones, tmp + + ROUND4(a,b,c,d, Z16,0x30, 6) + ROUND4(d,a,b,c, Z23,0x31,10) + ROUND4(c,d,a,b, Z30,0x32,15) + ROUND4(b,c,d,a, Z21,0x33,21) + ROUND4(a,b,c,d, Z28,0x34, 6) + ROUND4(d,a,b,c, Z19,0x35,10) + ROUND4(c,d,a,b, Z26,0x36,15) + ROUND4(b,c,d,a, Z17,0x37,21) + ROUND4(a,b,c,d, Z24,0x38, 6) + ROUND4(d,a,b,c, Z31,0x39,10) + ROUND4(c,d,a,b, Z22,0x3a,15) + ROUND4(b,c,d,a, Z29,0x3b,21) + ROUND4(a,b,c,d, Z20,0x3c, 6) + ROUND4(d,a,b,c, Z27,0x3d,10) + ROUND4(c,d,a,b, Z18,0x3e,15) + ROUND4(b,c,d,a, Z25,0x3f,21) + + VPADDD sa, a, a + VPADDD sb, b, b + VPADDD sc, c, c + VPADDD sd, d, d + + LEAQ 64(base), base + SUBQ $64, count + JNE loop + + VMOVUPD a, (dig) + VMOVUPD b, 0x40(dig) + VMOVUPD c, 0x80(dig) + VMOVUPD d, 0xc0(dig) + + VZEROUPPER + RET diff --git a/vendor/github.com/minio/md5-simd/block8_amd64.s b/vendor/github.com/minio/md5-simd/block8_amd64.s new file mode 100644 index 000000000..f57db17aa --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block8_amd64.s @@ -0,0 +1,281 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2018 Igneous Systems +// MIT License +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. 
+ +// This is the AVX2 implementation of the MD5 block function (8-way parallel) + +// block8(state *uint64, base uintptr, bufs *int32, cache *byte, n int) +TEXT ·block8(SB), 4, $0-40 + MOVQ state+0(FP), BX + MOVQ base+8(FP), SI + MOVQ bufs+16(FP), AX + MOVQ cache+24(FP), CX + MOVQ n+32(FP), DX + MOVQ ·avx256md5consts+0(SB), DI + + // Align cache (which is stack allocated by the compiler) + // to a 256 bit boundary (ymm register alignment) + // The cache8 type is deliberately oversized to permit this. + ADDQ $31, CX + ANDB $-32, CL + +#define a Y0 +#define b Y1 +#define c Y2 +#define d Y3 + +#define sa Y4 +#define sb Y5 +#define sc Y6 +#define sd Y7 + +#define tmp Y8 +#define tmp2 Y9 + +#define mask Y10 +#define off Y11 + +#define ones Y12 + +#define rtmp1 Y13 +#define rtmp2 Y14 + +#define mem Y15 + +#define dig BX +#define cache CX +#define count DX +#define base SI +#define consts DI + +#define prepmask \ + VPXOR mask, mask, mask \ + VPCMPGTD mask, off, mask + +#define prep(index) \ + VMOVAPD mask, rtmp2 \ + VPGATHERDD rtmp2, index*4(base)(off*1), mem + +#define load(index) \ + VMOVAPD index*32(cache), mem + +#define store(index) \ + VMOVAPD mem, index*32(cache) + +#define roll(shift, a) \ + VPSLLD $shift, a, rtmp1 \ + VPSRLD $32-shift, a, a \ + VPOR rtmp1, a, a + +#define ROUND1(a, b, c, d, index, const, shift) \ + VPXOR c, tmp, tmp \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp, tmp \ + VPXOR d, tmp, tmp \ + prep(index) \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND1load(a, b, c, d, index, const, shift) \ + VXORPD c, tmp, tmp \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp, tmp \ + VPXOR d, tmp, tmp \ + load(index) \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD c, tmp \ + VPADDD b, a, a + +#define ROUND2(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPAND b, tmp2, tmp2 \ + VANDNPD c, tmp, tmp \ + load(index) \ + VPOR tmp, tmp2, tmp2 \ + VMOVAPD c, tmp \ + VPADDD tmp2, a, a \ + VMOVAPD c, tmp2 \ + roll(shift,a) \ + VPADDD b, a, a + +#define ROUND3(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + load(index) \ + VPXOR d, tmp, tmp \ + VPXOR b, tmp, tmp \ + VPADDD tmp, a, a \ + roll(shift,a) \ + VMOVAPD b, tmp \ + VPADDD b, a, a + +#define ROUND4(a, b, c, d, index, const, shift) \ + VPADDD 32*const(consts), a, a \ + VPADDD mem, a, a \ + VPOR b, tmp, tmp \ + VPXOR c, tmp, tmp \ + VPADDD tmp, a, a \ + load(index) \ + roll(shift,a) \ + VPXOR c, ones, tmp \ + VPADDD b, a, a + + // load digest into state registers + VMOVUPD (dig), a + VMOVUPD 32(dig), b + VMOVUPD 64(dig), c + VMOVUPD 96(dig), d + + // load source buffer offsets + VMOVUPD (AX), off + + prepmask + VPCMPEQD ones, ones, ones + +loop: + VMOVAPD a, sa + VMOVAPD b, sb + VMOVAPD c, sc + VMOVAPD d, sd + + prep(0) + VMOVAPD d, tmp + store(0) + + ROUND1(a,b,c,d, 1,0x00, 7) + store(1) + ROUND1(d,a,b,c, 2,0x01,12) + store(2) + ROUND1(c,d,a,b, 3,0x02,17) + store(3) + ROUND1(b,c,d,a, 4,0x03,22) + store(4) + ROUND1(a,b,c,d, 5,0x04, 7) + store(5) + ROUND1(d,a,b,c, 6,0x05,12) + store(6) + ROUND1(c,d,a,b, 7,0x06,17) + store(7) + ROUND1(b,c,d,a, 8,0x07,22) + store(8) + ROUND1(a,b,c,d, 9,0x08, 7) + store(9) + ROUND1(d,a,b,c,10,0x09,12) + store(10) + ROUND1(c,d,a,b,11,0x0a,17) + store(11) + ROUND1(b,c,d,a,12,0x0b,22) + store(12) + ROUND1(a,b,c,d,13,0x0c, 7) + store(13) + ROUND1(d,a,b,c,14,0x0d,12) + store(14) + ROUND1(c,d,a,b,15,0x0e,17) + store(15) + 
ROUND1load(b,c,d,a, 1,0x0f,22) + + VMOVAPD d, tmp + VMOVAPD d, tmp2 + + ROUND2(a,b,c,d, 6,0x10, 5) + ROUND2(d,a,b,c,11,0x11, 9) + ROUND2(c,d,a,b, 0,0x12,14) + ROUND2(b,c,d,a, 5,0x13,20) + ROUND2(a,b,c,d,10,0x14, 5) + ROUND2(d,a,b,c,15,0x15, 9) + ROUND2(c,d,a,b, 4,0x16,14) + ROUND2(b,c,d,a, 9,0x17,20) + ROUND2(a,b,c,d,14,0x18, 5) + ROUND2(d,a,b,c, 3,0x19, 9) + ROUND2(c,d,a,b, 8,0x1a,14) + ROUND2(b,c,d,a,13,0x1b,20) + ROUND2(a,b,c,d, 2,0x1c, 5) + ROUND2(d,a,b,c, 7,0x1d, 9) + ROUND2(c,d,a,b,12,0x1e,14) + ROUND2(b,c,d,a, 0,0x1f,20) + + load(5) + VMOVAPD c, tmp + + ROUND3(a,b,c,d, 8,0x20, 4) + ROUND3(d,a,b,c,11,0x21,11) + ROUND3(c,d,a,b,14,0x22,16) + ROUND3(b,c,d,a, 1,0x23,23) + ROUND3(a,b,c,d, 4,0x24, 4) + ROUND3(d,a,b,c, 7,0x25,11) + ROUND3(c,d,a,b,10,0x26,16) + ROUND3(b,c,d,a,13,0x27,23) + ROUND3(a,b,c,d, 0,0x28, 4) + ROUND3(d,a,b,c, 3,0x29,11) + ROUND3(c,d,a,b, 6,0x2a,16) + ROUND3(b,c,d,a, 9,0x2b,23) + ROUND3(a,b,c,d,12,0x2c, 4) + ROUND3(d,a,b,c,15,0x2d,11) + ROUND3(c,d,a,b, 2,0x2e,16) + ROUND3(b,c,d,a, 0,0x2f,23) + + load(0) + VPXOR d, ones, tmp + + ROUND4(a,b,c,d, 7,0x30, 6) + ROUND4(d,a,b,c,14,0x31,10) + ROUND4(c,d,a,b, 5,0x32,15) + ROUND4(b,c,d,a,12,0x33,21) + ROUND4(a,b,c,d, 3,0x34, 6) + ROUND4(d,a,b,c,10,0x35,10) + ROUND4(c,d,a,b, 1,0x36,15) + ROUND4(b,c,d,a, 8,0x37,21) + ROUND4(a,b,c,d,15,0x38, 6) + ROUND4(d,a,b,c, 6,0x39,10) + ROUND4(c,d,a,b,13,0x3a,15) + ROUND4(b,c,d,a, 4,0x3b,21) + ROUND4(a,b,c,d,11,0x3c, 6) + ROUND4(d,a,b,c, 2,0x3d,10) + ROUND4(c,d,a,b, 9,0x3e,15) + ROUND4(b,c,d,a, 0,0x3f,21) + + VPADDD sa, a, a + VPADDD sb, b, b + VPADDD sc, c, c + VPADDD sd, d, d + + LEAQ 64(base), base + SUBQ $64, count + JNE loop + + VMOVUPD a, (dig) + VMOVUPD b, 32(dig) + VMOVUPD c, 64(dig) + VMOVUPD d, 96(dig) + + VZEROUPPER + RET diff --git a/vendor/github.com/minio/md5-simd/block_amd64.go b/vendor/github.com/minio/md5-simd/block_amd64.go new file mode 100644 index 000000000..16edda268 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/block_amd64.go @@ -0,0 +1,210 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "fmt" + "math" + "unsafe" + + "github.com/klauspost/cpuid/v2" +) + +var hasAVX512 bool + +func init() { + // VANDNPD requires AVX512DQ. Technically it could be VPTERNLOGQ which is AVX512F. + hasAVX512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ) +} + +//go:noescape +func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int) + +//go:noescape +func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int) + +// 8-way 4x uint32 digests in 4 ymm registers +// (ymm0, ymm1, ymm2, ymm3) +type digest8 struct { + v0, v1, v2, v3 [8]uint32 +} + +// Stack cache for 8x64 byte md5.BlockSize bytes. +// Must be 32-byte aligned, so allocate 512+32 and +// align upwards at runtime. +type cache8 [512 + 32]byte + +// MD5 magic numbers for one lane of hashing; inflated +// 8x below at init time. 
+var md5consts = [64]uint32{ + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, + 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391, +} + +// inflate the consts 8-way for 8x md5 (256 bit ymm registers) +var avx256md5consts = func(c []uint32) []uint32 { + inf := make([]uint32, 8*len(c)) + for i := range c { + for j := 0; j < 8; j++ { + inf[(i*8)+j] = c[i] + } + } + return inf +}(md5consts[:]) + +// 16-way 4x uint32 digests in 4 zmm registers +type digest16 struct { + v0, v1, v2, v3 [16]uint32 +} + +// inflate the consts 16-way for 16x md5 (512 bit zmm registers) +var avx512md5consts = func(c []uint32) []uint32 { + inf := make([]uint32, 16*len(c)) + for i := range c { + for j := 0; j < 16; j++ { + inf[(i*16)+j] = c[i] + } + } + return inf +}(md5consts[:]) + +// Interface function to assembly code +func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) { + if hasAVX512 { + blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16) + return + } + + // Preparing data using copy is slower since copies aren't inlined. + + // Calculate on this goroutine + if half { + for i := range s.i8[0][:] { + s.i8[0][i] = input[i] + } + for i := range s.d8a.v0[:] { + s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] + } + blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a) + for i := range s.d8a.v0[:] { + d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] + } + return + } + + for i := range s.i8[0][:] { + s.i8[0][i], s.i8[1][i] = input[i], input[8+i] + } + + for i := range s.d8a.v0[:] { + j := (i + 8) & 15 + s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] + s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j] + } + + // Benchmarks appear to be slightly faster when spinning up 2 goroutines instead + // of using the current goroutine for one of the blocks.
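+ // Each 8-lane half owns its own digest state (d8a/d8b), mask schedule and stack + // cache, so the two AVX2 pipelines can run concurrently without sharing mutable state.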
+ s.wg.Add(2) + go func() { blockMd5_avx2(&s.d8a, s.i8[0], s.allBufs, &s.maskRounds8a); s.wg.Done() }() + go func() { blockMd5_avx2(&s.d8b, s.i8[1], s.allBufs, &s.maskRounds8b); s.wg.Done() }() + s.wg.Wait() + for i := range s.d8a.v0[:] { + d.v0[i], d.v1[i], d.v2[i], d.v3[i] = s.d8a.v0[i], s.d8a.v1[i], s.d8a.v2[i], s.d8a.v3[i] + } + for i := range s.d8b.v0[:] { + j := (i + 8) & 15 + d.v0[j], d.v1[j], d.v2[j], d.v3[j] = s.d8b.v0[i], s.d8b.v1[i], s.d8b.v2[i], s.d8b.v3[i] + } +} + +// Interface function to AVX512 assembly code +func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) { + baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) + ptrs := [16]int32{} + + for i := range ptrs { + if len(input[i]) > 0 { + if len(input[i]) > internalBlockSize { + panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) + } + + off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin + if off > math.MaxUint32 { + panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) + } + ptrs[i] = int32(off) + } + } + + sdup := *s // create copy of initial states to receive intermediate updates + + rounds := generateMaskAndRounds16(input, maskRounds) + + for r := 0; r < rounds; r++ { + m := maskRounds[r] + + block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds)) + + for j := 0; j < len(ptrs); j++ { + ptrs[j] += int32(64 * m.rounds) // update pointers for next round + if m.mask&(1<<j) != 0 { // update digest if lane is still active in this mask + s.v0[j], s.v1[j], s.v2[j], s.v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j] + } + } + } +} + +// Interface function to AVX2 assembly code +func blockMd5_avx2(s *digest8, input [8][]byte, base []byte, maskRounds *[8]maskRounds) { + baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) + ptrs := [8]int32{} + + for i := range ptrs { + if len(input[i]) > 0 { + if len(input[i]) > internalBlockSize { + panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) + } + + off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin + if off > math.MaxUint32 { + panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) + } + ptrs[i] = int32(off) + } + } + + sdup := *s // create copy of initial states to receive intermediate updates + + rounds := generateMaskAndRounds8(input, maskRounds) + + for r := 0; r < rounds; r++ { + m := maskRounds[r] + var cache cache8 // stack storage for block8 tmp state + block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds)) + + for j := 0; j < len(ptrs); j++ { + ptrs[j] += int32(64 * m.rounds) // update pointers for next round + if m.mask&(1<<j) != 0 { // update digest if lane is still active in this mask + s.v0[j], s.v1[j], s.v2[j], s.v3[j] = sdup.v0[j], sdup.v1[j], sdup.v2[j], sdup.v3[j] + } + } + } +} diff --git a/vendor/github.com/minio/md5-simd/md5-digest_amd64.go b/vendor/github.com/minio/md5-simd/md5-digest_amd64.go new file mode 100644 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-digest_amd64.go +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "encoding/binary" + "errors" + "fmt" + "sync" + "sync/atomic" +) + +// md5Digest - Type for computing MD5 using either AVX2 or AVX512 +type md5Digest struct { + uid uint64 + blocksCh chan blockInput + cycleServer chan uint64 + x [BlockSize]byte + nx int + len uint64 + buffers chan []byte +} + +// NewHash - initialize instance for MD5 implementation. +func (s *md5Server) NewHash() Hasher { + uid := atomic.AddUint64(&s.uidCounter, 1) + blockCh := make(chan blockInput, buffersPerLane) + s.newInput <- newClient{uid: uid, input: blockCh} + return &md5Digest{ + uid: uid, + buffers: s.buffers, + blocksCh: blockCh, + cycleServer: s.cycle, + } +} + +// Size - Return size of checksum +func (d *md5Digest) Size() int { return Size } + +// BlockSize - Return blocksize of checksum +func (d *md5Digest) BlockSize() int { return BlockSize } + +func (d *md5Digest) Reset() { + if d.blocksCh == nil { + panic("reset after close") + } + d.nx = 0 + d.len = 0 + d.sendBlock(blockInput{uid: d.uid, reset: true}, false) +} + +// Write to the digest, breaking the input into chunks of at most internalBlockSize. +func (d *md5Digest) Write(p []byte) (nn int, err error) { + if d.blocksCh == nil { + return 0, errors.New("md5Digest closed") + } + + for { + l := len(p) + if l > internalBlockSize { + l = internalBlockSize + } + nnn, err := d.write(p[:l]) + if err != nil { + return nn, err + } + nn += nnn + p = p[l:] + + if len(p) == 0 { + break + } + + } + return +} + +func (d *md5Digest) write(p []byte) (nn int, err error) { + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == BlockSize { + // Create a copy of the overflow buffer in order to send it async over the channel + // (since we will modify the overflow buffer down below with any access beyond multiples of 64) + tmp := <-d.buffers + tmp = tmp[:BlockSize] + copy(tmp, d.x[:]) + d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= BlockSize { + n := len(p) &^ (BlockSize - 1) + buf := <-d.buffers + buf = buf[:n] + copy(buf, p) + d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d *md5Digest) Close() { + if d.blocksCh != nil { + close(d.blocksCh) + d.blocksCh = nil + } +} + +var sumChPool sync.Pool + +func init() { + sumChPool.New = func() interface{} { + return make(chan sumResult, 1) + } +} + +// Sum - Return MD5 sum in bytes +func (d
*md5Digest) Sum(in []byte) (result []byte) { + if d.blocksCh == nil { + panic("sum after close") + } + + trail := <-d.buffers + trail = append(trail[:0], d.x[:d.nx]...) + + length := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if length%64 < 56 { + trail = append(trail, tmp[0:56-length%64]...) + } else { + trail = append(trail, tmp[0:64+56-length%64]...) + } + + // Length in bits. + length <<= 3 + binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits + + trail = append(trail, tmp[0:8]...) + if len(trail)%BlockSize != 0 { + panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx)) + } + sumCh := sumChPool.Get().(chan sumResult) + d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true) + + sum := <-sumCh + sumChPool.Put(sumCh) + + return append(in, sum.digest[:]...) +} + +// sendBlock will send a block for processing. +// If cycle is true we will block on cycle, otherwise we will only block +// if the block channel is full. +func (d *md5Digest) sendBlock(bi blockInput, cycle bool) { + if cycle { + select { + case d.blocksCh <- bi: + d.cycleServer <- d.uid + } + return + } + // Only block on cycle if we filled the buffer + select { + case d.blocksCh <- bi: + return + default: + d.cycleServer <- d.uid + d.blocksCh <- bi + } +} diff --git a/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/vendor/github.com/minio/md5-simd/md5-server_amd64.go new file mode 100644 index 000000000..94f741c54 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-server_amd64.go @@ -0,0 +1,397 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +import ( + "encoding/binary" + "fmt" + "runtime" + "sync" + + "github.com/klauspost/cpuid/v2" +) + +// MD5 initialization constants +const ( + // Lanes is the number of concurrently calculated hashes. + Lanes = 16 + + init0 = 0x67452301 + init1 = 0xefcdab89 + init2 = 0x98badcfe + init3 = 0x10325476 + + // Use scalar routine when below this many lanes + useScalarBelow = 3 +) + +// md5ServerUID - Does not start at 0 but at the next multiple of 16, so as to be +// able to differentiate it from the default initialisation value of 0 +const md5ServerUID = Lanes + +const buffersPerLane = 3 + +// Message to send across input channel +type blockInput struct { + uid uint64 + msg []byte + sumCh chan sumResult + reset bool +} + +type sumResult struct { + digest [Size]byte +} + +type lanesInfo [Lanes]blockInput + +// md5Server - Type to implement parallel handling of MD5 invocations +type md5Server struct { + uidCounter uint64 + cycle chan uint64 // client with uid has update. + newInput chan newClient // Add new client. + digests map[uint64][Size]byte // Map of uids to (interim) digest results + maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds + maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core) + maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core) + allBufs []byte // Preallocated buffer. + buffers chan []byte // Preallocated buffers, sliced from allBufs.
+ + i8 [2][8][]byte // avx2 temporary vars + d8a, d8b digest8 + wg sync.WaitGroup +} + +// NewServer - Create new object for parallel processing handling +func NewServer() Server { + if !cpuid.CPU.Supports(cpuid.AVX2) { + return &fallbackServer{} + } + md5srv := &md5Server{} + md5srv.digests = make(map[uint64][Size]byte) + md5srv.newInput = make(chan newClient, Lanes) + md5srv.cycle = make(chan uint64, Lanes*10) + md5srv.uidCounter = md5ServerUID - 1 + md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize) + md5srv.buffers = make(chan []byte, buffersPerLane*Lanes) + // Fill buffers. + for i := 0; i < buffersPerLane*Lanes; i++ { + s := 32 + i*internalBlockSize + md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize] + } + + // Start a single thread for reading from the input channel + go md5srv.process(md5srv.newInput) + return md5srv +} + +type newClient struct { + uid uint64 + input chan blockInput +} + +// process - Sole handler for reading from the input channel. +func (s *md5Server) process(newClients chan newClient) { + // To fill up as many lanes as possible: + // + // 1. Wait for a cycle id. + // 2. If not already in a lane, add, otherwise leave on channel + // 3. Start timer + // 4. Check if lanes is full, if so, goto 10 (process). + // 5. If timeout, goto 10. + // 6. Wait for new id (goto 2) or timeout (goto 10). + // 10. Process. + // 11. Check all input if there is already input, if so add to lanes. + // 12. Goto 1 + + // lanes contains the lanes. + var lanes lanesInfo + // lanesFilled contains the number of filled lanes for current cycle. + var lanesFilled int + // clients contains active clients + var clients = make(map[uint64]chan blockInput, Lanes) + + addToLane := func(uid uint64) { + cl, ok := clients[uid] + if !ok { + // Unknown client. Maybe it was already removed. + return + } + // Check if we already have it. + for _, lane := range lanes[:lanesFilled] { + if lane.uid == uid { + return + } + } + // Continue until we get a block or there is nothing on channel + for { + select { + case block, ok := <-cl: + if !ok { + // Client disconnected + delete(clients, block.uid) + return + } + if block.uid != uid { + panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid)) + } + // If reset message, reset and we're done + if block.reset { + delete(s.digests, uid) + continue + } + + // If requesting sum, we will need to maintain state. + if block.sumCh != nil { + var dig digest + d, ok := s.digests[uid] + if ok { + dig.s[0] = binary.LittleEndian.Uint32(d[0:4]) + dig.s[1] = binary.LittleEndian.Uint32(d[4:8]) + dig.s[2] = binary.LittleEndian.Uint32(d[8:12]) + dig.s[3] = binary.LittleEndian.Uint32(d[12:16]) + } else { + dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3 + } + + sum := sumResult{} + // Add end block to current digest. 
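+ // The block sent with a sumCh already carries the 0x80 padding and the encoded + // bit length (appended by Sum), so one scalar pass over it finalizes the digest.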
+ blockScalar(&dig.s, block.msg) + + binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0]) + binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1]) + binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2]) + binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3]) + block.sumCh <- sum + if block.msg != nil { + s.buffers <- block.msg + } + continue + } + if len(block.msg) == 0 { + continue + } + lanes[lanesFilled] = block + lanesFilled++ + return + default: + return + } + } + } + addNewClient := func(cl newClient) { + if _, ok := clients[cl.uid]; ok { + panic("internal error: duplicate client registration") + } + clients[cl.uid] = cl.input + } + + allLanesFilled := func() bool { + return lanesFilled == Lanes || lanesFilled >= len(clients) + } + + for { + // Step 1. + for lanesFilled == 0 { + select { + case cl, ok := <-newClients: + if !ok { + return + } + addNewClient(cl) + // Check if it already sent a payload. + addToLane(cl.uid) + continue + case uid := <-s.cycle: + addToLane(uid) + } + } + + fillLanes: + for !allLanesFilled() { + select { + case cl, ok := <-newClients: + if !ok { + return + } + addNewClient(cl) + + case uid := <-s.cycle: + addToLane(uid) + default: + // Nothing more queued... + break fillLanes + } + } + + // If we did not fill all lanes, check if there is more waiting + if !allLanesFilled() { + runtime.Gosched() + for uid := range clients { + addToLane(uid) + if allLanesFilled() { + break + } + } + } + if false { + if !allLanesFilled() { + fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients)) + //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + } else if true { + fmt.Println("all lanes filled") + } + } + // Process the lanes we could collect + s.blocks(lanes[:lanesFilled]) + + // Clear lanes... + lanesFilled = 0 + // Add all current queued + for uid := range clients { + addToLane(uid) + if allLanesFilled() { + break + } + } + } +} + +func (s *md5Server) Close() { + if s.newInput != nil { + close(s.newInput) + s.newInput = nil + } +} + +// Invoke assembly and send results back +func (s *md5Server) blocks(lanes []blockInput) { + if len(lanes) < useScalarBelow { + // Use scalar routine when below this many lanes + switch len(lanes) { + case 0: + case 1: + lane := lanes[0] + var d digest + a, ok := s.digests[lane.uid] + if ok { + d.s[0] = binary.LittleEndian.Uint32(a[0:4]) + d.s[1] = binary.LittleEndian.Uint32(a[4:8]) + d.s[2] = binary.LittleEndian.Uint32(a[8:12]) + d.s[3] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.s[0] = init0 + d.s[1] = init1 + d.s[2] = init2 + d.s[3] = init3 + } + if len(lane.msg) > 0 { + // Update... + blockScalar(&d.s, lane.msg) + } + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], d.s[0]) + binary.LittleEndian.PutUint32(dig[4:], d.s[1]) + binary.LittleEndian.PutUint32(dig[8:], d.s[2]) + binary.LittleEndian.PutUint32(dig[12:], d.s[3]) + s.digests[lane.uid] = dig + + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[0] = blockInput{} + + default: + s.wg.Add(len(lanes)) + var results [useScalarBelow]digest + for i := range lanes { + lane := lanes[i] + go func(i int) { + var d digest + defer s.wg.Done() + a, ok := s.digests[lane.uid] + if ok { + d.s[0] = binary.LittleEndian.Uint32(a[0:4]) + d.s[1] = binary.LittleEndian.Uint32(a[4:8]) + d.s[2] = binary.LittleEndian.Uint32(a[8:12]) + d.s[3] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.s[0] = init0 + d.s[1] = init1 + d.s[2] = init2 + d.s[3] = init3 + } + if len(lane.msg) == 0 { + results[i] = d + return + } + // Update... 
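+ // lane.msg is always a whole number of 64-byte blocks here, since the digest + // side only ever queues full blocks, so a single scalar pass consumes it entirely.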
+ blockScalar(&d.s, lane.msg) + results[i] = d + }(i) + } + s.wg.Wait() + for i, lane := range lanes { + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], results[i].s[0]) + binary.LittleEndian.PutUint32(dig[4:], results[i].s[1]) + binary.LittleEndian.PutUint32(dig[8:], results[i].s[2]) + binary.LittleEndian.PutUint32(dig[12:], results[i].s[3]) + s.digests[lane.uid] = dig + + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[i] = blockInput{} + } + } + return + } + + inputs := [16][]byte{} + for i := range lanes { + inputs[i] = lanes[i].msg + } + + // Collect active digests... + state := s.getDigests(lanes) + // Process all lanes... + s.blockMd5_x16(&state, inputs, len(lanes) <= 8) + + for i, lane := range lanes { + uid := lane.uid + dig := [Size]byte{} + binary.LittleEndian.PutUint32(dig[0:], state.v0[i]) + binary.LittleEndian.PutUint32(dig[4:], state.v1[i]) + binary.LittleEndian.PutUint32(dig[8:], state.v2[i]) + binary.LittleEndian.PutUint32(dig[12:], state.v3[i]) + + s.digests[uid] = dig + if lane.msg != nil { + s.buffers <- lane.msg + } + lanes[i] = blockInput{} + } +} + +func (s *md5Server) getDigests(lanes []blockInput) (d digest16) { + for i, lane := range lanes { + a, ok := s.digests[lane.uid] + if ok { + d.v0[i] = binary.LittleEndian.Uint32(a[0:4]) + d.v1[i] = binary.LittleEndian.Uint32(a[4:8]) + d.v2[i] = binary.LittleEndian.Uint32(a[8:12]) + d.v3[i] = binary.LittleEndian.Uint32(a[12:16]) + } else { + d.v0[i] = init0 + d.v1[i] = init1 + d.v2[i] = init2 + d.v3[i] = init3 + } + } + return +} diff --git a/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/vendor/github.com/minio/md5-simd/md5-server_fallback.go new file mode 100644 index 000000000..7814dada3 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-server_fallback.go @@ -0,0 +1,12 @@ +//+build !amd64 appengine !gc noasm + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +// NewServer - Create new object for parallel processing handling +func NewServer() *fallbackServer { + return &fallbackServer{} +} diff --git a/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/vendor/github.com/minio/md5-simd/md5-util_amd64.go new file mode 100644 index 000000000..73981b0eb --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5-util_amd64.go @@ -0,0 +1,85 @@ +//+build !noasm,!appengine,gc + +// Copyright (c) 2020 MinIO Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package md5simd + +// Helper struct for sorting blocks based on length +type lane struct { + len uint + pos uint +} + +type digest struct { + s [4]uint32 +} + +// Helper struct for generating number of rounds in combination with mask for valid lanes +type maskRounds struct { + mask uint64 + rounds uint64 +} + +func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) { + // Sort on blocks length small to large + var sorted [8]lane + for c, inpt := range input[:] { + sorted[c] = lane{uint(len(inpt)), uint(c)} + for i := c - 1; i >= 0; i-- { + // swap so largest is at the end... 
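+ // (a simple insertion sort; the arrays are tiny). Further below, the mask/round + // pairs are derived: each entry says which lanes remain active for how many + // 64-byte blocks. For example, lane lengths (64, 128, 128, 0, ...) produce + // {mask: 0b00000111, rounds: 1} then {mask: 0b00000110, rounds: 1}: all three + // lanes process one block together, then lane 0 (the shortest) drops out.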
+ if sorted[i].len > sorted[i+1].len { + sorted[i], sorted[i+1] = sorted[i+1], sorted[i] + continue + } + break + } + } + + // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks + m, round := uint64(0xff), uint64(0) + + for _, s := range sorted[:] { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} + rounds++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + return +} + +func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) { + // Sort on blocks length small to large + var sorted [16]lane + for c, inpt := range input[:] { + sorted[c] = lane{uint(len(inpt)), uint(c)} + for i := c - 1; i >= 0; i-- { + // swap so largest is at the end... + if sorted[i].len > sorted[i+1].len { + sorted[i], sorted[i+1] = sorted[i+1], sorted[i] + continue + } + break + } + } + + // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks + m, round := uint64(0xffff), uint64(0) + + for _, s := range sorted[:] { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} + rounds++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + return +} diff --git a/vendor/github.com/minio/md5-simd/md5.go b/vendor/github.com/minio/md5-simd/md5.go new file mode 100644 index 000000000..11b0cb962 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5.go @@ -0,0 +1,63 @@ +package md5simd + +import ( + "crypto/md5" + "hash" + "sync" +) + +const ( + // The blocksize of MD5 in bytes. + BlockSize = 64 + + // The size of an MD5 checksum in bytes. + Size = 16 + + // internalBlockSize is the internal block size. + internalBlockSize = 32 << 10 +) + +type Server interface { + NewHash() Hasher + Close() +} + +type Hasher interface { + hash.Hash + Close() +} + +// StdlibHasher returns a Hasher that uses the stdlib for hashing. +// Used hashers are stored in a pool for fast reuse. +func StdlibHasher() Hasher { + return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} +} + +// md5Wrapper is a wrapper around the builtin hasher. +type md5Wrapper struct { + hash.Hash +} + +var md5Pool = sync.Pool{New: func() interface{} { + return md5.New() +}} + +// fallbackServer - Fallback when no assembly is available. +type fallbackServer struct { +} + +// NewHash -- return regular Golang md5 hashing from crypto +func (s *fallbackServer) NewHash() Hasher { + return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} +} + +func (s *fallbackServer) Close() { +} + +func (m *md5Wrapper) Close() { + if m.Hash != nil { + m.Reset() + md5Pool.Put(m.Hash) + m.Hash = nil + } +} diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.go b/vendor/github.com/minio/md5-simd/md5block_amd64.go new file mode 100644 index 000000000..4c2793662 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5block_amd64.go @@ -0,0 +1,11 @@ +// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT. 
+ +// +build !appengine +// +build !noasm +// +build gc + +package md5simd + +// Encode p to digest +//go:noescape +func blockScalar(dig *[4]uint32, p []byte) diff --git a/vendor/github.com/minio/md5-simd/md5block_amd64.s b/vendor/github.com/minio/md5-simd/md5block_amd64.s new file mode 100644 index 000000000..fbc4a21f2 --- /dev/null +++ b/vendor/github.com/minio/md5-simd/md5block_amd64.s @@ -0,0 +1,714 @@ +// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT. + +// +build !appengine +// +build !noasm +// +build gc + +// func blockScalar(dig *[4]uint32, p []byte) +TEXT ·blockScalar(SB), $0-32 + MOVQ p_len+16(FP), AX + MOVQ dig+0(FP), CX + MOVQ p_base+8(FP), DX + SHRQ $0x06, AX + SHLQ $0x06, AX + LEAQ (DX)(AX*1), AX + CMPQ DX, AX + JEQ end + MOVL (CX), BX + MOVL 4(CX), BP + MOVL 8(CX), SI + MOVL 12(CX), CX + MOVL $0xffffffff, DI + +loop: + MOVL (DX), R8 + MOVL CX, R9 + MOVL BX, R10 + MOVL BP, R11 + MOVL SI, R12 + MOVL CX, R13 + + // ROUND1 + XORL SI, R9 + ADDL $0xd76aa478, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 4(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0xe8c7b756, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 8(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0x242070db, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 12(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0xc1bdceee, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 16(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0xf57c0faf, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 20(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0x4787c62a, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 24(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xa8304613, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 28(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0xfd469501, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 32(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0x698098d8, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 36(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0x8b44f7af, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 40(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xffff5bb1, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 44(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0x895cd7be, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 48(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + XORL SI, R9 + ADDL $0x6b901122, BX + ADDL R8, BX + ANDL BP, R9 + XORL CX, R9 + MOVL 52(DX), R8 + ADDL R9, BX + ROLL $0x07, BX + MOVL SI, R9 + ADDL BP, BX + XORL BP, R9 + ADDL $0xfd987193, CX + ADDL R8, CX + ANDL BX, R9 + XORL SI, R9 + MOVL 56(DX), R8 + ADDL R9, CX + ROLL $0x0c, CX + MOVL BP, R9 + ADDL BX, CX + XORL BX, R9 + ADDL $0xa679438e, SI + ADDL R8, SI + ANDL CX, R9 + XORL BP, R9 + MOVL 60(DX), R8 + ADDL R9, SI + ROLL $0x11, SI + MOVL BX, R9 + ADDL CX, SI + XORL CX, R9 + ADDL $0x49b40821, BP + ADDL R8, BP + ANDL SI, R9 + XORL BX, R9 + MOVL 4(DX), R8 + ADDL R9, BP + ROLL $0x16, BP + MOVL CX, R9 + ADDL SI, BP + + // ROUND2 + MOVL CX, R9 + MOVL CX, 
R14 + XORL DI, R9 + ADDL $0xf61e2562, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 24(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xc040b340, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 44(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0x265e5a51, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL (DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0xe9b6c7aa, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 20(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0xd62f105d, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 40(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0x02441453, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 60(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0xd8a1e681, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 16(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0xe7d3fbc8, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 36(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0x21e1cde6, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 56(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xc33707d6, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 12(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0xf4d50d87, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 32(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0x455a14ed, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 52(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + XORL DI, R9 + ADDL $0xa9e3e905, BX + ADDL R8, BX + ANDL BP, R14 + ANDL SI, R9 + MOVL 8(DX), R8 + ORL R9, R14 + MOVL SI, R9 + ADDL R14, BX + MOVL SI, R14 + ROLL $0x05, BX + ADDL BP, BX + XORL DI, R9 + ADDL $0xfcefa3f8, CX + ADDL R8, CX + ANDL BX, R14 + ANDL BP, R9 + MOVL 28(DX), R8 + ORL R9, R14 + MOVL BP, R9 + ADDL R14, CX + MOVL BP, R14 + ROLL $0x09, CX + ADDL BX, CX + XORL DI, R9 + ADDL $0x676f02d9, SI + ADDL R8, SI + ANDL CX, R14 + ANDL BX, R9 + MOVL 48(DX), R8 + ORL R9, R14 + MOVL BX, R9 + ADDL R14, SI + MOVL BX, R14 + ROLL $0x0e, SI + ADDL CX, SI + XORL DI, R9 + ADDL $0x8d2a4c8a, BP + ADDL R8, BP + ANDL SI, R14 + ANDL CX, R9 + MOVL 20(DX), R8 + ORL R9, R14 + MOVL CX, R9 + ADDL R14, BP + MOVL CX, R14 + ROLL $0x14, BP + ADDL SI, BP + + // ROUND3 + MOVL SI, R9 + ADDL $0xfffa3942, BX + ADDL R8, BX + MOVL 32(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0x8771f681, CX + ADDL R8, CX + MOVL 44(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0x6d9d6122, SI + ADDL R8, SI + MOVL 56(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xfde5380c, BP + ADDL R8, BP + MOVL 4(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL 
$0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0xa4beea44, BX + ADDL R8, BX + MOVL 16(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0x4bdecfa9, CX + ADDL R8, CX + MOVL 28(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0xf6bb4b60, SI + ADDL R8, SI + MOVL 40(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xbebfbc70, BP + ADDL R8, BP + MOVL 52(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0x289b7ec6, BX + ADDL R8, BX + MOVL (DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0xeaa127fa, CX + ADDL R8, CX + MOVL 12(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0xd4ef3085, SI + ADDL R8, SI + MOVL 24(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0x04881d05, BP + ADDL R8, BP + MOVL 36(DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + ADDL $0xd9d4d039, BX + ADDL R8, BX + MOVL 48(DX), R8 + XORL CX, R9 + XORL BP, R9 + ADDL R9, BX + ROLL $0x04, BX + MOVL BP, R9 + ADDL BP, BX + ADDL $0xe6db99e5, CX + ADDL R8, CX + MOVL 60(DX), R8 + XORL SI, R9 + XORL BX, R9 + ADDL R9, CX + ROLL $0x0b, CX + MOVL BX, R9 + ADDL BX, CX + ADDL $0x1fa27cf8, SI + ADDL R8, SI + MOVL 8(DX), R8 + XORL BP, R9 + XORL CX, R9 + ADDL R9, SI + ROLL $0x10, SI + MOVL CX, R9 + ADDL CX, SI + ADDL $0xc4ac5665, BP + ADDL R8, BP + MOVL (DX), R8 + XORL BX, R9 + XORL SI, R9 + ADDL R9, BP + ROLL $0x17, BP + MOVL SI, R9 + ADDL SI, BP + + // ROUND4 + MOVL DI, R9 + XORL CX, R9 + ADDL $0xf4292244, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 28(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0x432aff97, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 56(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xab9423a7, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 20(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0xfc93a039, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 48(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0x655b59c3, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 12(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0x8f0ccc92, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 40(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xffeff47d, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 4(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0x85845dd1, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + MOVL 32(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0x6fa87e4f, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 60(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0xfe2ce6e0, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 24(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0xa3014314, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 52(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0x4e0811a1, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL 
R9, BP + MOVL 16(DX), R8 + MOVL DI, R9 + ROLL $0x15, BP + XORL CX, R9 + ADDL SI, BP + ADDL $0xf7537e82, BX + ADDL R8, BX + ORL BP, R9 + XORL SI, R9 + ADDL R9, BX + MOVL 44(DX), R8 + MOVL DI, R9 + ROLL $0x06, BX + XORL SI, R9 + ADDL BP, BX + ADDL $0xbd3af235, CX + ADDL R8, CX + ORL BX, R9 + XORL BP, R9 + ADDL R9, CX + MOVL 8(DX), R8 + MOVL DI, R9 + ROLL $0x0a, CX + XORL BP, R9 + ADDL BX, CX + ADDL $0x2ad7d2bb, SI + ADDL R8, SI + ORL CX, R9 + XORL BX, R9 + ADDL R9, SI + MOVL 36(DX), R8 + MOVL DI, R9 + ROLL $0x0f, SI + XORL BX, R9 + ADDL CX, SI + ADDL $0xeb86d391, BP + ADDL R8, BP + ORL SI, R9 + XORL CX, R9 + ADDL R9, BP + ROLL $0x15, BP + ADDL SI, BP + ADDL R10, BX + ADDL R11, BP + ADDL R12, SI + ADDL R13, CX + + // Prepare next loop + ADDQ $0x40, DX + CMPQ DX, AX + JB loop + + // Write output + MOVQ dig+0(FP), AX + MOVL BX, (AX) + MOVL BP, 4(AX) + MOVL SI, 8(AX) + MOVL CX, 12(AX) + +end: + RET diff --git a/vendor/github.com/minio/minio-go/v7/.gitignore b/vendor/github.com/minio/minio-go/v7/.gitignore new file mode 100644 index 000000000..8ae0384eb --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/.gitignore @@ -0,0 +1,6 @@ +*~ +*.test +validator +golangci-lint +functional_tests +.idea \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/.golangci.yml b/vendor/github.com/minio/minio-go/v7/.golangci.yml new file mode 100644 index 000000000..875b949c6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + misspell: + locale: US + +linters: + disable-all: true + enable: + - typecheck + - goimports + - misspell + - revive + - govet + - ineffassign + - gosimple + - unused + - gocritic + +issues: + exclude-use-default: false + exclude: + # todo fix these when we get enough time. + - "singleCaseSwitch: should rewrite switch statement to if statement" + - "unlambda: replace" + - "captLocal:" + - "ifElseChain:" + - "elseif:" + - "should have a package comment" diff --git a/vendor/github.com/minio/minio-go/v7/CNAME b/vendor/github.com/minio/minio-go/v7/CNAME new file mode 100644 index 000000000..d365a7bb2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CNAME @@ -0,0 +1 @@ +minio-go.min.io \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md new file mode 100644 index 000000000..24522ef75 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md @@ -0,0 +1,22 @@ +### Developer Guidelines + +``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: + +* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. + - Fork it + - Create your feature branch (git checkout -b my-new-feature) + - Commit your changes (git commit -am 'Add some feature') + - Push to the branch (git push origin my-new-feature) + - Create new Pull Request + +* When you're ready to create a pull request, be sure to: + - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. + - Run `go fmt` + - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. + - Make sure `go test -race ./...` and `go build` completes. + NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables + ``ACCESS_KEY`` and ``SECRET_KEY``. 
To run shorter version of the tests please use ``go test -short -race ./...`` + +* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project + - `minio-go` project is strictly conformant with Golang style + - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/v7/CREDITS b/vendor/github.com/minio/minio-go/v7/CREDITS new file mode 100644 index 000000000..154c9fd58 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/CREDITS @@ -0,0 +1,1101 @@ +Go (the standard library) +https://golang.org/ +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/davecgh/go-spew +https://github.com/davecgh/go-spew +---------------------------------------------------------------- +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +================================================================ + +github.com/dustin/go-humanize +https://github.com/dustin/go-humanize +---------------------------------------------------------------- +Copyright (c) 2005-2008 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +================================================================ + +github.com/goccy/go-json +https://github.com/goccy/go-json +---------------------------------------------------------------- +MIT License + +Copyright (c) 2020 Masaaki Goshima + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/google/uuid +https://github.com/google/uuid +---------------------------------------------------------------- +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/klauspost/compress +https://github.com/klauspost/compress +---------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +================================================================ + +github.com/klauspost/cpuid/v2 +https://github.com/klauspost/cpuid/v2 +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +================================================================ + +github.com/minio/md5-simd +https://github.com/minio/md5-simd +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/pmezard/go-difflib +https://github.com/pmezard/go-difflib +---------------------------------------------------------------- +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/rs/xid +https://github.com/rs/xid +---------------------------------------------------------------- +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================ + +github.com/stretchr/testify +https://github.com/stretchr/testify +---------------------------------------------------------------- +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +================================================================ + +golang.org/x/crypto +https://golang.org/x/crypto +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/net +https://golang.org/x/net +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + +golang.org/x/sys +https://golang.org/x/sys +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +golang.org/x/text +https://golang.org/x/text +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + +gopkg.in/ini.v1 +https://gopkg.in/ini.v1 +---------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. 
+ +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + diff --git a/vendor/github.com/minio/minio-go/v7/LICENSE b/vendor/github.com/minio/minio-go/v7/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
new file mode 100644
index 000000000..f640dfb9f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please read the [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) guide.
+
+### Making new releases
+Tag and sign your release commit. This step requires access to MinIO's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+      libraryVersion = "4.0.1"
+```
+
+Commit your changes:
+```
+$ git commit -a -m "Update version for next release" --author "MinIO Trusted <trusted@min.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@min.io` account. Release notes require two sections, `highlights` and `changelog`: Highlights is a bulleted list of salient features in this release, and Changelog contains a list of all commits since the last release.
+
+To generate the `changelog`:
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
+```
diff --git a/vendor/github.com/minio/minio-go/v7/Makefile b/vendor/github.com/minio/minio-go/v7/Makefile
new file mode 100644
index 000000000..68444aa68
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/Makefile
@@ -0,0 +1,38 @@
+GOPATH := $(shell go env GOPATH)
+TMPDIR := $(shell mktemp -d)
+
+all: checks
+
+.PHONY: examples docs
+
+checks: lint vet test examples functional-test
+
+lint:
+	@mkdir -p ${GOPATH}/bin
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
+	@echo "Running $@ check"
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
+
+vet:
+	@GO111MODULE=on go vet ./...
+ @echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest + ${GOPATH}/bin/staticcheck -tests=false -checks="all,-ST1000,-ST1003,-ST1016,-ST1020,-ST1021,-ST1022,-ST1023,-ST1005" + +test: + @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... + +examples: + @echo "Building s3 examples" + @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) + @echo "Building minio examples" + @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) + +functional-test: + @GO111MODULE=on go build -race functional_tests.go + @SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 MINT_MODE=full ./functional_tests + +clean: + @echo "Cleaning up all the generated files" + @find . -name '*.test' | xargs rm -fv + @find . -name '*~' | xargs rm -fv diff --git a/vendor/github.com/minio/minio-go/v7/NOTICE b/vendor/github.com/minio/minio-go/v7/NOTICE new file mode 100644 index 000000000..1e8fd3b92 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/NOTICE @@ -0,0 +1,9 @@ +MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc. + +This product includes software developed at MinIO, Inc. +(https://min.io/). + +The MinIO project contains unmodified/modified subcomponents too with +separate copyright notices and license terms. Your use of the source +code for these subcomponents is subject to the terms and conditions +of Apache License Version 2.0 diff --git a/vendor/github.com/minio/minio-go/v7/README.md b/vendor/github.com/minio/minio-go/v7/README.md new file mode 100644 index 000000000..82f70a131 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/README.md @@ -0,0 +1,312 @@ +# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) + +The MinIO Go Client SDK provides straightforward APIs to access any Amazon S3 compatible object storage. + +This Quickstart Guide covers how to install the MinIO client SDK, connect to MinIO, and create a sample file uploader. +For a complete list of APIs and examples, see the [godoc documentation](https://pkg.go.dev/github.com/minio/minio-go/v7) or [Go Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html). + +These examples presume a working [Go development environment](https://golang.org/doc/install) and the [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html). + +## Download from Github + +From your project directory: + +```sh +go get github.com/minio/minio-go/v7 +``` + +## Initialize a MinIO Client Object + +The MinIO client requires the following parameters to connect to an Amazon S3 compatible object storage: + +| Parameter | Description | +| ----------------- | ---------------------------------------------------------- | +| `endpoint` | URL to object storage service. | +| `_minio.Options_` | All the options such as credentials, custom transport etc. 
|
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Example - File Uploader
+
+This sample code connects to an object storage server, creates a bucket, and uploads a file to the bucket.
+It uses the MinIO `play` server, a public MinIO cluster located at [https://play.min.io](https://play.min.io).
+
+The `play` server runs the latest stable version of MinIO and may be used for testing and development.
+The access credentials shown in this example are open to the public and all data uploaded to `play` should be considered public and non-protected.
+
+### FileUploader.go
+
+This example does the following:
+
+- Connects to the MinIO `play` server using the provided credentials.
+- Creates a bucket named `testbucket`.
+- Uploads a file named `testdata` from `/tmp`.
+- Verifies the file was created using `mc ls`.
+
+```go
+// FileUploader.go MinIO example
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	ctx := context.Background()
+	endpoint := "play.min.io"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize minio client object.
+	minioClient, err := minio.New(endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
+		Secure: useSSL,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a new bucket called testbucket.
+	bucketName := "testbucket"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location})
+	if err != nil {
+		// Check to see if we already own this bucket (which happens if you run this twice)
+		exists, errBucketExists := minioClient.BucketExists(ctx, bucketName)
+		if errBucketExists == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	} else {
+		log.Printf("Successfully created %s\n", bucketName)
+	}
+
+	// Upload the test file
+	// Change the value of filePath if the file is in another location
+	objectName := "testdata"
+	filePath := "/tmp/testdata"
+	contentType := "application/octet-stream"
+
+	// Upload the test file with FPutObject
+	info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size)
+}
+```
+
+**1. Create a test file containing data:**
+
+You can do this with `dd` on Linux or macOS systems:
+
+```sh
+dd if=/dev/urandom of=/tmp/testdata bs=2048 count=10
+```
+
+or `fsutil` on Windows:
+
+```sh
+fsutil file createnew "C:\Users\<username>\Desktop\sample.txt" 20480
+```
+
**2. Run FileUploader with the following commands:**
+
+```sh
+go mod init example/FileUploader
+go get github.com/minio/minio-go/v7
+go get github.com/minio/minio-go/v7/pkg/credentials
+go run FileUploader.go
+```
+
+The output resembles the following:
+
+```sh
+2023/11/01 14:27:55 Successfully created testbucket
+2023/11/01 14:27:55 Successfully uploaded testdata of size 20480
+```
+
+**3. Verify the Uploaded File With `mc ls`:**
+
+```sh
+mc ls play/testbucket
+[2023-11-01 14:27:55 UTC]  20KiB STANDARD testdata
+```
+
+## API Reference
+
+The full API Reference is available here.
+
+* [Complete API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
+
+### API Reference : Bucket Operations
+
+* [`MakeBucket`](https://min.io/docs/minio/linux/developers/go/API.html#MakeBucket)
+* [`ListBuckets`](https://min.io/docs/minio/linux/developers/go/API.html#ListBuckets)
+* [`BucketExists`](https://min.io/docs/minio/linux/developers/go/API.html#BucketExists)
+* [`RemoveBucket`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveBucket)
+* [`ListObjects`](https://min.io/docs/minio/linux/developers/go/API.html#ListObjects)
+* [`ListIncompleteUploads`](https://min.io/docs/minio/linux/developers/go/API.html#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+
+* [`SetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketPolicy)
+* [`GetBucketPolicy`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketPolicy)
+
+### API Reference : Bucket notification Operations
+
+* [`SetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#SetBucketNotification)
+* [`GetBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) (MinIO Extension)
+* [`ListenNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenNotification) (MinIO Extension)
+
+### API Reference : File Object Operations
+
+* [`FPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#FPutObject)
+* [`FGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#FGetObject)
+
+### API Reference : Object Operations
+
+* [`GetObject`](https://min.io/docs/minio/linux/developers/go/API.html#GetObject)
+* [`PutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PutObject)
+* [`PutObjectStreaming`](https://min.io/docs/minio/linux/developers/go/API.html#PutObjectStreaming)
+* [`StatObject`](https://min.io/docs/minio/linux/developers/go/API.html#StatObject)
+* [`CopyObject`](https://min.io/docs/minio/linux/developers/go/API.html#CopyObject)
+* [`RemoveObject`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObject)
+* [`RemoveObjects`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://min.io/docs/minio/linux/developers/go/API.html#RemoveIncompleteUpload)
+* [`SelectObjectContent`](https://min.io/docs/minio/linux/developers/go/API.html#SelectObjectContent)
+
+### API Reference : Presigned Operations
+
+* [`PresignedGetObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedGetObject)
+* [`PresignedPutObject`](https://min.io/docs/minio/linux/developers/go/API.html#PresignedPutObject)
+* 
+### API Reference : Client custom settings
+
+* [`SetAppInfo`](https://min.io/docs/minio/linux/developers/go/API.html#SetAppInfo)
+* [`TraceOn`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOn)
+* [`TraceOff`](https://min.io/docs/minio/linux/developers/go/API.html#TraceOff)
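+
+These settings can be combined on one client. The following sketch (with an illustrative application name, reusing the `play` credentials) tags every request via `SetAppInfo` and mirrors the signed HTTP exchange to stderr with `TraceOn`, which helps when debugging signature or policy errors:
+
+```go
+// trace.go - sketch: identify the app and trace HTTP requests
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+func main() {
+	minioClient, err := minio.New("play.min.io", &minio.Options{
+		Creds:  credentials.NewStaticV4("Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ""),
+		Secure: true,
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Append "MyApp/0.1.0" to the User-Agent of every request.
+	minioClient.SetAppInfo("MyApp", "0.1.0")
+
+	// Dump each signed request and response to stderr until TraceOff is called.
+	minioClient.TraceOn(os.Stderr)
+	defer minioClient.TraceOff()
+
+	// ... perform bucket or object operations here ...
+}
+```
+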
+## Full Examples
+
+### Full Examples : Bucket Operations
+
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket lifecycle Operations
+
+* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
+* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
+
+### Full Examples : Bucket encryption Operations
+
+* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go)
+* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go)
+* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go)
+
+### Full Examples : Bucket replication Operations
+
+* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go)
+* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go)
+* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go)
+
+### Full Examples : Bucket notification Operations
+
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension)
+* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension)
+
+### Full Examples : File Object Operations
+
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+
+### Full Examples : Object Operations
+
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+
+* [Godoc Documentation](https://pkg.go.dev/github.com/minio/minio-go/v7)
+* [Complete Documentation](https://min.io/docs/minio/kubernetes/upstream/index.html)
+* [MinIO Go Client SDK API Reference](https://min.io/docs/minio/linux/developers/go/API.html)
+
+## Contribute
+
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+## License
+
+This SDK is distributed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
new file mode 100644
index 000000000..8bf537f73
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
@@ -0,0 +1,136 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2024 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package minio + +import ( + "bytes" + "context" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/cors" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketCors sets the cors configuration for the bucket +func (c *Client) SetBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if corsConfig == nil { + return c.removeBucketCors(ctx, bucketName) + } + + return c.putBucketCors(ctx, bucketName, corsConfig) +} + +func (c *Client) putBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + corsStr, err := corsConfig.ToXML() + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(corsStr), + contentLength: int64(len(corsStr)), + contentMD5Base64: sumMD5Base64([]byte(corsStr)), + } + + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +func (c *Client) removeBucketCors(ctx context.Context, bucketName string) error { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// GetBucketCors returns the current cors +func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + bucketCors, err := c.getBucketCors(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchCORSConfiguration" { + return nil, nil + } + return nil, err + } + return bucketCors, nil +} + +func (c *Client) getBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) { + urlValues := make(url.Values) + urlValues.Set("cors", "") + + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, // TODO: needed? copied over from other example, but not spec'd in API. + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + } + + corsConfig, err := cors.ParseBucketCorsConfig(resp.Body) + if err != nil { + return nil, err + } + + return corsConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go new file mode 100644 index 000000000..24f94e034 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go @@ -0,0 +1,134 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/sse" +) + +// SetBucketEncryption sets the default encryption configuration on an existing bucket. +func (c *Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if config == nil { + return errInvalidArgument("configuration cannot be empty") + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket default encryption configuration. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. +func (c *Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // DELETE default encryption configuration on a bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// GetBucketEncryption gets the default encryption configuration +// on an existing bucket with a context to control cancellations and timeouts. +func (c *Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("encryption", "") + + // Execute GET on bucket to get the default encryption configuration. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + encryptionConfig := &sse.Configuration{} + if err = xmlDecoder(resp.Body, encryptionConfig); err != nil { + return nil, err + } + + return encryptionConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go new file mode 100644 index 000000000..fec5cece5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go @@ -0,0 +1,169 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/lifecycle" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketLifecycle set the lifecycle on an existing bucket. +func (c *Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if config.Empty() { + return c.removeBucketLifecycle(ctx, bucketName) + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(ctx, bucketName, buf) +} + +// Saves a new bucket lifecycle. +func (c *Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Content-length is mandatory for put lifecycle request + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT to upload a new bucket lifecycle. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Remove lifecycle from a bucket. +func (c *Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute DELETE on objectName. 
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// GetBucketLifecycle fetch bucket lifecycle configuration +func (c *Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { + lc, _, err := c.GetBucketLifecycleWithInfo(ctx, bucketName) + return lc, err +} + +// GetBucketLifecycleWithInfo fetch bucket lifecycle configuration along with when it was last updated +func (c *Client) GetBucketLifecycleWithInfo(ctx context.Context, bucketName string) (*lifecycle.Configuration, time.Time, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, time.Time{}, err + } + + bucketLifecycle, updatedAt, err := c.getBucketLifecycle(ctx, bucketName) + if err != nil { + return nil, time.Time{}, err + } + + config := lifecycle.NewConfiguration() + if err = xml.Unmarshal(bucketLifecycle, config); err != nil { + return nil, time.Time{}, err + } + return config, updatedAt, nil +} + +// Request server for current bucket lifecycle. +func (c *Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, time.Time, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + urlValues.Set("withUpdatedAt", "true") + + // Execute GET on bucket to get lifecycle. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, time.Time{}, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, time.Time{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + lcBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, time.Time{}, err + } + + const minIOLifecycleCfgUpdatedAt = "X-Minio-LifecycleConfig-UpdatedAt" + var updatedAt time.Time + if timeStr := resp.Header.Get(minIOLifecycleCfgUpdatedAt); timeStr != "" { + updatedAt, err = time.Parse(iso8601DateFormat, timeStr) + if err != nil { + return nil, time.Time{}, err + } + } + + return lcBytes, updatedAt, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go new file mode 100644 index 000000000..ad8eada4a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go @@ -0,0 +1,260 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "bufio" + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + "time" + + "github.com/goccy/go-json" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. +func (c *Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(&config) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Base64: sumMD5Base64(notifBytes), + contentSHA256Hex: sum256Hex(notifBytes), + } + + // Execute PUT to upload a new bucket notification. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c *Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { + return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) +} + +// GetBucketNotification returns current bucket notification configuration +func (c *Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return notification.Configuration{}, err + } + return c.getBucketNotification(ctx, bucketName) +} + +// Request server for notification rules. +func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { + urlValues := make(url.Values) + urlValues.Set("notification", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return notification.Configuration{}, err + } + return processBucketNotificationResponse(bucketName, resp) +} + +// processes the GetNotification http response from the server. 
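+// A non-200 status is converted into an ErrorResponse; otherwise the XML
+// body is decoded into a notification.Configuration.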
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + return notification.Configuration{}, errResponse + } + var bucketNotification notification.Configuration + err := xmlDecoder(resp.Body, &bucketNotification) + if err != nil { + return notification.Configuration{}, err + } + return bucketNotification, nil +} + +// ListenNotification listen for all events, this is a MinIO specific API +func (c *Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { + return c.ListenBucketNotification(ctx, "", prefix, suffix, events) +} + +// ListenBucketNotification listen for bucket events, this is a MinIO specific API +func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { + notificationInfoCh := make(chan notification.Info, 1) + const notificationCapacity = 4 * 1024 * 1024 + notificationEventBuffer := make([]byte, notificationCapacity) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- notification.Info) { + defer close(notificationInfoCh) + + // Validate the bucket name. + if bucketName != "" { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { + select { + case notificationInfoCh <- notification.Info{ + Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), + }: + case <-ctx.Done(): + } + return + } + + // Continuously run and listen on bucket notification. + // Create a done channel to control 'ListObjects' go routine. + retryDoneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(retryDoneCh) + + // Prepare urlValues to pass into the request on every loop + urlValues := make(url.Values) + urlValues.Set("ping", "10") + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Wait on the jitter retry loop. + for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + select { + case notificationInfoCh <- notification.Info{ + Err: err, + }: + case <-ctx.Done(): + } + return + } + + // Validate http response, upon error return quickly. + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + select { + case notificationInfoCh <- notification.Info{ + Err: errResponse, + }: + case <-ctx.Done(): + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Use a higher buffer to support unexpected + // caching done by proxies + bio.Buffer(notificationEventBuffer, notificationCapacity) + + // Unmarshal each line, returns marshaled values. 
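+			// Each scanned line should hold one JSON-encoded notification.Info;
+			// empty keepalive pings from the server are skipped further below.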
+			for bio.Scan() {
+				var notificationInfo notification.Info
+				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+					// Unexpected error during json unmarshal, send
+					// the error to the caller so it can act on it as needed.
+					select {
+					case notificationInfoCh <- notification.Info{
+						Err: err,
+					}:
+					case <-ctx.Done():
+						return
+					}
+					closeResponse(resp)
+					continue
+				}
+
+				// Empty events pinged from the server
+				if len(notificationInfo.Records) == 0 && notificationInfo.Err == nil {
+					continue
+				}
+
+				// Send notificationInfo
+				select {
+				case notificationInfoCh <- notificationInfo:
+				case <-ctx.Done():
+					closeResponse(resp)
+					return
+				}
+			}
+
+			if err = bio.Err(); err != nil {
+				select {
+				case notificationInfoCh <- notification.Info{
+					Err: err,
+				}:
+				case <-ctx.Done():
+					return
+				}
+			}
+
+			// Close current connection before looping further.
+			closeResponse(resp)
+
+		}
+	}(notificationInfoCh)
+
+	// Returns the notification info channel, for caller to start reading from.
+	return notificationInfoCh
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
new file mode 100644
index 000000000..dbb5259a8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go
@@ -0,0 +1,147 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketPolicy sets the access permissions on an existing bucket.
+func (c *Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// If policy is empty then delete the bucket policy.
+	if policy == "" {
+		return c.removeBucketPolicy(ctx, bucketName)
+	}
+
+	// Save the updated policies.
+	return c.putBucketPolicy(ctx, bucketName, policy)
+}
+
+// Saves a new bucket policy.
+func (c *Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("policy", "")
+
+	reqMetadata := requestMetadata{
+		bucketName:    bucketName,
+		queryValues:   urlValues,
+		contentBody:   strings.NewReader(policy),
+		contentLength: int64(len(policy)),
+	}
+
+	// Execute PUT to upload a new bucket policy.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// Removes all policies on a bucket.
+func (c *Client) removeBucketPolicy(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// GetBucketPolicy returns the current policy +func (c *Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return "", nil + } + return "", err + } + return bucketPolicy, nil +} + +// Request server for current bucket policy. +func (c *Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + policy := string(bucketPolicyBuf) + return policy, err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go new file mode 100644 index 000000000..b12bb13a6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go @@ -0,0 +1,355 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "io" + "net/http" + "net/url" + "time" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/replication" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RemoveBucketReplication removes a replication config on an existing bucket. +func (c *Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { + return c.removeBucketReplication(ctx, bucketName) +} + +// SetBucketReplication sets a replication config on an existing bucket. 
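+// If the given config is empty, any existing replication configuration on the bucket is removed instead.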
+func (c *Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If replication is empty then delete it. + if cfg.Empty() { + return c.removeBucketReplication(ctx, bucketName) + } + // Save the updated replication. + return c.putBucketReplication(ctx, bucketName, cfg) +} + +// Saves a new bucket replication. +func (c *Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + replication, err := xml.Marshal(cfg) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(replication), + contentLength: int64(len(replication)), + contentMD5Base64: sumMD5Base64(replication), + } + + // Execute PUT to upload a new bucket replication config. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + + return nil +} + +// Remove replication from a bucket. +func (c *Client) removeBucketReplication(ctx context.Context, bucketName string) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// GetBucketReplication fetches bucket replication configuration.If config is not +// found, returns empty config with nil error. +func (c *Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return cfg, err + } + bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "ReplicationConfigurationNotFoundError" { + return cfg, nil + } + return cfg, err + } + return bucketReplicationCfg, nil +} + +// Request server for current bucket replication config. +func (c *Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication", "") + + // Execute GET on bucket to get replication config. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return cfg, err + } + + if resp.StatusCode != http.StatusOK { + return cfg, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = xmlDecoder(resp.Body, &cfg); err != nil { + return cfg, err + } + + return cfg, nil +} + +// GetBucketReplicationMetrics fetches bucket replication status metrics +func (c *Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return s, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-metrics", "") + + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return s, err + } + + if resp.StatusCode != http.StatusOK { + return s, httpRespToErrorResponse(resp, bucketName, "") + } + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return s, err + } + + if err := json.Unmarshal(respBytes, &s); err != nil { + return s, err + } + return s, nil +} + +// mustGetUUID - get a random UUID. +func mustGetUUID() string { + u, err := uuid.NewRandom() + if err != nil { + return "" + } + return u.String() +} + +// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication +// is enabled in the replication config +func (c *Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (rID string, err error) { + rID = mustGetUUID() + _, err = c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, "", rID) + if err != nil { + return rID, err + } + return rID, nil +} + +// ResetBucketReplicationOnTarget kicks off replication of previously replicated objects if +// ExistingObjectReplication is enabled in the replication config +func (c *Client) ResetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn string) (replication.ResyncTargetsInfo, error) { + return c.resetBucketReplicationOnTarget(ctx, bucketName, olderThan, tgtArn, mustGetUUID()) +} + +// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication +// is enabled in the replication config +func (c *Client) resetBucketReplicationOnTarget(ctx context.Context, bucketName string, olderThan time.Duration, tgtArn, resetID string) (rinfo replication.ResyncTargetsInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-reset", "") + if olderThan > 0 { + urlValues.Set("older-than", olderThan.String()) + } + if tgtArn != "" { + urlValues.Set("arn", tgtArn) + } + urlValues.Set("reset-id", resetID) + // Execute GET on bucket to get replication config. 
+ resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return rinfo, err + } + + if resp.StatusCode != http.StatusOK { + return rinfo, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { + return rinfo, err + } + return rinfo, nil +} + +// GetBucketReplicationResyncStatus gets the status of replication resync +func (c *Client) GetBucketReplicationResyncStatus(ctx context.Context, bucketName, arn string) (rinfo replication.ResyncTargetsInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return rinfo, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-reset-status", "") + if arn != "" { + urlValues.Set("arn", arn) + } + // Execute GET on bucket to get replication config. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return rinfo, err + } + + if resp.StatusCode != http.StatusOK { + return rinfo, httpRespToErrorResponse(resp, bucketName, "") + } + + if err = json.NewDecoder(resp.Body).Decode(&rinfo); err != nil { + return rinfo, err + } + return rinfo, nil +} + +// GetBucketReplicationMetricsV2 fetches bucket replication status metrics +func (c *Client) GetBucketReplicationMetricsV2(ctx context.Context, bucketName string) (s replication.MetricsV2, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return s, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-metrics", "2") + + // Execute GET on bucket to get replication metrics. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return s, err + } + + if resp.StatusCode != http.StatusOK { + return s, httpRespToErrorResponse(resp, bucketName, "") + } + respBytes, err := io.ReadAll(resp.Body) + if err != nil { + return s, err + } + + if err := json.Unmarshal(respBytes, &s); err != nil { + return s, err + } + return s, nil +} + +// CheckBucketReplication validates if replication is set up properly for a bucket +func (c *Client) CheckBucketReplication(ctx context.Context, bucketName string) (err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("replication-check", "") + + // Execute GET on bucket to get replication config. 
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go new file mode 100644 index 000000000..86d74298a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go @@ -0,0 +1,134 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "io" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" +) + +// GetBucketTagging fetch tagging configuration for a bucket with a +// context to control cancellations and timeouts. +func (c *Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute GET on bucket to get tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + defer io.Copy(io.Discard, resp.Body) + return tags.ParseBucketXML(resp.Body) +} + +// SetBucketTagging sets tagging configuration for a bucket +// with a context to control cancellations and timeouts. +func (c *Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if tags == nil { + return errors.New("nil tags passed") + } + + buf, err := xml.Marshal(tags) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Content-length is mandatory to set a default encryption configuration + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + } + + // Execute PUT on bucket to put tagging configuration. 
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} + +// RemoveBucketTagging removes tagging configuration for a +// bucket with a context to control cancellations and timeouts. +func (c *Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + // Execute DELETE on bucket to remove tagging configuration. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go new file mode 100644 index 000000000..8c84e4f27 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go @@ -0,0 +1,146 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// SetBucketVersioning sets a bucket versioning configuration +func (c *Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + buf, err := xml.Marshal(config) + if err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(buf), + contentLength: int64(len(buf)), + contentMD5Base64: sumMD5Base64(buf), + contentSHA256Hex: sum256Hex(buf), + } + + // Execute PUT to set a bucket versioning. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// EnableVersioning - enable object versioning in given bucket. 
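+// It is a convenience wrapper over SetBucketVersioning with Status set to "Enabled".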
+func (c *Client) EnableVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) +} + +// SuspendVersioning - suspend object versioning in given bucket. +func (c *Client) SuspendVersioning(ctx context.Context, bucketName string) error { + return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) +} + +// ExcludedPrefix - holds individual prefixes excluded from being versioned. +type ExcludedPrefix struct { + Prefix string +} + +// BucketVersioningConfiguration is the versioning configuration structure +type BucketVersioningConfiguration struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` + MFADelete string `xml:"MfaDelete,omitempty"` + // MinIO extension - allows selective, prefix-level versioning exclusion. + // Requires versioning to be enabled + ExcludedPrefixes []ExcludedPrefix `xml:",omitempty"` + ExcludeFolders bool `xml:",omitempty"` +} + +// Various supported states +const ( + Enabled = "Enabled" + // Disabled State = "Disabled" only used by MFA Delete not supported yet. + Suspended = "Suspended" +) + +// Enabled returns true if bucket versioning is enabled +func (b BucketVersioningConfiguration) Enabled() bool { + return b.Status == Enabled +} + +// Suspended returns true if bucket versioning is suspended +func (b BucketVersioningConfiguration) Suspended() bool { + return b.Status == Suspended +} + +// GetBucketVersioning gets the versioning configuration on +// an existing bucket with a context to control cancellations and timeouts. +func (c *Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return BucketVersioningConfiguration{}, err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("versioning", "") + + // Execute GET on bucket to get the versioning configuration. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return BucketVersioningConfiguration{}, err + } + + if resp.StatusCode != http.StatusOK { + return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") + } + + versioningConfig := BucketVersioningConfiguration{} + if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { + return versioningConfig, err + } + + return versioningConfig, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/vendor/github.com/minio/minio-go/v7/api-compose-object.go new file mode 100644 index 000000000..bb595626e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-compose-object.go @@ -0,0 +1,594 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
+type CopyDestOptions struct {
+	Bucket string // points to destination bucket
+	Object string // points to destination object
+
+	// `Encryption` is the key info for server-side-encryption with customer
+	// provided key. If it is nil, no encryption is performed.
+	Encryption encrypt.ServerSide
+
+	// `userMeta` is the user-metadata key-value pairs to be set on the
+	// destination. The keys are automatically prefixed with `x-amz-meta-`
+	// if needed. If nil is passed, and if only a single source (of any
+	// size) is provided in the ComposeObject call, then metadata from the
+	// source is copied to the destination.
+	// if no user-metadata is provided, it is copied from source
+	// (when there is only one source object in the compose
+	// request)
+	UserMetadata map[string]string
+	// UserMetadata is applied to the destination only if ReplaceMetadata is true;
+	// otherwise UserMetadata is ignored and src.UserMetadata is preserved.
+	// NOTE: if you set this value to true and no metadata is present
+	// in UserMetadata, your destination object will not have any metadata
+	// set.
+	ReplaceMetadata bool
+
+	// `userTags` is the user defined object tags to be set on destination.
+	// This will be set only if the `replaceTags` field is set to true.
+	// Otherwise this field is ignored
+	UserTags    map[string]string
+	ReplaceTags bool
+
+	// Specifies whether you want to apply a Legal Hold to the copied object.
+	LegalHold LegalHoldStatus
+
+	// Object Retention related fields
+	Mode            RetentionMode
+	RetainUntilDate time.Time
+
+	Size int64 // Needs to be specified if progress bar is specified.
+	// Progress of the entire copy operation will be sent here.
+	Progress io.Reader
+}
+
+// Process custom-metadata to remove a `x-amz-meta-` prefix if
+// present and validate that keys are distinct (after this
+// prefix removal).
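+// When two keys collide after prefix removal, the first value seen wins.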
+func filterCustomMeta(userMeta map[string]string) map[string]string { + m := make(map[string]string) + for k, v := range userMeta { + if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { + k = k[len("x-amz-meta-"):] + } + if _, ok := m[k]; ok { + continue + } + m[k] = v + } + return m +} + +// Marshal converts all the CopyDestOptions into their +// equivalent HTTP header representation +func (opts CopyDestOptions) Marshal(header http.Header) { + const replaceDirective = "REPLACE" + if opts.ReplaceTags { + header.Set(amzTaggingHeaderDirective, replaceDirective) + if tags := s3utils.TagEncode(opts.UserTags); tags != "" { + header.Set(amzTaggingHeader, tags) + } + } + + if opts.LegalHold != LegalHoldStatus("") { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } + + if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() { + header.Set(amzLockMode, opts.Mode.String()) + header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339)) + } + + if opts.Encryption != nil { + opts.Encryption.Marshal(header) + } + + if opts.ReplaceMetadata { + header.Set("x-amz-metadata-directive", replaceDirective) + for k, v := range filterCustomMeta(opts.UserMetadata) { + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { + header.Set(k, v) + } else { + header.Set("x-amz-meta-"+k, v) + } + } + } +} + +// toDestinationInfo returns a validated copyOptions object. +func (opts CopyDestOptions) validate() (err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { + return err + } + if err = s3utils.CheckValidObjectName(opts.Object); err != nil { + return err + } + if opts.Progress != nil && opts.Size < 0 { + return errInvalidArgument("For progress bar effective size needs to be specified") + } + return nil +} + +// CopySrcOptions represents a source object to be copied, using +// server-side copying APIs. +type CopySrcOptions struct { + Bucket, Object string + VersionID string + MatchETag string + NoMatchETag string + MatchModifiedSince time.Time + MatchUnmodifiedSince time.Time + MatchRange bool + Start, End int64 + Encryption encrypt.ServerSide +} + +// Marshal converts all the CopySrcOptions into their +// equivalent HTTP header representation +func (opts CopySrcOptions) Marshal(header http.Header) { + // Set the source header + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) + if opts.VersionID != "" { + header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) + } + + if opts.MatchETag != "" { + header.Set("x-amz-copy-source-if-match", opts.MatchETag) + } + if opts.NoMatchETag != "" { + header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) + } + + if !opts.MatchModifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) + } + if !opts.MatchUnmodifiedSince.IsZero() { + header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) + } + + if opts.Encryption != nil { + encrypt.SSECopy(opts.Encryption).Marshal(header) + } +} + +func (opts CopySrcOptions) validate() (err error) { + // Input validation. 
+ if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
+ return err
+ }
+ if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
+ return err
+ }
+ if opts.Start > opts.End || opts.Start < 0 {
+ return errInvalidArgument("start must be non-negative, and start must be at most end.")
+ }
+ return nil
+}
+
+// Low level implementation of CopyObject API, supports only up to 5GiB worth of copy.
+func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+ metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
+) (ObjectInfo, error) {
+ // Build headers.
+ headers := make(http.Header)
+
+ // Set all the metadata headers.
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+ if !dstOpts.Internal.ReplicationStatus.Empty() {
+ headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
+ }
+ if !dstOpts.Internal.SourceMTime.IsZero() {
+ headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
+ }
+ if dstOpts.Internal.SourceETag != "" {
+ headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
+ }
+ if dstOpts.Internal.ReplicationRequest {
+ headers.Set(minIOBucketReplicationRequest, "true")
+ }
+ if dstOpts.Internal.ReplicationValidityCheck {
+ headers.Set(minIOBucketReplicationCheck, "true")
+ }
+ if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
+ }
+ if !dstOpts.Internal.RetentionTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
+ }
+ if !dstOpts.Internal.TaggingTimestamp.IsZero() {
+ headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
+ }
+
+ if len(dstOpts.UserTags) != 0 {
+ headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ }
+ if dstOpts.Internal.SourceVersionID != "" {
+ if dstOpts.Internal.SourceVersionID != nullVersionID {
+ if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
+ return ObjectInfo{}, errInvalidArgument(err.Error())
+ }
+ }
+ urlValues := make(url.Values)
+ urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
+ reqMetadata.queryValues = urlValues
+ }
+
+ // Set the source header
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+ if srcOpts.VersionID != "" {
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
+ }
+ // Send copy-object request
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) + } + + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return ObjectInfo{}, err + } + + objInfo := ObjectInfo{ + Key: destObject, + ETag: strings.Trim(cpObjRes.ETag, "\""), + LastModified: cpObjRes.LastModified, + } + return objInfo, nil +} + +func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, + partID int, startOffset, length int64, metadata map[string]string, +) (p CompletePart, err error) { + headers := make(http.Header) + + // Set source + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + if startOffset < 0 { + return p, errInvalidArgument("startOffset must be non-negative") + } + + if length >= 0 { + headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + } + + for k, v := range metadata { + headers.Set(k, v) + } + + queryValues := make(url.Values) + queryValues.Set("partNumber", strconv.Itoa(partID)) + queryValues.Set("uploadId", uploadID) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + queryValues: queryValues, + }) + defer closeResponse(resp) + if err != nil { + return + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, destBucket, destObject) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partID, cpObjRes.ETag + return p, nil +} + +// uploadPartCopy - helper function to create a part in a multipart +// upload via an upload-part-copy request +// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html +func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, + headers http.Header, +) (p CompletePart, err error) { + // Build query parameters + urlValues := make(url.Values) + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("uploadId", uploadID) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: bucket, + objectName: object, + customHeader: headers, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, bucket, object) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partNumber, cpObjRes.ETag + return p, nil +} + +// ComposeObject - creates an object using server-side copying +// of existing objects. It takes a list of source objects (with optional offsets) +// and concatenates them into a new object using only server-side copying +// operations. Optionally takes progress reader hook for applications to +// look at current progress. 
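+//
+// A minimal usage sketch (bucket and object names below are hypothetical):
+//
+//	dst := CopyDestOptions{Bucket: "dst-bucket", Object: "joined"}
+//	srcA := CopySrcOptions{Bucket: "src-bucket", Object: "part-a"}
+//	srcB := CopySrcOptions{Bucket: "src-bucket", Object: "part-b"}
+//	info, err := c.ComposeObject(context.Background(), dst, srcA, srcB)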
+func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
+ if len(srcs) < 1 || len(srcs) > maxPartsCount {
+ return UploadInfo{}, errInvalidArgument("There must be at least one and up to 10000 source objects.")
+ }
+
+ for _, src := range srcs {
+ if err := src.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+ }
+
+ if err := dst.validate(); err != nil {
+ return UploadInfo{}, err
+ }
+
+ srcObjectInfos := make([]ObjectInfo, len(srcs))
+ srcObjectSizes := make([]int64, len(srcs))
+ var totalSize, totalParts int64
+ var err error
+ for i, src := range srcs {
+ opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
+ srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ srcCopySize := srcObjectInfos[i].Size
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if src.MatchRange {
+ // Since range is specified,
+ // 0 <= src.start <= src.end
+ // so only invalid case to check is:
+ if src.End >= srcCopySize || src.Start < 0 {
+ return UploadInfo{}, errInvalidArgument(
+ fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
+ i, src.Start, src.End, srcCopySize))
+ }
+ srcCopySize = src.End - src.Start + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if srcCopySize < absMinPartSize && i < len(srcs)-1 {
+ return UploadInfo{}, errInvalidArgument(
+ fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
+ }
+
+ // Is data to copy too large?
+ totalSize += srcCopySize
+ if totalSize > maxMultipartPutObjectSize {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ }
+
+ // record source size
+ srcObjectSizes[i] = srcCopySize
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(srcCopySize)
+ // Do we need more parts than we are allowed?
+ if totalParts > maxPartsCount {
+ return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
+ "Your proposed compose object requires more than %d parts", maxPartsCount))
+ }
+ }
+
+ // Single source object case (i.e. when only one source is
+ // involved, it is being copied wholly and at most 5GiB in
+ // size; empty files are also supported).
+ if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
+ return c.CopyObject(ctx, dst, srcs[0])
+ }
+
+ // Now, handle multipart-copy cases.
+
+ // 1. Ensure that the object has not been changed while
+ // we are copying data.
+ for i := range srcs {
+ srcs[i].MatchETag = srcObjectInfos[i].ETag
+ }
+
+ // 2. Initiate a new multipart upload.
+
+ // Set user-metadata on the destination object. If no
+ // user-metadata is specified, and there is only one source,
+ // then metadata from the source is copied.
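+ // ReplaceMetadata and ReplaceTags choose between the values supplied
+ // on dst and those carried by the first source object.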
+ var userMeta map[string]string
+ if dst.ReplaceMetadata {
+ userMeta = dst.UserMetadata
+ } else {
+ userMeta = srcObjectInfos[0].UserMetadata
+ }
+
+ var userTags map[string]string
+ if dst.ReplaceTags {
+ userTags = dst.UserTags
+ } else {
+ userTags = srcObjectInfos[0].UserTags
+ }
+
+ uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
+ ServerSideEncryption: dst.Encryption,
+ UserMetadata: userMeta,
+ UserTags: userTags,
+ Mode: dst.Mode,
+ RetainUntilDate: dst.RetainUntilDate,
+ LegalHold: dst.LegalHold,
+ })
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // 3. Perform copy part uploads
+ objParts := []CompletePart{}
+ partIndex := 1
+ for i, src := range srcs {
+ h := make(http.Header)
+ src.Marshal(h)
+ if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
+ dst.Encryption.Marshal(h)
+ }
+
+ // calculate start/end indices of parts after
+ // splitting.
+ startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
+ for j, start := range startIdx {
+ end := endIdx[j]
+
+ // Add (or reset) source range header for
+ // upload part copy request.
+ h.Set("x-amz-copy-source-range",
+ fmt.Sprintf("bytes=%d-%d", start, end))
+
+ // make upload-part-copy request
+ complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
+ dst.Object, uploadID, partIndex, h)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ if dst.Progress != nil {
+ io.CopyN(io.Discard, dst.Progress, end-start+1)
+ }
+ objParts = append(objParts, complPart)
+ partIndex++
+ }
+ }
+
+ // 4. Make final complete-multipart request.
+ uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
+ completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalSize
+ return uploadInfo, nil
+}
+
+// partsRequired returns the number of parts needed to copy `size` bytes,
+// using a part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)).
+func partsRequired(size int64) int64 {
+ maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
+ r := size / int64(maxPartSize)
+ if size%int64(maxPartSize) > 0 {
+ r++
+ }
+ return r
+}
+
+// calculateEvenSplits - computes splits for a source and returns
+// start and end index slices. Splits happen evenly to be sure that no
+// part is less than 5MiB, as that could fail the multipart request if
+// it is not the last part.
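+//
+// For illustration, if a 10-byte source had to be split into 3 parts,
+// the even split yields one 4-byte part and two 3-byte parts, i.e. the
+// index ranges [0,3], [4,6] and [7,9].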
+func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { + if size == 0 { + return + } + + reqParts := partsRequired(size) + startIndex = make([]int64, reqParts) + endIndex = make([]int64, reqParts) + // Compute number of required parts `k`, as: + // + // k = ceiling(size / copyPartSize) + // + // Now, distribute the `size` bytes in the source into + // k parts as evenly as possible: + // + // r parts sized (q+1) bytes, and + // (k - r) parts sized q bytes, where + // + // size = q * k + r (by simple division of size by k, + // so that 0 <= r < k) + // + start := src.Start + if start == -1 { + start = 0 + } + quot, rem := size/reqParts, size%reqParts + nextStart := start + for j := int64(0); j < reqParts; j++ { + curPartSize := quot + if j < rem { + curPartSize++ + } + + cStart := nextStart + cEnd := cStart + curPartSize - 1 + nextStart = cEnd + 1 + + startIndex[j], endIndex[j] = cStart, cEnd + } + return +} diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go new file mode 100644 index 000000000..0c95d91ec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -0,0 +1,76 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017, 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "net/http" +) + +// CopyObject - copy a source object into a new object +func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { + if err := src.validate(); err != nil { + return UploadInfo{}, err + } + + if err := dst.validate(); err != nil { + return UploadInfo{}, err + } + + header := make(http.Header) + dst.Marshal(header) + src.Marshal(header) + + resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ + bucketName: dst.Bucket, + objectName: dst.Object, + customHeader: header, + }) + if err != nil { + return UploadInfo{}, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) + } + + // Update the progress properly after successful copy. 
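+ // A server-side copy moves no data through the client, so the entire
+ // expected Size is drained from the Progress reader at once.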
+ if dst.Progress != nil {
+ io.Copy(io.Discard, io.LimitReader(dst.Progress, dst.Size))
+ }
+
+ cpObjRes := copyObjectResult{}
+ if err = xmlDecoder(resp.Body, &cpObjRes); err != nil {
+ return UploadInfo{}, err
+ }
+
+ // extract lifecycle expiry date and rule ID
+ expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+
+ return UploadInfo{
+ Bucket: dst.Bucket,
+ Key: dst.Object,
+ LastModified: cpObjRes.LastModified,
+ ETag: trimEtag(resp.Header.Get("ETag")),
+ VersionID: resp.Header.Get(amzVersionID),
+ Expiration: expTime,
+ ExpirationRuleID: ruleID,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
new file mode 100644
index 000000000..97a6f80b2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -0,0 +1,254 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// BucketInfo container for bucket metadata.
+type BucketInfo struct {
+ // The name of the bucket.
+ Name string `json:"name"`
+ // Date the bucket was created.
+ CreationDate time.Time `json:"creationDate"`
+}
+
+// StringMap represents map with custom UnmarshalXML
+type StringMap map[string]string
+
+// UnmarshalXML unmarshals the XML into a map of string to strings,
+// creating a key in the map for each tag and setting its value to the
+// tag's contents.
+//
+// The fact this function is on the pointer of Map is important, so that
+// if m is nil it can be initialized, which is often the case if m is
+// nested in another XML structure. This is also why the first thing done
+// on the first line is initialize it.
+func (m *StringMap) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) error {
+ *m = StringMap{}
+ for {
+ // Format is <key>value</key>
+ var e struct {
+ XMLName xml.Name
+ Value string `xml:",chardata"`
+ }
+ err := d.Decode(&e)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ (*m)[e.XMLName.Local] = e.Value
+ }
+ return nil
+}
+
+// URLMap represents map with custom UnmarshalXML
+type URLMap map[string]string
+
+// UnmarshalXML unmarshals the XML into a map of string to strings,
+// creating a key in the map for each tag and setting its value to the
+// tag's contents.
+//
+// The fact this function is on the pointer of Map is important, so that
+// if m is nil it can be initialized, which is often the case if m is
+// nested in another XML structure. This is also why the first thing done
+// on the first line is initialize it.
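+//
+// For example, an element payload of `key1=value1&key2=value2` decodes
+// to the map {"key1": "value1", "key2": "value2"}.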
+func (m *URLMap) UnmarshalXML(d *xml.Decoder, se xml.StartElement) error {
+ *m = URLMap{}
+ var tgs string
+ if err := d.DecodeElement(&tgs, &se); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ for tgs != "" {
+ var key string
+ key, tgs, _ = stringsCut(tgs, "&")
+ if key == "" {
+ continue
+ }
+ key, value, _ := stringsCut(key, "=")
+ key, err := url.QueryUnescape(key)
+ if err != nil {
+ return err
+ }
+
+ value, err = url.QueryUnescape(value)
+ if err != nil {
+ return err
+ }
+ (*m)[key] = value
+ }
+ return nil
+}
+
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, cut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+ if i := strings.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
+ }
+ return s, "", false
+}
+
+// Owner name.
+type Owner struct {
+ XMLName xml.Name `xml:"Owner" json:"owner"`
+ DisplayName string `xml:"ID" json:"name"`
+ ID string `xml:"DisplayName" json:"id"`
+}
+
+// UploadInfo contains information about the
+// newly uploaded or copied object.
+type UploadInfo struct {
+ Bucket string
+ Key string
+ ETag string
+ Size int64
+ LastModified time.Time
+ Location string
+ VersionID string
+
+ // Lifecycle expiry-date and ruleID associated with the expiry,
+ // not to be confused with the `Expires` HTTP header.
+ Expiration time.Time
+ ExpirationRuleID string
+
+ // Verified checksum values, if any.
+ // Values are base64 (standard) encoded.
+ // For multipart objects this is a checksum of the checksum of each part.
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+}
+
+// RestoreInfo contains information about the restore operation of an archived object
+type RestoreInfo struct {
+ // Whether the restore operation is still ongoing
+ OngoingRestore bool
+ // When the restored copy of the archived object will be removed
+ ExpiryTime time.Time
+}
+
+// ObjectInfo container for object metadata.
+type ObjectInfo struct {
+ // An ETag is optionally set to md5sum of an object. In case of multipart objects,
+ // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
+ // each part concatenated into one string.
+ ETag string `json:"etag"`
+
+ Key string `json:"name"` // Name of the object
+ LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
+ Size int64 `json:"size"` // Size in bytes of the object.
+ ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+ Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached.
+
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata" xml:"-"`
+
+ // x-amz-meta-* headers, with the "x-amz-meta-" prefix stripped, containing the first value.
+ // Only returned by MinIO servers.
+ UserMetadata StringMap `json:"userMetadata,omitempty"`
+
+ // x-amz-tagging values as key/value pairs.
+ // Only returned by MinIO servers.
+ UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
+
+ // x-amz-tagging-count value
+ UserTagCount int
+
+ // Owner name.
+ Owner Owner
+
+ // ACL grant.
+ Grant []Grant
+
+ // The class of storage used to store the object.
+ StorageClass string `json:"storageClass"`
+
+ // Versioning related information
+ IsLatest bool
+ IsDeleteMarker bool
+ VersionID string `xml:"VersionId"`
+
+ // x-amz-replication-status value is either in one of the following states
+ // - COMPLETED
+ // - PENDING
+ // - FAILED
+ // - REPLICA (on the destination)
+ ReplicationStatus string `xml:"ReplicationStatus"`
+ // Set to true if the delete marker has a backing object version on the target and is eligible to replicate
+ ReplicationReady bool
+ // Lifecycle expiry-date and ruleID associated with the expiry,
+ // not to be confused with the `Expires` HTTP header.
+ Expiration time.Time
+ ExpirationRuleID string
+
+ Restore *RestoreInfo
+
+ // Checksum values
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+
+ Internal *struct {
+ K int // Data blocks
+ M int // Parity blocks
+ } `xml:"Internal"`
+
+ // Error
+ Err error `json:"-"`
+}
+
+// ObjectMultipartInfo container for multipart object metadata.
+type ObjectMultipartInfo struct {
+ // Date and time at which the multipart upload was initiated.
+ Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Initiator initiator
+ Owner owner
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass string
+
+ // Key of the object for which the multipart upload was initiated.
+ Key string
+
+ // Size in bytes of the object.
+ Size int64
+
+ // Upload ID that identifies the multipart upload.
+ UploadID string `xml:"UploadId"`
+
+ // Error
+ Err error
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-error-response.go b/vendor/github.com/minio/minio-go/v7/api-error-response.go
new file mode 100644
index 000000000..7df211fda
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-error-response.go
@@ -0,0 +1,284 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+)
+
+/* **** SAMPLE ERROR RESPONSE ****
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+   <Code>AccessDenied</Code>
+   <Message>Access Denied</Message>
+   <BucketName>bucketName</BucketName>
+   <Key>objectName</Key>
+   <RequestId>F19772218238A85A</RequestId>
+   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
+</Error>
+*/
+
+// ErrorResponse - Is the typed error returned by all API operations.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+ XMLName xml.Name `xml:"Error" json:"-"`
+ Code string
+ Message string
+ BucketName string
+ Key string
+ Resource string
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+
+ // Region where the bucket is located. This header is returned
+ // only in HEAD bucket and ListObjects response.
+ Region string
+
+ // Captures the server string returned in response header.
+ Server string
+
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+}
+
+// ToErrorResponse - Returns parsed ErrorResponse struct from body and
+// http headers.
+//
+// For example:
+//
+// import s3 "github.com/minio/minio-go/v7"
+// ...
+// ...
+// reader, err := s3.GetObject(...)
+// if err != nil {
+// resp := s3.ToErrorResponse(err)
+// }
+// ...
+func ToErrorResponse(err error) ErrorResponse {
+ switch err := err.(type) {
+ case ErrorResponse:
+ return err
+ default:
+ return ErrorResponse{}
+ }
+}
+
+// Error - Returns S3 error string.
+func (e ErrorResponse) Error() string {
+ if e.Message == "" {
+ msg, ok := s3ErrorResponseMap[e.Code]
+ if !ok {
+ msg = fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return msg
+ }
+ return e.Message
+}
+
+// Common string for errors to report issue location in unexpected
+// cases.
+const (
+ reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
+)
+
+// xmlDecodeAndBody reads the whole body up to 1MB and
+// tries to XML decode it into v.
+// The body that was read and any error from reading or decoding is returned.
+func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) {
+ // read the whole body (up to 1MB)
+ const maxBodyLength = 1 << 20
+ body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength))
+ if err != nil {
+ return nil, err
+ }
+ return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v)
+}
+
+// httpRespToErrorResponse returns a new encoded ErrorResponse
+// structure as error.
+func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
+ if resp == nil {
+ msg := "Empty http response. " + reportIssue
+ return errInvalidArgument(msg)
+ }
+
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Server: resp.Header.Get("Server"),
+ }
+
+ errBody, err := xmlDecodeAndBody(resp.Body, &errResp)
+ // XML decoding failed, fall back to HTTP headers.
+ if err != nil {
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ if objectName == "" {
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "NoSuchBucket",
+ Message: "The specified bucket does not exist.",
+ BucketName: bucketName,
+ }
+ } else {
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "NoSuchKey",
+ Message: "The specified key does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ }
+ case http.StatusForbidden:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "AccessDenied",
+ Message: "Access Denied.",
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ case http.StatusConflict:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "Conflict",
+ Message: "Bucket not empty.",
+ BucketName: bucketName,
+ }
+ case http.StatusPreconditionFailed:
+ errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
+ Code: "PreconditionFailed",
+ Message: s3ErrorResponseMap["PreconditionFailed"],
+ BucketName: bucketName,
+ Key: objectName,
+ }
+ default:
+ msg := resp.Status
+ if len(errBody) > 0 {
+ msg = string(errBody)
+ if len(msg) > 1024 {
+ msg = msg[:1024] + "..."
+ } + } + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: resp.Status, + Message: msg, + BucketName: bucketName, + } + } + } + + code := resp.Header.Get("x-minio-error-code") + if code != "" { + errResp.Code = code + } + desc := resp.Header.Get("x-minio-error-desc") + if desc != "" { + errResp.Message = strings.Trim(desc, `"`) + } + + // Save hostID, requestID and region information + // from headers if not available through error XML. + if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) + } + + return errResp +} + +// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func errTransferAccelerationBucket(bucketName string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", + BucketName: bucketName, + } +} + +// errEntityTooLarge - Input size is larger than supported maximum. +func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errEntityTooSmall - Input size is smaller than supported minimum. +func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errUnexpectedEOF - Unexpected end of file reached. +func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// errInvalidArgument - Invalid argument response. +func errInvalidArgument(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} + +// errAPINotSupported - API not supported response +// The specified API call is not supported +func errAPINotSupported(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotImplemented, + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go new file mode 100644 index 000000000..9041d99e9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go @@ -0,0 +1,152 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "encoding/xml" + "net/http" + "net/url" +) + +// Grantee represents the person being granted permissions. +type Grantee struct { + XMLName xml.Name `xml:"Grantee"` + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` +} + +// Grant holds grant information +type Grant struct { + XMLName xml.Name `xml:"Grant"` + Grantee Grantee + Permission string `xml:"Permission"` +} + +// AccessControlList contains the set of grantees and the permissions assigned to each grantee. +type AccessControlList struct { + XMLName xml.Name `xml:"AccessControlList"` + Grant []Grant + Permission string `xml:"Permission"` +} + +type accessControlPolicy struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + Owner Owner + AccessControlList AccessControlList +} + +// GetObjectACL get object ACLs +func (c *Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + objInfo.Owner.DisplayName = res.Owner.DisplayName + objInfo.Owner.ID = res.Owner.ID + + objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) 
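+ // Below: prefer reporting a canned ACL when the grant pattern matches
+ // a well-known one; otherwise fall back to explicit x-amz-grant-* headers.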
+ + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch { + case g.Permission == "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case g.Permission == "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case g.Permission == "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case g.Permission == "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case g.Permission == "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go new file mode 100644 index 000000000..e1155c372 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-attributes.go @@ -0,0 +1,201 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+ "context"
+ "encoding/xml"
+ "errors"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// ObjectAttributesOptions are options used for the GetObjectAttributes API
+//
+// - MaxParts
+// How many parts the caller wants to be returned (default: 1000)
+//
+// - VersionID
+// The object version you want attributes for
+//
+// - PartNumberMarker
+// The listing will start AFTER the part matching PartNumberMarker
+//
+// - ServerSideEncryption
+// The server-side encryption algorithm used when storing this object in MinIO
+type ObjectAttributesOptions struct {
+ MaxParts int
+ VersionID string
+ PartNumberMarker int
+ ServerSideEncryption encrypt.ServerSide
+}
+
+// ObjectAttributes is the response object returned by the GetObjectAttributes API
+//
+// - VersionID
+// The object version
+//
+// - LastModified
+// The last time the object was modified
+//
+// - ObjectAttributesResponse
+// Contains more information about the object
+type ObjectAttributes struct {
+ VersionID string
+ LastModified time.Time
+ ObjectAttributesResponse
+}
+
+// ObjectAttributesResponse contains details returned by the GetObjectAttributes API
+//
+// Noteworthy fields:
+//
+// - ObjectParts.PartsCount
+// Contains the total part count for the object (not the current response)
+//
+// - ObjectParts.PartNumberMarker
+// Pagination of parts will begin at (but not include) PartNumberMarker
+//
+// - ObjectParts.NextPartNumberMarker
+// The next PartNumberMarker to be used in order to continue pagination
+//
+// - ObjectParts.IsTruncated
+// Indicates if the last part is included in the request (does not check if parts are missing from the start of the list, ONLY the end)
+//
+// - ObjectParts.MaxParts
+// Reflects the MaxParts used by the caller or the default MaxParts value of the API
+type ObjectAttributesResponse struct {
+ ETag string `xml:",omitempty"`
+ StorageClass string
+ ObjectSize int
+ Checksum struct {
+ ChecksumCRC32 string `xml:",omitempty"`
+ ChecksumCRC32C string `xml:",omitempty"`
+ ChecksumSHA1 string `xml:",omitempty"`
+ ChecksumSHA256 string `xml:",omitempty"`
+ }
+ ObjectParts struct {
+ PartsCount int
+ PartNumberMarker int
+ NextPartNumberMarker int
+ MaxParts int
+ IsTruncated bool
+ Parts []*ObjectAttributePart `xml:"Part"`
+ }
+}
+
+// ObjectAttributePart is used by ObjectAttributesResponse to describe an object part
+type ObjectAttributePart struct {
+ ChecksumCRC32 string `xml:",omitempty"`
+ ChecksumCRC32C string `xml:",omitempty"`
+ ChecksumSHA1 string `xml:",omitempty"`
+ ChecksumSHA256 string `xml:",omitempty"`
+ PartNumber int
+ Size int
+}
+
+func (o *ObjectAttributes) parseResponse(resp *http.Response) (err error) {
+ mod, err := parseRFC7231Time(resp.Header.Get("Last-Modified"))
+ if err != nil {
+ return err
+ }
+ o.LastModified = mod
+ o.VersionID = resp.Header.Get(amzVersionID)
+
+ response := new(ObjectAttributesResponse)
+ if err := xml.NewDecoder(resp.Body).Decode(response); err != nil {
+ return err
+ }
+ o.ObjectAttributesResponse = *response
+
+ return
+}
+
+// GetObjectAttributes API combines HeadObject and ListParts.
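+//
+// A minimal usage sketch (bucket and object names below are hypothetical):
+//
+//	attrs, err := c.GetObjectAttributes(ctx, "my-bucket", "my-object",
+//		ObjectAttributesOptions{MaxParts: 100})
+//	if err == nil {
+//		fmt.Println(attrs.ObjectSize, attrs.ObjectParts.PartsCount)
+//	}
+//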
+// More details on usage can be found in the documentation for ObjectAttributesOptions{} +func (c *Client) GetObjectAttributes(ctx context.Context, bucketName, objectName string, opts ObjectAttributesOptions) (*ObjectAttributes, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Add("attributes", "") + if opts.VersionID != "" { + urlValues.Add("versionId", opts.VersionID) + } + + headers := make(http.Header) + headers.Set(amzObjectAttributes, GetObjectAttributesTags) + + if opts.PartNumberMarker > 0 { + headers.Set(amzPartNumberMarker, strconv.Itoa(opts.PartNumberMarker)) + } + + if opts.MaxParts > 0 { + headers.Set(amzMaxParts, strconv.Itoa(opts.MaxParts)) + } else { + headers.Set(amzMaxParts, strconv.Itoa(GetObjectAttributesMaxParts)) + } + + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(headers) + } + + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + if err != nil { + return nil, err + } + + defer closeResponse(resp) + + hasEtag := resp.Header.Get(ETag) + if hasEtag != "" { + return nil, errors.New("getObjectAttributes is not supported by the current endpoint version") + } + + if resp.StatusCode != http.StatusOK { + ER := new(ErrorResponse) + if err := xml.NewDecoder(resp.Body).Decode(ER); err != nil { + return nil, err + } + + return nil, *ER + } + + OA := new(ObjectAttributes) + err = OA.parseResponse(resp) + if err != nil { + return nil, err + } + + return OA, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go new file mode 100644 index 000000000..567a42e45 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object-file.go @@ -0,0 +1,127 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FGetObject - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. + st, err := os.Stat(filePath) + if err == nil { + // If the destination exists and is a directory. 
+ if st.IsDir() { + return errInvalidArgument("fileName is a directory.") + } + } + + // Proceed if file does not exist. return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(filePath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0o700); err != nil { + return err + } + } + + // Gather md5sum. + objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + return err + } + + // Write to a temporary file "fileName.part.minio" before saving. + filePartPath := filePath + sum256Hex([]byte(objectStat.ETag)) + ".part.minio" + + // If exists, open in append mode. If not create it as a part file. + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600) + if err != nil { + return err + } + + // If we return early with an error, be sure to close and delete + // filePart. If we have an error along the way there is a chance + // that filePart is somehow damaged, and we should discard it. + closeAndRemove := true + defer func() { + if closeAndRemove { + _ = filePart.Close() + _ = os.Remove(filePartPath) + } + }() + + // Issue Stat to get the current offset. + st, err = filePart.Stat() + if err != nil { + return err + } + + // Initialize get object request headers to set the + // appropriate range offsets to read from. + if st.Size() > 0 { + opts.SetRange(st.Size(), 0) + } + + // Seek to current position for incoming reader. + objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + closeAndRemove = false + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go new file mode 100644 index 000000000..d7fd27835 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -0,0 +1,699 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "sync" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// GetObject wrapper function that accepts a request context +func (c *Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } + } + + gctx, cancel := context.WithCancel(ctx) + + // Detect if snowball is server location we are talking to. + var snowball bool + if location, ok := c.bucketLocCache.Get(bucketName); ok { + snowball = location == "snowball" + } + + var ( + err error + httpReader io.ReadCloser + objectInfo ObjectInfo + totalRead int + ) + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(resCh) + defer func() { + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + }() + defer cancel() + + // Used to verify if etag of object has changed since last read. + var etag string + + for req := range reqCh { + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{Error: err} + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(size) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: size, + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. 
+ resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + objectInfo, err := c.StatObject(gctx, bucketName, objectName, StatObjectOptions(opts)) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + // Check whether this is snowball + // if yes do not use If-Match feature + // it doesn't work. + if etag != "" && !snowball { + opts.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) + } else { + // Remove range header if already set + delete(opts.headers, "Range") + } + httpReader, objectInfo, _, err = c.getObject(gctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + totalRead = 0 + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := readFull(httpReader, req.Buffer) + totalRead += size + if size > 0 && err == io.ErrUnexpectedEOF { + if int64(totalRead) < objectInfo.Size { + // In situations when returned size + // is less than the expected content + // length set by the server, make sure + // we return io.ErrUnexpectedEOF + err = io.ErrUnexpectedEOF + } else { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { + // Special cases when server writes more data + // than the content-length, net/http response + // body returns an error, instead of converting + // it to io.EOF - return unexpected EOF. + err = io.ErrUnexpectedEOF + } + + // Reply back how much was read. + resCh <- getResponse{ + Size: size, + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(gctx, cancel, reqCh, resCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. 
+ isFirstReq bool // Determines if this request is the first time an object is being accessed. + settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. +type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements +// Reader, ReaderAt, Seeker, Closer for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + ctx context.Context + cancel context.CancelFunc + currOffset int64 + objectInfo ObjectInfo + + // Ask lower level to initiate data fetching based on currOffset + seekData bool + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. +// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. +func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + select { + case <-o.ctx.Done(): + return getResponse{}, o.ctx.Err() + case o.reqCh <- request: + } + + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(b)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, errInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. 
+	response, err := o.doGetRequest(readReq)
+	if err != nil && err != io.EOF {
+		// Save the error for future calls.
+		o.prevErr = err
+		return response.Size, err
+	}
+
+	// Bytes read.
+	bytesRead := int64(response.Size)
+
+	// Set the new offset.
+	oerr := o.setOffset(bytesRead)
+	if oerr != nil {
+		// Save the error for future calls.
+		o.prevErr = oerr
+		return response.Size, oerr
+	}
+
+	// Return the response.
+	return response.Size, err
+}
+
+// Stat returns the ObjectInfo structure describing Object.
+func (o *Object) Stat() (ObjectInfo, error) {
+	if o == nil {
+		return ObjectInfo{}, errInvalidArgument("Object is nil")
+	}
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+		return ObjectInfo{}, o.prevErr
+	}
+
+	// This is the first request.
+	if !o.isStarted || !o.objectInfoSet {
+		// Send the request and get the response.
+		_, err := o.doGetRequest(getRequest{
+			isFirstReq:        !o.isStarted,
+			settingObjectInfo: !o.objectInfoSet,
+		})
+		if err != nil {
+			o.prevErr = err
+			return ObjectInfo{}, err
+		}
+	}
+
+	return o.objectInfo, nil
+}
+
+// ReadAt reads len(b) bytes from the File starting at byte offset
+// off. It returns the number of bytes read and the error, if any.
+// ReadAt always returns a non-nil error when n < len(b). At end of
+// file, that error is io.EOF.
+func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
+	if o == nil {
+		return 0, errInvalidArgument("Object is nil")
+	}
+
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	// prevErr is the error saved from the previous operation.
+	if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
+		return 0, o.prevErr
+	}
+
+	// Set the current offset to the ReadAt offset, because the current offset will be shifted at the end of this method.
+	o.currOffset = offset
+
+	// Can only compare offsets to size when size has been set.
+	if o.objectInfoSet {
+		// If offset is negative then we return io.EOF.
+		// If offset is greater than or equal to object size we return io.EOF.
+		if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
+			return 0, io.EOF
+		}
+	}
+
+	// Create the new readAt request.
+	readAtReq := getRequest{
+		isReadOp:        true,
+		isReadAt:        true,
+		DidOffsetChange: true,       // Offset always changes.
+		beenRead:        o.beenRead, // Set if this is the first request to try and read.
+		Offset:          offset,     // Set the offset.
+		Buffer:          b,
+	}
+
+	// Alert that this is the first request.
+	if !o.isStarted {
+		readAtReq.isFirstReq = true
+	}
+
+	// Send and receive from the first request.
+	response, err := o.doGetRequest(readAtReq)
+	if err != nil && err != io.EOF {
+		// Save the error.
+		o.prevErr = err
+		return response.Size, err
+	}
+	// Bytes read.
+	bytesRead := int64(response.Size)
+	// There is no valid objectInfo yet
+	// to compare against for EOF.
+	if !o.objectInfoSet {
+		// Update the currentOffset.
+		o.currOffset += bytesRead
+	} else {
+		// If this was not the first request update
+		// the offsets and compare against objectInfo
+		// for EOF.
+		oerr := o.setOffset(bytesRead)
+		if oerr != nil {
+			o.prevErr = oerr
+			return response.Size, oerr
+		}
+	}
+	return response.Size, err
+}
+
+// Seek sets the offset for the next Read or Write to offset,
+// interpreted according to whence: 0 means relative to the
+// origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end.
+// Seek returns the new offset and an error, if any.
+//
+// Seeking to a negative offset is an error. Seeking to any positive
+// offset is legal; subsequent io operations succeed as long as the
+// underlying object is not closed.
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
+	if o == nil {
+		return 0, errInvalidArgument("Object is nil")
+	}
+
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	// Seeking at EOF is legal: tolerate only a saved io.EOF; any other saved error is returned.
+	if o.prevErr != nil && o.prevErr != io.EOF {
+		return 0, o.prevErr
+	}
+
+	// Negative offset is valid for whence of '2'.
+	if offset < 0 && whence != 2 {
+		return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence))
+	}
+
+	// This is the first request. So before anything else
+	// get the ObjectInfo.
+	if !o.isStarted || !o.objectInfoSet {
+		// Create the new Seek request.
+		seekReq := getRequest{
+			isReadOp:   false,
+			Offset:     offset,
+			isFirstReq: true,
+		}
+		// Send and receive from the seek request.
+		_, err := o.doGetRequest(seekReq)
+		if err != nil {
+			// Save the error.
+			o.prevErr = err
+			return 0, err
+		}
+	}
+
+	newOffset := o.currOffset
+
+	// Switch through whence.
+	switch whence {
+	default:
+		return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
+	case 0:
+		if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
+			return 0, io.EOF
+		}
+		newOffset = offset
+	case 1:
+		if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
+			return 0, io.EOF
+		}
+		newOffset += offset
+	case 2:
+		// If we don't know the object size, return an error for io.SeekEnd.
+		if o.objectInfo.Size < 0 {
+			return 0, errInvalidArgument("Whence END is not supported when the object size is unknown")
+		}
+		// Seeking to a positive offset is valid for whence '2', but
+		// since we are backing a Reader we have reached 'EOF' if
+		// offset is positive.
+		if offset > 0 {
+			return 0, io.EOF
+		}
+		// Seeking to a negative position is not allowed for this whence.
+		if o.objectInfo.Size+offset < 0 {
+			return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
+		}
+		newOffset = o.objectInfo.Size + offset
+	}
+	// Reset the saved error since the seek succeeded; let the next Read
+	// or ReadAt decide.
+	if o.prevErr == io.EOF {
+		o.prevErr = nil
+	}
+
+	// Ask the lower level to fetch again from the source when necessary.
+	o.seekData = (newOffset != o.currOffset) || o.seekData
+	o.currOffset = newOffset
+
+	// Return the effective offset.
+	return o.currOffset, nil
+}
+
+// Close - Closes the object. After the first call, every subsequent
+// Close() call returns an error.
+func (o *Object) Close() (err error) {
+	if o == nil {
+		return errInvalidArgument("Object is nil")
+	}
+
+	// Locking.
+	o.mutex.Lock()
+	defer o.mutex.Unlock()
+
+	// If already closed, return the saved error.
+	if o.isClosed {
+		return o.prevErr
+	}
+
+	// Close successfully.
+	o.cancel()
+
+	// Close the request channel to signal the internal goroutine to exit.
+	close(o.reqCh)
+
+	// Save for future operations.
+	errMsg := "Object is already closed. Bad file descriptor."
+	o.prevErr = errors.New(errMsg)
+	// Record that we closed the channel successfully.
+	o.isClosed = true
+	return nil
+}
+
+// newObject instantiates a new *minio.Object*.
+// Its ObjectInfo is populated later, by the first request that fetches it.
+func newObject(ctx context.Context, cancel context.CancelFunc, reqCh chan<- getRequest, resCh <-chan getResponse) *Object {
+	return &Object{
+		ctx:    ctx,
+		cancel: cancel,
+		mutex:  &sync.Mutex{},
+		reqCh:  reqCh,
+		resCh:  resCh,
+	}
+}
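Because Seek, Read, Stat, and Close above give *minio.Object the io.ReadSeekCloser surface, a caller can treat a remote object much like a local file. A hedged usage sketch; the bucket and object names are placeholders, and client is assumed to be an initialized *minio.Client:

package example

import (
	"context"
	"io"
	"os"

	"github.com/minio/minio-go/v7"
)

// streamSecondHalf stats an object, seeks halfway in, and streams
// the remainder to stdout.
func streamSecondHalf(ctx context.Context, client *minio.Client) error {
	obj, err := client.GetObject(ctx, "mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		return err
	}
	defer obj.Close()

	// Stat issues the first request and caches the ObjectInfo.
	info, err := obj.Stat()
	if err != nil {
		return err
	}

	// Seek only records the new offset (the seekData flag); the HTTP
	// request is re-issued lazily by the next Read.
	if _, err := obj.Seek(info.Size/2, io.SeekStart); err != nil {
		return err
	}
	_, err = io.Copy(os.Stdout, obj)
	return err
}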
+// getObject - retrieve object from Object Storage.
+//
+// Additionally, this function takes range arguments to download the specified
+// byte range of an object. Setting both offset and length to 0 downloads the full object.
+//
+// For more information about the HTTP Range header, see
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+func (c *Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) {
+	// Validate input arguments.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, ObjectInfo{}, nil, ErrorResponse{
+			StatusCode: http.StatusBadRequest,
+			Code:       "InvalidBucketName",
+			Message:    err.Error(),
+		}
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, ObjectInfo{}, nil, ErrorResponse{
+			StatusCode: http.StatusBadRequest,
+			Code:       "XMinioInvalidObjectName",
+			Message:    err.Error(),
+		}
+	}
+
+	// Execute GET on objectName.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      opts.toQueryValues(),
+		customHeader:     opts.Header(),
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	if err != nil {
+		return nil, ObjectInfo{}, nil, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
+			return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header)
+	if err != nil {
+		closeResponse(resp)
+		return nil, ObjectInfo{}, nil, err
+	}
+
+	// do not close body here, caller will close
+	return resp.Body, objectStat, resp.Header, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-get-options.go b/vendor/github.com/minio/minio-go/v7/api-get-options.go
new file mode 100644
index 000000000..a0216e201
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-get-options.go
@@ -0,0 +1,203 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// AdvancedGetOptions for internal use by MinIO server - not intended for client use.
+type AdvancedGetOptions struct {
+	ReplicationDeleteMarker           bool
+	IsReplicationReadyForDeleteMarker bool
+	ReplicationProxyRequest           string
+}
+
+// GetObjectOptions are used to specify additional headers or options
+// during GET requests.
+type GetObjectOptions struct {
+	headers              map[string]string
+	reqParams            url.Values
+	ServerSideEncryption encrypt.ServerSide
+	VersionID            string
+	PartNumber           int
+
+	// Include any checksums, if object was uploaded with checksum.
+	// For multipart objects this is a checksum of part checksums.
+ // https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + Checksum bool + + // To be not used by external applications + Internal AdvancedGetOptions +} + +// StatObjectOptions are used to specify additional headers or options +// during GET info/stat requests. +type StatObjectOptions = GetObjectOptions + +// Header returns the http.Header representation of the GET options. +func (o GetObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + // this header is set for active-active replication scenario where GET/HEAD + // to site A is proxy'd to site B if object/version missing on site A. + if o.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) + } + if o.Checksum { + headers.Set("x-amz-checksum-mode", "ENABLED") + } + return headers +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *GetObjectOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// SetReqParam - set request query string parameter +// supported key: see supportedQueryValues and allowedCustomQueryPrefix. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) SetReqParam(key, value string) { + if !isCustomQueryValue(key) && !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Set(key, value) +} + +// AddReqParam - add request query string parameter +// supported key: see supportedQueryValues and allowedCustomQueryPrefix. +// If an unsupported key is passed in, it will be ignored and nothing will be done. +func (o *GetObjectOptions) AddReqParam(key, value string) { + if !isCustomQueryValue(key) && !isStandardQueryValue(key) { + // do nothing + return + } + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add(key, value) +} + +// SetMatchETag - set match etag. +func (o *GetObjectOptions) SetMatchETag(etag string) error { + if etag == "" { + return errInvalidArgument("ETag cannot be empty.") + } + o.Set("If-Match", "\""+etag+"\"") + return nil +} + +// SetMatchETagExcept - set match etag except. +func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { + if etag == "" { + return errInvalidArgument("ETag cannot be empty.") + } + o.Set("If-None-Match", "\""+etag+"\"") + return nil +} + +// SetUnmodified - set unmodified time since. +func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { + if modTime.IsZero() { + return errInvalidArgument("Modified since cannot be empty.") + } + o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetModified - set modified time since. +func (o *GetObjectOptions) SetModified(modTime time.Time) error { + if modTime.IsZero() { + return errInvalidArgument("Modified since cannot be empty.") + } + o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetRange - set the start and end offset of the object to be read. +// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. 
+func (o *GetObjectOptions) SetRange(start, end int64) error { + switch { + case start == 0 && end < 0: + // Read last '-end' bytes. `bytes=-N`. + o.Set("Range", fmt.Sprintf("bytes=%d", end)) + case 0 < start && end == 0: + // Read everything starting from offset + // 'start'. `bytes=N-`. + o.Set("Range", fmt.Sprintf("bytes=%d-", start)) + case 0 <= start && start <= end: + // Read everything starting at 'start' till the + // 'end'. `bytes=N-M` + o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + default: + // All other cases such as + // bytes=-3- + // bytes=5-3 + // bytes=-2-4 + // bytes=-3-0 + // bytes=-3--2 + // are invalid. + return errInvalidArgument( + fmt.Sprintf( + "Invalid range specified: start=%d end=%d", + start, end)) + } + return nil +} + +// toQueryValues - Convert the versionId, partNumber, and reqParams in Options to query string parameters. +func (o *GetObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.VersionID != "" { + urlValues.Set("versionId", o.VersionID) + } + if o.PartNumber > 0 { + urlValues.Set("partNumber", strconv.Itoa(o.PartNumber)) + } + + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go new file mode 100644 index 000000000..31b6edf2e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-list.go @@ -0,0 +1,1057 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// ListBuckets list all buckets owned by this authenticated user. +// +// This call requires explicit authentication, no anonymous requests are +// allowed for listing buckets. +// +// api := client.New(....) +// for message := range api.ListBuckets(context.Background()) { +// fmt.Println(message) +// } +func (c *Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { + // Execute GET on service. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, "", "") + } + } + listAllMyBucketsResult := listAllMyBucketsResult{} + err = xmlDecoder(resp.Body, &listAllMyBucketsResult) + if err != nil { + return nil, err + } + return listAllMyBucketsResult.Buckets.Bucket, nil +} + +// Bucket List Operations. +func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + // Allocate new list objects channel. 
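SetRange above accepts exactly three shapes of input: a suffix range, an open-ended range, and a closed range. For illustration, a sketch exercising all three valid forms plus one invalid one (note that each call overwrites the previously set Range header):

package example

import "github.com/minio/minio-go/v7"

// rangeForms exercises the three legal Range shapes.
func rangeForms() {
	var opts minio.GetObjectOptions

	_ = opts.SetRange(0, -128) // "bytes=-128": the last 128 bytes.
	_ = opts.SetRange(512, 0)  // "bytes=512-": everything from offset 512.
	// HTTP ranges are inclusive on both ends, which is why the readAt
	// path earlier requests off .. off+len(buf)-1 for a buffer of len(buf).
	_ = opts.SetRange(0, 1023) // "bytes=0-1023": the first KiB.

	err := opts.SetRange(-1, 5) // invalid: start < 0 matches no case.
	_ = err                     // non-nil here.
}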
+ objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + // Return object owner information by default + fetchOwner := true + + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return objectStatCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + defer close(objectStatCh) + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer func() { + if contextCanceled(ctx) { + objectStatCh <- ObjectInfo{ + Err: ctx.Err(), + } + } + close(objectStatCh) + }() + + // Save continuationToken for next request. + var continuationToken string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, + fetchOwner, opts.WithMetadata, delimiter, opts.StartAfter, opts.MaxKeys, opts.headers) + if err != nil { + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + object.ETag = trimEtag(object.ETag) + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + + // Add this to catch broken S3 API implementations. + if continuationToken == "" { + sendObjectInfo(ObjectInfo{ + Err: fmt.Errorf("listObjectsV2 is truncated without continuationToken, %s S3 server is incompatible with S3 API", c.endpointURL), + }) + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?continuation-token - Used to continue iterating over a set of objects +// ?metadata - Specifies if we want metadata for the objects as part of list operation. +// ?delimiter - A delimiter is a character you use to group keys. +// ?start-after - Sets a marker to start listing lexically at this key onwards. +// ?max-keys - Sets the maximum number of keys returned in the response body. 
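The loop above implements the standard V2 pagination contract: follow NextContinuationToken while IsTruncated is set, and fail fast when a truncated response arrives without a token. The same loop in isolation, as a sketch with a hypothetical fetch callback (not the library's API):

package example

import "fmt"

// page is one result batch from a hypothetical V2-style listing call,
// a stand-in for listObjectsV2Query.
type page struct {
	items     []string
	nextToken string
	truncated bool
}

// drainAll follows continuation tokens until a response is no longer
// truncated, treating a truncated response without a token as a
// broken server.
func drainAll(fetch func(token string) (page, error)) ([]string, error) {
	var all []string
	var token string
	for {
		p, err := fetch(token)
		if err != nil {
			return nil, err
		}
		all = append(all, p.items...)
		if !p.truncated {
			return all, nil
		}
		if p.nextToken == "" {
			return nil, fmt.Errorf("truncated listing without continuation token")
		}
		token = p.nextToken
	}
}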
+func (c *Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter, startAfter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketV2Result{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + if metadata { + urlValues.Set("metadata", "true") + } + + // Set this conditionally if asked + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + + // Always set encoding-type in ListObjects V2 + urlValues.Set("encoding-type", "url") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { + return listBucketResult, err + } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, ErrorResponse{ + Code: "NotImplemented", + Message: "Truncated response should have continuation token set", + } + } + + for i, obj := range listBucketResult.Contents { + listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond) + } + + for i, obj := range listBucketResult.CommonPrefixes { + listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) + if err != nil { + return listBucketResult, err + } + } + + // Success. + return listBucketResult, nil +} + +func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + sendObjectInfo := func(info ObjectInfo) { + select { + case objectStatCh <- info: + case <-ctx.Done(): + } + } + + // Validate bucket name. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return objectStatCh + } + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + defer close(objectStatCh) + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer func() { + if contextCanceled(ctx) { + objectStatCh <- ObjectInfo{ + Err: ctx.Err(), + } + } + close(objectStatCh) + }() + + marker := opts.StartAfter + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) + if err != nil { + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + marker = object.Key + object.ETag = trimEtag(object.ETag) + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case objectStatCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If next marker present, save it for next request. + if result.NextMarker != "" { + marker = result.NextMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { + // Allocate new list objects channel. + resultCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if opts.Recursive { + // If recursive we do not delimit. + delimiter = "" + } + + sendObjectInfo := func(info ObjectInfo) { + select { + case resultCh <- info: + case <-ctx.Done(): + } + } + + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return resultCh + } + + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + defer close(resultCh) + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return resultCh + } + + // Initiate list objects goroutine here. + go func(resultCh chan<- ObjectInfo) { + defer func() { + if contextCanceled(ctx) { + resultCh <- ObjectInfo{ + Err: ctx.Err(), + } + } + close(resultCh) + }() + + var ( + keyMarker = "" + versionIDMarker = "" + ) + + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectVersionsQuery(ctx, bucketName, opts, keyMarker, versionIDMarker, delimiter) + if err != nil { + sendObjectInfo(ObjectInfo{ + Err: err, + }) + return + } + + // If contents are available loop through and send over channel. 
+ for _, version := range result.Versions { + info := ObjectInfo{ + ETag: trimEtag(version.ETag), + Key: version.Key, + LastModified: version.LastModified.Truncate(time.Millisecond), + Size: version.Size, + Owner: version.Owner, + StorageClass: version.StorageClass, + IsLatest: version.IsLatest, + VersionID: version.VersionID, + IsDeleteMarker: version.isDeleteMarker, + UserTags: version.UserTags, + UserMetadata: version.UserMetadata, + Internal: version.Internal, + } + select { + // Send object version info. + case resultCh <- info: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + select { + // Send object prefixes. + case resultCh <- ObjectInfo{Key: obj.Prefix}: + // If receives done from the caller, return here. + case <-ctx.Done(): + return + } + } + + // If next key marker is present, save it for next request. + if result.NextKeyMarker != "" { + keyMarker = result.NextKeyMarker + } + + // If next version id marker is present, save it for next request. + if result.NextVersionIDMarker != "" { + versionIDMarker = result.NextVersionIDMarker + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(resultCh) + return resultCh +} + +// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects +// and their versions in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?key-marker - Specifies the key to start with when listing objects in a bucket. +// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectVersionsQuery(ctx context.Context, bucketName string, opts ListObjectsOptions, keyMarker, versionIDMarker, delimiter string) (ListVersionsResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListVersionsResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { + return ListVersionsResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set versions to trigger versioning API + urlValues.Set("versions", "") + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", opts.Prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + + // Set max keys. + if opts.MaxKeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", opts.MaxKeys)) + } + + // Set version ID marker + if versionIDMarker != "" { + urlValues.Set("version-id-marker", versionIDMarker) + } + + if opts.WithMetadata { + urlValues.Set("metadata", "true") + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. 
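With WithVersions set, each channel entry carries the version metadata populated above (VersionID, IsLatest, IsDeleteMarker, and so on). A hedged consumer sketch; the bucket name and prefix are placeholders and client is assumed to be an initialized *minio.Client:

package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// printVersions lists every version of every key under a prefix.
func printVersions(ctx context.Context, client *minio.Client) error {
	opts := minio.ListObjectsOptions{
		WithVersions: true,
		Recursive:    true,
		Prefix:       "reports/",
	}
	for v := range client.ListObjects(ctx, "mybucket", opts) {
		if v.Err != nil {
			// Error entries are terminal: the goroutine closes the
			// channel after sending one.
			return v.Err
		}
		fmt.Printf("%s version=%s latest=%v deleteMarker=%v\n",
			v.Key, v.VersionID, v.IsLatest, v.IsDeleteMarker)
	}
	return nil
}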
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: opts.headers, + }) + defer closeResponse(resp) + if err != nil { + return ListVersionsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode ListVersionsResult XML. + listObjectVersionsOutput := ListVersionsResult{} + err = xmlDecoder(resp.Body, &listObjectVersionsOutput) + if err != nil { + return ListVersionsResult{}, err + } + + for i, obj := range listObjectVersionsOutput.Versions { + listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + for i, obj := range listObjectVersionsOutput.CommonPrefixes { + listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + if listObjectVersionsOutput.NextKeyMarker != "" { + listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) + if err != nil { + return listObjectVersionsOutput, err + } + } + + return listObjectVersionsOutput, nil +} + +// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. +// request parameters :- +// --------- +// ?marker - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c *Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ListBucketResult{}, err + } + // Validate object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + return ListBucketResult{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", objectPrefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Set object marker. + if objectMarker != "" { + urlValues.Set("marker", objectMarker) + } + + // Set max keys. + if maxkeys > 0 { + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + } + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode listBuckets XML. 
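ListObjects, defined a little further below, hands results back over a channel, and its contract requires the caller to drain that channel completely to avoid leaking the listing goroutine. A hedged consumer sketch that drains fully while remembering only the first error; bucket name and prefix are placeholders:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// countObjects drains the listing channel completely, as the
// ListObjects contract requires.
func countObjects(ctx context.Context, client *minio.Client) (int, error) {
	opts := minio.ListObjectsOptions{Prefix: "logs/", Recursive: true}
	var (
		n        int
		firstErr error
	)
	for obj := range client.ListObjects(ctx, "mybucket", opts) {
		if obj.Err != nil {
			if firstErr == nil {
				firstErr = obj.Err
			}
			continue // keep draining until the channel is closed.
		}
		n++
	}
	return n, firstErr
}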
+	listBucketResult := ListBucketResult{}
+	err = xmlDecoder(resp.Body, &listBucketResult)
+	if err != nil {
+		return listBucketResult, err
+	}
+
+	for i, obj := range listBucketResult.Contents {
+		listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType)
+		if err != nil {
+			return listBucketResult, err
+		}
+		listBucketResult.Contents[i].LastModified = listBucketResult.Contents[i].LastModified.Truncate(time.Millisecond)
+	}
+
+	for i, obj := range listBucketResult.CommonPrefixes {
+		listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType)
+		if err != nil {
+			return listBucketResult, err
+		}
+	}
+
+	if listBucketResult.NextMarker != "" {
+		listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType)
+		if err != nil {
+			return listBucketResult, err
+		}
+	}
+
+	return listBucketResult, nil
+}
+
+// ListObjectsOptions holds all options of a list object request.
+type ListObjectsOptions struct {
+	// Include object versions in the listing
+	WithVersions bool
+	// Include object metadata in the listing
+	WithMetadata bool
+	// Only list objects with the prefix
+	Prefix string
+	// Ignore '/' delimiter
+	Recursive bool
+	// The maximum number of objects requested per
+	// batch; an advanced use-case not useful for most
+	// applications
+	MaxKeys int
+	// StartAfter starts listing lexically at this
+	// object onwards; this value can also be set
+	// for Marker when `UseV1` is set to true.
+	StartAfter string
+
+	// Use the deprecated list objects V1 API
+	UseV1 bool
+
+	headers http.Header
+}
+
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *ListObjectsOptions) Set(key, value string) {
+	if o.headers == nil {
+		o.headers = make(http.Header)
+	}
+	o.headers.Set(key, value)
+}
+
+// ListObjects returns the list of objects after evaluating the passed options.
+//
+//	api := client.New(....)
+//	for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
+//		fmt.Println(object)
+//	}
+//
+// If the caller cancels the context, the last entry on the 'chan ObjectInfo' will be the context.Error().
+// The caller must drain the channel entirely and wait until it is closed before proceeding;
+// otherwise the listing goroutine might leak.
+func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
+	if opts.WithVersions {
+		return c.listObjectVersions(ctx, bucketName, opts)
+	}
+
+	// Use the legacy list objects v1 API.
+	if opts.UseV1 {
+		return c.listObjects(ctx, bucketName, opts)
+	}
+
+	// Check whether this is a snowball region; if yes, ListObjectsV2 doesn't work, so fall back to listObjects (V1).
+	if location, ok := c.bucketLocCache.Get(bucketName); ok {
+		if location == "snowball" {
+			return c.listObjects(ctx, bucketName, opts)
+		}
+	}
+
+	return c.listObjectsV2(ctx, bucketName, opts)
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete objects matching the
+// objectPrefix in the specified bucket. If recursion is enabled
+// it also lists all subdirectories and their contents.
+//
+// The input parameters are bucketName, objectPrefix, and recursive.
+// If recursive is set to 'true' this function returns all
+// the multipart objects in the given bucket.
+//
+//	api := client.New(....)
+//	// Recursively list all objects in 'mytestbucket'
+//	recursive := true
+//	for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) {
+//		fmt.Println(message)
+//	}
+func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+	return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
+}
+
+// contextCanceled returns whether a context is canceled.
+func contextCanceled(ctx context.Context) bool {
+	select {
+	case <-ctx.Done():
+		return true
+	default:
+		return false
+	}
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
+	// Allocate channel for multipart uploads.
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+	// Delimiter is set to "/" by default.
+	delimiter := "/"
+	if recursive {
+		// If recursive do not delimit.
+		delimiter = ""
+	}
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	// Validate incoming object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+		defer func() {
+			if contextCanceled(ctx) {
+				objectMultipartStatCh <- ObjectMultipartInfo{
+					Err: ctx.Err(),
+				}
+			}
+			close(objectMultipartStatCh)
+		}()
+
+		// Object and upload ID marker for future requests.
+		var objectMarker string
+		var uploadIDMarker string
+		for {
+			// List all multipart uploads.
+			result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0)
+			if err != nil {
+				objectMultipartStatCh <- ObjectMultipartInfo{
+					Err: err,
+				}
+				return
+			}
+			objectMarker = result.NextKeyMarker
+			uploadIDMarker = result.NextUploadIDMarker
+
+			// Send all multipart uploads.
+			for _, obj := range result.Uploads {
+				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+				select {
+				// Send individual uploads here.
+				case objectMultipartStatCh <- obj:
+				// If the context is canceled.
+				case <-ctx.Done():
+					return
+				}
+			}
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				select {
+				// Send delimited prefixes here.
+				case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}:
+				// If context is canceled.
+				case <-ctx.Done():
+					return
+				}
+			}
+			// Listing ends if the result is not truncated; return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectMultipartStatCh)
+	// return.
+	return objectMultipartStatCh
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+//   - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. +func (c *Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set uploads. + urlValues.Set("uploads", "") + // Set object key marker. + if keyMarker != "" { + urlValues.Set("key-marker", keyMarker) + } + // Set upload id marker. + if uploadIDMarker != "" { + urlValues.Set("upload-id-marker", uploadIDMarker) + } + + // Set object prefix, prefix value to be set to empty is okay. + urlValues.Set("prefix", prefix) + + // Set delimiter, delimiter value to be set to empty is okay. + urlValues.Set("delimiter", delimiter) + + // Always set encoding-type + urlValues.Set("encoding-type", "url") + + // maxUploads should be 1000 or less. + if maxUploads > 0 { + // Set max-uploads. + urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) + } + + // Execute GET on bucketName to list multipart uploads. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListMultipartUploadsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + // Decode response body. + listMultipartUploadsResult := ListMultipartUploadsResult{} + err = xmlDecoder(resp.Body, &listMultipartUploadsResult) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + + for i, obj := range listMultipartUploadsResult.Uploads { + listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + for i, obj := range listMultipartUploadsResult.CommonPrefixes { + listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) + if err != nil { + return listMultipartUploadsResult, err + } + } + + return listMultipartUploadsResult, nil +} + +// listObjectParts list all object parts recursively. +// +//lint:ignore U1000 Keep this around +func (c *Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { + // Part number marker for the next batch of request. + var nextPartNumberMarker int + partsInfo = make(map[int]ObjectPart) + for { + // Get list of uploaded parts a maximum of 1000 per request. 
+ listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) + if err != nil { + return nil, err + } + // Append to parts info. + for _, part := range listObjPartsResult.ObjectParts { + // Trim off the odd double quotes from ETag in the beginning and end. + part.ETag = trimEtag(part.ETag) + partsInfo[part.PartNumber] = part + } + // Keep part number marker, for the next iteration. + nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker + // Listing ends result is not truncated, return right here. + if !listObjPartsResult.IsTruncated { + break + } + } + + // Return all the parts. + return partsInfo, nil +} + +// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. +func (c *Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { + var uploadIDs []string + // Make list incomplete uploads recursive. + isRecursive := true + // List all incomplete uploads. + for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { + if mpUpload.Err != nil { + return nil, mpUpload.Err + } + if objectName == mpUpload.Key { + uploadIDs = append(uploadIDs, mpUpload.UploadID) + } + } + // Return the latest upload id. + return uploadIDs, nil +} + +// listObjectPartsQuery (List Parts query) +// - lists some or all (up to 1000) parts that have been uploaded +// for a specific multipart upload +// +// You can use the request parameters as selection criteria to return +// a subset of the uploads in a bucket, request parameters :- +// --------- +// ?part-number-marker - Specifies the part after which listing should +// begin. +// ?max-parts - Maximum parts to be listed per request. +func (c *Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number marker. + urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // maxParts should be 1000 or less. + if maxParts > 0 { + // Set max parts. + urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) + } + + // Execute GET on objectName to get list of parts. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListObjectPartsResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode list object parts XML. 
+ listObjectPartsResult := ListObjectPartsResult{} + err = xmlDecoder(resp.Body, &listObjectPartsResult) + if err != nil { + return listObjectPartsResult, err + } + return listObjectPartsResult, nil +} + +// Decode an S3 object name according to the encoding type +func decodeS3Name(name, encodingType string) (string, error) { + switch encodingType { + case "url": + return url.QueryUnescape(name) + default: + return name, nil + } +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go new file mode 100644 index 000000000..0c027d550 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go @@ -0,0 +1,176 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// objectLegalHold - object legal hold specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html +type objectLegalHold struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"LegalHold"` + Status LegalHoldStatus `xml:"Status,omitempty"` +} + +// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call +type PutObjectLegalHoldOptions struct { + VersionID string + Status *LegalHoldStatus +} + +// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call +type GetObjectLegalHoldOptions struct { + VersionID string +} + +// LegalHoldStatus - object legal hold status. +type LegalHoldStatus string + +const ( + // LegalHoldEnabled indicates legal hold is enabled + LegalHoldEnabled LegalHoldStatus = "ON" + + // LegalHoldDisabled indicates legal hold is disabled + LegalHoldDisabled LegalHoldStatus = "OFF" +) + +func (r LegalHoldStatus) String() string { + return string(r) +} + +// IsValid - check whether this legal hold status is valid or not. +func (r LegalHoldStatus) IsValid() bool { + return r == LegalHoldEnabled || r == LegalHoldDisabled +} + +func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { + if status == nil { + return nil, fmt.Errorf("Status not set") + } + if !status.IsValid() { + return nil, fmt.Errorf("invalid legal hold status `%v`", status) + } + legalHold := &objectLegalHold{ + Status: *status, + } + return legalHold, nil +} + +// PutObjectLegalHold : sets object legal hold for a given object and versionID. +func (c *Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. 
+ urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + lh, err := newObjectLegalHold(opts.Status) + if err != nil { + return err + } + + lhData, err := xml.Marshal(lh) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: bytes.NewReader(lhData), + contentLength: int64(len(lhData)), + contentMD5Base64: sumMD5Base64(lhData), + contentSHA256Hex: sum256Hex(lhData), + } + + // Execute PUT Object Legal Hold. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectLegalHold gets legal-hold status of given object. +func (c *Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + urlValues := make(url.Values) + urlValues.Set("legal-hold", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return nil, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + lh := &objectLegalHold{} + if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { + return nil, err + } + + return &lh.Status, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/vendor/github.com/minio/minio-go/v7/api-object-lock.go new file mode 100644 index 000000000..f0a439853 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-object-lock.go @@ -0,0 +1,241 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// RetentionMode - object retention mode. +type RetentionMode string + +const ( + // Governance - governance mode. + Governance RetentionMode = "GOVERNANCE" + + // Compliance - compliance mode. + Compliance RetentionMode = "COMPLIANCE" +) + +func (r RetentionMode) String() string { + return string(r) +} + +// IsValid - check whether this retention mode is valid or not. 
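Putting the two legal-hold calls above together, a hedged round-trip sketch; the bucket and object names are placeholders, and the bucket must have been created with object locking enabled or the server rejects the calls:

package example

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
)

// toggleLegalHold enables a legal hold on an object and reads it back.
func toggleLegalHold(ctx context.Context, client *minio.Client) error {
	status := minio.LegalHoldEnabled
	err := client.PutObjectLegalHold(ctx, "mybucket", "myobject",
		minio.PutObjectLegalHoldOptions{Status: &status})
	if err != nil {
		return err
	}
	got, err := client.GetObjectLegalHold(ctx, "mybucket", "myobject",
		minio.GetObjectLegalHoldOptions{})
	if err != nil {
		return err
	}
	fmt.Println("legal hold is now:", *got) // prints "ON"
	return nil
}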
+func (r RetentionMode) IsValid() bool { + return r == Governance || r == Compliance +} + +// ValidityUnit - retention validity unit. +type ValidityUnit string + +const ( + // Days - denotes no. of days. + Days ValidityUnit = "DAYS" + + // Years - denotes no. of years. + Years ValidityUnit = "YEARS" +) + +func (unit ValidityUnit) String() string { + return string(unit) +} + +// IsValid - check whether this validity unit is valid or not. +func (unit ValidityUnit) isValid() bool { + return unit == Days || unit == Years +} + +// Retention - bucket level retention configuration. +type Retention struct { + Mode RetentionMode + Validity time.Duration +} + +func (r Retention) String() string { + return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) +} + +// IsEmpty - returns whether retention is empty or not. +func (r Retention) IsEmpty() bool { + return r.Mode == "" || r.Validity == 0 +} + +// objectLockConfig - object lock configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html +type objectLockConfig struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"ObjectLockConfiguration"` + ObjectLockEnabled string `xml:"ObjectLockEnabled"` + Rule *struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + } `xml:"Rule,omitempty"` +} + +func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { + config := &objectLockConfig{ + ObjectLockEnabled: "Enabled", + } + + if mode != nil && validity != nil && unit != nil { + if !mode.IsValid() { + return nil, fmt.Errorf("invalid retention mode `%v`", mode) + } + + if !unit.isValid() { + return nil, fmt.Errorf("invalid validity unit `%v`", unit) + } + + config.Rule = &struct { + DefaultRetention struct { + Mode RetentionMode `xml:"Mode"` + Days *uint `xml:"Days"` + Years *uint `xml:"Years"` + } `xml:"DefaultRetention"` + }{} + + config.Rule.DefaultRetention.Mode = *mode + if *unit == Days { + config.Rule.DefaultRetention.Days = validity + } else { + config.Rule.DefaultRetention.Years = validity + } + + return config, nil + } + + if mode == nil && validity == nil && unit == nil { + return config, nil + } + + return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") +} + +// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. +func (c *Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("object-lock", "") + + config, err := newObjectLockConfig(mode, validity, unit) + if err != nil { + return err + } + + configData, err := xml.Marshal(config) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(configData), + contentLength: int64(len(configData)), + contentMD5Base64: sumMD5Base64(configData), + contentSHA256Hex: sum256Hex(configData), + } + + // Execute PUT bucket object lock configuration. 
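As newObjectLockConfig above enforces, mode, validity, and unit must be either all set or all nil. A minimal sketch of applying a bucket-default retention rule; the bucket name is a placeholder and the bucket is assumed to be lock-enabled:

package example

import (
	"context"

	"github.com/minio/minio-go/v7"
)

// setDefaultRetention applies a 30-day GOVERNANCE default retention
// to a lock-enabled bucket.
func setDefaultRetention(ctx context.Context, client *minio.Client) error {
	mode := minio.Governance
	validity := uint(30)
	unit := minio.Days
	return client.SetBucketObjectLockConfig(ctx, "mybucket", &mode, &validity, &unit)
}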
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// GetObjectLockConfig gets the object lock configuration of the given bucket.
+func (c *Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", nil, nil, nil, err
+	}
+
+	urlValues := make(url.Values)
+	urlValues.Set("object-lock", "")
+
+	// Execute GET on the bucket to fetch its object lock configuration.
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return "", nil, nil, nil, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	config := &objectLockConfig{}
+	if err = xml.NewDecoder(resp.Body).Decode(config); err != nil {
+		return "", nil, nil, nil, err
+	}
+
+	if config.Rule != nil {
+		mode = &config.Rule.DefaultRetention.Mode
+		if config.Rule.DefaultRetention.Days != nil {
+			validity = config.Rule.DefaultRetention.Days
+			days := Days
+			unit = &days
+		} else {
+			validity = config.Rule.DefaultRetention.Years
+			years := Years
+			unit = &years
+		}
+		return config.ObjectLockEnabled, mode, validity, unit, nil
+	}
+	return config.ObjectLockEnabled, nil, nil, nil, nil
+}
+
+// GetBucketObjectLockConfig gets the object lock configuration of the given bucket.
+func (c *Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) {
+	_, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName)
+	return mode, validity, unit, err
+}
+
+// SetObjectLockConfig sets the object lock configuration in the given bucket;
+// mode, validity, and unit must either all be set or all be nil.
+func (c *Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error {
+	return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
new file mode 100644
index 000000000..b29cb1f8d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-retention.go
@@ -0,0 +1,165 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// objectRetention - object retention specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
+type objectRetention struct {
+	XMLNS           string        `xml:"xmlns,attr,omitempty"`
+	XMLName         xml.Name      `xml:"Retention"`
+	Mode            RetentionMode `xml:"Mode,omitempty"`
+	RetainUntilDate *time.Time    `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"`
+}
+
+func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) {
+	objectRetention := &objectRetention{}
+
+	if date != nil && !date.IsZero() {
+		objectRetention.RetainUntilDate = date
+	}
+	if mode != nil {
+		if !mode.IsValid() {
+			return nil, fmt.Errorf("invalid retention mode `%v`", mode)
+		}
+		objectRetention.Mode = *mode
+	}
+
+	return objectRetention, nil
+}
+
+// PutObjectRetentionOptions represents options specified by the user for the PutObjectRetention call.
+type PutObjectRetentionOptions struct {
+	GovernanceBypass bool
+	Mode             *RetentionMode
+	RetainUntilDate  *time.Time
+	VersionID        string
+}
+
+// PutObjectRetention sets object retention for a given object and versionID.
+func (c *Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("retention", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+
+	retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate)
+	if err != nil {
+		return err
+	}
+
+	retentionData, err := xml.Marshal(retention)
+	if err != nil {
+		return err
+	}
+
+	// Build headers.
+	headers := make(http.Header)
+
+	if opts.GovernanceBypass {
+		// Set the bypass governance retention header
+		headers.Set(amzBypassGovernance, "true")
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      bytes.NewReader(retentionData),
+		contentLength:    int64(len(retentionData)),
+		contentMD5Base64: sumMD5Base64(retentionData),
+		contentSHA256Hex: sum256Hex(retentionData),
+		customHeader:     headers,
+	}
+
+	// Execute PUT Object Retention.
+	resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	return nil
+}
+
+// GetObjectRetention gets the retention of the given object.
+func (c *Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, nil, err
+	}
+
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, nil, err
+	}
+	urlValues := make(url.Values)
+	urlValues.Set("retention", "")
+	if versionID != "" {
+		urlValues.Set("versionId", versionID)
+	}
+	// Execute GET on the object to fetch its retention configuration.
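+	// Illustration (editor's note, not upstream source): the response body
+	// decodes into the objectRetention type above, e.g.:
+	//
+	//   <Retention><Mode>GOVERNANCE</Mode><RetainUntilDate>2020-01-01T00:00:00Z</RetainUntilDate></Retention>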
+	resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	retention := &objectRetention{}
+	if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil {
+		return nil, nil, err
+	}
+
+	return &retention.Mode, retention.RetainUntilDate, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
new file mode 100644
index 000000000..6623e262a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-object-tagging.go
@@ -0,0 +1,177 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// PutObjectTaggingOptions holds an object version id
+// to update tag(s) of a specific object version
+type PutObjectTaggingOptions struct {
+	VersionID string
+	Internal  AdvancedObjectTaggingOptions
+}
+
+// AdvancedObjectTaggingOptions for internal use by MinIO server - not intended for client use.
+type AdvancedObjectTaggingOptions struct {
+	ReplicationProxyRequest string
+}
+
+// PutObjectTagging replaces or creates object tag(s) and can target
+// a specific object version in a versioned bucket.
+func (c *Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("tagging", "")
+
+	if opts.VersionID != "" {
+		urlValues.Set("versionId", opts.VersionID)
+	}
+	headers := make(http.Header, 0)
+	if opts.Internal.ReplicationProxyRequest != "" {
+		headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest)
+	}
+	reqBytes, err := xml.Marshal(otags)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      bytes.NewReader(reqBytes),
+		contentLength:    int64(len(reqBytes)),
+		contentMD5Base64: sumMD5Base64(reqBytes),
+		customHeader:     headers,
+	}
+
+	// Execute PUT to set object tagging.
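+	// Hedged usage sketch (editor's note; the tags helper is assumed from
+	// pkg/tags and is not shown in this file):
+	//
+	//   t, err := tags.NewTags(map[string]string{"team": "eng"}, true)
+	//   if err == nil {
+	//       err = c.PutObjectTagging(ctx, "my-bucket", "my-object", t, PutObjectTaggingOptions{})
+	//   }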
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return nil +} + +// GetObjectTaggingOptions holds the object version ID +// to fetch the tagging key/value pairs +type GetObjectTaggingOptions struct { + VersionID string + Internal AdvancedObjectTaggingOptions +} + +// GetObjectTagging fetches object tag(s) with options to target +// a specific object version in a versioned bucket. +func (c *Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + headers := make(http.Header, 0) + if opts.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) + } + // Execute GET on object to get object tag(s) + resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: headers, + }) + + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return tags.ParseObjectXML(resp.Body) +} + +// RemoveObjectTaggingOptions holds the version id of the object to remove +type RemoveObjectTaggingOptions struct { + VersionID string + Internal AdvancedObjectTaggingOptions +} + +// RemoveObjectTagging removes object tag(s) with options to control a specific object +// version in a versioned bucket +func (c *Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("tagging", "") + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + headers := make(http.Header, 0) + if opts.Internal.ReplicationProxyRequest != "" { + headers.Set(minIOBucketReplicationProxyRequest, opts.Internal.ReplicationProxyRequest) + } + // Execute DELETE on object to remove object tag(s) + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: headers, + }) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + // S3 returns "204 No content" after Object tag deletion. + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go new file mode 100644 index 000000000..9e85f8181 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go @@ -0,0 +1,228 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+	"github.com/minio/minio-go/v7/pkg/signer"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// The expiry can be at most 7 days (604800 seconds) and at least 1 second.
+func (c *Client) presignURL(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
+	// Input validation.
+	if method == "" {
+		return nil, errInvalidArgument("method cannot be empty.")
+	}
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+	if err = isValidExpiry(expires); err != nil {
+		return nil, err
+	}
+
+	// Convert expires into seconds.
+	expireSeconds := int64(expires / time.Second)
+	reqMetadata := requestMetadata{
+		presignURL:         true,
+		bucketName:         bucketName,
+		objectName:         objectName,
+		expires:            expireSeconds,
+		queryValues:        reqParams,
+		extraPresignHeader: extraHeaders,
+	}
+
+	// Instantiate a new request.
+	// Since expires is set, newRequest will presign the request.
+	var req *http.Request
+	if req, err = c.newRequest(ctx, method, reqMetadata); err != nil {
+		return nil, err
+	}
+	return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access object data
+// without credentials. The URL can have an expiry of at most 7 days and
+// at least 1 second. Additionally, you can override a set of response
+// headers using the query parameters.
+func (c *Client) PresignedGetObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams, nil)
+}
+
+// PresignedHeadObject - Returns a presigned URL to access object metadata
+// without credentials. The URL can have an expiry of at most 7 days and
+// at least 1 second. Additionally, you can override a set of response
+// headers using the query parameters.
+func (c *Client) PresignedHeadObject(ctx context.Context, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams, nil)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. The URL can have an expiry of at most 7 days and
+// at least 1 second.
+func (c *Client) PresignedPutObject(ctx context.Context, bucketName, objectName string, expires time.Duration) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil, nil)
+}
+
+// PresignHeader - similar to Presign() but allows including HTTP headers that
+// will be used to build the signature. The request using the resulting URL
+// will need to have the exact same headers added for signature validation to
+// pass.
+//
+// FIXME: The extra header parameter should be included in Presign() in the next
+// major version bump, and this function should then be deprecated.
+func (c *Client) PresignHeader(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (u *url.URL, err error) {
+	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)
+}
+
+// Presign - returns a presigned URL for any http method of your choice along
+// with custom request params and extra signed headers. The URL can have an
+// expiry of at most 7 days and at least 1 second.
+func (c *Client) Presign(ctx context.Context, method, bucketName, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams, nil)
+}
+
+// PresignedPostPolicy - Returns a POST URL and the form data needed to upload an object.
+func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+	// Validate input arguments.
+	if p.expiration.IsZero() {
+		return nil, nil, errors.New("Expiration time must be specified")
+	}
+	if _, ok := p.formData["key"]; !ok {
+		return nil, nil, errors.New("object key must be specified")
+	}
+	if _, ok := p.formData["bucket"]; !ok {
+		return nil, nil, errors.New("bucket name must be specified")
+	}
+
+	bucketName := p.formData["bucket"]
+	// Fetch the bucket location.
+	location, err := c.getBucketLocation(ctx, bucketName)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
+
+	u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get credentials from the configured credentials provider.
+	credValues, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var (
+		signerType      = credValues.SignerType
+		sessionToken    = credValues.SessionToken
+		accessKeyID     = credValues.AccessKeyID
+		secretAccessKey = credValues.SecretAccessKey
+	)
+
+	if signerType.IsAnonymous() {
+		return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials")
+	}
+
+	// Keep time.
+	t := time.Now().UTC()
+	// For signature version '2' handle here.
+	if signerType.IsV2() {
+		policyBase64 := p.base64()
+		p.formData["policy"] = policyBase64
+		// For Google endpoint set this value to be 'GoogleAccessId'.
+		if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+			p.formData["GoogleAccessId"] = accessKeyID
+		} else {
+			// For all other endpoints set this value to be 'AWSAccessKeyId'.
+			p.formData["AWSAccessKeyId"] = accessKeyID
+		}
+		// Sign the policy.
+		p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
+		return u, p.formData, nil
+	}
+
+	// Add date policy.
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-date",
+		value:     t.Format(iso8601DateFormat),
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	// Add algorithm policy.
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-algorithm",
+		value:     signV4Algorithm,
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	// Add a credential policy.
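+	// The credential scope below takes the standard SigV4 form
+	// "<accessKeyID>/<yyyymmdd>/<location>/s3/aws4_request".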
+	credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3)
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-credential",
+		value:     credential,
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	if sessionToken != "" {
+		if err = p.addNewPolicy(policyCondition{
+			matchType: "eq",
+			condition: "$x-amz-security-token",
+			value:     sessionToken,
+		}); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Get base64 encoded policy.
+	policyBase64 := p.base64()
+
+	// Fill in the form data.
+	p.formData["policy"] = policyBase64
+	p.formData["x-amz-algorithm"] = signV4Algorithm
+	p.formData["x-amz-credential"] = credential
+	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+	if sessionToken != "" {
+		p.formData["x-amz-security-token"] = sessionToken
+	}
+	p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
+	return u, p.formData, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
new file mode 100644
index 000000000..737666937
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-bucket.go
@@ -0,0 +1,123 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"net/http"
+
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// Bucket operations
+func (c *Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) {
+	// Validate the input arguments.
+	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
+		return err
+	}
+
+	err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking)
+	if err != nil && (opts.Region == "" || opts.Region == "us-east-1") {
+		if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" {
+			err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking)
+		}
+	}
+	return err
+}
+
+func (c *Client) doMakeBucket(ctx context.Context, bucketName, location string, objectLockEnabled bool) (err error) {
+	defer func() {
+		// Save the location into cache on a successful makeBucket response.
+		if err == nil {
+			c.bucketLocCache.Set(bucketName, location)
+		}
+	}()
+
+	// If location is empty, treat it as the default region 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+		// For custom-region clients, default to the
+		// custom region instead of 'us-east-1'.
+		if c.region != "" {
+			location = c.region
+		}
+	}
+	// PUT bucket request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:     bucketName,
+		bucketLocation: location,
+	}
+
+	if objectLockEnabled {
+		headers := make(http.Header)
+		headers.Add("x-amz-bucket-object-lock-enabled", "true")
+		reqMetadata.customHeader = headers
+	}
+
+	// If location is not 'us-east-1', create a bucket location config.
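+	// Illustration (editor's note): the marshaled body is the standard S3
+	// location constraint document, e.g. for 'eu-west-1':
+	//
+	//   <CreateBucketConfiguration><LocationConstraint>eu-west-1</LocationConstraint></CreateBucketConfiguration>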
+ if location != "us-east-1" && location != "" { + createBucketConfig := createBucketConfiguration{} + createBucketConfig.Location = location + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return err + } + reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) + reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) + reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) + reqMetadata.contentLength = int64(len(createBucketConfigBytes)) + } + + // Execute PUT to create a new bucket. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Success. + return nil +} + +// MakeBucketOptions holds all options to tweak bucket creation +type MakeBucketOptions struct { + // Bucket location + Region string + // Enable object locking + ObjectLocking bool +} + +// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. +// +// Location is an optional argument, by default all buckets are +// created in US Standard Region. +// +// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html +// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations +func (c *Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { + return c.makeBucket(ctx, bucketName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go new file mode 100644 index 000000000..9ccb97cbb --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-common.go @@ -0,0 +1,149 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "math" + "os" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +const nullVersionID = "null" + +// Verify if reader is *minio.Object +func isObject(reader io.Reader) (ok bool) { + _, ok = reader.(*Object) + return +} + +// Verify if reader is a generic ReaderAt +func isReadAt(reader io.Reader) (ok bool) { + var v *os.File + v, ok = reader.(*os.File) + if ok { + // Stdin, Stdout and Stderr all have *os.File type + // which happen to also be io.ReaderAt compatible + // we need to add special conditions for them to + // be ignored by this function. + for _, f := range []string{ + "/dev/stdin", + "/dev/stdout", + "/dev/stderr", + } { + if f == v.Name() { + ok = false + break + } + } + } else { + _, ok = reader.(io.ReaderAt) + } + return +} + +// OptimalPartInfo - calculate the optimal part info for a given +// object size. 
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+//	maxPartsCount - 10000
+//	minPartSize - 16MiB
+//	maxMultipartPutObjectSize - 5TiB
+func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize, lastPartSize int64, err error) {
+	// If object size is '-1', set it to 5TiB.
+	var unknownSize bool
+	if objectSize == -1 {
+		unknownSize = true
+		objectSize = maxMultipartPutObjectSize
+	}
+
+	// Object size is larger than the supported maximum.
+	if objectSize > maxMultipartPutObjectSize {
+		err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+		return
+	}
+
+	var partSizeFlt float64
+	if configuredPartSize > 0 {
+		if int64(configuredPartSize) > objectSize {
+			err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "")
+			return
+		}
+
+		if !unknownSize {
+			if objectSize > (int64(configuredPartSize) * maxPartsCount) {
+				err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.")
+				return
+			}
+		}
+
+		if configuredPartSize < absMinPartSize {
+			err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.")
+			return
+		}
+
+		if configuredPartSize > maxPartSize {
+			err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.")
+			return
+		}
+
+		partSizeFlt = float64(configuredPartSize)
+		if unknownSize {
+			// If input has unknown size and part size is configured,
+			// keep it to the maximum allowed as per 10000 parts.
+			objectSize = int64(configuredPartSize) * maxPartsCount
+		}
+	} else {
+		configuredPartSize = minPartSize
+		// Use floats for part size for all calculations to avoid
+		// overflows during float64 to int64 conversions.
+		partSizeFlt = float64(objectSize / maxPartsCount)
+		partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize)
+	}
+
+	// Total parts count.
+	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+	// Part size.
+	partSize = int64(partSizeFlt)
+	// Last part size.
+	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+	return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// newUploadID - initiates a new multipart upload and fetches a fresh upload id.
+func (c *Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return "", err
+	}
+
+	// Initiate multipart upload for an object.
+	initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return "", err
+	}
+	return initMultipartUploadResult.UploadID, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
new file mode 100644
index 000000000..0ae9142e1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
@@ -0,0 +1,164 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2023 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+)
+
+// PutObjectFanOutEntry is the per-object-entry fan-out metadata.
+type PutObjectFanOutEntry struct {
+	Key                string            `json:"key"`
+	UserMetadata       map[string]string `json:"metadata,omitempty"`
+	UserTags           map[string]string `json:"tags,omitempty"`
+	ContentType        string            `json:"contentType,omitempty"`
+	ContentEncoding    string            `json:"contentEncoding,omitempty"`
+	ContentDisposition string            `json:"contentDisposition,omitempty"`
+	ContentLanguage    string            `json:"contentLanguage,omitempty"`
+	CacheControl       string            `json:"cacheControl,omitempty"`
+	Retention          RetentionMode     `json:"retention,omitempty"`
+	RetainUntilDate    *time.Time        `json:"retainUntil,omitempty"`
+}
+
+// PutObjectFanOutRequest is the request structure sent to the server
+// to fan out the stream to multiple objects.
+type PutObjectFanOutRequest struct {
+	Entries  []PutObjectFanOutEntry
+	Checksum Checksum
+	SSE      encrypt.ServerSide
+}
+
+// PutObjectFanOutResponse is the response structure returned by the server
+// for each object fan-out key, upon success or failure. Additionally, this
+// response carries the ETag, VersionID, and LastModified of each fanned-out
+// object.
+type PutObjectFanOutResponse struct {
+	Key          string     `json:"key"`
+	ETag         string     `json:"etag,omitempty"`
+	VersionID    string     `json:"versionId,omitempty"`
+	LastModified *time.Time `json:"lastModified,omitempty"`
+	Error        string     `json:"error,omitempty"`
+}
+
+// PutObjectFanOut is a variant of PutObject: instead of writing a single
+// object from a single stream, multiple objects are written, as defined by
+// the list of PutObjectFanOutEntry values in PutObjectFanOutRequest. Each
+// entry carries an object key name and its relevant metadata, if any. `Key`
+// is mandatory; the other options in each entry are optional.
+func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData io.Reader, fanOutReq PutObjectFanOutRequest) ([]PutObjectFanOutResponse, error) {
+	if len(fanOutReq.Entries) == 0 {
+		return nil, errInvalidArgument("fan out requests cannot be empty")
+	}
+
+	policy := NewPostPolicy()
+	policy.SetBucket(bucket)
+	policy.SetKey(strconv.FormatInt(time.Now().UnixNano(), 16))
+
+	// Expires in 15 minutes.
+	policy.SetExpires(time.Now().UTC().Add(15 * time.Minute))
+
+	// Set encryption headers if any.
+	policy.SetEncryption(fanOutReq.SSE)
+
+	// Set checksum headers if any.
+ policy.SetChecksum(fanOutReq.Checksum) + + url, formData, err := c.PresignedPostPolicy(ctx, policy) + if err != nil { + return nil, err + } + + r, w := io.Pipe() + + req, err := http.NewRequest(http.MethodPost, url.String(), r) + if err != nil { + w.Close() + return nil, err + } + + var b strings.Builder + enc := json.NewEncoder(&b) + for _, req := range fanOutReq.Entries { + if req.Key == "" { + w.Close() + return nil, errors.New("PutObjectFanOutRequest.Key is mandatory and cannot be empty") + } + if err = enc.Encode(&req); err != nil { + w.Close() + return nil, err + } + } + + mwriter := multipart.NewWriter(w) + req.Header.Add("Content-Type", mwriter.FormDataContentType()) + + go func() { + defer w.Close() + defer mwriter.Close() + + for k, v := range formData { + if err := mwriter.WriteField(k, v); err != nil { + return + } + } + + if err := mwriter.WriteField("x-minio-fanout-list", b.String()); err != nil { + return + } + + mw, err := mwriter.CreateFormFile("file", "fanout-content") + if err != nil { + return + } + + if _, err = io.Copy(mw, fanOutData); err != nil { + return + } + }() + + resp, err := c.do(req) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucket, "fanout-content") + } + + dec := json.NewDecoder(resp.Body) + fanOutResp := make([]PutObjectFanOutResponse, 0, len(fanOutReq.Entries)) + for dec.More() { + var m PutObjectFanOutResponse + if err = dec.Decode(&m); err != nil { + return nil, err + } + fanOutResp = append(fanOutResp, m) + } + + return fanOutResp, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go new file mode 100644 index 000000000..4d29dfc18 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go @@ -0,0 +1,64 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "mime" + "os" + "path/filepath" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c *Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return UploadInfo{}, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return UploadInfo{}, err + } + + // Save the file size. 
+ fileSize := fileStat.Size() + + // Set contentType based on filepath extension if not given or default + // value of "application/octet-stream" if the extension has no associated type. + if opts.ContentType == "" { + if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { + opts.ContentType = "application/octet-stream" + } + } + return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go new file mode 100644 index 000000000..a70cbea9e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -0,0 +1,465 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + + "github.com/google/uuid" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions, +) (info UploadInfo, err error) { + info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + } + return info, err +} + +func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + // Choose hash algorithms to be calculated by hashCopyN, + // avoid sha256 with non-v4 signature request or + // HTTPS connection. 
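+	// Editor's note: when neither md5 nor sha256 is selected (len(hashSums)
+	// is 0 below), part integrity is instead covered by the trailing
+	// opts.AutoChecksum (CRC-based) headers set further down.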
+ hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) + if len(hashSums) == 0 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + var crcBytes []byte + customHeader := make(http.Header) + crc := opts.AutoChecksum.Hasher() + for partNumber <= totalPartsCount { + length, rErr := readFull(reader, buf) + if rErr == io.EOF && partNumber > 1 { + break + } + + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return UploadInfo{}, rErr + } + + // Calculates hash sums while copying partSize bytes into cw. + for k, v := range hashAlgos { + v.Write(buf[:length]) + hashSums[k] = v.Sum(nil) + v.Close() + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Checksums.. + var ( + md5Base64 string + sha256Hex string + ) + + if hashSums["md5"] != nil { + md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) + } + if hashSums["sha256"] != nil { + sha256Hex = hex.EncodeToString(hashSums["sha256"]) + } + if len(hashSums) == 0 { + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) + } + + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + // Proceed to upload the part. + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rErr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + }) + } + + // Sort all completed parts. 
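+	// (S3 requires CompleteMultipartUpload to list parts in ascending
+	// PartNumber order, hence the sort.)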
+ sort.Sort(completedParts(complMultipartUpload.Parts)) + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + if len(crcBytes) > 0 { + // Add hash of hashes. + crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploads", "") + + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) + } + } + urlValues.Set("versionId", opts.Internal.SourceVersionID) + } + + // Set ContentType header. + customHeader := opts.Header() + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +type uploadPartParams struct { + bucketName string + objectName string + uploadID string + reader io.Reader + partNumber int + md5Base64 string + sha256Hex string + size int64 + sse encrypt.ServerSide + streamSha256 bool + customHeader http.Header + trailer http.Header +} + +// uploadPart - Uploads a part in a multipart upload. +func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { + return ObjectPart{}, err + } + if err := s3utils.CheckValidObjectName(p.objectName); err != nil { + return ObjectPart{}, err + } + if p.size > maxPartSize { + return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) + } + if p.size <= -1 { + return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) + } + if p.partNumber <= 0 { + return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") + } + if p.uploadID == "" { + return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) + // Set upload id. + urlValues.Set("uploadId", p.uploadID) + + // Set encryption headers, if any. + if p.customHeader == nil { + p.customHeader = make(http.Header) + } + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. + if p.sse != nil && p.sse.Type() == encrypt.SSEC { + p.sse.Marshal(p.customHeader) + } + + reqMetadata := requestMetadata{ + bucketName: p.bucketName, + objectName: p.objectName, + queryValues: urlValues, + customHeader: p.customHeader, + contentBody: p.reader, + contentLength: p.size, + contentMD5Base64: p.md5Base64, + contentSHA256Hex: p.sha256Hex, + streamSha256: p.streamSha256, + trailer: p.trailer, + } + + // Execute PUT on each part. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) + } + } + // Once successfully uploaded, return completed part. + h := resp.Header + objPart := ObjectPart{ + ChecksumCRC32: h.Get("x-amz-checksum-crc32"), + ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), + ChecksumSHA1: h.Get("x-amz-checksum-sha1"), + ChecksumSHA256: h.Get("x-amz-checksum-sha256"), + } + objPart.Size = p.size + objPart.PartNumber = p.partNumber + // Trim off the odd double quotes from ETag in the beginning and end. + objPart.ETag = trimEtag(h.Get("ETag")) + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, + complete completeMultipartUpload, opts PutObjectOptions, +) (UploadInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + // Marshal complete multipart body. 
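+	// Illustration (editor's note): the marshaled body follows the standard
+	// S3 shape:
+	//
+	//   <CompleteMultipartUpload>
+	//     <Part><PartNumber>1</PartNumber><ETag>"..."</ETag></Part>
+	//     ...
+	//   </CompleteMultipartUpload>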
+	completeMultipartUploadBytes, err := xml.Marshal(complete)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+
+	headers := opts.Header()
+	if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+		headers.Del(encrypt.SseKmsKeyID)          // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload
+		headers.Del(encrypt.SseGenericHeader)     // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload
+		headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload
+	}
+
+	// Instantiate the complete multipart upload request body.
+	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentBody:      completeMultipartUploadBuffer,
+		contentLength:    int64(len(completeMultipartUploadBytes)),
+		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
+		customHeader:     headers,
+	}
+
+	// Execute POST to complete multipart upload for an objectName.
+	resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// Read resp.Body into a []byte to check for an Error response inside the body
+	var b []byte
+	b, err = io.ReadAll(resp.Body)
+	if err != nil {
+		return UploadInfo{}, err
+	}
+	// Decode completed multipart upload response on success.
+	completeMultipartUploadResult := completeMultipartUploadResult{}
+	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
+	if err != nil {
+		// xml parsing failure due to the presence of an ill-formed xml fragment
+		return UploadInfo{}, err
+	} else if completeMultipartUploadResult.Bucket == "" {
+		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+		// of the members.
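+		// (S3 can return HTTP 200 for CompleteMultipartUpload and still
+		// deliver an error document in the body; the Bucket == "" zero-value
+		// is used here as the sentinel for that case.)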
+
+		// Decode completed multipart upload response on failure
+		completeMultipartUploadErr := ErrorResponse{}
+		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+		if err != nil {
+			// xml parsing failure due to the presence of an ill-formed xml fragment
+			return UploadInfo{}, err
+		}
+		return UploadInfo{}, completeMultipartUploadErr
+	}
+
+	// Extract the lifecycle expiry date and rule ID.
+	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))
+
+	return UploadInfo{
+		Bucket:           completeMultipartUploadResult.Bucket,
+		Key:              completeMultipartUploadResult.Key,
+		ETag:             trimEtag(completeMultipartUploadResult.ETag),
+		VersionID:        resp.Header.Get(amzVersionID),
+		Location:         completeMultipartUploadResult.Location,
+		Expiration:       expTime,
+		ExpirationRuleID: ruleID,
+
+		ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
+		ChecksumSHA1:   completeMultipartUploadResult.ChecksumSHA1,
+		ChecksumCRC32:  completeMultipartUploadResult.ChecksumCRC32,
+		ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
+	}, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
new file mode 100644
index 000000000..eef976c8c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -0,0 +1,831 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/google/uuid"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// The following code handles these types of readers:
+//
+//   - *minio.Object
+//   - Any reader which has a method 'ReadAt()'
+func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+	if opts.ConcurrentStreamParts && opts.NumThreads > 1 {
+		info, err = c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts)
+	} else if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
+		// If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader.
+		info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+	} else {
+		info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts)
+	}
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// Verify if multipart functionality is not available, if not
+		// fall back to single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + } + return info, err +} + +// uploadedPartRes - the response received from a part upload. +type uploadedPartRes struct { + Error error // Any error encountered while uploading the part. + PartNum int // Number of the part uploaded. + Size int64 // Size of the part uploaded. + Part ObjectPart +} + +type uploadPartReq struct { + PartNum int // Number of the part uploaded. + Part ObjectPart // Size of the part uploaded. +} + +// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB. +// Supports all readers which implements io.ReaderAt interface +// (ReadAt method). +// +// NOTE: This function is meant to be used for all readers which +// implement io.ReaderAt which allows us for resuming multipart +// uploads but reading at an offset, which would avoid re-read the +// data which was already uploaded. Internally this function uses +// temporary files for staging all the data, these temporary files are +// cleaned automatically when the caller i.e http client closes the +// stream after uploading all the contents successfully. +func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, + reader io.ReaderAt, size int64, opts PutObjectOptions, +) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + if opts.Checksum.IsSet() { + opts.AutoChecksum = opts.Checksum + } + withChecksum := c.trailingHeaderSupport + if withChecksum { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + } + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") + + // Aborts the multipart upload in progress, if the + // function returns any error, since we do not resume + // we should purge the parts which have been uploaded + // to relinquish storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Declare a channel that sends the next part number to be uploaded. + uploadPartsCh := make(chan uploadPartReq) + + // Declare a channel that sends back the response of a part upload. + uploadedPartsCh := make(chan uploadedPartRes) + + // Used for readability, lastPartNumber is always totalPartsCount. 
+ lastPartNumber := totalPartsCount
+
+ partitionCtx, partitionCancel := context.WithCancel(ctx)
+ defer partitionCancel()
+ // Send each part number to the channel to be processed.
+ go func() {
+ defer close(uploadPartsCh)
+
+ for p := 1; p <= totalPartsCount; p++ {
+ select {
+ case <-partitionCtx.Done():
+ return
+ case uploadPartsCh <- uploadPartReq{PartNum: p}:
+ }
+ }
+ }()
+
+ // Receive each part number from the channel, allowing up to getNumThreads() parallel uploads.
+ for w := 1; w <= opts.getNumThreads(); w++ {
+ go func(partSize int64) {
+ for {
+ var uploadReq uploadPartReq
+ var ok bool
+ select {
+ case <-ctx.Done():
+ return
+ case uploadReq, ok = <-uploadPartsCh:
+ if !ok {
+ return
+ }
+ // Each worker will draw from the part channel and upload in parallel.
+ }
+
+ // Calculate this part's read offset as a multiple of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+
+ // As a special case, if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = size - lastPartSize
+ partSize = lastPartSize
+ }
+
+ sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
+ trailer := make(http.Header, 1)
+ if withChecksum {
+ crc := opts.AutoChecksum.Hasher()
+ trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+ sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
+ trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash))
+ })
+ }
+
+ // Proceed to upload the part.
+ p := uploadPartParams{
+ bucketName: bucketName,
+ objectName: objectName,
+ uploadID: uploadID,
+ reader: sectionReader,
+ partNumber: uploadReq.PartNum,
+ size: partSize,
+ sse: opts.ServerSideEncryption,
+ streamSha256: !opts.DisableContentSha256,
+ sha256Hex: "",
+ trailer: trailer,
+ }
+ objPart, err := c.uploadPart(ctx, p)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ uploadReq.Part = objPart
+
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: objPart.Size,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
+ }
+ }
+ }(partSize)
+ }
+
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ select {
+ case <-ctx.Done():
+ return UploadInfo{}, ctx.Err()
+ case uploadRes := <-uploadedPartsCh:
+ if uploadRes.Error != nil {
+ return UploadInfo{}, uploadRes.Error
+ }
+
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: uploadRes.Part.ETag,
+ PartNumber: uploadRes.Part.PartNumber,
+ ChecksumCRC32: uploadRes.Part.ChecksumCRC32,
+ ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C,
+ ChecksumSHA1: uploadRes.Part.ChecksumSHA1,
+ ChecksumSHA256: uploadRes.Part.ChecksumSHA256,
+ })
+ }
+ }
+
+ // Verify if we uploaded all the data.
+ if totalUploadedSize != size {
+ return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
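+ //
+ // Aside (conceptual sketch, not upstream code): the "hash of hashes"
+ // computed below mirrors S3-style composite multipart checksums:
+ //
+ //	composite = checksum(checksum(part1) || checksum(part2) || ...)
+ //
+ // which is why the parts must be sorted by part number first.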
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+
+ opts = PutObjectOptions{
+ ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
+ }
+ if withChecksum {
+ // Add hash of hashes.
+ crc := opts.AutoChecksum.Hasher()
+ for _, part := range complMultipartUpload.Parts {
+ cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
+ if err == nil {
+ crc.Write(cs)
+ }
+ }
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ }
+
+ uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ uploadInfo.Size = totalUploadedSize
+ return uploadInfo, nil
+}
+
+func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return UploadInfo{}, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return UploadInfo{}, err
+ }
+
+ if opts.Checksum.IsSet() {
+ opts.AutoChecksum = opts.Checksum
+ opts.SendContentMd5 = false
+ }
+
+ if !opts.SendContentMd5 {
+ if opts.UserMetadata == nil {
+ opts.UserMetadata = make(map[string]string, 1)
+ }
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ // Initiate a new multipart request.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ // Aborts the multipart upload if the function returns
+ // any error, since we do not resume we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ customHeader := make(http.Header)
+ crc := opts.AutoChecksum.Hasher()
+ md5Hash := c.md5Hasher()
+ defer md5Hash.Close()
+
+ // Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+
+ // Avoid declaring variables in the for loop
+ var md5Base64 string
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ return UploadInfo{}, rerr
+ }
+
+ // Calculate md5sum.
+ if opts.SendContentMd5 {
+ md5Hash.Reset()
+ md5Hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+ } else {
+ // Add the configured checksum instead.
+ crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + hooked := newHook(bytes.NewReader(buf[:length]), opts.Progress) + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: hooked, partNumber: partNumber, md5Base64: md5Base64, size: partSize, sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + if len(crcBytes) > 0 { + // Add hash of hashes. + crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// putObjectMultipartStreamParallel uploads opts.NumThreads parts in parallel. +// This is expected to take opts.PartSize * opts.NumThreads * (GOGC / 100) bytes of buffer. +func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketName, objectName string, + reader io.Reader, opts PutObjectOptions, +) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + if opts.Checksum.IsSet() { + opts.SendContentMd5 = false + opts.AutoChecksum = opts.Checksum + } + if !opts.SendContentMd5 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + } + + // Cancel all when an error occurs. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Calculate the optimal parts info for a given size. 
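+ //
+ // Note (assumption about internals, for illustration only): with an
+ // unknown size of -1, OptimalPartInfo plans for the largest allowed
+ // object, roughly:
+ //
+ //	partSize ≈ ceil(maxMultipartPutObjectSize / maxPartsCount),
+ //	rounded up to a multiple of the minimum part size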
+ totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ // Initiate a new multipart request.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+ delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm")
+
+ // Aborts the multipart upload if the function returns
+ // any error, since we do not resume we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Create checksums
+ // CRC32C is ~50% faster on AMD64 @ 30GB/s
+ var crcBytes []byte
+ crc := opts.AutoChecksum.Hasher()
+
+ // Total data read and written to server. Should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ nBuffers := int64(opts.NumThreads)
+ bufs := make(chan []byte, nBuffers)
+ all := make([]byte, nBuffers*partSize)
+ for i := int64(0); i < nBuffers; i++ {
+ bufs <- all[i*partSize : i*partSize+partSize]
+ }
+
+ var wg sync.WaitGroup
+ var mu sync.Mutex
+ errCh := make(chan error, opts.NumThreads)
+
+ reader = newHook(reader, opts.Progress)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Proceed to upload the part.
+ var buf []byte
+ select {
+ case buf = <-bufs:
+ case err = <-errCh:
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, err
+ }
+
+ if int64(len(buf)) != partSize {
+ return UploadInfo{}, fmt.Errorf("read buffer of size %d is smaller than the expected partSize %d", len(buf), partSize)
+ }
+
+ length, rerr := readFull(reader, buf)
+ if rerr == io.EOF && partNumber > 1 {
+ // Done
+ break
+ }
+
+ if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF {
+ cancel()
+ wg.Wait()
+ return UploadInfo{}, rerr
+ }
+
+ // Prepare the part checksum header; md5sum, if requested, is
+ // calculated in the upload goroutine below.
+ customHeader := make(http.Header)
+ if !opts.SendContentMd5 {
+ // Add Checksum instead.
+ crc.Reset()
+ crc.Write(buf[:length])
+ cSum := crc.Sum(nil)
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
+ crcBytes = append(crcBytes, cSum...)
+ }
+
+ wg.Add(1)
+ go func(partNumber int) {
+ // Avoid declaring variables in the for loop
+ var md5Base64 string
+
+ if opts.SendContentMd5 {
+ md5Hash := c.md5Hasher()
+ md5Hash.Write(buf[:length])
+ md5Base64 = base64.StdEncoding.EncodeToString(md5Hash.Sum(nil))
+ md5Hash.Close()
+ }
+
+ defer wg.Done()
+ p := uploadPartParams{
+ bucketName: bucketName,
+ objectName: objectName,
+ uploadID: uploadID,
+ reader: bytes.NewReader(buf[:length]),
+ partNumber: partNumber,
+ md5Base64: md5Base64,
+ size: int64(length),
+ sse: opts.ServerSideEncryption,
+ streamSha256: !opts.DisableContentSha256,
+ customHeader: customHeader,
+ }
+ objPart, uerr := c.uploadPart(ctx, p)
+ if uerr != nil {
+ errCh <- uerr
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ mu.Lock()
+ partsInfo[partNumber] = objPart
+ mu.Unlock()
+
+ // Send buffer back so it can be reused.
+ bufs <- buf
+ }(partNumber)
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+ }
+ wg.Wait()
+
+ // Collect any error
+ select {
+ case err = <-errCh:
+ return UploadInfo{}, err
+ default:
+ }
+
+ // Complete multipart upload.
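+ //
+ // Aside (minimal sketch, not upstream code): the buffer recycling used
+ // above is a fixed-size pool built from a buffered channel:
+ //
+ //	bufs := make(chan []byte, n)
+ //	for i := 0; i < n; i++ { bufs <- make([]byte, partSize) }
+ //	buf := <-bufs // borrow before reading the next part
+ //	bufs <- buf   // returned by the goroutine once the part is uploaded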
+ var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + if len(crcBytes) > 0 { + // Add hash of hashes. + crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} + +// putObject special function used Google Cloud Storage. This special function +// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. +func (c *Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Size -1 is only supported on Google Cloud Storage, we error + // out in all other situations. + if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { + return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName) + } + + if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { + return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") + } + if opts.Checksum.IsSet() { + opts.SendContentMd5 = false + } + + var readSeeker io.Seeker + if size > 0 { + if isReadAt(reader) && !isObject(reader) { + seeker, ok := reader.(io.Seeker) + if ok { + offset, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + readSeeker = reader.(io.Seeker) + } + } + } + + var md5Base64 string + if opts.SendContentMd5 { + // Calculate md5sum. + hash := c.md5Hasher() + + if readSeeker != nil { + if _, err := io.Copy(hash, reader); err != nil { + return UploadInfo{}, err + } + // Seek back to beginning of io.NewSectionReader's offset. + _, err = readSeeker.Seek(0, io.SeekStart) + if err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } else { + // Create a buffer. + buf := make([]byte, size) + + length, err := readFull(reader, buf) + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { + return UploadInfo{}, err + } + + hash.Write(buf[:length]) + reader = bytes.NewReader(buf[:length]) + } + + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. 
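+ //
+ // Illustration (assumed hook behavior, hypothetical caller code):
+ // opts.Progress is an io.Reader whose Read is invoked with each chunk
+ // transferred, so a minimal byte counter could look like:
+ //
+ //	type progressCounter struct{ n int64 }
+ //
+ //	func (p *progressCounter) Read(b []byte) (int, error) {
+ //		atomic.AddInt64(&p.n, int64(len(b)))
+ //		return len(b), nil
+ //	}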
+ progressReader := newHook(reader, opts.Progress) + + // This function does not calculate sha256 and md5sum for payload. + // Execute put object. + return c.putObjectDo(ctx, bucketName, objectName, progressReader, md5Base64, "", size, opts) +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + // Set headers. + customHeader := opts.Header() + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + streamSha256: !opts.DisableContentSha256, + } + // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks. + addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure) + if opts.Checksum.IsSet() { + reqMetadata.addCrc = &opts.Checksum + } else if addCrc { + // If user has added checksums, don't add them ourselves. + for k := range opts.UserMetadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + addCrc = false + } + } + if addCrc { + opts.AutoChecksum.SetDefault(ChecksumCRC32C) + reqMetadata.addCrc = &opts.AutoChecksum + } + } + + if opts.Internal.SourceVersionID != "" { + if opts.Internal.SourceVersionID != nullVersionID { + if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { + return UploadInfo{}, errInvalidArgument(err.Error()) + } + } + urlValues := make(url.Values) + urlValues.Set("versionId", opts.Internal.SourceVersionID) + reqMetadata.queryValues = urlValues + } + + // Execute PUT an objectName. + resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) + defer closeResponse(resp) + if err != nil { + return UploadInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // extract lifecycle expiry date and rule ID + expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) + h := resp.Header + return UploadInfo{ + Bucket: bucketName, + Key: objectName, + ETag: trimEtag(h.Get("ETag")), + VersionID: h.Get(amzVersionID), + Size: size, + Expiration: expTime, + ExpirationRuleID: ruleID, + + // Checksum values + ChecksumCRC32: h.Get("x-amz-checksum-crc32"), + ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), + ChecksumSHA1: h.Get("x-amz-checksum-sha1"), + ChecksumSHA256: h.Get("x-amz-checksum-sha256"), + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go new file mode 100644 index 000000000..d769648a7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -0,0 +1,511 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "sort" + "time" + + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/net/http/httpguts" +) + +// ReplicationStatus represents replication status of object +type ReplicationStatus string + +const ( + // ReplicationStatusPending indicates replication is pending + ReplicationStatusPending ReplicationStatus = "PENDING" + // ReplicationStatusComplete indicates replication completed ok + ReplicationStatusComplete ReplicationStatus = "COMPLETED" + // ReplicationStatusFailed indicates replication failed + ReplicationStatusFailed ReplicationStatus = "FAILED" + // ReplicationStatusReplica indicates object is a replica of a source + ReplicationStatusReplica ReplicationStatus = "REPLICA" +) + +// Empty returns true if no replication status set. +func (r ReplicationStatus) Empty() bool { + return r == "" +} + +// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition +// implementation on MinIO server +type AdvancedPutOptions struct { + SourceVersionID string + SourceETag string + ReplicationStatus ReplicationStatus + SourceMTime time.Time + ReplicationRequest bool + RetentionTimestamp time.Time + TaggingTimestamp time.Time + LegalholdTimestamp time.Time + ReplicationValidityCheck bool +} + +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + UserTags map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + Expires time.Time + Mode RetentionMode + RetainUntilDate time.Time + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string + WebsiteRedirectLocation string + PartSize uint64 + LegalHold LegalHoldStatus + SendContentMd5 bool + DisableContentSha256 bool + DisableMultipart bool + + // AutoChecksum is the type of checksum that will be added if no other checksum is added, + // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type. + // If none is specified CRC32C is used, since it is generally the fastest. + AutoChecksum ChecksumType + + // Checksum will force a checksum of the specific type. + // This requires that the client was created with "TrailingHeaders:true" option, + // and that the destination server supports it. + // Unavailable with V2 signatures & Google endpoints. + // This will disable content MD5 checksums if set. + Checksum ChecksumType + + // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, + // fill them serially and upload them in parallel. + // This can be used for faster uploads on non-seekable or slow-to-seek input. + ConcurrentStreamParts bool + Internal AdvancedPutOptions + + customHeaders http.Header +} + +// SetMatchETag if etag matches while PUT MinIO returns an error +// this is a MinIO specific extension to support optimistic locking +// semantics. 
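+//
+// A hypothetical usage sketch (caller code, not part of this file):
+//
+//	opts := PutObjectOptions{}
+//	opts.SetMatchETag(currentETag) // PUT succeeds only if the object still has this ETag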
+func (opts *PutObjectOptions) SetMatchETag(etag string) { + if opts.customHeaders == nil { + opts.customHeaders = http.Header{} + } + if etag == "*" { + opts.customHeaders.Set("If-Match", "*") + } else { + opts.customHeaders.Set("If-Match", "\""+etag+"\"") + } +} + +// SetMatchETagExcept if etag does not match while PUT MinIO returns an +// error this is a MinIO specific extension to support optimistic locking +// semantics. +func (opts *PutObjectOptions) SetMatchETagExcept(etag string) { + if opts.customHeaders == nil { + opts.customHeaders = http.Header{} + } + if etag == "*" { + opts.customHeaders.Set("If-None-Match", "*") + } else { + opts.customHeaders.Set("If-None-Match", "\""+etag+"\"") + } +} + +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) + } else { + numThreads = totalWorkers + } + return +} + +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + contentType := opts.ContentType + if contentType == "" { + contentType = "application/octet-stream" + } + header.Set("Content-Type", contentType) + + if opts.ContentEncoding != "" { + header.Set("Content-Encoding", opts.ContentEncoding) + } + if opts.ContentDisposition != "" { + header.Set("Content-Disposition", opts.ContentDisposition) + } + if opts.ContentLanguage != "" { + header.Set("Content-Language", opts.ContentLanguage) + } + if opts.CacheControl != "" { + header.Set("Cache-Control", opts.CacheControl) + } + + if !opts.Expires.IsZero() { + header.Set("Expires", opts.Expires.UTC().Format(http.TimeFormat)) + } + + if opts.Mode != "" { + header.Set(amzLockMode, opts.Mode.String()) + } + + if !opts.RetainUntilDate.IsZero() { + header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339)) + } + + if opts.LegalHold != "" { + header.Set(amzLegalHoldHeader, opts.LegalHold.String()) + } + + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) + } + + if opts.StorageClass != "" { + header.Set(amzStorageClass, opts.StorageClass) + } + + if opts.WebsiteRedirectLocation != "" { + header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation) + } + + if !opts.Internal.ReplicationStatus.Empty() { + header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) + } + if !opts.Internal.SourceMTime.IsZero() { + header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano)) + } + if opts.Internal.SourceETag != "" { + header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) + } + if opts.Internal.ReplicationRequest { + header.Set(minIOBucketReplicationRequest, "true") + } + if opts.Internal.ReplicationValidityCheck { + header.Set(minIOBucketReplicationCheck, "true") + } + if !opts.Internal.LegalholdTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectLegalHoldTimestamp, opts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.RetentionTimestamp.IsZero() { + header.Set(minIOBucketReplicationObjectRetentionTimestamp, opts.Internal.RetentionTimestamp.Format(time.RFC3339Nano)) + } + if !opts.Internal.TaggingTimestamp.IsZero() { + header.Set(minIOBucketReplicationTaggingTimestamp, opts.Internal.TaggingTimestamp.Format(time.RFC3339Nano)) + } + + if len(opts.UserTags) != 0 { + 
header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags))
+ }
+
+ for k, v := range opts.UserMetadata {
+ if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
+ header.Set(k, v)
+ } else {
+ header.Set("x-amz-meta-"+k, v)
+ }
+ }
+
+ // Set any other additional custom headers.
+ for k, v := range opts.customHeaders {
+ header[k] = v
+ }
+
+ return
+}
+
+// validate() checks if the UserMetadata map has standard headers or invalid
+// header names/values and raises an error if so.
+func (opts PutObjectOptions) validate(c *Client) (err error) {
+ for k, v := range opts.UserMetadata {
+ if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
+ return errInvalidArgument(k + " unsupported user defined metadata name")
+ }
+ if !httpguts.ValidHeaderFieldValue(v) {
+ return errInvalidArgument(v + " unsupported user defined metadata value")
+ }
+ }
+ if opts.Mode != "" && !opts.Mode.IsValid() {
+ return errInvalidArgument(opts.Mode.String() + " unsupported retention mode")
+ }
+ if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
+ return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
+ }
+ if opts.Checksum.IsSet() {
+ switch {
+ case !c.trailingHeaderSupport:
+ return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled")
+ case c.overrideSignerType.IsV2():
+ return errInvalidArgument("Checksum cannot be used with v2 signatures")
+ case s3utils.IsGoogleEndpoint(*c.endpointURL):
+ return errInvalidArgument("Checksum cannot be used with GCS endpoints")
+ }
+ }
+
+ return nil
+}
+
+// completedParts is a collection of parts sortable by their part numbers.
+// Used for sorting the uploaded parts before completing the multipart request.
+type completedParts []CompletePart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+
+// PutObject creates an object in a bucket.
+//
+// You must have WRITE permissions on a bucket to create an object.
+//
+// - For size smaller than 16MiB, PutObject automatically does a
+// single atomic PUT operation.
+//
+// - For size larger than 16MiB, PutObject automatically does a
+// multipart upload operation.
+//
+// - For a size of -1, PutObject does a multipart Put operation
+// until the input stream reaches EOF. The maximum object size that can
+// be uploaded through this operation is 5TiB.
+//
+// WARNING: Passing a size of '-1' buffers data in memory, and these
+// buffers cannot be reused. For best results with PutObject(), always
+// pass the size.
+//
+// NOTE: Upon errors during upload, the multipart operation is entirely aborted.
+func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions,
+) (info UploadInfo, err error) {
+ if objectSize < 0 && opts.DisableMultipart {
+ return UploadInfo{}, errors.New("object size must be provided when multipart upload is disabled")
+ }
+
+ err = opts.validate(c)
+ if err != nil {
+ return UploadInfo{}, err
+ }
+
+ return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
+
+func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+ // Check for largest object size allowed.
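+ //
+ // Illustrative summary (not upstream documentation) of the dispatch
+ // below, assuming default options:
+ //
+ //	size < 0 (unknown)                  => streaming multipart upload until EOF
+ //	size <= partSize or DisableMultipart => single PUT (putObject)
+ //	Google Cloud Storage endpoint        => always single PUT (no S3 multipart)
+ //	otherwise                            => multipart stream upload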
+ if size > int64(maxMultipartPutObjectSize) { + return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + opts.AutoChecksum.SetDefault(ChecksumCRC32C) + + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + partSize := opts.PartSize + if opts.PartSize == 0 { + partSize = minPartSize + } + + if c.overrideSignerType.IsV2() { + if size >= 0 && size < int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) + } + + if size < 0 { + if opts.DisableMultipart { + return UploadInfo{}, errors.New("no length provided and multipart disabled") + } + if opts.ConcurrentStreamParts && opts.NumThreads > 1 { + return c.putObjectMultipartStreamParallel(ctx, bucketName, objectName, reader, opts) + } + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) + } + + if size <= int64(partSize) || opts.DisableMultipart { + return c.putObject(ctx, bucketName, objectName, reader, size, opts) + } + + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) +} + +func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return UploadInfo{}, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return UploadInfo{}, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) + if err != nil { + return UploadInfo{}, err + } + + if opts.Checksum.IsSet() { + opts.SendContentMd5 = false + opts.AutoChecksum = opts.Checksum + } + if !opts.SendContentMd5 { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return UploadInfo{}, err + } + delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + + // Create checksums + // CRC32C is ~50% faster on AMD64 @ 30GB/s + var crcBytes []byte + customHeader := make(http.Header) + crc := opts.AutoChecksum.Hasher() + + for partNumber <= totalPartsCount { + length, rerr := readFull(reader, buf) + if rerr == io.EOF && partNumber > 1 { + break + } + + if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { + return UploadInfo{}, rerr + } + + var md5Base64 string + if opts.SendContentMd5 { + // Calculate md5sum. 
+ hash := c.md5Hasher() + hash.Write(buf[:length]) + md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + hash.Close() + } else { + crc.Reset() + crc.Write(buf[:length]) + cSum := crc.Sum(nil) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) + crcBytes = append(crcBytes, cSum...) + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Proceed to upload the part. + p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} + objPart, uerr := c.uploadPart(ctx, p) + if uerr != nil { + return UploadInfo{}, uerr + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rerr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + + opts = PutObjectOptions{ + ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, + } + if len(crcBytes) > 0 { + // Add hash of hashes. + crc.Reset() + crc.Write(crcBytes) + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} + } + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) + if err != nil { + return UploadInfo{}, err + } + + uploadInfo.Size = totalUploadedSize + return uploadInfo, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go new file mode 100644 index 000000000..6b6559bf7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -0,0 +1,246 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2021 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/klauspost/compress/s2"
+)
+
+// SnowballOptions contains options for PutObjectsSnowball calls.
+type SnowballOptions struct {
+ // Opts is options applied to all objects.
+ Opts PutObjectOptions
+
+ // Processing options:
+
+ // InMemory specifies that all objects should be collected in memory
+ // before they are uploaded.
+ // If false, a temporary file will be created.
+ InMemory bool
+
+ // Compress enables content compression before upload.
+ // Compression will typically reduce memory and network usage.
+ // Compression can safely be enabled with MinIO hosts.
+ Compress bool
+
+ // SkipErrs, if enabled, will skip any errors encountered while reading
+ // the object content when creating the snowball archive.
+ SkipErrs bool
+}
+
+// SnowballObject contains information about a single object to be added to the snowball.
+type SnowballObject struct {
+ // Key is the destination key, including prefix.
+ Key string
+
+ // Size is the content size of this object.
+ Size int64
+
+ // ModTime to apply to the object.
+ // If ModTime is the zero value, the current time will be used.
+ ModTime time.Time
+
+ // Content of the object.
+ // Exactly 'Size' number of bytes must be provided.
+ Content io.Reader
+
+ // VersionID of the object; if empty, a new versionID will be generated.
+ VersionID string
+
+ // Headers contains more options for this object upload, the same as you
+ // would include in a regular PutObject operation, such as user metadata,
+ // content-disposition, and expires.
+ Headers http.Header
+
+ // Close will be called when an object has finished processing.
+ // Note that if PutObjectsSnowball returns because of an error,
+ // objects not consumed from the input will NOT have been closed.
+ // Leave as nil for no callback.
+ Close func()
+}
+
+type nopReadSeekCloser struct {
+ io.ReadSeeker
+}
+
+func (n nopReadSeekCloser) Close() error {
+ return nil
+}
+
+// This is available as io.ReadSeekCloser from go1.16
+type readSeekCloser interface {
+ io.Reader
+ io.Closer
+ io.Seeker
+}
+
+// PutObjectsSnowball will put multiple objects with a single put call.
+// A (compressed) TAR file will be created which will contain multiple objects.
+// The key for each object will be used for the destination in the specified bucket.
+// Total size should be < 5TB.
+// This function blocks until 'objs' is closed and the content has been uploaded.
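+//
+// A minimal usage sketch (illustrative; bucket and keys are hypothetical):
+//
+//	objs := make(chan SnowballObject, 2)
+//	go func() {
+//		defer close(objs)
+//		objs <- SnowballObject{Key: "a.txt", Size: 1, Content: strings.NewReader("A")}
+//		objs <- SnowballObject{Key: "b.txt", Size: 1, Content: strings.NewReader("B")}
+//	}()
+//	err := clnt.PutObjectsSnowball(ctx, "my-bucket", SnowballOptions{Compress: true}, objs)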
+func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { + err = opts.Opts.validate(&c) + if err != nil { + return err + } + var tmpWriter io.Writer + var getTmpReader func() (rc readSeekCloser, sz int64, err error) + if opts.InMemory { + b := bytes.NewBuffer(nil) + tmpWriter = b + getTmpReader = func() (readSeekCloser, int64, error) { + return nopReadSeekCloser{bytes.NewReader(b.Bytes())}, int64(b.Len()), nil + } + } else { + f, err := os.CreateTemp("", "s3-putsnowballobjects-*") + if err != nil { + return err + } + name := f.Name() + tmpWriter = f + var once sync.Once + defer once.Do(func() { + f.Close() + }) + defer os.Remove(name) + getTmpReader = func() (readSeekCloser, int64, error) { + once.Do(func() { + f.Close() + }) + f, err := os.Open(name) + if err != nil { + return nil, 0, err + } + st, err := f.Stat() + if err != nil { + return nil, 0, err + } + return f, st.Size(), nil + } + } + flush := func() error { return nil } + if !opts.Compress { + if !opts.InMemory { + // Insert buffer for writes. + buf := bufio.NewWriterSize(tmpWriter, 1<<20) + flush = buf.Flush + tmpWriter = buf + } + } else { + s2c := s2.NewWriter(tmpWriter, s2.WriterBetterCompression()) + flush = s2c.Close + defer s2c.Close() + tmpWriter = s2c + } + t := tar.NewWriter(tmpWriter) + +objectLoop: + for { + select { + case <-ctx.Done(): + return ctx.Err() + case obj, ok := <-objs: + if !ok { + break objectLoop + } + + closeObj := func() {} + if obj.Close != nil { + closeObj = obj.Close + } + + // Trim accidental slash prefix. + obj.Key = strings.TrimPrefix(obj.Key, "/") + header := tar.Header{ + Typeflag: tar.TypeReg, + Name: obj.Key, + Size: obj.Size, + ModTime: obj.ModTime, + Format: tar.FormatPAX, + } + if header.ModTime.IsZero() { + header.ModTime = time.Now().UTC() + } + + header.PAXRecords = make(map[string]string) + if obj.VersionID != "" { + header.PAXRecords["minio.versionId"] = obj.VersionID + } + for k, vals := range obj.Headers { + header.PAXRecords["minio.metadata."+k] = strings.Join(vals, ",") + } + + if err := t.WriteHeader(&header); err != nil { + closeObj() + return err + } + n, err := io.Copy(t, obj.Content) + if err != nil { + closeObj() + if opts.SkipErrs { + continue + } + return err + } + if n != obj.Size { + closeObj() + if opts.SkipErrs { + continue + } + return io.ErrUnexpectedEOF + } + closeObj() + } + } + // Flush tar + err = t.Flush() + if err != nil { + return err + } + // Flush compression + err = flush() + if err != nil { + return err + } + if opts.Opts.UserMetadata == nil { + opts.Opts.UserMetadata = map[string]string{} + } + opts.Opts.UserMetadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" + opts.Opts.DisableMultipart = true + rc, sz, err := getTmpReader() + if err != nil { + return err + } + defer rc.Close() + rand := c.random.Uint64() + _, err = c.PutObject(ctx, bucketName, fmt.Sprintf("snowball-upload-%x.tar", rand), rc, sz, opts.Opts) + return err +} diff --git a/vendor/github.com/minio/minio-go/v7/api-remove.go b/vendor/github.com/minio/minio-go/v7/api-remove.go new file mode 100644 index 000000000..d2e932923 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-remove.go @@ -0,0 +1,548 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +//revive:disable + +// Deprecated: BucketOptions will be renamed to RemoveBucketOptions in future versions. +type BucketOptions = RemoveBucketOptions + +//revive:enable + +// RemoveBucketOptions special headers to purge buckets, only +// useful when endpoint is MinIO +type RemoveBucketOptions struct { + ForceDelete bool +} + +// RemoveBucketWithOptions deletes the bucket name. +// +// All objects (including all object versions and delete markers) +// in the bucket will be deleted forcibly if bucket options set +// ForceDelete to 'true'. +func (c *Client) RemoveBucketWithOptions(ctx context.Context, bucketName string, opts RemoveBucketOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Build headers. + headers := make(http.Header) + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } + + // Execute DELETE on bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + return nil +} + +// RemoveBucket deletes the bucket name. +// +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. +func (c *Client) RemoveBucket(ctx context.Context, bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Execute DELETE on bucket. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Remove the location from cache on a successful delete. + c.bucketLocCache.Delete(bucketName) + + return nil +} + +// AdvancedRemoveOptions intended for internal use by replication +type AdvancedRemoveOptions struct { + ReplicationDeleteMarker bool + ReplicationStatus ReplicationStatus + ReplicationMTime time.Time + ReplicationRequest bool + ReplicationValidityCheck bool // check permissions +} + +// RemoveObjectOptions represents options specified by user for RemoveObject call +type RemoveObjectOptions struct { + ForceDelete bool + GovernanceBypass bool + VersionID string + Internal AdvancedRemoveOptions +} + +// RemoveObject removes an object from a bucket. +func (c *Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { + // Input validation. 
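+ //
+ // (Caller-side sketch with hypothetical values, not upstream code)
+ // Removing one version of an object while bypassing governance retention:
+ //
+ //	err := clnt.RemoveObject(ctx, "my-bucket", "my-object", RemoveObjectOptions{
+ //		VersionID:        versionID, // e.g. obtained from a versioned listing
+ //		GovernanceBypass: true,
+ //	})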
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + res := c.removeObject(ctx, bucketName, objectName, opts) + return res.Err +} + +func (c *Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) RemoveObjectResult { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + if opts.VersionID != "" { + urlValues.Set("versionId", opts.VersionID) + } + + // Build headers. + headers := make(http.Header) + + if opts.GovernanceBypass { + // Set the bypass goverenance retention header + headers.Set(amzBypassGovernance, "true") + } + if opts.Internal.ReplicationDeleteMarker { + headers.Set(minIOBucketReplicationDeleteMarker, "true") + } + if !opts.Internal.ReplicationMTime.IsZero() { + headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano)) + } + if !opts.Internal.ReplicationStatus.Empty() { + headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) + } + if opts.Internal.ReplicationRequest { + headers.Set(minIOBucketReplicationRequest, "true") + } + if opts.Internal.ReplicationValidityCheck { + headers.Set(minIOBucketReplicationCheck, "true") + } + if opts.ForceDelete { + headers.Set(minIOForceDelete, "true") + } + // Execute DELETE on objectName. + resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Hex: emptySHA256Hex, + queryValues: urlValues, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return RemoveObjectResult{Err: err} + } + if resp != nil { + // if some unexpected error happened and max retry is reached, we want to let client know + if resp.StatusCode != http.StatusNoContent { + err := httpRespToErrorResponse(resp, bucketName, objectName) + return RemoveObjectResult{Err: err} + } + } + + // DeleteObject always responds with http '204' even for + // objects which do not exist. So no need to handle them + // specifically. 
+ return RemoveObjectResult{ + ObjectName: objectName, + ObjectVersionID: opts.VersionID, + DeleteMarker: resp.Header.Get("x-amz-delete-marker") == "true", + DeleteMarkerVersionID: resp.Header.Get("x-amz-version-id"), + } +} + +// RemoveObjectError - container of Multi Delete S3 API error +type RemoveObjectError struct { + ObjectName string + VersionID string + Err error +} + +// RemoveObjectResult - container of Multi Delete S3 API result +type RemoveObjectResult struct { + ObjectName string + ObjectVersionID string + + DeleteMarker bool + DeleteMarkerVersionID string + + Err error +} + +// generateRemoveMultiObjects - generate the XML request for remove multi objects request +func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { + delObjects := []deleteObject{} + for _, obj := range objects { + delObjects = append(delObjects, deleteObject{ + Key: obj.Key, + VersionID: obj.VersionID, + }) + } + xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: false}) + return xmlBytes +} + +// processRemoveMultiObjectsResponse - parse the remove multi objects web service +// and return the success/failure result status for each object +func processRemoveMultiObjectsResponse(body io.Reader, resultCh chan<- RemoveObjectResult) { + // Parse multi delete XML response + rmResult := &deleteMultiObjectsResult{} + err := xmlDecoder(body, rmResult) + if err != nil { + resultCh <- RemoveObjectResult{ObjectName: "", Err: err} + return + } + + // Fill deletion that returned success + for _, obj := range rmResult.DeletedObjects { + resultCh <- RemoveObjectResult{ + ObjectName: obj.Key, + // Only filled with versioned buckets + ObjectVersionID: obj.VersionID, + DeleteMarker: obj.DeleteMarker, + DeleteMarkerVersionID: obj.DeleteMarkerVersionID, + } + } + + // Fill deletion that returned an error. + for _, obj := range rmResult.UnDeletedObjects { + // Version does not exist is not an error ignore and continue. + switch obj.Code { + case "InvalidArgument", "NoSuchVersion": + continue + } + resultCh <- RemoveObjectResult{ + ObjectName: obj.Key, + ObjectVersionID: obj.VersionID, + Err: ErrorResponse{ + Code: obj.Code, + Message: obj.Message, + }, + } + } +} + +// RemoveObjectsOptions represents options specified by user for RemoveObjects call +type RemoveObjectsOptions struct { + GovernanceBypass bool +} + +// RemoveObjects removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove failures are sent back via error channel. +func (c *Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { + errorCh := make(chan RemoveObjectError, 1) + + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: err, + } + return errorCh + } + // Validate objects channel to be properly allocated. 
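+ //
+ // (Illustrative caller sketch, not upstream code) RemoveObjects is
+ // typically fed by a producer goroutine and drained for errors:
+ //
+ //	objectsCh := make(chan ObjectInfo)
+ //	go func() {
+ //		defer close(objectsCh)
+ //		for _, key := range keys { // keys is hypothetical
+ //			objectsCh <- ObjectInfo{Key: key}
+ //		}
+ //	}()
+ //	for e := range clnt.RemoveObjects(ctx, "my-bucket", objectsCh, RemoveObjectsOptions{}) {
+ //		log.Println("remove failed:", e.ObjectName, e.Err)
+ //	}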
+ if objectsCh == nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return errorCh + } + + resultCh := make(chan RemoveObjectResult, 1) + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + go func() { + defer close(errorCh) + for res := range resultCh { + // Send only errors to the error channel + if res.Err == nil { + continue + } + errorCh <- RemoveObjectError{ + ObjectName: res.ObjectName, + VersionID: res.ObjectVersionID, + Err: res.Err, + } + } + }() + + return errorCh +} + +// RemoveObjectsWithResult removes multiple objects from a bucket while +// it is possible to specify objects versions which are received from +// objectsCh. Remove results, successes and failures are sent back via +// RemoveObjectResult channel +func (c *Client) RemoveObjectsWithResult(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectResult { + resultCh := make(chan RemoveObjectResult, 1) + + // Validate if bucket name is valid. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: err, + } + return resultCh + } + // Validate objects channel to be properly allocated. + if objectsCh == nil { + defer close(resultCh) + resultCh <- RemoveObjectResult{ + Err: errInvalidArgument("Objects channel cannot be nil"), + } + return resultCh + } + + go c.removeObjects(ctx, bucketName, objectsCh, resultCh, opts) + return resultCh +} + +// Return true if the character is within the allowed characters in an XML 1.0 document +// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets +func validXMLChar(r rune) (ok bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xD7FF || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +func hasInvalidXMLChar(str string) bool { + for _, s := range str { + if !validXMLChar(s) { + return true + } + } + return false +} + +// Generate and call MultiDelete S3 requests based on entries received from objectsCh +func (c *Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, resultCh chan<- RemoveObjectResult, opts RemoveObjectsOptions) { + maxEntries := 1000 + finish := false + urlValues := make(url.Values) + urlValues.Set("delete", "") + + // Close result channel when Multi delete finishes. + defer close(resultCh) + + // Loop over entries by 1000 and call MultiDelete requests + for { + if finish { + break + } + count := 0 + var batch []ObjectInfo + + // Try to gather 1000 entries + for object := range objectsCh { + if hasInvalidXMLChar(object.Key) { + // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. + removeResult := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ + VersionID: object.VersionID, + GovernanceBypass: opts.GovernanceBypass, + }) + if err := removeResult.Err; err != nil { + // Version does not exist is not an error ignore and continue. 
+ switch ToErrorResponse(err).Code {
+ case "InvalidArgument", "NoSuchVersion":
+ continue
+ }
+ resultCh <- removeResult
+ continue
+ }
+
+ resultCh <- removeResult
+ continue
+ }
+
+ batch = append(batch, object)
+ if count++; count >= maxEntries {
+ break
+ }
+ }
+ if count == 0 {
+ // Multi Objects Delete API doesn't accept empty object list, quit immediately
+ break
+ }
+ if count < maxEntries {
+ // We didn't have 1000 entries, so this is the last batch
+ finish = true
+ }
+
+ // Build headers.
+ headers := make(http.Header)
+ if opts.GovernanceBypass {
+ // Set the bypass governance retention header
+ headers.Set(amzBypassGovernance, "true")
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+ // Execute POST on bucket to remove objects.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
+ customHeader: headers,
+ })
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ e := httpRespToErrorResponse(resp, bucketName, "")
+ resultCh <- RemoveObjectResult{ObjectName: "", Err: e}
+ }
+ }
+ if err != nil {
+ for _, b := range batch {
+ resultCh <- RemoveObjectResult{
+ ObjectName: b.Key,
+ ObjectVersionID: b.VersionID,
+ Err: err,
+ }
+ }
+ continue
+ }
+
+ // Process multiobjects remove xml response
+ processRemoveMultiObjectsResponse(resp.Body, resultCh)
+
+ closeResponse(resp)
+ }
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+func (c *Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+ // Find multipart upload ids of the object to be aborted.
+ uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName)
+ if err != nil {
+ return err
+ }
+
+ for _, uploadID := range uploadIDs {
+ // Abort the incomplete multipart upload, based on the upload id passed.
+ err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID, all previously uploaded parts are deleted.
+func (c *Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ // Initialize url queries.
+ urlValues := make(url.Values)
+ urlValues.Set("uploadId", uploadID)
+
+ // Execute DELETE on multipart upload.
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusNoContent {
+ // Abort has no response body, handle it for any errors.
+ var errorResponse ErrorResponse
+ switch resp.StatusCode {
+ case http.StatusNotFound:
+ // This is needed specifically for abort, and it cannot
+ // be folded into the default case.
+ errorResponse = ErrorResponse{
+ Code: "NoSuchUpload",
+ Message: "The specified multipart upload does not exist.",
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ default:
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ return errorResponse
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-restore.go b/vendor/github.com/minio/minio-go/v7/api-restore.go
new file mode 100644
index 000000000..9ec8f4f24
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-restore.go
@@ -0,0 +1,182 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2018-2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// RestoreType represents the restore request type
+type RestoreType string
+
+const (
+ // RestoreSelect represents the restore SELECT operation
+ RestoreSelect = RestoreType("SELECT")
+)
+
+// TierType represents a retrieval tier
+type TierType string
+
+const (
+ // TierStandard is the standard retrieval tier
+ TierStandard = TierType("Standard")
+ // TierBulk is the bulk retrieval tier
+ TierBulk = TierType("Bulk")
+ // TierExpedited is the expedited retrieval tier
+ TierExpedited = TierType("Expedited")
+)
+
+// GlacierJobParameters represents the retrieval tier parameter
+type GlacierJobParameters struct {
+ Tier TierType
+}
+
+// Encryption contains the type of server-side encryption used during object retrieval
+type Encryption struct {
+ EncryptionType string
+ KMSContext string
+ KMSKeyID string `xml:"KMSKeyId"`
+}
+
+// MetadataEntry represents metadata information of the restored object.
+type MetadataEntry struct {
+ Name string
+ Value string
+}
+
+// S3 holds properties of the copy of the archived object
+type S3 struct {
+ AccessControlList *AccessControlList `xml:"AccessControlList,omitempty"`
+ BucketName string
+ Prefix string
+ CannedACL *string `xml:"CannedACL,omitempty"`
+ Encryption *Encryption `xml:"Encryption,omitempty"`
+ StorageClass *string `xml:"StorageClass,omitempty"`
+ Tagging *tags.Tags `xml:"Tagging,omitempty"`
+ UserMetadata *MetadataEntry `xml:"UserMetadata,omitempty"`
+}
+
+// SelectParameters holds the select request parameters
+type SelectParameters struct {
+ XMLName xml.Name `xml:"SelectParameters"`
+ ExpressionType QueryExpressionType
+ Expression string
+ InputSerialization SelectObjectInputSerialization
+ OutputSerialization SelectObjectOutputSerialization
+}
+
+// OutputLocation holds properties of the copy of the archived object
+type OutputLocation struct {
+ XMLName xml.Name `xml:"OutputLocation"`
+ S3 S3 `xml:"S3"`
+}
+
+// RestoreRequest holds properties of the restore object request
+type RestoreRequest struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest"`
+ Type *RestoreType `xml:"Type,omitempty"`
+ Tier *TierType `xml:"Tier,omitempty"`
+ Days *int `xml:"Days,omitempty"`
+ GlacierJobParameters *GlacierJobParameters `xml:"GlacierJobParameters,omitempty"`
+ Description *string `xml:"Description,omitempty"`
+ SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
+ OutputLocation *OutputLocation `xml:"OutputLocation,omitempty"`
+}
+
+// SetDays sets the days parameter of the restore request
+func (r *RestoreRequest) SetDays(v int) {
+ r.Days = &v
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters of the restore request
+func (r *RestoreRequest) SetGlacierJobParameters(v GlacierJobParameters) {
+ r.GlacierJobParameters = &v
+}
+
+// SetType sets the type of the restore request
+func (r *RestoreRequest) SetType(v RestoreType) {
+ r.Type = &v
+}
+
+// SetTier sets the retrieval tier of the restore request
+func (r *RestoreRequest) SetTier(v TierType) {
+ r.Tier = &v
+}
+
+// SetDescription sets the description of the restore request
+func (r *RestoreRequest) SetDescription(v string) {
+ r.Description = &v
+}
+
+// SetSelectParameters sets SelectParameters of the restore select request
+func (r *RestoreRequest) SetSelectParameters(v SelectParameters) {
+ r.SelectParameters = &v
+}
+
+// SetOutputLocation sets the properties of the copy of the archived object
+func (r *RestoreRequest) SetOutputLocation(v OutputLocation) {
+ r.OutputLocation = &v
+}
+
+// RestoreObject is an implementation of the https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html AWS S3 API
+func (c *Client) RestoreObject(ctx context.Context, bucketName, objectName, versionID string, req RestoreRequest) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return err
+ }
+
+ restoreRequestBytes, err := xml.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ urlValues := make(url.Values)
+ urlValues.Set("restore", "")
+ if versionID != "" {
+ urlValues.Set("versionId", versionID)
+ }
+
+ // Execute POST on bucket/object.
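+ // (The request goes to the object's "restore" subresource; the
+ // marshaled XML body is authenticated with both its MD5 and
+ // SHA-256 sums via the request metadata below.)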
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentMD5Base64: sumMD5Base64(restoreRequestBytes),
+ contentSHA256Hex: sum256Hex(restoreRequestBytes),
+ contentBody: bytes.NewReader(restoreRequestBytes),
+ contentLength: int64(len(restoreRequestBytes)),
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ return nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
new file mode 100644
index 000000000..790606c50
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -0,0 +1,406 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "errors"
+ "io"
+ "reflect"
+ "time"
+)
+
+// listAllMyBucketsResult container for listBuckets response.
+type listAllMyBucketsResult struct {
+ // Container for one or more buckets.
+ Buckets struct {
+ Bucket []BucketInfo
+ }
+ Owner owner
+}
+
+// owner container for bucket owner information.
+type owner struct {
+ DisplayName string
+ ID string
+}
+
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
+ Prefix string
+}
+
+// ListBucketV2Result container for listObjects response version 2.
+type ListBucketV2Result struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ MaxKeys int64
+ Name string
+
+ // Holds the token that will be sent in the next request to fetch the next group of keys
+ NextContinuationToken string
+
+ ContinuationToken string
+ Prefix string
+
+ // FetchOwner and StartAfter are currently not used
+ FetchOwner string
+ StartAfter string
+}
+
+// Version is an element in the list object versions response
+type Version struct {
+ ETag string
+ IsLatest bool
+ Key string
+ LastModified time.Time
+ Owner Owner
+ Size int64
+ StorageClass string
+ VersionID string `xml:"VersionId"`
+
+ // x-amz-meta-* headers, with the "x-amz-meta-" prefix stripped,
+ // containing the first value.
+ // Only returned by MinIO servers.
+ UserMetadata StringMap `json:"userMetadata,omitempty"`
+
+ // x-amz-tagging values in their k/v values.
+ // Only returned by MinIO servers.
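+ // For example, an "x-amz-tagging: project=alpha&owner=ops" header
+ // would surface here as {"project": "alpha", "owner": "ops"} (a
+ // sketch; the exact decoding is handled by the URLMap type).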
+ UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
+
+ Internal *struct {
+ K int // Data blocks
+ M int // Parity blocks
+ } `xml:"Internal"`
+
+ isDeleteMarker bool
+}
+
+// ListVersionsResult is an element in the list object versions response
+// and has a special Unmarshaler because we need to preserve the order
+// of Version and DeleteMarker elements in the ListVersionsResult.Versions slice
+type ListVersionsResult struct {
+ Versions []Version
+
+ CommonPrefixes []CommonPrefix
+ Name string
+ Prefix string
+ Delimiter string
+ MaxKeys int64
+ EncodingType string
+ IsTruncated bool
+ KeyMarker string
+ VersionIDMarker string
+ NextKeyMarker string
+ NextVersionIDMarker string
+}
+
+// UnmarshalXML is custom unmarshal code for the response of ListObjectVersions;
+// it unmarshals Version and DeleteMarker tags and saves them in the Versions
+// field to preserve the lexical order of the listing.
+func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, _ xml.StartElement) (err error) {
+ for {
+ // Read tokens from the XML document in a stream.
+ t, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ se, ok := t.(xml.StartElement)
+ if ok {
+ tagName := se.Name.Local
+ switch tagName {
+ case "Name", "Prefix",
+ "Delimiter", "EncodingType",
+ "KeyMarker", "NextKeyMarker":
+ var s string
+ if err = d.DecodeElement(&s, &se); err != nil {
+ return err
+ }
+ v := reflect.ValueOf(l).Elem().FieldByName(tagName)
+ if v.IsValid() {
+ v.SetString(s)
+ }
+ case "VersionIdMarker":
+ // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
+ var s string
+ if err = d.DecodeElement(&s, &se); err != nil {
+ return err
+ }
+ l.VersionIDMarker = s
+ case "NextVersionIdMarker":
+ // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name
+ var s string
+ if err = d.DecodeElement(&s, &se); err != nil {
+ return err
+ }
+ l.NextVersionIDMarker = s
+ case "IsTruncated": // bool
+ var b bool
+ if err = d.DecodeElement(&b, &se); err != nil {
+ return err
+ }
+ l.IsTruncated = b
+ case "MaxKeys": // int64
+ var i int64
+ if err = d.DecodeElement(&i, &se); err != nil {
+ return err
+ }
+ l.MaxKeys = i
+ case "CommonPrefixes":
+ var cp CommonPrefix
+ if err = d.DecodeElement(&cp, &se); err != nil {
+ return err
+ }
+ l.CommonPrefixes = append(l.CommonPrefixes, cp)
+ case "DeleteMarker", "Version":
+ var v Version
+ if err = d.DecodeElement(&v, &se); err != nil {
+ return err
+ }
+ if tagName == "DeleteMarker" {
+ v.isDeleteMarker = true
+ }
+ l.Versions = append(l.Versions, v)
+ default:
+ return errors.New("unrecognized option: " + tagName)
+ }
+
+ }
+ }
+ return nil
+}
+
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ Marker string
+ MaxKeys int64
+ Name string
+
+ // When the response is truncated (the IsTruncated element value in
+ // the response is true), you can use the key name in this field
+ // as the marker in the subsequent request to get the next set of objects.
+ // Object storage lists objects in alphabetical order. Note: this
+ // element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include NextMarker
+ // and it is truncated, you can use the value of the last Key in
+ // the response as the marker in the subsequent request to get the
+ // next set of object keys.
+ NextMarker string
+ Prefix string
+}
+
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
+ Bucket string
+ KeyMarker string
+ UploadIDMarker string `xml:"UploadIdMarker"`
+ NextKeyMarker string
+ NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+ EncodingType string
+ MaxUploads int64
+ IsTruncated bool
+ Uploads []ObjectMultipartInfo `xml:"Upload"`
+ Prefix string
+ Delimiter string
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ CommonPrefixes []CommonPrefix
+}
+
+// initiator container for who initiated multipart upload.
+type initiator struct {
+ ID string
+ DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+ ETag string
+ LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
+ // Part number identifies the part.
+ PartNumber int
+
+ // Date and time the part was uploaded.
+ LastModified time.Time
+
+ // Entity tag returned when the part was uploaded, usually md5sum
+ // of the part.
+ ETag string
+
+ // Size of the uploaded part data.
+ Size int64
+
+ // Checksum values of each part.
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+}
+
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+
+ Initiator initiator
+ Owner owner
+
+ StorageClass string
+ PartNumberMarker int
+ NextPartNumberMarker int
+ MaxParts int
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated bool
+ ObjectParts []ObjectPart `xml:"Part"`
+
+ EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+ Bucket string
+ Key string
+ UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+ Location string
+ Bucket string
+ Key string
+ ETag string
+
+ // Checksum values, hash of hashes of parts.
+ ChecksumCRC32 string
+ ChecksumCRC32C string
+ ChecksumSHA1 string
+ ChecksumSHA256 string
+}
+
+// CompletePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type CompletePart struct {
+ // Part number identifies the part.
+ PartNumber int
+ ETag string
+
+ // Checksum values
+ ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"`
+ ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"`
+ ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"`
+ ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
+}
+
+// Checksum will return the checksum for the given type.
+// Will return the empty string if not set.
+func (c CompletePart) Checksum(t ChecksumType) string {
+ switch {
+ case t.Is(ChecksumCRC32C):
+ return c.ChecksumCRC32C
+ case t.Is(ChecksumCRC32):
+ return c.ChecksumCRC32
+ case t.Is(ChecksumSHA1):
+ return c.ChecksumSHA1
+ case t.Is(ChecksumSHA256):
+ return c.ChecksumSHA256
+ }
+ return ""
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
+ Parts []CompletePart `xml:"Part"`
+}
+
+// createBucketConfiguration container for bucket configuration.
+type createBucketConfiguration struct {
+ XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
+ Location string `xml:"LocationConstraint"`
+}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+ // These fields are ignored.
+ DeleteMarker bool
+ DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+ Key string
+ Code string
+ Message string
+ VersionID string `xml:"VersionId"`
+}
+
+// deleteMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool
+ Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []deletedObject `xml:"Deleted"`
+ UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-select.go b/vendor/github.com/minio/minio-go/v7/api-select.go
new file mode 100644
index 000000000..628d967ff
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-select.go
@@ -0,0 +1,757 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2018-2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/minio/minio-go/v7/pkg/encrypt"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// CSVFileHeaderInfo - is the parameter for whether to utilize headers.
+type CSVFileHeaderInfo string
+
+// Constants for file header info.
+const (
+ CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE"
+ CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE"
+ CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE"
+)
+
+// SelectCompressionType - is the parameter for what type of compression is
+// present.
+type SelectCompressionType string
+
+// Constants for compression types under select API.
+const (
+ SelectCompressionNONE SelectCompressionType = "NONE"
+ SelectCompressionGZIP SelectCompressionType = "GZIP"
+ SelectCompressionBZIP SelectCompressionType = "BZIP2"
+
+ // Non-standard compression schemes, supported by MinIO hosts:
+
+ SelectCompressionZSTD SelectCompressionType = "ZSTD" // Zstandard compression.
+ SelectCompressionLZ4 SelectCompressionType = "LZ4" // LZ4 Stream
+ SelectCompressionS2 SelectCompressionType = "S2" // S2 Stream
+ SelectCompressionSNAPPY SelectCompressionType = "SNAPPY" // Snappy stream
+)
+
+// CSVQuoteFields - is the parameter for how CSV fields are quoted.
+type CSVQuoteFields string
+
+// Constants for csv quote styles.
+const (
+ CSVQuoteFieldsAlways CSVQuoteFields = "Always"
+ CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded"
+)
+
+// QueryExpressionType - is the syntax type of the expression; this should
+// only be SQL.
+type QueryExpressionType string
+
+// Constants for expression type.
+const (
+ QueryExpressionTypeSQL QueryExpressionType = "SQL"
+)
+
+// JSONType determines json input serialization type.
+type JSONType string
+
+// Constants for JSONTypes.
+const (
+ JSONDocumentType JSONType = "DOCUMENT"
+ JSONLinesType JSONType = "LINES"
+)
+
+// ParquetInputOptions parquet input specific options
+type ParquetInputOptions struct{}
+
+// CSVInputOptions csv input specific options
+type CSVInputOptions struct {
+ FileHeaderInfo CSVFileHeaderInfo
+ fileHeaderInfoSet bool
+
+ RecordDelimiter string
+ recordDelimiterSet bool
+
+ FieldDelimiter string
+ fieldDelimiterSet bool
+
+ QuoteCharacter string
+ quoteCharacterSet bool
+
+ QuoteEscapeCharacter string
+ quoteEscapeCharacterSet bool
+
+ Comments string
+ commentsSet bool
+}
+
+// SetFileHeaderInfo sets the file header info in the CSV input options
+func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) {
+ c.FileHeaderInfo = val
+ c.fileHeaderInfoSet = true
+}
+
+// SetRecordDelimiter sets the record delimiter in the CSV input options
+func (c *CSVInputOptions) SetRecordDelimiter(val string) {
+ c.RecordDelimiter = val
+ c.recordDelimiterSet = true
+}
+
+// SetFieldDelimiter sets the field delimiter in the CSV input options
+func (c *CSVInputOptions) SetFieldDelimiter(val string) {
+ c.FieldDelimiter = val
+ c.fieldDelimiterSet = true
+}
+
+// SetQuoteCharacter sets the quote character in the CSV input options
+func (c *CSVInputOptions) SetQuoteCharacter(val string) {
+ c.QuoteCharacter = val
+ c.quoteCharacterSet = true
+}
+
+// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options
+func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) {
+ c.QuoteEscapeCharacter = val
+ c.quoteEscapeCharacterSet = true
+}
+
+// SetComments sets the comments character in the CSV input options
+func (c *CSVInputOptions) SetComments(val string) {
+ c.Comments = val
+ c.commentsSet = true
+}
+
+// MarshalXML - produces the xml representation of the CSV input options struct
+func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+ if c.FileHeaderInfo != "" || c.fileHeaderInfoSet {
+ if err := e.EncodeElement(c.FileHeaderInfo,
xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + if c.Comments != "" || c.commentsSet { + if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields + quoteFieldsSet bool + + RecordDelimiter string + recordDelimiterSet bool + + FieldDelimiter string + fieldDelimiterSet bool + + QuoteCharacter string + quoteCharacterSet bool + + QuoteEscapeCharacter string + quoteEscapeCharacterSet bool +} + +// SetQuoteFields sets the quote field parameter in the CSV output options +func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { + c.QuoteFields = val + c.quoteFieldsSet = true +} + +// SetRecordDelimiter sets the record delimiter character in the CSV output options +func (c *CSVOutputOptions) SetRecordDelimiter(val string) { + c.RecordDelimiter = val + c.recordDelimiterSet = true +} + +// SetFieldDelimiter sets the field delimiter character in the CSV output options +func (c *CSVOutputOptions) SetFieldDelimiter(val string) { + c.FieldDelimiter = val + c.fieldDelimiterSet = true +} + +// SetQuoteCharacter sets the quote character in the CSV output options +func (c *CSVOutputOptions) SetQuoteCharacter(val string) { + c.QuoteCharacter = val + c.quoteCharacterSet = true +} + +// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options +func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { + c.QuoteEscapeCharacter = val + c.quoteEscapeCharacterSet = true +} + +// MarshalXML - produces the xml representation of the CSVOutputOptions struct +func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if c.QuoteFields != "" || c.quoteFieldsSet { + if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { + return err + } + } + + if c.RecordDelimiter != "" || c.recordDelimiterSet { + if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + if c.FieldDelimiter != "" || c.fieldDelimiterSet { + if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { + return err + } + } + + if c.QuoteCharacter != "" || c.quoteCharacterSet { + if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { + return err + } + } + + if c.QuoteEscapeCharacter != "" || 
c.quoteEscapeCharacterSet { + if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType + typeSet bool +} + +// SetType sets the JSON type in the JSON input options +func (j *JSONInputOptions) SetType(typ JSONType) { + j.Type = typ + j.typeSet = true +} + +// MarshalXML - produces the xml representation of the JSONInputOptions struct +func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.Type != "" || j.typeSet { + if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + RecordDelimiter string + recordDelimiterSet bool +} + +// SetRecordDelimiter sets the record delimiter in the JSON output options +func (j *JSONOutputOptions) SetRecordDelimiter(val string) { + j.RecordDelimiter = val + j.recordDelimiterSet = true +} + +// MarshalXML - produces the xml representation of the JSONOutputOptions struct +func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + if j.RecordDelimiter != "" || j.recordDelimiterSet { + if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType `xml:"CompressionType,omitempty"` + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. +func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON SelectObjectType = "JSON" + SelectObjectTypeParquet SelectObjectType = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. 
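+// The 12-byte prelude is laid out as: total message length (4 bytes),
+// header section length (4 bytes), and a CRC32 of those first 8 bytes
+// (4 bytes), all big-endian, per the AWS event-stream encoding.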
+type preludeInfo struct {
+ totalLen uint32
+ headerLen uint32
+}
+
+// SelectResults is used for the streaming responses from the server.
+type SelectResults struct {
+ pipeReader *io.PipeReader
+ resp *http.Response
+ stats *StatsMessage
+ progress *ProgressMessage
+}
+
+// ProgressMessage is a struct for progress xml message.
+type ProgressMessage struct {
+ XMLName xml.Name `xml:"Progress" json:"-"`
+ StatsMessage
+}
+
+// StatsMessage is a struct for stat xml message.
+type StatsMessage struct {
+ XMLName xml.Name `xml:"Stats" json:"-"`
+ BytesScanned int64
+ BytesProcessed int64
+ BytesReturned int64
+}
+
+// messageType represents the type of message.
+type messageType string
+
+const (
+ errorMsg messageType = "error"
+ commonMsg messageType = "event"
+)
+
+// eventType represents the type of event.
+type eventType string
+
+// List of event-types returned by the Select API.
+const (
+ endEvent eventType = "End"
+ recordsEvent eventType = "Records"
+ progressEvent eventType = "Progress"
+ statsEvent eventType = "Stats"
+)
+
+// contentType represents content type of event.
+type contentType string
+
+const (
+ xmlContent contentType = "text/xml"
+)
+
+// SelectObjectContent is an implementation of the http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API.
+func (c *Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+
+ selectReqBytes, err := xml.Marshal(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ urlValues := make(url.Values)
+ urlValues.Set("select", "")
+ urlValues.Set("select-type", "2")
+
+ // Execute POST on bucket/object.
+ resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: opts.Header(),
+ contentMD5Base64: sumMD5Base64(selectReqBytes),
+ contentSHA256Hex: sum256Hex(selectReqBytes),
+ contentBody: bytes.NewReader(selectReqBytes),
+ contentLength: int64(len(selectReqBytes)),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return NewSelectResults(resp, bucketName)
+}
+
+// NewSelectResults creates a Select Result parser that parses the response
+// and returns a Reader that will return parsed and assembled select output.
+func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+ streamer := &SelectResults{
+ resp: resp,
+ stats: &StatsMessage{},
+ progress: &ProgressMessage{},
+ pipeReader: pipeReader,
+ }
+ streamer.start(pipeWriter)
+ return streamer, nil
+}
+
+// Close - closes the underlying response body and the stream reader.
+func (s *SelectResults) Close() error {
+ defer closeResponse(s.resp)
+ return s.pipeReader.Close()
+}
+
+// Read - is a reader compatible implementation for SelectObjectContent records.
+func (s *SelectResults) Read(b []byte) (n int, err error) {
+ return s.pipeReader.Read(b)
+}
+
+// Stats - information about a request's stats when processing is complete.
+func (s *SelectResults) Stats() *StatsMessage {
+ return s.stats
+}
+
+// Progress - information about the progress of a request.
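+// Progress data is only delivered when RequestProgress.Enabled was set on
+// the request options. A minimal consumption sketch (client, ctx, bucket and
+// object names are assumed):
+//
+//	res, err := client.SelectObjectContent(ctx, "my-bucket", "data.csv", opts)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Close()
+//	if _, err := io.Copy(os.Stdout, res); err != nil {
+//		return err
+//	}
+//	fmt.Println("bytes scanned:", res.Stats().BytesScanned)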
+func (s *SelectResults) Progress() *ProgressMessage { + return s.progress +} + +// start is the main function that decodes the large byte array into +// several events that are sent through the eventstream. +func (s *SelectResults) start(pipeWriter *io.PipeWriter) { + go func() { + for { + var prelude preludeInfo + headers := make(http.Header) + var err error + + // Create CRC code + crc := crc32.New(crc32.IEEETable) + crcReader := io.TeeReader(s.resp.Body, crc) + + // Extract the prelude(12 bytes) into a struct to extract relevant information. + prelude, err = processPrelude(crcReader, crc) + if err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + // Extract the headers(variable bytes) into a struct to extract relevant information + if prelude.headerLen > 0 { + if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + } + + // Get the actual payload length so that the appropriate amount of + // bytes can be read or parsed. + payloadLen := prelude.PayloadLen() + + m := messageType(headers.Get("message-type")) + + switch m { + case errorMsg: + pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) + closeResponse(s.resp) + return + case commonMsg: + // Get content-type of the payload. + c := contentType(headers.Get("content-type")) + + // Get event type of the payload. + e := eventType(headers.Get("event-type")) + + // Handle all supported events. + switch e { + case endEvent: + pipeWriter.Close() + closeResponse(s.resp) + return + case recordsEvent: + if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + case progressEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) + closeResponse(s.resp) + return + } + case statsEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) + closeResponse(s.resp) + return + } + } + } + + // Ensures that the full message's CRC is correct and + // that the message is not corrupted + if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + } + }() +} + +// PayloadLen is a function that calculates the length of the payload. 
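+// The payload is everything except the 12-byte prelude, the header section,
+// and the 4-byte trailing message CRC, hence totalLen - headerLen - 16 below.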
+func (p preludeInfo) PayloadLen() int64 {
+ return int64(p.totalLen - p.headerLen - 16)
+}
+
+// processPrelude is the function that reads the 12 bytes of the prelude and
+// ensures the CRC is correct while also extracting relevant information into
+// the struct.
+func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
+ var err error
+ pInfo := preludeInfo{}
+
+ // reads total length of the message (first 4 bytes)
+ pInfo.totalLen, err = extractUint32(prelude)
+ if err != nil {
+ return pInfo, err
+ }
+
+ // reads total header length of the message (2nd 4 bytes)
+ pInfo.headerLen, err = extractUint32(prelude)
+ if err != nil {
+ return pInfo, err
+ }
+
+ // checks that the CRC is correct (3rd 4 bytes)
+ preCRC := crc.Sum32()
+ if err := checkCRC(prelude, preCRC); err != nil {
+ return pInfo, err
+ }
+
+ return pInfo, nil
+}
+
+// extracts the relevant information from the Headers.
+func extractHeader(body io.Reader, myHeaders http.Header) error {
+ for {
+ // extracts the first part of the header, the header name
+ headerTypeName, err := extractHeaderType(body)
+ if err != nil {
+ // Since end of file, we have read all of our headers
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ // reads the header value type byte (always 7, i.e. string) and ignores it.
+ extractUint8(body)
+
+ headerValueName, err := extractHeaderValue(body)
+ if err != nil {
+ return err
+ }
+
+ myHeaders.Set(headerTypeName, headerValueName)
+
+ }
+ return nil
+}
+
+// extractHeaderType extracts the first half of the header message, the header type.
+func extractHeaderType(body io.Reader) (string, error) {
+ // extracts the 1-byte header-name length
+ headerNameLen, err := extractUint8(body)
+ if err != nil {
+ return "", err
+ }
+ // extracts the string with the appropriate number of bytes
+ headerName, err := extractString(body, int(headerNameLen))
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimPrefix(headerName, ":"), nil
+}
+
+// extractHeaderValue extracts the second half of the header message, the
+// header value
+func extractHeaderValue(body io.Reader) (string, error) {
+ bodyLen, err := extractUint16(body)
+ if err != nil {
+ return "", err
+ }
+ bodyName, err := extractString(body, int(bodyLen))
+ if err != nil {
+ return "", err
+ }
+ return bodyName, nil
+}
+
+// extracts a string from byte array of a particular number of bytes.
+func extractString(source io.Reader, lenBytes int) (string, error) {
+ myVal := make([]byte, lenBytes)
+ // readFull guards against short reads, which a bare Read would allow.
+ _, err := readFull(source, myVal)
+ if err != nil {
+ return "", err
+ }
+ return string(myVal), nil
+}
+
+// extractUint32 extracts a 4 byte integer from the byte array.
+func extractUint32(r io.Reader) (uint32, error) {
+ buf := make([]byte, 4)
+ _, err := readFull(r, buf)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint32(buf), nil
+}
+
+// extractUint16 extracts a 2 byte integer from the byte array.
+func extractUint16(r io.Reader) (uint16, error) {
+ buf := make([]byte, 2)
+ _, err := readFull(r, buf)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(buf), nil
+}
+
+// extractUint8 extracts a 1 byte integer from the byte array.
+func extractUint8(r io.Reader) (uint8, error) {
+ buf := make([]byte, 1)
+ _, err := readFull(r, buf)
+ if err != nil {
+ return 0, err
+ }
+ return buf[0], nil
+}
+
+// checkCRC ensures that the CRC matches with the one from the reader.
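+// It reads the trailing 4-byte big-endian CRC from the stream and compares
+// it with the running CRC32 (IEEE) computed over the message so far.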
+func checkCRC(r io.Reader, expect uint32) error { + msgCRC, err := extractUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-stat.go b/vendor/github.com/minio/minio-go/v7/api-stat.go new file mode 100644 index 000000000..11455beb3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-stat.go @@ -0,0 +1,124 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to +// control cancellations and timeouts. +func (c *Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return false, err + } + + // Execute HEAD on bucketName. + resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + if ToErrorResponse(err).Code == "NoSuchBucket" { + return false, nil + } + return false, err + } + if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == "NoSuchBucket" { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, httpRespToErrorResponse(resp, bucketName, "") + } + } + return true, nil +} + +// StatObject verifies if object exists, you have permission to access it +// and returns information about the object. +func (c *Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } + } + headers := opts.Header() + if opts.Internal.ReplicationDeleteMarker { + headers.Set(minIOBucketReplicationDeleteMarker, "true") + } + if opts.Internal.IsReplicationReadyForDeleteMarker { + headers.Set(isMinioTgtReplicationReady, "true") + } + + // Execute HEAD on objectName. 
+ resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: opts.toQueryValues(), + contentSHA256Hex: emptySHA256Hex, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + if resp != nil { + deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" + replicationReady := resp.Header.Get(minioTgtReplicationReady) == "true" + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + if resp.StatusCode == http.StatusMethodNotAllowed && opts.VersionID != "" && deleteMarker { + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "MethodNotAllowed", + Message: "The specified method is not allowed against this resource.", + BucketName: bucketName, + Key: objectName, + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + }, errResp + } + return ObjectInfo{ + VersionID: resp.Header.Get(amzVersionID), + IsDeleteMarker: deleteMarker, + ReplicationReady: replicationReady, // whether delete marker can be replicated + }, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + return ToObjectInfo(bucketName, objectName, resp.Header) +} diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go new file mode 100644 index 000000000..1d6b66502 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -0,0 +1,1010 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/http/cookiejar" + "net/http/httptrace" + "net/http/httputil" + "net/url" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + md5simd "github.com/minio/md5-simd" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" + "golang.org/x/net/publicsuffix" +) + +// Client implements Amazon S3 compatible methods. +type Client struct { + // Standard options. + + // Parsed endpoint url provided by the user. + endpointURL *url.URL + + // Holds various credential providers. + credsProvider *credentials.Credentials + + // Custom signerType value overrides all credentials. + overrideSignerType credentials.SignatureType + + // User supplied. + appInfo struct { + appName string + appVersion string + } + + // Indicate whether we are using https or not + secure bool + + // Needs allocation. + httpClient *http.Client + httpTrace *httptrace.ClientTrace + bucketLocCache *bucketLocationCache + + // Advanced functionality. + isTraceEnabled bool + traceErrorsOnly bool + traceOutput io.Writer + + // S3 specific accelerated endpoint. + s3AccelerateEndpoint string + // S3 dual-stack endpoints are enabled by default. 
+ s3DualstackEnabled bool
+
+ // Region endpoint
+ region string
+
+ // Random seed.
+ random *rand.Rand
+
+ // lookup indicates type of url lookup supported by server. If not specified,
+ // default to Auto.
+ lookup BucketLookupType
+
+ // Factory for MD5 hash functions.
+ md5Hasher func() md5simd.Hasher
+ sha256Hasher func() md5simd.Hasher
+
+ healthStatus int32
+
+ trailingHeaderSupport bool
+}
+
+// Options for New method
+type Options struct {
+ Creds *credentials.Credentials
+ Secure bool
+ Transport http.RoundTripper
+ Trace *httptrace.ClientTrace
+ Region string
+ BucketLookup BucketLookupType
+
+ // Allows setting a custom region lookup based on URL pattern;
+ // not all URL patterns are covered by this library, so if you
+ // have custom endpoints with many regions you can use this
+ // function to perform region lookups appropriately.
+ CustomRegionViaURL func(u url.URL) string
+
+ // TrailingHeaders indicates server support of trailing headers.
+ // Only supported for v4 signatures.
+ TrailingHeaders bool
+
+ // Custom hash routines. Leave nil to use standard.
+ CustomMD5 func() md5simd.Hasher
+ CustomSHA256 func() md5simd.Hasher
+}
+
+// Global constants.
+const (
+ libraryName = "minio-go"
+ libraryVersion = "v7.0.77"
+)
+
+// User Agent should always follow the below style.
+// Please open an issue to discuss any new changes here.
+//
+// MinIO (OS; ARCH) LIB/VER APP/VER
+const (
+ libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
+ libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
+)
+
+// BucketLookupType is type of url lookup supported by server.
+type BucketLookupType int
+
+// Different types of url lookup supported by the server. Initialized to BucketLookupAuto.
+const (
+ BucketLookupAuto BucketLookupType = iota
+ BucketLookupDNS
+ BucketLookupPath
+)
+
+// New - instantiate minio client with options
+func New(endpoint string, opts *Options) (*Client, error) {
+ if opts == nil {
+ return nil, errors.New("no options provided")
+ }
+ clnt, err := privateNew(endpoint, opts)
+ if err != nil {
+ return nil, err
+ }
+ if s3utils.IsAmazonEndpoint(*clnt.endpointURL) {
+ // If Amazon S3 set to signature v4.
+ clnt.overrideSignerType = credentials.SignatureV4
+ // Amazon S3 endpoints are resolved into dual-stack endpoints by default
+ // for backwards compatibility.
+ clnt.s3DualstackEnabled = true
+ }
+
+ return clnt, nil
+}
+
+// EndpointURL returns the URL of the S3 endpoint.
+func (c *Client) EndpointURL() *url.URL {
+ endpoint := *c.endpointURL // copy to prevent callers from modifying internal state
+ return &endpoint
+}
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *lockedRandSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+func privateNew(endpoint string, opts *Options) (*Client, error) {
+ // construct endpoint.
+ endpointURL, err := getEndpointURL(endpoint, opts.Secure)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize cookies to preserve server sent cookies if any and replay
+ // them upon each request.
+ jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ if err != nil {
+ return nil, err
+ }
+
+ // instantiate new Client.
+ clnt := new(Client)
+
+ // Save the credentials.
+ clnt.credsProvider = opts.Creds
+
+ // Remember whether we are using https or not
+ clnt.secure = opts.Secure
+
+ // Save endpoint URL, user agent for future uses.
+ clnt.endpointURL = endpointURL
+
+ transport := opts.Transport
+ if transport == nil {
+ transport, err = DefaultTransport(opts.Secure)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ clnt.httpTrace = opts.Trace
+
+ // Instantiate http client and bucket location cache.
+ clnt.httpClient = &http.Client{
+ Jar: jar,
+ Transport: transport,
+ CheckRedirect: func(_ *http.Request, _ []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+
+ // Sets custom region. If the region is empty, the bucket location cache is used automatically.
+ if opts.Region == "" {
+ if opts.CustomRegionViaURL != nil {
+ opts.Region = opts.CustomRegionViaURL(*clnt.endpointURL)
+ } else {
+ opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL)
+ }
+ }
+ clnt.region = opts.Region
+
+ // Instantiate bucket location cache.
+ clnt.bucketLocCache = newBucketLocationCache()
+
+ // Introduce a new locked random seed.
+ clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
+
+ // Add default md5 hasher.
+ clnt.md5Hasher = opts.CustomMD5
+ clnt.sha256Hasher = opts.CustomSHA256
+ if clnt.md5Hasher == nil {
+ clnt.md5Hasher = newMd5Hasher
+ }
+ if clnt.sha256Hasher == nil {
+ clnt.sha256Hasher = newSHA256Hasher
+ }
+
+ clnt.trailingHeaderSupport = opts.TrailingHeaders && clnt.overrideSignerType.IsV4()
+
+ // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined
+ // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints.
+ clnt.lookup = opts.BucketLookup
+
+ // healthcheck is not initialized
+ clnt.healthStatus = unknown
+
+ // Return.
+ return clnt, nil
+}
+
+// SetAppInfo - add application details to user agent.
+func (c *Client) SetAppInfo(appName, appVersion string) {
+ // if app name and version not set, we do not set a new user agent.
+ if appName != "" && appVersion != "" {
+ c.appInfo.appName = appName
+ c.appInfo.appVersion = appVersion
+ }
+}
+
+// TraceOn - enable HTTP tracing.
+func (c *Client) TraceOn(outputStream io.Writer) {
+ // if outputStream is nil then default to os.Stdout.
+ if outputStream == nil {
+ outputStream = os.Stdout
+ }
+ // Sets a new output stream.
+ c.traceOutput = outputStream
+
+ // Enable tracing.
+ c.isTraceEnabled = true
+}
+
+// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced.
+func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) {
+ c.TraceOn(outputStream)
+ c.traceErrorsOnly = true
+}
+
+// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call.
+// If all tracing needs to be turned off, call TraceOff().
+func (c *Client) TraceErrorsOnlyOff() {
+ c.traceErrorsOnly = false
+}
+
+// TraceOff - disable HTTP tracing.
+func (c *Client) TraceOff() {
+ // Disable tracing.
+ c.isTraceEnabled = false
+ c.traceErrorsOnly = false
+}
+
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is specific to S3; for all other endpoints this
+// function does nothing.
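+// When acceleration is enabled, requests are routed through the accelerate
+// endpoint. A typical call (the endpoint value here is the documented AWS
+// accelerate host):
+//
+//	client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")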
+// To read further details on s3 transfer acceleration, please visit
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ c.s3AccelerateEndpoint = accelerateEndpoint
+ }
+}
+
+// SetS3EnableDualstack turns s3 dual-stack endpoints on or off for all requests.
+// The feature is only specific to S3 and is on by default. To read more about
+// Amazon S3 dual-stack endpoints visit -
+// https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html
+func (c *Client) SetS3EnableDualstack(enabled bool) {
+ if s3utils.IsAmazonEndpoint(*c.endpointURL) {
+ c.s3DualstackEnabled = enabled
+ }
+}
+
+// hashMaterials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For signature v4 request if the connection is insecure compute only sha256.
+// - For signature v4 request if the connection is secure compute only md5.
+// - For anonymous request compute md5.
+func (c *Client) hashMaterials(isMd5Requested, isSha256Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) {
+ hashSums = make(map[string][]byte)
+ hashAlgos = make(map[string]md5simd.Hasher)
+ if c.overrideSignerType.IsV4() {
+ if c.secure {
+ hashAlgos["md5"] = c.md5Hasher()
+ } else {
+ if isSha256Requested {
+ hashAlgos["sha256"] = c.sha256Hasher()
+ }
+ }
+ } else {
+ if c.overrideSignerType.IsAnonymous() {
+ hashAlgos["md5"] = c.md5Hasher()
+ }
+ }
+ if isMd5Requested {
+ hashAlgos["md5"] = c.md5Hasher()
+ }
+ return hashAlgos, hashSums
+}
+
+const (
+ unknown = -1
+ offline = 0
+ online = 1
+)
+
+// IsOnline returns true if healthcheck enabled and client is online.
+// If HealthCheck function has not been called this will always return true.
+func (c *Client) IsOnline() bool {
+ return !c.IsOffline()
+}
+
+// sets online healthStatus to offline
+func (c *Client) markOffline() {
+ atomic.CompareAndSwapInt32(&c.healthStatus, online, offline)
+}
+
+// IsOffline returns true if healthcheck enabled and client is offline.
+// If HealthCheck function has not been called this will always return false.
+func (c *Client) IsOffline() bool {
+ return atomic.LoadInt32(&c.healthStatus) == offline
+}
+
+// HealthCheck starts a healthcheck to see if endpoint is up.
+// Returns a context cancellation function, to stop the health check,
+// and an error if health check is already started.
+func (c *Client) HealthCheck(hcDuration time.Duration) (context.CancelFunc, error) {
+ if atomic.LoadInt32(&c.healthStatus) != unknown {
+ return nil, fmt.Errorf("health check is running")
+ }
+ if hcDuration < 1*time.Second {
+ return nil, fmt.Errorf("health check duration should be at least 1 second")
+ }
+ probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-health-")
+ ctx, cancelFn := context.WithCancel(context.Background())
+ atomic.StoreInt32(&c.healthStatus, offline)
+ {
+ // Change to online, if we can connect.
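+ // (The probe below targets a random, almost certainly nonexistent
+ // bucket: a NoSuchBucket or AccessDenied reply still proves the
+ // endpoint is reachable, which is all the health check needs.)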
+ gctx, gcancel := context.WithTimeout(ctx, 3*time.Second) + _, err := c.getBucketLocation(gctx, probeBucketName) + gcancel() + if !IsNetworkOrHostDown(err, false) { + switch ToErrorResponse(err).Code { + case "NoSuchBucket", "AccessDenied", "": + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + } + } + } + + go func(duration time.Duration) { + timer := time.NewTimer(duration) + defer timer.Stop() + for { + select { + case <-ctx.Done(): + atomic.StoreInt32(&c.healthStatus, unknown) + return + case <-timer.C: + // Do health check the first time and ONLY if the connection is marked offline + if c.IsOffline() { + gctx, gcancel := context.WithTimeout(context.Background(), 3*time.Second) + _, err := c.getBucketLocation(gctx, probeBucketName) + gcancel() + if !IsNetworkOrHostDown(err, false) { + switch ToErrorResponse(err).Code { + case "NoSuchBucket", "AccessDenied", "": + atomic.CompareAndSwapInt32(&c.healthStatus, offline, online) + } + } + } + + timer.Reset(duration) + } + } + }(hcDuration) + return cancelFn, nil +} + +// requestMetadata - is container for all the values to make a request. +type requestMetadata struct { + // If set newRequest presigns the URL. + presignURL bool + + // User supplied. + bucketName string + objectName string + queryValues url.Values + customHeader http.Header + extraPresignHeader http.Header + expires int64 + + // Generated by our internal code. + bucketLocation string + contentBody io.Reader + contentLength int64 + contentMD5Base64 string // carries base64 encoded md5sum + contentSHA256Hex string // carries hex encoded sha256sum + streamSha256 bool + addCrc *ChecksumType + trailer http.Header // (http.Request).Trailer. Requires v4 signature. +} + +// dumpHTTP - dump HTTP request and response. +func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) error { + // Starts http dump. + _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + if err != nil { + return err + } + + // Filter out Signature field from Authorization header. + origAuth := req.Header.Get("Authorization") + if origAuth != "" { + req.Header.Set("Authorization", redactSignature(origAuth)) + } + + // Only display request header. + reqTrace, err := httputil.DumpRequestOut(req, false) + if err != nil { + return err + } + + // Write request to trace output. + _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + if err != nil { + return err + } + + // Only display response header. + var respTrace []byte + + // For errors we make sure to dump response body as well. + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusPartialContent && + resp.StatusCode != http.StatusNoContent { + respTrace, err = httputil.DumpResponse(resp, true) + if err != nil { + return err + } + } else { + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err + } + } + + // Write response to trace output. + _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + // Ends the http dump. + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + // Returns success. + return nil +} + +// do - execute http request. +func (c *Client) do(req *http.Request) (resp *http.Response, err error) { + defer func() { + if IsNetworkOrHostDown(err, false) { + c.markOffline() + } + }() + + resp, err = c.httpClient.Do(req) + if err != nil { + // Handle this specifically for now until future Golang versions fix this issue properly. 
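+ // (net/http can surface a bare "EOF" url.Error when the peer closes
+ // an idle connection; it is rewrapped below with the host name so
+ // callers get an actionable message.)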
+ if urlErr, ok := err.(*url.Error); ok { + if strings.Contains(urlErr.Err.Error(), "EOF") { + return nil, &url.Error{ + Op: urlErr.Op, + URL: urlErr.URL, + Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), + } + } + } + return nil, err + } + + // Response should not be nil; report an error if that is the case. + if resp == nil { + msg := "Response is empty. " + reportIssue + return nil, errInvalidArgument(msg) + } + + // If trace is enabled, dump http request and response, + // except when traceErrorsOnly is enabled and the response's status code is OK + if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { + err = c.dumpHTTP(req, resp) + if err != nil { + return nil, err + } + } + + return resp, nil +} + +// List of success status. +var successStatus = []int{ + http.StatusOK, + http.StatusNoContent, + http.StatusPartialContent, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error, up to maxRetries attempts, with +// exponentially increasing delays using a standard backoff algorithm. +func (c *Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + if c.IsOffline() { + return nil, errors.New(c.endpointURL.String() + " is offline.") + } + + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + reqRetry := MaxRetry // Indicates how many times we can retry the request + + if metadata.contentBody != nil { + // If the body is seekable, then it is retryable. + bodySeeker, retryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + retryable = false + } + // Retry only when the reader is seekable + if !retryable { + reqRetry = 1 + } + + // Figure out if the body can be closed - if yes + // we will definitely close it upon the function + // return. + bodyCloser, ok := metadata.contentBody.(io.Closer) + if ok { + defer bodyCloser.Close() + } + } + + if metadata.addCrc != nil && metadata.contentLength > 0 { + if metadata.trailer == nil { + metadata.trailer = make(http.Header, 1) + } + crc := metadata.addCrc.Hasher() + metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { + // Update trailer when done. + metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash)) + }) + metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil))) + } + + // Create cancel context to control 'newRetryTimer' go routine. + retryCtx, cancel := context.WithCancel(ctx) + + // Indicate to our routine to exit cleanly upon return. + defer cancel() + + for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { + // Retry executes the following function body if the request has an + // error, until maxRetries have been exhausted; retry attempts are + // performed after waiting for the period given by the backoff. + if retryable { + // Seek back to beginning for each attempt. + if _, err = bodySeeker.Seek(0, 0); err != nil { + // If seek failed, no need to retry. + return nil, err + } + } + + // Instantiate a new request. + var req *http.Request + req, err = c.newRequest(ctx, method, metadata) + if err != nil { + errResponse := ToErrorResponse(err) + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + return nil, err + } + + // Initiate the request.
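+ // c.do marks the client offline on network errors (see the deferred check + // in do); request errors deemed retryable re-enter the retry loop below.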
+ res, err = c.do(req) + if err != nil { + if isRequestErrorRetryable(ctx, err) { + // Retry the request + continue + } + return nil, err + } + + // For any known successful http status, return quickly. + for _, httpStatus := range successStatus { + if httpStatus == res.StatusCode { + return res, nil + } + } + + // Read the body to be saved later. + errBodyBytes, err := io.ReadAll(res.Body) + // res.Body should be closed + closeResponse(res) + if err != nil { + return nil, err + } + + // Save the body. + errBodySeeker := bytes.NewReader(errBodyBytes) + res.Body = io.NopCloser(errBodySeeker) + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = io.NopCloser(errBodySeeker) + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. + // + // Additionally, we should only retry if bucketLocation and custom + // region is empty. + if c.region == "" { + switch errResponse.Code { + case "AuthorizationHeaderMalformed": + fallthrough + case "InvalidRegion": + fallthrough + case "AccessDenied": + if errResponse.Region == "" { + // Region is empty we simply return the error. + return res, err + } + // Region is not empty figure out a way to + // handle this appropriately. + if metadata.bucketName != "" { + // Gather Cached location only if bucketName is present. + if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } + } else { + // This is for ListBuckets() fallback. + if errResponse.Region != metadata.bucketLocation { + // Retry if the error response has a different region + // than the request we just made. + metadata.bucketLocation = errResponse.Region + continue // Retry + } + } + } + } + + // Verify if error response code is retryable. + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + // Verify if http status code is retryable. + if isHTTPStatusRetryable(res.StatusCode) { + continue // Retry. + } + + // For all other cases break out of the retry loop. + break + } + + // Return an error when retry is canceled or deadlined + if e := retryCtx.Err(); e != nil { + return nil, e + } + + return res, err +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c *Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = http.MethodPost + } + + location := metadata.bucketLocation + if location == "" { + if metadata.bucketName != "" { + // Gather location only if bucketName is present. + location, err = c.getBucketLocation(ctx, metadata.bucketName) + if err != nil { + return nil, err + } + } + if location == "" { + location = getDefaultLocation(*c.endpointURL, c.region) + } + } + + // Look if target url supports virtual host. + // We explicitly disallow MakeBucket calls to not use virtual DNS style, + // since the resolution may fail. + isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket + + // Construct a new target URL. 
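+ // Besides choosing between virtual-host and path style, makeTargetURL may + // also swap in the transfer-acceleration or dual-stack host for Amazon + // endpoints (see makeTargetURL below).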
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, + isVirtualHost, metadata.queryValues) + if err != nil { + return nil, err + } + + if c.httpTrace != nil { + ctx = httptrace.WithClientTrace(ctx, c.httpTrace) + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + // Generate presign url if needed, return right here. + if metadata.expires != 0 && metadata.presignURL { + if signerType.IsAnonymous() { + return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + } + if metadata.extraPresignHeader != nil { + if signerType.IsV2() { + return nil, errInvalidArgument("Extra signed headers for Presign with Signature V2 is not supported.") + } + for k, v := range metadata.extraPresignHeader { + req.Header.Set(k, v[0]) + } + } + if signerType.IsV2() { + // Presign URL with signature v2. + req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + } else if signerType.IsV4() { + // Presign URL with signature v4. + req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + } + return req, nil + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = io.NopCloser(metadata.contentBody) + } + + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} + } + + // set md5Sum for content protection. + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) + } + + // For anonymous requests just return. + if signerType.IsAnonymous() { + return req, nil + } + + switch { + case signerType.IsV2(): + // Add signature version '2' authorization header. + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.streamSha256 && !c.secure: + if len(metadata.trailer) > 0 { + req.Trailer = metadata.trailer + } + // Streaming signature is used by default for a PUT object request. + // Additionally, we also look if the initialized client is secure, + // if yes then we don't need to perform streaming signature. 
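+ // This branch is reached only when streaming SHA256 signing is requested + // over a plaintext (non-TLS) connection; requests over TLS fall through to + // the default signature V4 path below.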
+ req = signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC(), c.sha256Hasher()) + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex + if len(metadata.trailer) > 0 { + // Sanity check, we should not end up here if upstream is sane. + return nil, errors.New("internal error: contentSHA256Hex with trailer not supported") + } + } else if len(metadata.trailer) > 0 { + shaHeader = unsignedPayloadTrailer + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = signer.SignV4Trailer(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.trailer) + } + + // Return request. + return req, nil +} + +// set User agent. +func (c *Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. +func (c *Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. + if strings.Contains(bucketName, ".") { + return nil, errTransferAccelerationBucket(bucketName) + } + // If transfer acceleration is requested set new host. + // For more details about enabling transfer acceleration read here. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + host = c.s3AccelerateEndpoint + } else { + // Do not change the host if the endpoint URL is a FIPS S3 endpoint or a S3 PrivateLink interface endpoint + if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) && !s3utils.IsAmazonPrivateLinkEndpoint(*c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation, c.s3DualstackEnabled) + } + } + } + + // Save scheme. + scheme := c.endpointURL.Scheme + + // Strip port 80 and 443 so we won't send these ports in Host header. + // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { + host = "[" + h + "]" + } + } + } + + urlStr := scheme + "://" + host + "/" + + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr += s3utils.EncodePath(objectName) + } + } else { + // If not fall back to using path style. 
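+ // Path style yields https://host/bucket/object, versus the virtual-host + // form https://bucket.host/object built above.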
+ urlStr = urlStr + bucketName + "/" + if objectName != "" { + urlStr += s3utils.EncodePath(objectName) + } + } + } + + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + + return url.Parse(urlStr) +} + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if bucketName == "" { + return false + } + + if c.lookup == BucketLookupDNS { + return true + } + if c.lookup == BucketLookupPath { + return false + } + + // default to virtual only for Amazon/Google storage. In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go new file mode 100644 index 000000000..b1d3b3852 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -0,0 +1,256 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + "sync" + + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/signer" +) + +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get - Returns a value of a given key if it exists. +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set - Will persist a value into cache. +func (r *bucketLocationCache) Set(bucketName, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete - Deletes a bucket name from cache. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c *Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(ctx, bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. 
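+// The order of precedence is: the client-level region (c.region), the +// in-memory bucket location cache, and finally a fresh request whose result +// is stored back into the cache. A typical lookup via the exported wrapper +// (a sketch; "mybucket" is illustrative): +// +// location, err := c.GetBucketLocation(context.Background(), "mybucket")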
+func (c *Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + + // If a region is set, there is no need to fetch the bucket location. + if c.region != "" { + return c.region, nil + } + + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(ctx, bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + location, err := processBucketLocationResponse(resp, bucketName) + if err != nil { + return "", err + } + c.bucketLocCache.Set(bucketName, location) + return location, nil +} + +// processBucketLocationResponse processes the getBucketLocation http response from the server. +func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { + if resp != nil { + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + // For an access denied error, it could be an anonymous + // request. Move forward and let the top level callers + // succeed if possible based on their policy. + switch errResp.Code { + case "NotImplemented": + switch errResp.Server { + case "AmazonSnowball": + return "snowball", nil + case "cloudflare": + return "us-east-1", nil + } + case "AuthorizationHeaderMalformed": + fallthrough + case "InvalidRegion": + fallthrough + case "AccessDenied": + if errResp.Region == "" { + return "us-east-1", nil + } + return errResp.Region, nil + } + return "", err + } + } + + // Extract location. + var locationConstraint string + err = xmlDecoder(resp.Body, &locationConstraint) + if err != nil { + return "", err + } + + location := locationConstraint + // An empty location defaults to 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // Location can be 'EU'; convert it to the meaningful 'eu-west-1'. + if location == "EU" { + location = "eu-west-1" + } + + // Return the location; the caller (getBucketLocation) saves it into the cache. + return location, nil +} + +// getBucketLocationRequest - wrapper that creates a new getBucketLocation request. +func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { + // Set location query. + urlValues := make(url.Values) + urlValues.Set("location", "") + + // Set get bucket location always as path style. + targetURL := *c.endpointURL + + // Strip standard ports, as makeTargetURL does in api.go. + if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { + if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { + targetURL.Host = h + if ip := net.ParseIP(h); ip != nil && ip.To4() == nil { + targetURL.Host = "[" + h + "]" + } + } + } + + isVirtualStyle := c.isVirtualHostStyleRequest(targetURL, bucketName) + + var urlStr string + + if isVirtualStyle { + urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" + } else { + targetURL.Path = path.Join(bucketName, "") + "/" + targetURL.RawQuery = urlValues.Encode() + urlStr = targetURL.String() + } + + // Get a new HTTP request for the method. + req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, err + } + + // Set UserAgent for the request. + c.setUserAgent(req) + + // Get credentials from the configured credentials provider.
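+ // Below, anonymous credentials leave the request unsigned, V2 credentials + // use SignV2, and everything else is signed with V4 against us-east-1.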
+ value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + if signerType.IsAnonymous() { + return req, nil + } + + if signerType.IsV2() { + req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualStyle) + return req, nil + } + + // Set sha256 sum for signature calculation only with signature version '4'. + contentSha256 := emptySHA256Hex + if c.secure { + contentSha256 = unsignedPayload + } + + req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + return req, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go new file mode 100644 index 000000000..7eb1bf25a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/checksum.go @@ -0,0 +1,223 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2023 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/base64" + "hash" + "hash/crc32" + "io" + "math/bits" + "net/http" +) + +// ChecksumType contains information about the checksum type. +type ChecksumType uint32 + +const ( + + // ChecksumSHA256 indicates a SHA256 checksum. + ChecksumSHA256 ChecksumType = 1 << iota + // ChecksumSHA1 indicates a SHA-1 checksum. + ChecksumSHA1 + // ChecksumCRC32 indicates a CRC32 checksum with IEEE table. + ChecksumCRC32 + // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table. + ChecksumCRC32C + + // Keep after all valid checksums + checksumLast + + // checksumMask is a mask for valid checksum types. + checksumMask = checksumLast - 1 + + // ChecksumNone indicates no checksum. + ChecksumNone ChecksumType = 0 + + amzChecksumAlgo = "x-amz-checksum-algorithm" + amzChecksumCRC32 = "x-amz-checksum-crc32" + amzChecksumCRC32C = "x-amz-checksum-crc32c" + amzChecksumSHA1 = "x-amz-checksum-sha1" + amzChecksumSHA256 = "x-amz-checksum-sha256" +) + +// Is returns if c is all of t. +func (c ChecksumType) Is(t ChecksumType) bool { + return c&t == t +} + +// Key returns the header key. +// returns empty string if invalid or none. 
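+// For example, ChecksumCRC32C.Key() returns "x-amz-checksum-crc32c".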
+func (c ChecksumType) Key() string { + switch c & checksumMask { + case ChecksumCRC32: + return amzChecksumCRC32 + case ChecksumCRC32C: + return amzChecksumCRC32C + case ChecksumSHA1: + return amzChecksumSHA1 + case ChecksumSHA256: + return amzChecksumSHA256 + } + return "" +} + +// KeyCapitalized returns the capitalized key as used in HTTP headers. +func (c ChecksumType) KeyCapitalized() string { + return http.CanonicalHeaderKey(c.Key()) +} + +// RawByteLen returns the size of the un-encoded checksum. +func (c ChecksumType) RawByteLen() int { + switch c & checksumMask { + case ChecksumCRC32, ChecksumCRC32C: + return 4 + case ChecksumSHA1: + return sha1.Size + case ChecksumSHA256: + return sha256.Size + } + return 0 +} + +// Hasher returns a hasher corresponding to the checksum type. +// Returns nil if no checksum. +func (c ChecksumType) Hasher() hash.Hash { + switch c & checksumMask { + case ChecksumCRC32: + return crc32.NewIEEE() + case ChecksumCRC32C: + return crc32.New(crc32.MakeTable(crc32.Castagnoli)) + case ChecksumSHA1: + return sha1.New() + case ChecksumSHA256: + return sha256.New() + } + return nil +} + +// IsSet returns whether the type is valid and known. +func (c ChecksumType) IsSet() bool { + return bits.OnesCount32(uint32(c)) == 1 +} + +// SetDefault will set the checksum if not already set. +func (c *ChecksumType) SetDefault(t ChecksumType) { + if !c.IsSet() { + *c = t + } +} + +// String returns the type as a string. +// CRC32, CRC32C, SHA1, and SHA256 for valid values. +// Empty string for unset and "" if not valid. +func (c ChecksumType) String() string { + switch c & checksumMask { + case ChecksumCRC32: + return "CRC32" + case ChecksumCRC32C: + return "CRC32C" + case ChecksumSHA1: + return "SHA1" + case ChecksumSHA256: + return "SHA256" + case ChecksumNone: + return "" + } + return "" +} + +// ChecksumReader reads all of r and returns a checksum of type c. +// Returns any error that may have occurred while reading. +func (c ChecksumType) ChecksumReader(r io.Reader) (Checksum, error) { + h := c.Hasher() + if h == nil { + return Checksum{}, nil + } + _, err := io.Copy(h, r) + if err != nil { + return Checksum{}, err + } + return NewChecksum(c, h.Sum(nil)), nil +} + +// ChecksumBytes returns a checksum of the content b with type c. +func (c ChecksumType) ChecksumBytes(b []byte) Checksum { + h := c.Hasher() + if h == nil { + return Checksum{} + } + n, err := h.Write(b) + if err != nil || n != len(b) { + // Shouldn't happen with these checksummers. + return Checksum{} + } + return NewChecksum(c, h.Sum(nil)) +} + +// Checksum is a type and encoded value. +type Checksum struct { + Type ChecksumType + r []byte +} + +// NewChecksum sets the checksum to the value of b, +// which is the raw hash output. +// If the length of c does not match t.RawByteLen, +// a checksum with ChecksumNone is returned. +func NewChecksum(t ChecksumType, b []byte) Checksum { + if t.IsSet() && len(b) == t.RawByteLen() { + return Checksum{Type: t, r: b} + } + return Checksum{} +} + +// NewChecksumString sets the checksum to the value of s, +// which is the base 64 encoded raw hash output. +// If the length of c does not match t.RawByteLen, it is not added. +func NewChecksumString(t ChecksumType, s string) Checksum { + b, _ := base64.StdEncoding.DecodeString(s) + if t.IsSet() && len(b) == t.RawByteLen() { + return Checksum{Type: t, r: b} + } + return Checksum{} +} + +// IsSet returns whether the checksum is valid and known. 
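+// The zero value Checksum{} reports false.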
+func (c Checksum) IsSet() bool { + return c.Type.IsSet() && len(c.r) == c.Type.RawByteLen() +} + +// Encoded returns the encoded value. +// Returns the empty string if not set or valid. +func (c Checksum) Encoded() string { + if !c.IsSet() { + return "" + } + return base64.StdEncoding.EncodeToString(c.r) +} + +// Raw returns the raw checksum value if set. +func (c Checksum) Raw() []byte { + if !c.IsSet() { + return nil + } + return c.r +} diff --git a/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md new file mode 100644 index 000000000..cb232c3c6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/code_of_conduct.md @@ -0,0 +1,80 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior, in compliance with the +licensing terms applying to the Project developments. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. However, these actions shall respect the +licensing terms of the Project Developments that will always supersede such +Code of Conduct. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at dev@min.io. The project team +will review and investigate all complaints, and will respond in a way that it deems +appropriate to the circumstances. 
The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +This version includes a clarification to ensure that the code of conduct is in +compliance with the free software licensing terms of the project. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/minio/minio-go/v7/constants.go b/vendor/github.com/minio/minio-go/v7/constants.go new file mode 100644 index 000000000..4099a37f9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/constants.go @@ -0,0 +1,130 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Multipart upload defaults. + +// absMinPartSize - absolute minimum part size (5 MiB) below which +// a part in a multipart upload may not be uploaded. +const absMinPartSize = 1024 * 1024 * 5 + +// minPartSize - minimum part size 16MiB per object after which +// putObject behaves internally as multipart. +const minPartSize = 1024 * 1024 * 16 + +// maxPartsCount - maximum number of parts for a single multipart session. +const maxPartsCount = 10000 + +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. +const maxPartSize = 1024 * 1024 * 1024 * 5 + +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 + +// maxMultipartPutObjectSize - maximum size 5TiB of object for +// Multipart operation. +const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// unsignedPayloadTrailer value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload, but have a trailer. +const unsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" + +// Total number of parallel workers used for multipart operation. +const totalWorkers = 4 + +// Signature related constants. 
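+// signV4Algorithm is the scheme name carried in the Authorization header; +// iso8601DateFormat is the Go time layout used for X-Amz-Date values +// (for example, 20130524T000000Z).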
+const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" +) + +const ( + // GetObjectAttributesTags are tags used to defined + // return values for the GetObjectAttributes API + GetObjectAttributesTags = "ETag,Checksum,StorageClass,ObjectSize,ObjectParts" + // GetObjectAttributesMaxParts defined the default maximum + // number of parts returned by GetObjectAttributes + GetObjectAttributesMaxParts = 1000 +) + +const ( + // Response Headers + + // ETag is a common response header + ETag = "ETag" + + // Storage class header. + amzStorageClass = "X-Amz-Storage-Class" + + // Website redirect location header + amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" + + // GetObjectAttributes headers + amzPartNumberMarker = "X-Amz-Part-Number-Marker" + amzExpectedBucketOnwer = "X-Amz-Expected-Bucket-Owner" + amzMaxParts = "X-Amz-Max-Parts" + amzObjectAttributes = "X-Amz-Object-Attributes" + + // Object Tagging headers + amzTaggingHeader = "X-Amz-Tagging" + amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" + + amzVersionID = "X-Amz-Version-Id" + amzTaggingCount = "X-Amz-Tagging-Count" + amzExpiration = "X-Amz-Expiration" + amzRestore = "X-Amz-Restore" + amzReplicationStatus = "X-Amz-Replication-Status" + amzDeleteMarker = "X-Amz-Delete-Marker" + + // Object legal hold header + amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold" + + // Object retention header + amzLockMode = "X-Amz-Object-Lock-Mode" + amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date" + amzBypassGovernance = "X-Amz-Bypass-Governance-Retention" + + // Replication status + amzBucketReplicationStatus = "X-Amz-Replication-Status" + // Minio specific Replication/lifecycle transition extension + minIOBucketSourceMTime = "X-Minio-Source-Mtime" + + minIOBucketSourceETag = "X-Minio-Source-Etag" + minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" + minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" + minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" + minIOBucketReplicationCheck = "X-Minio-Source-Replication-Check" + + // Header indicates last tag update time on source + minIOBucketReplicationTaggingTimestamp = "X-Minio-Source-Replication-Tagging-Timestamp" + // Header indicates last retention update time on source + minIOBucketReplicationObjectRetentionTimestamp = "X-Minio-Source-Replication-Retention-Timestamp" + // Header indicates last legalhold update time on source + minIOBucketReplicationObjectLegalHoldTimestamp = "X-Minio-Source-Replication-LegalHold-Timestamp" + minIOForceDelete = "x-minio-force-delete" + // Header indicates delete marker replication request can be sent by source now. + minioTgtReplicationReady = "X-Minio-Replication-Ready" + // Header asks if delete marker replication request can be sent by source now. + isMinioTgtReplicationReady = "X-Minio-Check-Replication-Ready" +) diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go new file mode 100644 index 000000000..99b99db9b --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/core.go @@ -0,0 +1,151 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "net/http" + + "github.com/minio/minio-go/v7/pkg/encrypt" +) + +// Core - Embeds Client and adds new methods to expose the low-level S3 APIs. +type Core struct { + *Client +} + +// NewCore - Returns a new initialized Core client. This Core client should +// only be used under special conditions, such as needing to access lower-level +// primitives in order to write your own wrappers. +func NewCore(endpoint string, opts *Options) (*Core, error) { + var s3Client Core + client, err := New(endpoint, opts) + if err != nil { + return nil, err + } + s3Client.Client = client + return &s3Client, nil +} + +// ListObjects - Lists all the objects at a prefix, optionally with marker and delimiter; +// you can further filter the results. +func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { + return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) +} + +// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses +// continuationToken instead of marker to support iteration over the results. +func (c Core) ListObjectsV2(bucketName, objectPrefix, startAfter, continuationToken, delimiter string, maxkeys int) (ListBucketV2Result, error) { + return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, true, false, delimiter, startAfter, maxkeys, nil) +} + +// CopyObject - copies an object from a source object to a destination object on the server side. +func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { + return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts) +} + +// CopyObjectPart - creates a part in a multipart upload by copying (a +// part of) an existing object. +func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, + partID int, startOffset, length int64, metadata map[string]string, +) (p CompletePart, err error) { + return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, + partID, startOffset, length, metadata) +} + +// PutObject - Uploads an object using a single PUT call. +func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) { + hookReader := newHook(data, opts.Progress) + return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts) +} + +// NewMultipartUpload - Initiates a new multipart upload and returns the new uploadID. +func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) { + result, err := c.initiateMultipartUpload(ctx, bucket, object, opts) + return result.UploadID, err +} + +// ListMultipartUploads - Lists incomplete uploads.
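+// Results are paginated: pass the NextKeyMarker and NextUploadIDMarker of +// the previous ListMultipartUploadsResult as keyMarker and uploadIDMarker +// to fetch the next page.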
+func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { + return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) +} + +// PutObjectPartOptions contains options for the PutObjectPart API +type PutObjectPartOptions struct { + Md5Base64, Sha256Hex string + SSE encrypt.ServerSide + CustomHeader, Trailer http.Header + DisableContentSha256 bool +} + +// PutObjectPart - Uploads an object part. +func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, + data io.Reader, size int64, opts PutObjectPartOptions, +) (ObjectPart, error) { + p := uploadPartParams{ + bucketName: bucket, + objectName: object, + uploadID: uploadID, + reader: data, + partNumber: partID, + md5Base64: opts.Md5Base64, + sha256Hex: opts.Sha256Hex, + size: size, + sse: opts.SSE, + streamSha256: !opts.DisableContentSha256, + customHeader: opts.CustomHeader, + trailer: opts.Trailer, + } + return c.uploadPart(ctx, p) +} + +// ListObjectParts - Lists the uploaded parts of an incomplete upload. +func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListObjectPartsResult, err error) { + return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts) +} + +// CompleteMultipartUpload - Concatenates uploaded parts and commits them to an object. +func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (UploadInfo, error) { + res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ + Parts: parts, + }, opts) + return res, err +} + +// AbortMultipartUpload - Aborts an incomplete upload. +func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { + return c.abortMultipartUpload(ctx, bucket, object, uploadID) +} + +// GetBucketPolicy - fetches the bucket access policy for a given bucket. +func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { + return c.getBucketPolicy(ctx, bucket) +} + +// PutBucketPolicy - applies a new bucket access policy for a given bucket. +func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { + return c.putBucketPolicy(ctx, bucket, bucketPolicy) +} + +// GetObject is a lower-level API implemented to support reading +// partial objects and also downloading objects with special conditions +// matching ETag, modtime, etc. +func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { + return c.getObject(ctx, bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go new file mode 100644 index 000000000..780dc8997 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -0,0 +1,15046 @@ +//go:build mint +// +build mint + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "archive/zip" + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "log/slog" + "math/rand" + "mime/multipart" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/dustin/go-humanize" + "github.com/google/uuid" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/cors" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/encrypt" + "github.com/minio/minio-go/v7/pkg/notification" + "github.com/minio/minio-go/v7/pkg/tags" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits + letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits +) + +const ( + serverEndpoint = "SERVER_ENDPOINT" + accessKey = "ACCESS_KEY" + secretKey = "SECRET_KEY" + enableHTTPS = "ENABLE_HTTPS" + appVersion = "0.1.0" +) + +// mustParseBool parses a bool from its string form, returning false on error. +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +// createHTTPTransport returns the HTTP transport used by the test clients. +func createHTTPTransport() (transport *http.Transport) { + var err error + transport, err = minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + if err != nil { + return nil + } + return +} + +// readFull mirrors io.ReadFull: it reads exactly len(buf) bytes from r into +// buf, returning io.ErrUnexpectedEOF if EOF is reached after some but not all +// of the bytes were read. +var readFull = func(r io.Reader, buf []byte) (n int, err error) { + for n < len(buf) && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= len(buf) { + err = nil + } else if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +func baseLogger(testName, function string, args map[string]interface{}, startTime time.Time) *slog.Logger { + // calculate the test case duration + duration := time.Since(startTime) + // log with the fields as per mint + l := slog.With( + "name", "minio-go: "+testName, + "duration", duration.Nanoseconds()/1000000, + ) + if function != "" { + l = l.With("function", function) + } + if len(args) > 0 { + l = l.With("args", args) + } + return l +} + +// log successful test runs +func logSuccess(testName, function string, args map[string]interface{}, startTime time.Time) { + baseLogger(testName, function, args, startTime). + With("status", "PASS"). + Info("") +} + +// A few of the features are not currently available in Gateway(s); check if the err value is NotImplemented, +// and log as NA in that case and continue execution. Otherwise log as failure and return. +func logError(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { + // If the server returns NotImplemented, we assume it is gateway mode and hence log it as info and move on to the next tests + // Special case for the ComposeObject API as it is implemented on the client side and adds specific error details like `Error in upload-part-copy` in + // addition to the NotImplemented error returned from the server + if isErrNotImplemented(err) { + logIgnored(testName, function, args, startTime, message) + } else { + logFailure(testName, function, args, startTime, alert, message, err) + if !isRunOnFail() { + panic(err) + } + } +} + +// Log failed test runs. Do not call this directly; use logError instead, as that correctly stops the test run. +func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { + l := baseLogger(testName, function, args, startTime).With( + "status", "FAIL", + "alert", alert, + "message", message, + ) + + if err != nil { + l = l.With("error", err) + } + + l.Error("") +} + +// log not applicable test runs +func logIgnored(testName, function string, args map[string]interface{}, startTime time.Time, alert string) { + baseLogger(testName, function, args, startTime).
+ With( + "status", "NA", + "alert", strings.Split(alert, " ")[0]+" is NotImplemented", + ).Info("") +} + +// Delete objects in given bucket, recursively +func cleanupBucket(bucketName string, c *minio.Client) error { + // Create a done channel to control 'ListObjectsV2' go routine. + doneCh := make(chan struct{}) + // Exit cleanly upon return. + defer close(doneCh) + // Iterate over all objects in the bucket via listObjectsV2 and delete + for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { + if objCh.Err != nil { + return objCh.Err + } + if objCh.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + return c.RemoveBucket(context.Background(), bucketName) +} + +func cleanupVersionedBucket(bucketName string, c *minio.Client) error { + doneCh := make(chan struct{}) + defer close(doneCh) + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + if obj.Err != nil { + return obj.Err + } + if obj.Key != "" { + err := c.RemoveObject(context.Background(), bucketName, obj.Key, + minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) + if err != nil { + return err + } + } + } + for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { + if objPartInfo.Err != nil { + return objPartInfo.Err + } + if objPartInfo.Key != "" { + err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) + if err != nil { + return err + } + } + } + // objects are already deleted, clear the buckets now + err := c.RemoveBucket(context.Background(), bucketName) + if err != nil { + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { + slog.Info("found object", "key", obj.Key, "version", obj.VersionID) + } + } + return err +} + +func isErrNotImplemented(err error) bool { + return minio.ToErrorResponse(err).Code == "NotImplemented" +} + +func isRunOnFail() bool { + return os.Getenv("RUN_ON_FAIL") == "1" +} + +func init() { + // If server endpoint is not set, all tests default to + // using https://play.min.io + if os.Getenv(serverEndpoint) == "" { + os.Setenv(serverEndpoint, "play.min.io") + os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") + os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") + os.Setenv(enableHTTPS, "1") + } +} + +var mintDataDir = os.Getenv("MINT_DATA_DIR") + +func getMintDataDirFilePath(filename string) (fp string) { + if mintDataDir == "" { + return + } + return filepath.Join(mintDataDir, filename) +} + +func newRandomReader(seed, size int64) io.Reader { + return io.LimitReader(rand.New(rand.NewSource(seed)), size) +} + +func mustCrcReader(r io.Reader) uint32 { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + return crc.Sum32() +} + +func crcMatches(r io.Reader, want uint32) error { + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want 
{ + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +func crcMatchesName(r io.Reader, name string) error { + want := dataFileCRC32[name] + crc := crc32.NewIEEE() + _, err := io.Copy(crc, r) + if err != nil { + panic(err) + } + got := crc.Sum32() + if got != want { + return fmt.Errorf("crc mismatch, want %x, got %x", want, got) + } + return nil +} + +// read data from file if it exists or optionally create a buffer of particular size +func getDataReader(fileName string) io.ReadCloser { + if mintDataDir == "" { + size := int64(dataFileMap[fileName]) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) + } + return io.NopCloser(newRandomReader(size, size)) + } + reader, _ := os.Open(getMintDataDirFilePath(fileName)) + if _, ok := dataFileCRC32[fileName]; !ok { + dataFileCRC32[fileName] = mustCrcReader(reader) + reader.Close() + reader, _ = os.Open(getMintDataDirFilePath(fileName)) + } + return reader +} + +// randString generates random names and prepends them with a known prefix. +func randString(n int, src rand.Source, prefix string) string { + b := make([]byte, n) + // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { + if remain == 0 { + cache, remain = src.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + return prefix + string(b[0:30-len(prefix)]) +} + +var dataFileMap = map[string]int{ + "datafile-0-b": 0, + "datafile-1-b": 1, + "datafile-1-kB": 1 * humanize.KiByte, + "datafile-10-kB": 10 * humanize.KiByte, + "datafile-33-kB": 33 * humanize.KiByte, + "datafile-100-kB": 100 * humanize.KiByte, + "datafile-1.03-MB": 1056 * humanize.KiByte, + "datafile-1-MB": 1 * humanize.MiByte, + "datafile-5-MB": 5 * humanize.MiByte, + "datafile-6-MB": 6 * humanize.MiByte, + "datafile-11-MB": 11 * humanize.MiByte, + "datafile-65-MB": 65 * humanize.MiByte, + "datafile-129-MB": 129 * humanize.MiByte, +} + +var dataFileCRC32 = map[string]uint32{} + +func isFullMode() bool { + return os.Getenv("MINT_MODE") == "full" +} + +func getFuncName() string { + return getFuncNameLoc(2) +} + +func getFuncNameLoc(caller int) string { + pc, _, _, _ := runtime.Caller(caller) + return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") +} + +// Tests bucket re-create errors. +func testMakeBucketError() { + region := "eu-central-1" + + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. 
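+ // The "minio-go-test-" prefix keeps generated buckets identifiable, so + // stray buckets left behind by failed runs are easy to spot and clean up.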
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket Failed", err) + return + } + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "Bucket already exists", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testMetadataSizeLimit() { + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, objectSize, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts.UserMetadata": "", + } + rand.Seed(startTime.Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + const HeaderSizeLimit = 8 * 1024 + const UserMetadataLimit = 2 * 1024 + + // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail + metadata := make(map[string]string) + metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) + return + } + + // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail + metadata = make(map[string]string) + metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) + args["metadata"] = fmt.Sprint(metadata) + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) + if err == nil { + logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests various bucket supported 
formats. +func testMakeBucketRegions() { + region := "eu-central-1" + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + // initialize logging params + args := map[string]interface{}{ + "bucketName": "", + "region": region, + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket in 'eu-central-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + // Make a new bucket with '.' in its name, in 'us-west-2'. This + // request is internally staged into a path style instead of + // virtual host style. + region = "us-west-2" + args["region"] = region + if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName+".withperiod", c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + logSuccess(testName, function, args, startTime) +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectReadAt() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "objectContentType", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
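+	// The 129-MB data file used below is, per this test's intent, large enough
+	// to push PutObject into its multipart ReadAt path. A hedged sketch of
+	// verifying content inline while uploading, instead of the post-download
+	// crcMatchesName check this test performs (names as used in this file):
+	//
+	//	crc := crc32.NewIEEE()
+	//	tee := io.TeeReader(reader, crc) // hash the bytes as they are uploaded
+	//	_, err := c.PutObject(context.Background(), bucketName, objectName, tee, int64(bufSize), minio.PutObjectOptions{})
+	//	want := crc.Sum32() // compare against a CRC of the downloaded bytes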
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object content type + objectContentType := "binary/octet-stream" + args["objectContentType"] = objectContentType + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Get Object failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat Object failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "Content types don't match", err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testListObjectVersions() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ListObjectVersions(bucketName, prefix, recursive)" + args := map[string]interface{}{ + "bucketName": "", + "prefix": "", + "recursive": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
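+	// ObjectLocking: true below creates the bucket with versioning implicitly
+	// enabled on S3-compatible servers; the explicit EnableVersioning call keeps
+	// the precondition unambiguous. A hedged sketch of asserting that state
+	// directly (assuming the GetBucketVersioning API of minio-go v7):
+	//
+	//	cfg, err := c.GetBucketVersioning(context.Background(), bucketName)
+	//	if err == nil && !cfg.Enabled() {
+	//		// versioning is off: the version assertions below would fail
+	//	}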
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected object deletion", err) + return + } + + var deleteMarkers, versions int + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if info.Key != objectName { + logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected version id in listing objects", nil) + return + } + if info.IsDeleteMarker { + deleteMarkers++ + if !info.IsLatest { + logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) + return + } + } else { + versions++ + } + } + + if deleteMarkers != 1 { + logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) + return + } + + if versions != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testStatObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "StatObject" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
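+	// The client construction that follows is repeated verbatim by nearly every
+	// test in this file. A hedged sketch of a shared helper (hypothetical, not
+	// part of this change) that would centralize the boilerplate:
+	//
+	//	func newTestClient() (*minio.Client, error) {
+	//		return minio.New(os.Getenv(serverEndpoint), &minio.Options{
+	//			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+	//			Transport: createHTTPTransport(),
+	//			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+	//		})
+	//	}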
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-10-kB"] + reader := getDataReader("datafile-10-kB") + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + bufSize = dataFileMap["datafile-1-b"] + reader = getDataReader("datafile-1-b") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + reader.Close() + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + for i := 0; i < len(results); i++ { + opts := minio.StatObjectOptions{VersionID: results[i].VersionID} + statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during HEAD object", err) + return + } + if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = 
cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Save the contents of datafiles to check with GetObject() reader output later + var buffers [][]byte + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + buffers = append(buffers, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != 2 { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.SliceStable(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error 
during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testPutObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + const n = 10 + // Read input... + + // Save the data concurrently. 
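+	// Each uploader goroutine below records its error in its own slot of a
+	// fixed-size array, so no mutex is needed; the wg.Wait() that follows
+	// establishes the happens-before edge that makes reading errs safe.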
+ var wg sync.WaitGroup + wg.Add(n) + buffers := make([][]byte, n) + var errs [n]error + for i := 0; i < n; i++ { + r := newRandomReader(int64((1<<20)*i+i), int64(i)) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + buffers[i] = buf + + go func(i int) { + defer wg.Done() + _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + if len(results) != n { + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + sort.Slice(results, func(i, j int) bool { + return results[i].Size < results[j].Size + }) + + sort.Slice(buffers, func(i, j int) bool { + return len(buffers[i]) < len(buffers[j]) + }) + + for i := 0; i < len(results); i++ { + opts := minio.GetObjectOptions{VersionID: results[i].VersionID} + reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "error during GET object", err) + return + } + statInfo, err := reader.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) + return + } + if statInfo.ETag != results[i].ETag { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) + return + } + if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) + return + } + if statInfo.Size != results[i].Size { + logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) + return + } + + tmpBuffer := bytes.NewBuffer([]byte{}) + _, err = io.Copy(tmpBuffer, reader) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) + return + } + + if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { + logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) + return + } + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testListMultipartUpload() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject()" + args := map[string]interface{}{} + + // Instantiate new minio client object. 
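+	// This test drives S3 multipart primitives directly, so alongside the
+	// regular client it builds a low-level minio.Core from the same options.
+	// A hedged sketch of the primitive lifecycle exercised below (uploads are
+	// only created, listed, and aborted, never completed):
+	//
+	//	uid, err := core.NewMultipartUpload(ctx, bucketName, objName, minio.PutObjectOptions{})
+	//	res, err := core.ListMultipartUploads(ctx, bucketName, objName, keyMarker, uploadIDMarker, "/", 1000)
+	//	err = core.AbortMultipartUpload(ctx, bucketName, objName, uid)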
+ opts := &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + } + c, err := minio.New(os.Getenv(serverEndpoint), opts) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + core, err := minio.NewCore(os.Getenv(serverEndpoint), opts) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + ctx := context.Background() + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + defer func() { + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + } + }() + objName := "prefix/objectName" + + want := minio.ListMultipartUploadsResult{ + Bucket: bucketName, + KeyMarker: "", + UploadIDMarker: "", + NextKeyMarker: "", + NextUploadIDMarker: "", + EncodingType: "url", + MaxUploads: 1000, + IsTruncated: false, + Prefix: "prefix/objectName", + Delimiter: "/", + CommonPrefixes: nil, + } + for i := 0; i < 5; i++ { + uid, err := core.NewMultipartUpload(ctx, bucketName, objName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload failed", err) + return + } + want.Uploads = append(want.Uploads, minio.ObjectMultipartInfo{ + Initiated: time.Time{}, + StorageClass: "", + Key: objName, + Size: 0, + UploadID: uid, + Err: nil, + }) + + for j := 0; j < 5; j++ { + cmpGot := func(call string, got minio.ListMultipartUploadsResult) bool { + for i := range got.Uploads { + got.Uploads[i].Initiated = time.Time{} + } + if !reflect.DeepEqual(want, got) { + err := fmt.Errorf("want: %#v\ngot : %#v", want, got) + logError(testName, function, args, startTime, "", call+" failed", err) + } + return true + } + got, err := core.ListMultipartUploads(ctx, bucketName, objName, "", "", "/", 1000) + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-prefix", got) { + return + } + got, err = core.ListMultipartUploads(ctx, bucketName, objName, objName, "", "/", 1000) + got.KeyMarker = "" + if err != nil { + logError(testName, function, args, startTime, "", "ListMultipartUploads failed", err) + return + } + if !cmpGot("ListMultipartUploads-marker", got) { + return + } + } + if i > 2 { + err = core.AbortMultipartUpload(ctx, bucketName, objName, uid) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + want.Uploads = want.Uploads[:len(want.Uploads)-1] + } + } + for _, up := range want.Uploads { + err = core.AbortMultipartUpload(ctx, bucketName, objName, up.UploadID) + if err != nil { + logError(testName, function, args, startTime, "", "AbortMultipartUpload failed", err) + return + } + } + logSuccess(testName, function, args, startTime) +} + +func 
testCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-1-b", "datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy + if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", 
minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := io.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testConcurrentCopyObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
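+	// As in the previous test, CopySrcOptions.VersionID pins the copy source to
+	// one specific object version. A minimal hedged sketch of such a copy:
+	//
+	//	src := minio.CopySrcOptions{Bucket: bucketName, Object: objectName, VersionID: versionID}
+	//	dst := minio.CopyDestOptions{Bucket: bucketName, Object: objectName + "-copy"}
+	//	_, err := c.CopyObject(context.Background(), dst, src)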
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + testFiles := []string{"datafile-10-kB"} + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var infos []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + infos = append(infos, info) + } + + sort.Slice(infos, func(i, j int) bool { + return infos[i].Size < infos[j].Size + }) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) + return + } + + oldestContent, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) + return + } + + // Copy Source + srcOpts := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: infos[0].VersionID, + } + args["src"] = srcOpts + + dstOpts := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + args["dst"] = dstOpts + + // Perform the Copy concurrently + const n = 10 + var wg sync.WaitGroup + wg.Add(n) + var errs [n]error + for i := 0; i < n; i++ { + go func(i int) { + defer wg.Done() + _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) + }(i) + } + wg.Wait() + for _, err := range errs { + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) + infos = []minio.ObjectInfo{} + for info := range objectsInfo { + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer readerCopy.Close() + + newestContent, err := io.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) + return + } + + if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + infos = append(infos, info) + } + + if len(infos) != n 
{ + logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testComposeObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} + testFiles := []string{"datafile-5-MB", "datafile-10-kB"} + var testFilesBytes [][]byte + + for _, testFile := range testFiles { + r := getDataReader(testFile) + buf, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "unexpected failure", err) + return + } + r.Close() + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + testFilesBytes = append(testFilesBytes, buf) + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var results []minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + results = append(results, info) + } + + sort.SliceStable(results, func(i, j int) bool { + return results[i].Size > results[j].Size + }) + + // Source objects to concatenate. 
We also specify decryption + // key for each + src1 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[0].VersionID, + } + + src2 := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + VersionID: results[1].VersionID, + } + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName + "-copy", + } + + _, err = c.ComposeObject(context.Background(), dst, src1, src2) + if err != nil { + logError(testName, function, args, startTime, "", "ComposeObject failed", err) + return + } + + // Destination object + readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) + return + } + defer readerCopy.Close() + + copyContentBytes, err := io.ReadAll(readerCopy) + if err != nil { + logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) + return + } + + var expectedContent []byte + for _, fileBytes := range testFilesBytes { + expectedContent = append(expectedContent, fileBytes...) + } + + if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { + logError(testName, function, args, startTime, "", "Unexpected destination object content", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testRemoveObjectWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObject()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
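+	// On a versioned bucket, RemoveObject has two distinct behaviors, and this
+	// test exercises both in turn. A hedged sketch:
+	//
+	//	// with a VersionID: that version is permanently deleted
+	//	err := c.RemoveObject(ctx, bucketName, objectName, minio.RemoveObjectOptions{VersionID: versionID})
+	//	// without one: only a delete marker is created, the versions remain
+	//	err = c.RemoveObject(ctx, bucketName, objectName, minio.RemoveObjectOptions{})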
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + var version minio.ObjectInfo + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + version = info + break + } + + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + for range objectsInfo { + logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) + return + } + // test delete marker version id is non-null + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // create delete marker + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "DeleteObject failed", err) + return + } + objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + idx := 0 + for info := range objectsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + if idx == 0 { + if !info.IsDeleteMarker { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to have been created", err) + return + } + if info.VersionID == "" { + logError(testName, function, args, startTime, "", "Unexpected error - expected delete marker to be versioned", err) + return + } + } + idx++ + } + + defer cleanupBucket(bucketName, c) + + logSuccess(testName, function, args, startTime) +} + +func testRemoveObjectsWithVersioning() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "DeleteObjects()" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
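+	// Unlike the single RemoveObject calls above, RemoveObjects consumes its
+	// targets from a channel and reports failures on a returned channel, so the
+	// listing that feeds it runs in a separate goroutine below.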
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Make bucket failed", err)
+		return
+	}
+
+	err = c.EnableVersioning(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Enable versioning failed", err)
+		return
+	}
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	objectsVersions := make(chan minio.ObjectInfo)
+	go func() {
+		// Close the channel even on early return so RemoveObjects cannot block forever.
+		defer close(objectsVersions)
+		objectsVersionsInfo := c.ListObjects(context.Background(), bucketName,
+			minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+		for info := range objectsVersionsInfo {
+			if info.Err != nil {
+				logError(testName, function, args, startTime, "", "Unexpected error during listing objects", info.Err)
+				return
+			}
+			objectsVersions <- info
+		}
+	}()
+
+	// RemoveObjects reports per-object failures on the returned channel; drain it.
+	removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{})
+	for e := range removeErrors {
+		if e.Err != nil {
+			logError(testName, function, args, startTime, "", "Single delete operation failed", e.Err)
+			return
+		}
+	}
+
+	objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true})
+	for range objectsVersionsInfo {
+		logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any", nil)
+		return
+	}
+
+	err = c.RemoveBucket(context.Background(), bucketName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+func testObjectTaggingWithVersioning() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "{Get,Set,Remove}ObjectTagging()"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + err = c.EnableVersioning(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "Enable versioning failed", err) + return + } + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + for _, file := range []string{"datafile-1-b", "datafile-10-kB"} { + _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) + + var versions []minio.ObjectInfo + for info := range versionsInfo { + if info.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) + return + } + versions = append(versions, info) + } + + sort.SliceStable(versions, func(i, j int) bool { + return versions[i].Size < versions[j].Size + }) + + tagsV1 := map[string]string{"key1": "val1"} + t1, err := tags.MapToObjectTags(tagsV1) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + tagsV2 := map[string]string{"key2": "val2"} + t2, err := tags.MapToObjectTags(tagsV2) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) + return + } + + err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + tagsEqual := func(tags1, tags2 map[string]string) bool { + for k1, v1 := range tags1 { + v2, found := tags2[k1] + if found { + if v1 != v2 { + return false + } + } + } + return true + } + + gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content 
(1)", err) + return + } + + gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err) + return + } + + if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) + return + } + + emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName, + minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) + return + } + + if len(emptyTags.ToMap()) != 0 { + logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) + return + } + + // Delete all objects and their versions as long as the bucket itself + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with custom checksums. +func testPutObjectWithChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + } + + for _, test := range tests { + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + h := test.cs.Hasher() + h.Reset() + + // Test with Wrong CRC. + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + args["metadata"] = meta + args["range"] = "false" + args["checksum"] = test.cs.String() + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: meta, + }) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err) + return + } + + // Set correct CRC. + h.Write(b) + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + reader.Close() + + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + args["range"] = "true" + err = gopts.SetRange(100, 1000) + if err != nil { + logError(testName, function, args, 
startTime, "", "SetRange failed", err) + return + } + r, err = c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + b, err = io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + st, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Range requests should return empty checksums... + cmpChecksum(st.ChecksumSHA256, "") + cmpChecksum(st.ChecksumSHA1, "") + cmpChecksum(st.ChecksumCRC32, "") + cmpChecksum(st.ChecksumCRC32C, "") + + delete(args, "range") + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with custom checksums. +func testPutObjectWithTrailingChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + } + + for _, test := range tests { + function := "PutObject(bucketName, objectName, reader,size, opts)" + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + h := test.cs.Hasher() + h.Reset() + + // Test with Wrong CRC. 
+ args["metadata"] = meta + args["range"] = "false" + args["checksum"] = test.cs.String() + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, + Checksum: test.cs, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + h.Write(b) + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + + function = "GetObject(...)" + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + function = "GetObject( Range...)" + args["range"] = "true" + err = gopts.SetRange(100, 1000) + if err != nil { + logError(testName, function, args, startTime, "", "SetRange failed", err) + return + } + r, err = c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + b, err = io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + st, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Range requests should return empty checksums... + cmpChecksum(st.ChecksumSHA256, "") + cmpChecksum(st.ChecksumSHA1, "") + cmpChecksum(st.ChecksumCRC32, "") + cmpChecksum(st.ChecksumCRC32C, "") + + function = "GetObjectAttributes(...)" + s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + delete(args, "range") + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with custom checksums. 
+func testPutMultipartObjectWithChecksums(trailing bool) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing), + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: trailing, + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { + r := bytes.NewReader(b) + tmp := make([]byte, partSize) + parts := 0 + var all []byte + for { + n, err := io.ReadFull(r, tmp) + if err != nil && err != io.ErrUnexpectedEOF { + logError(testName, function, args, startTime, "", "Calc crc failed", err) + } + if n == 0 { + break + } + parts++ + hasher.Reset() + hasher.Write(tmp[:n]) + all = append(all, hasher.Sum(nil)...) + if err != nil { + break + } + } + hasher.Reset() + hasher.Write(all) + return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) + } + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + } + + for _, test := range tests { + bufSize := dataFileMap["datafile-129-MB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + args["checksum"] = test.cs.String() + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + //fmt.Printf("want %s, got %s\n", want, got) + return + } + } + + const partSize = 10 << 20 + reader := getDataReader("datafile-129-MB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + reader.Close() + h := test.cs.Hasher() + h.Reset() + want := hashMultiPart(b, partSize, test.cs.Hasher()) + + var cs minio.ChecksumType + rd := io.Reader(io.NopCloser(bytes.NewReader(b))) + if trailing { + cs = test.cs + rd = bytes.NewReader(b) + } + // Set correct CRC. 
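+ // When `trailing` is set above, the checksum type is passed via Checksum and
+ // sent as an HTTP trailing header (the reader is left seekable for that);
+ // otherwise only AutoChecksum is set and the client seals each part itself.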
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{ + DisableContentSha256: true, + DisableMultipart: false, + UserMetadata: nil, + PartSize: partSize, + AutoChecksum: test.cs, + Checksum: cs, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + switch test.cs { + case minio.ChecksumCRC32C: + cmpChecksum(resp.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(resp.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(resp.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(resp.ChecksumSHA256, want) + } + + s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + want = want[:strings.IndexByte(want, '-')] + switch test.cs { + case minio.ChecksumCRC32C: + cmpChecksum(s.Checksum.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(s.Checksum.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(s.Checksum.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(s.Checksum.ChecksumSHA256, want) + } + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + gopts.PartNumber = 2 + + // We cannot use StatObject, since it ignores partnumber. + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + io.Copy(io.Discard, r) + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Test part 2 checksum... + h.Reset() + h.Write(b[partSize : 2*partSize]) + want = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + switch test.cs { + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, want) + } + + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with trailing checksums. +func testTrailingChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. 
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { + r := bytes.NewReader(b) + tmp := make([]byte, partSize) + parts := 0 + var all []byte + for { + n, err := io.ReadFull(r, tmp) + if err != nil && err != io.ErrUnexpectedEOF { + logError(testName, function, args, startTime, "", "Calc crc failed", err) + } + if n == 0 { + break + } + parts++ + hasher.Reset() + hasher.Write(tmp[:n]) + all = append(all, hasher.Sum(nil)...) + if err != nil { + break + } + } + hasher.Reset() + hasher.Write(all) + return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) + } + defer cleanupBucket(bucketName, c) + tests := []struct { + header string + hasher hash.Hash + + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + PO minio.PutObjectOptions + }{ + // Currently there is no way to override the checksum type. + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: true, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 5 << 20, + }, + }, + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: true, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 6_645_654, // Rather arbitrary size + }, + }, + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: false, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 5 << 20, + }, + }, + { + header: "x-amz-checksum-crc32c", + hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), + ChecksumCRC32C: "set", + PO: minio.PutObjectOptions{ + DisableContentSha256: false, + DisableMultipart: false, + UserMetadata: nil, + PartSize: 6_645_654, // Rather arbitrary size + }, + }, + } + + for _, test := range tests { + bufSize := dataFileMap["datafile-11-MB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %q, got %q", want, got)) + return + } + } + + reader := getDataReader("datafile-11-MB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + reader.Close() + h := test.hasher + h.Reset() + test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher) + + // Set correct CRC. 
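+ // hashMultiPart above computed the expected composite ("checksum of checksums")
+ // value; with trailing headers enabled the client defaults to CRC32C for
+ // multipart uploads, which is what every table entry asserts via x-amz-checksum-crc32c.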
+ // c.TraceOn(os.Stderr) + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // c.TraceOff() + cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) + cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) + cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) + cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + gopts.PartNumber = 2 + + // We cannot use StatObject, since it ignores partnumber. + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + io.Copy(io.Discard, r) + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Test part 2 checksum... + h.Reset() + p2 := b[test.PO.PartSize:] + if len(p2) > int(test.PO.PartSize) { + p2 = p2[:test.PO.PartSize] + } + h.Write(p2) + got := base64.StdEncoding.EncodeToString(h.Sum(nil)) + if test.ChecksumSHA256 != "" { + cmpChecksum(st.ChecksumSHA256, got) + } + if test.ChecksumSHA1 != "" { + cmpChecksum(st.ChecksumSHA1, got) + } + if test.ChecksumCRC32 != "" { + cmpChecksum(st.ChecksumCRC32, got) + } + if test.ChecksumCRC32C != "" { + cmpChecksum(st.ChecksumCRC32C, got) + } + + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with automatic checksums. +func testPutObjectWithAutomaticChecksums() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + header string + hasher hash.Hash + + // Checksum values + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + }{ + // The built-in logic only adds CRC32C when neither MD5 nor SHA256 is sent. + {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, + } + + // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr) + // defer c.TraceOff() + + for i, test := range tests { + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + + h := test.hasher + h.Reset() + h.Write(b) + meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + args["metadata"] = meta + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: nil, + DisableContentSha256: true, + SendContentMd5: false, + }) + if err == nil { + if i == 0 && resp.ChecksumCRC32C == "" { + logIgnored(testName, function, args, startTime, "Checksums do not appear to be supported by backend") + return + } + } else { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + // Usually this will be the same as above, since we skip automatic checksum when SHA256 content is sent. + // When/if we add a checksum control to PutObjectOptions this will make more sense. + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: nil, + DisableContentSha256: false, + SendContentMd5: false, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // The checksum will not be enabled on HTTP, since it uses SHA256 blocks.
+ if mustParseBool(os.Getenv(enableHTTPS)) { + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + } + + // Set SHA256 header manually + sh256 := sha256.Sum256(b) + meta = map[string]string{"x-amz-checksum-sha256": base64.StdEncoding.EncodeToString(sh256[:])} + args["metadata"] = meta + resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + UserMetadata: meta, + DisableContentSha256: true, + SendContentMd5: false, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectAttributes() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-") + args["bucketName"] = bucketNameV + err = c.MakeBucket( + context.Background(), + bucketNameV, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + err = c.EnableVersioning(context.Background(), bucketNameV) + if err != nil { + logError(testName, function, args, startTime, "", "Unable to enable versioning", err) + return + } + + defer cleanupBucket(bucketName, c) + defer cleanupVersionedBucket(bucketNameV, c) + + testFiles := make(map[string]*objectAttributesNewObject) + testFiles["file1"] = &objectAttributesNewObject{ + Object: "file1", + ObjectReaderType: "datafile-1.03-MB", + Bucket: bucketNameV, + ContentType: "custom/contenttype", + SendContentMd5: false, + } + + testFiles["file2"] = &objectAttributesNewObject{ + Object: "file2", + ObjectReaderType: "datafile-129-MB", + Bucket: bucketName, + ContentType: "custom/contenttype", + SendContentMd5: false, + } + + for i, v := range testFiles { + bufSize := 
dataFileMap[v.ObjectReaderType] + + reader := getDataReader(v.ObjectReaderType) + + args["objectName"] = v.Object + testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: v.ContentType, + SendContentMd5: v.SendContentMd5, + Checksum: minio.ChecksumCRC32C, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + } + + testTable := make(map[string]objectAttributesTableTest) + + testTable["none-versioned"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{}, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-to-0-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 0, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-marker-to-max"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 10000, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["0-to-1-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 0, + MaxParts: 1, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["7-to-6-marker"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{ + PartNumberMarker: 7, + MaxParts: 6, + }, + test: objectAttributesTestOptions{ + TestFileName: "file2", + StorageClass: "STANDARD", + HasFullChecksum: true, + HasPartChecksums: true, + HasParts: true, + }, + } + + testTable["versioned"] = objectAttributesTableTest{ + opts: minio.ObjectAttributesOptions{}, + test: objectAttributesTestOptions{ + TestFileName: "file1", + StorageClass: "STANDARD", + HasFullChecksum: true, + }, + } + + for i, v := range testTable { + + tf, ok := testFiles[v.test.TestFileName] + if !ok { + continue + } + + args["objectName"] = tf.Object + args["bucketName"] = tf.Bucket + if tf.UploadInfo.VersionID != "" { + v.opts.VersionID = tf.UploadInfo.VersionID + } + + s, err := c.GetObjectAttributes(context.Background(), tf.Bucket, tf.Object, v.opts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + + v.test.NumberOfParts = s.ObjectParts.PartsCount + v.test.ETag = tf.UploadInfo.ETag + v.test.ObjectSize = int(tf.UploadInfo.Size) + + err = validateObjectAttributeRequest(s, &v.opts, &v.test) + if err != nil { + logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed, table test: "+i, err) + return + } + + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectAttributesSSECEncryption() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick 
runs") + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := "encrypted-object" + args["objectName"] = objectName + bufSize := dataFileMap["datafile-11-MB"] + reader := getDataReader("datafile-11-MB") + + sse := encrypt.DefaultPBKDF([]byte("word1 word2 word3 word4"), []byte(bucketName+objectName)) + + info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: "content/custom", + SendContentMd5: false, + ServerSideEncryption: sse, + PartSize: uint64(bufSize) / 2, + Checksum: minio.ChecksumCRC32C, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + opts := minio.ObjectAttributesOptions{ + ServerSideEncryption: sse, + } + attr, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, opts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + err = validateObjectAttributeRequest(attr, &opts, &objectAttributesTestOptions{ + TestFileName: info.Key, + ETag: info.ETag, + NumberOfParts: 2, + ObjectSize: int(info.Size), + HasFullChecksum: true, + HasParts: true, + HasPartChecksums: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testGetObjectAttributesErrorCases() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-") + unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-") + + _, err = c.GetObjectAttributes(context.Background(), unknownBucket, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", nil) + return 
+ +func testGetObjectAttributesErrorCases() { + startTime := time.Now() + testName := getFuncName() + function := "GetObjectAttributes(ctx, bucketName, objectName, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.ObjectAttributesOptions{}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + TrailingHeaders: true, + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-") + unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-") + + _, err = c.GetObjectAttributes(context.Background(), unknownBucket, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with a non-existent bucket should have failed", nil) + return + } + + errorResponse := err.(minio.ErrorResponse) + if errorResponse.Code != "NoSuchBucket" { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchBucket but got "+errorResponse.Code, nil) + return + } + + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + err = c.MakeBucket( + context.Background(), + bucketName, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + bucketNameV := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-versioned-") + args["bucketName"] = bucketNameV + err = c.MakeBucket( + context.Background(), + bucketNameV, + minio.MakeBucketOptions{Region: "us-east-1"}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + err = c.EnableVersioning(context.Background(), bucketNameV) + if err != nil { + logError(testName, function, args, startTime, "", "Unable to enable versioning", err) + return + } + defer cleanupBucket(bucketName, c) + defer cleanupVersionedBucket(bucketNameV, c) + + _, err = c.GetObjectAttributes(context.Background(), bucketName, unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with a non-existent object should have failed", nil) + return + } + + errorResponse = err.(minio.ErrorResponse) + if errorResponse.Code != "NoSuchKey" { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchKey but got "+errorResponse.Code, nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), bucketName, "", minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty object name should have failed", nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), "", unknownObject, minio.ObjectAttributesOptions{}) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with empty bucket name should have failed", nil) + return + } + + _, err = c.GetObjectAttributes(context.Background(), bucketNameV, unknownObject, minio.ObjectAttributesOptions{ + VersionID: uuid.NewString(), + }) + if err == nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes with a non-existent version ID should have failed", nil) + return + } + errorResponse = err.(minio.ErrorResponse) + if errorResponse.Code != "NoSuchVersion" { + logError(testName, function, args, startTime, "", "Invalid error code, expected NoSuchVersion but got "+errorResponse.Code, nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +type objectAttributesNewObject struct { + Object string + ObjectReaderType string + Bucket string + ContentType string + SendContentMd5 bool + UploadInfo minio.UploadInfo +} + +type objectAttributesTableTest struct { + opts minio.ObjectAttributesOptions + test objectAttributesTestOptions +} + +type objectAttributesTestOptions struct { + TestFileName string + ETag string + NumberOfParts int + StorageClass string + ObjectSize int + HasPartChecksums bool + HasFullChecksum bool + HasParts bool +}
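+
+// expectedListedParts is an illustrative sketch, not invoked by the test
+// runner: it mirrors the pagination arithmetic validated below, i.e. how many
+// entries GetObjectAttributes should return in ObjectParts.Parts for a given
+// PartNumberMarker and MaxParts (0 meaning "no limit").
+func expectedListedParts(partsCount, marker, maxParts int) int {
+	if maxParts == 0 {
+		return partsCount
+	}
+	if maxParts+marker > partsCount {
+		return partsCount - marker
+	}
+	return maxParts
+}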
%s", opts.VersionID, OA.VersionID) + return + } + } + + partsMissingChecksum := false + foundPartChecksum := false + for _, v := range OA.ObjectParts.Parts { + checksumFound := false + if v.ChecksumSHA256 != "" { + checksumFound = true + } else if v.ChecksumSHA1 != "" { + checksumFound = true + } else if v.ChecksumCRC32 != "" { + checksumFound = true + } else if v.ChecksumCRC32C != "" { + checksumFound = true + } + if !checksumFound { + partsMissingChecksum = true + } else { + foundPartChecksum = true + } + } + + if test.HasPartChecksums { + if partsMissingChecksum { + err = fmt.Errorf("One or all parts were missing a checksum") + return + } + } else { + if foundPartChecksum { + err = fmt.Errorf("Did not expect ObjectParts to have checksums but found one") + return + } + } + + hasFullObjectChecksum := true + if OA.Checksum.ChecksumCRC32 == "" { + if OA.Checksum.ChecksumCRC32C == "" { + if OA.Checksum.ChecksumSHA1 == "" { + if OA.Checksum.ChecksumSHA256 == "" { + hasFullObjectChecksum = false + } + } + } + } + + if test.HasFullChecksum { + if !hasFullObjectChecksum { + err = fmt.Errorf("Full object checksum not found") + return + } + } else { + if hasFullObjectChecksum { + err = fmt.Errorf("Did not expect a full object checksum but we got one") + return + } + } + + if OA.ETag != test.ETag { + err = fmt.Errorf("Etags do not match, got %s but expected %s", OA.ETag, test.ETag) + return + } + + if test.HasParts { + if len(OA.ObjectParts.Parts) < 1 { + err = fmt.Errorf("Was expecting ObjectParts but none were present") + return + } + } + + if OA.StorageClass == "" { + err = fmt.Errorf("Was expecting a StorageClass but got none") + return + } + + if OA.ObjectSize != test.ObjectSize { + err = fmt.Errorf("Was expecting a ObjectSize but got none") + return + } + + if test.HasParts { + if opts.MaxParts == 0 { + if len(OA.ObjectParts.Parts) != OA.ObjectParts.PartsCount { + err = fmt.Errorf("expected %s parts but got %d", OA.ObjectParts.PartsCount, len(OA.ObjectParts.Parts)) + return + } + } else if (opts.MaxParts + opts.PartNumberMarker) > OA.ObjectParts.PartsCount { + if len(OA.ObjectParts.Parts) != (OA.ObjectParts.PartsCount - opts.PartNumberMarker) { + err = fmt.Errorf("expected %d parts but got %d", (OA.ObjectParts.PartsCount - opts.PartNumberMarker), len(OA.ObjectParts.Parts)) + return + } + } else if opts.MaxParts != 0 { + if opts.MaxParts != len(OA.ObjectParts.Parts) { + err = fmt.Errorf("expected %d parts but got %d", opts.MaxParts, len(OA.ObjectParts.Parts)) + return + } + } + } + + if OA.ObjectParts.NextPartNumberMarker == OA.ObjectParts.PartsCount { + if OA.ObjectParts.IsTruncated { + err = fmt.Errorf("Expected ObjectParts to NOT be truncated, but it was") + return + } + } + + if OA.ObjectParts.NextPartNumberMarker != OA.ObjectParts.PartsCount { + if !OA.ObjectParts.IsTruncated { + err = fmt.Errorf("Expected ObjectParts to be truncated, but it was NOT") + return + } + } + + return +} + +// Test PutObject using a large data to trigger multipart readat +func testPutObjectWithMetadata() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Object custom metadata + customContentType := "custom/contenttype" + + args["metadata"] = map[string][]string{ + "Content-Type": {customContentType}, + "X-Amz-Meta-CustomKey": {"extra spaces in value"}, + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ + ContentType: customContentType, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err) + return + } + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) + return + } + if err := crcMatchesName(r, "datafile-129-MB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +func testPutObjectWithContentLanguage() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + data := []byte{} + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ + ContentLanguage: "en", + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test put object with streaming signature. +func testPutObjectStreaming() { + // initialize logging params + objectName := "test-object" + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size,opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": objectName, + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. 
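+ // The sizes below deliberately straddle the 64 KiB mark: an empty payload,
+ // one byte short of 64 KiB, and exactly 64 KiB.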
+ sizes := []int64{0, 64*1024 - 1, 64 * 1024} + + for _, size := range sizes { + data := newRandomReader(size, size) + ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) + return + } + + if ui.Size != size { + logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) + return + } + + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if objInfo.Size != size { + logError(testName, function, args, startTime, "", "Unexpected size", err) + return + } + + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object seeker from the end, using whence set to '2'. +func testGetObjectSeekEnd() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes read does not match, expected %d got %d", bufSize, st.Size), err) + return + } + + pos, err := r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + buf2 := make([]byte, 100) + m, err := readFull(r, buf2) + if err != nil { + logError(testName, function, args, startTime, "", "Error reading through readFull", err) + return + } + if m != len(buf2) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes don't match, expected %d got %d", len(buf2), m), err) + return + } + hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) + hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) + if hexBuf1 != hexBuf2 { + logError(testName, function, args, startTime, "", "Values at same index don't match", err) + return + } + pos, err = r.Seek(-100, 2) + if err != nil { + logError(testName, function, args, startTime, "", "Object Seek failed", err) + return + } + if pos != st.Size-100 { + logError(testName, function, args, startTime, "", "Incorrect position", err) + return + } + if err = r.Close(); err != nil { + logError(testName, function, args, startTime, "", "ObjectClose failed", err) + return + } + + logSuccess(testName, function, args, startTime) +}
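+
+// readLastN is an illustrative sketch, not invoked by the test runner: the
+// tail read performed above, expressed as a helper. Seeking with a negative
+// offset and whence 2 (io.SeekEnd) positions the reader that many bytes
+// before the end of the object.
+func readLastN(obj *minio.Object, n int64) ([]byte, error) {
+	if _, err := obj.Seek(-n, io.SeekEnd); err != nil {
+		return nil, err
+	}
+	buf := make([]byte, n)
+	if _, err := io.ReadFull(obj, buf); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}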
+ +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwice() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) + return + } + if err := crcMatchesName(r, "datafile-33-kB"); err != nil { + logError(testName, function, args, startTime, "", "data CRC check failed", err) + return + } + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Already closed object. No error returned", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test RemoveObjects request where context cancels after timeout +func testRemoveObjectsContext() { + // Initialize logging params. + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(ctx, bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate put data. + r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) + + // Multi remove of 20 objects.
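+ // A goroutine uploads each object and streams its ObjectInfo into objectsCh;
+ // RemoveObjects drains the channel until it is closed.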
+ nrObjects := 20 + objectsCh := make(chan minio.ObjectInfo) + go func() { + defer close(objectsCh) + for i := 0; i < nrObjects; i++ { + objectName := "sample" + strconv.Itoa(i) + ".txt" + info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, + minio.PutObjectOptions{ContentType: "application/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + continue + } + objectsCh <- minio.ObjectInfo{ + Key: info.Key, + VersionID: info.VersionID, + } + } + }() + // Set context to cancel in 1 nanosecond. + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + args["ctx"] = ctx + defer cancel() + + // Call RemoveObjects API with short timeout. + errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + // Check for error. + select { + case r := <-errorCh: + if r.Err == nil { + logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) + return + } + } + // Set context with longer timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + defer cancel() + // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. + errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) + select { + case r, more := <-errorCh: + if more || r.Err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", r.Err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing multiple objects with Remove API +func testRemoveMultipleObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh)" + args := map[string]interface{}{ + "bucketName": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Enable tracing, write to stdout. + // c.TraceOn(os.Stderr) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ // Multi remove of 200 objects
+ nrObjects := 200
+
+ objectsCh := make(chan minio.ObjectInfo)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ continue
+ }
+ objectsCh <- minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+ // Check if errorCh doesn't receive any error
+ select {
+ case r, more := <-errorCh:
+ if more {
+ logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test removing multiple objects and check for results
+func testRemoveMultipleObjectsWithResult() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
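+ // Object locking can only be enabled when the bucket is created; the retention settings applied below depend on it.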
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupVersionedBucket(bucketName, c)
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ nrObjects := 10
+ nrLockedObjects := 5
+
+ objectsCh := make(chan minio.ObjectInfo)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8,
+ minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if i < nrLockedObjects {
+ // t := time.Date(2130, time.April, 25, 14, 0, 0, 0, time.UTC)
+ t := time.Now().Add(5 * time.Minute)
+ m := minio.RetentionMode(minio.Governance)
+ opts := minio.PutObjectRetentionOptions{
+ GovernanceBypass: false,
+ RetainUntilDate: &t,
+ Mode: &m,
+ VersionID: info.VersionID,
+ }
+ err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error setting retention", err)
+ return
+ }
+ }
+
+ objectsCh <- minio.ObjectInfo{
+ Key: info.Key,
+ VersionID: info.VersionID,
+ }
+ }
+ }()
+
+ // Call RemoveObjectsWithResult API
+ resultCh := c.RemoveObjectsWithResult(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{})
+
+ var foundNil, foundErr int
+
+ for {
+ // Collect the delete results from resultCh
+ select {
+ case deleteRes, ok := <-resultCh:
+ if !ok {
+ goto out
+ }
+ if deleteRes.ObjectName == "" {
+ logError(testName, function, args, startTime, "", "Unexpected object name", nil)
+ return
+ }
+ if deleteRes.ObjectVersionID == "" {
+ logError(testName, function, args, startTime, "", "Unexpected object version ID", nil)
+ return
+ }
+
+ if deleteRes.Err == nil {
+ foundNil++
+ } else {
+ foundErr++
+ }
+ }
+ }
+out:
+ if foundNil+foundErr != nrObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of results", nil)
+ return
+ }
+
+ if foundNil != nrObjects-nrLockedObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of nil errors", nil)
+ return
+ }
+
+ if foundErr != nrLockedObjects {
+ logError(testName, function, args, startTime, "", "Unexpected number of errors", nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject of a big file to trigger multipart
+func testFPutObjectMultipart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. + fileName := getMintDataDirFilePath("datafile-129-MB") + if fileName == "" { + // Make a temp file with minPartSize bytes of data. + file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. + if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + fileName = file.Name() + args["fileName"] = fileName + } + totalSize := dataFileMap["datafile-129-MB"] + // Set base object name + objectName := bucketName + "FPutObject" + "-standard" + args["objectName"] = objectName + + objectContentType := "testapplication/octet-stream" + args["objectContentType"] = objectContentType + + // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) + _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) + if err != nil { + logError(testName, function, args, startTime, "", "FPutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Unexpected error", err) + return + } + if objInfo.Size != int64(totalSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) + return + } + if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType doesn't match", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests FPutObject with null contentType (default = application/octet-stream) +func testFPutObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutObject(bucketName, objectName, fileName, opts)" + + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileName": "", + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
+
+ // Make a new bucket.
+ args["bucketName"] = bucketName
+ args["location"] = location
+ function = "MakeBucket(bucketName, location)"
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload 3 parts' worth of data to use all 3 multipart 'workers' and have an extra part.
+ // Use different data in each part for multipart tests to check that parts are uploaded in the correct order.
+ fName := getMintDataDirFilePath("datafile-129-MB")
+ if fName == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+
+ // Set base object name
+ function = "FPutObject(bucketName, objectName, fileName, opts)"
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName + "-standard"
+ args["fileName"] = fName
+ args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ if ui.Size != int64(dataFileMap["datafile-129-MB"]) {
+ logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ args["objectName"] = objectName + "-Octet"
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ srcFile, err := os.Open(fName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ defer srcFile.Close()
+ // Add extension to temp file name
+ tmpFile, err := os.Create(fName + ".gtar")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File create failed", err)
+ return
+ }
+ _, err = io.Copy(tmpFile, srcFile)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ tmpFile.Close()
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ args["objectName"] = objectName + "-GTar"
+ args["opts"] = minio.PutObjectOptions{}
+ _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ // Check headers
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-standard"
+ rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+ return
+ }
+
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-Octet"
+ rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
+ return
+ }
+
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-GTar"
+ rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, application/x-tar or application/octet-stream, got "+rGTar.ContentType, err)
+ return
+ }
+
+ os.Remove(fName + ".gtar")
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject request when context cancels after timeout
+func testFPutObjectContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload one part's worth of data to use multipart upload.
+ // Use different data in each part for multipart tests to check that parts are uploaded in the correct order.
+ fName := getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+ // Make a temp file with 1 MiB of data.
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ // Upload one part to trigger multipart upload
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectContext"
+ args["objectName"] = objectName
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObject with a long timeout. Expect the put object to succeed
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err)
+ return
+ }
+
+ _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject request when context cancels after timeout
+func testFPutObjectContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{ContentType:objectContentType}",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Upload one part's worth of data to use multipart upload.
+ // Use different data in each part for multipart tests to check that parts are uploaded in the correct order.
+ fName := getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+ // Make a temp file with 1 MiB of data.
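+ // Fall back to generating the test data locally when no mint data directory is configured.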
+ file, err := os.CreateTemp(os.TempDir(), "FPutObjectContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Temp file creation failed", err)
+ return
+ }
+
+ // Upload one part to trigger multipart upload
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectContext"
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform FPutObject with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObject with a long timeout. Expect the put object to succeed
+ _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err)
+ return
+ }
+
+ _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test validates putObject with context to see if request cancellation is honored.
+func testPutObjectContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "opts": "",
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket call failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) + args["objectName"] = objectName + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) + cancel() + args["ctx"] = ctx + args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err == nil { + logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) + return + } + + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) + args["ctx"] = ctx + + defer cancel() + reader = getDataReader("datafile-33-kB") + defer reader.Close() + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests get object with s3zip extensions. +func testGetObjectS3Zip() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{"x-minio-extract": true} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + ".zip" + args["objectName"] = objectName + + var zipFile bytes.Buffer + zw := zip.NewWriter(&zipFile) + rng := rand.New(rand.NewSource(0xc0cac01a)) + const nFiles = 500 + for i := 0; i <= nFiles; i++ { + if i == nFiles { + // Make one large, compressible file. 
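+ // The index jumps to 1000000 so the last entry is large, and it is left zero-filled (rng.Read is skipped for i >= nFiles) so it compresses well.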
+ i = 1000000
+ }
+ b := make([]byte, i)
+ if i < nFiles {
+ rng.Read(b)
+ }
+ wc, err := zw.Create(fmt.Sprintf("test/small/file-%d.bin", i))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "zw.Create failed", err)
+ return
+ }
+ if _, err := wc.Write(b); err != nil {
+ logError(testName, function, args, startTime, "", "wc.Write failed", err)
+ return
+ }
+ }
+ err = zw.Close()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "zw.Close failed", err)
+ return
+ }
+ buf := zipFile.Bytes()
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", len(buf), st.Size), err)
+ return
+ }
+ r.Close()
+
+ zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "zip.NewReader failed", err)
+ return
+ }
+ lOpts := minio.ListObjectsOptions{}
+ lOpts.Set("x-minio-extract", "true")
+ lOpts.Prefix = objectName + "/"
+ lOpts.Recursive = true
+ list := c.ListObjects(context.Background(), bucketName, lOpts)
+ listed := map[string]minio.ObjectInfo{}
+ for item := range list {
+ if item.Err != nil {
+ break
+ }
+ listed[item.Key] = item
+ }
+ if len(listed) == 0 {
+ // Assume we are running against non-minio.
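+ // The x-minio-extract listing extension is MinIO-specific, so an empty listing is treated as "feature not present" rather than a failure.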
+ args["SKIPPED"] = true + logIgnored(testName, function, args, startTime, "s3zip does not appear to be present") + return + } + + for _, file := range zr.File { + if file.FileInfo().IsDir() { + continue + } + args["zipfile"] = file.Name + zfr, err := file.Open() + if err != nil { + logError(testName, function, args, startTime, "", "file.Open failed", err) + return + } + want, err := io.ReadAll(zfr) + if err != nil { + logError(testName, function, args, startTime, "", "fzip file read failed", err) + return + } + + opts := minio.GetObjectOptions{} + opts.Set("x-minio-extract", "true") + key := path.Join(objectName, file.Name) + r, err = c.GetObject(context.Background(), bucketName, key, opts) + if err != nil { + terr := minio.ToErrorResponse(err) + if terr.StatusCode != http.StatusNotFound { + logError(testName, function, args, startTime, "", "GetObject failed", err) + } + return + } + got, err := io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + r.Close() + if !bytes.Equal(want, got) { + logError(testName, function, args, startTime, "", "Content mismatch", err) + return + } + oi, ok := listed[key] + if !ok { + logError(testName, function, args, startTime, "", "Object Missing", fmt.Errorf("%s not present in listing", key)) + return + } + if int(oi.Size) != len(got) { + logError(testName, function, args, startTime, "", "Object Size Incorrect", fmt.Errorf("listing %d, read %d", oi.Size, len(got))) + return + } + delete(listed, key) + } + delete(args, "zipfile") + if len(listed) > 0 { + logError(testName, function, args, startTime, "", "Extra listed objects", fmt.Errorf("left over: %v", listed)) + return + } + logSuccess(testName, function, args, startTime) +} + +// Tests get object ReaderSeeker interface methods. +func testGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 33K of data. 
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ // The following function compares the data read from the reader after a seek
+ // with the corresponding data from the original buffer
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ }
+
+ // Generic seek error for errors other than io.EOF
+ seekErr := errors.New("seek error")
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, seekErr, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, seekErr, false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ // We expect an error
+ if testCase.err == seekErr && err == nil {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
+ return
+ }
+ // We expect a specific error
+ if testCase.err != seekErr && testCase.err != err {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
"+testCase.err.Error()+", found: "+err.Error(), err) + return + } + // If we expect an error go to the next loop + if testCase.err != nil { + continue + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + logSuccess(testName, function, args, startTime) +} + +// Tests get object ReaderAt interface methods. +func testGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + offset := int64(2048) + + // read directly + buf1 := make([]byte, 512) + buf2 := make([]byte, 512) + buf3 := make([]byte, 512) + buf4 := make([]byte, 512) + + // Test readAt before stat is called such that objectInfo doesn't change. 
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than requested, expected %d, got %d", len(buf1), m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than requested, expected %d, got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than requested, expected %d, got %d", len(buf3), m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than requested, expected %d, got %d", len(buf4), m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, len(buf))
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than requested, expected %d, got %d", len(buf5), m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, len(buf)+1)
+ // Read the whole object and beyond.
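+ // A buffer one byte longer than the object should return io.EOF along with the data, per the io.ReaderAt contract.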
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Reproduces issue https://github.com/minio/minio-go/issues/1137
+func testGetObjectReadAtWhenEOFWasReached() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ // read directly
+ buf1 := make([]byte, len(buf))
+ buf2 := make([]byte, 512)
+
+ m, err := r.Read(buf1)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Read read fewer bytes than requested, expected %d, got %d", len(buf1), m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf) {
+ logError(testName, function, args, startTime, "", "Read data does not match what was uploaded", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, 512)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than requested, expected %d, got %d", len(buf2), m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[512:1024]) {
+ logError(testName, function, args, startTime, "", "ReadAt data does not match what was uploaded", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PresignedPostPolicy(policy)"
+ args := map[string]interface{}{
+ "policy": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate 33K of data.
+ reader := getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ // Azure requires the key to not start with a number
+ metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
+ metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ buf, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ policy := minio.NewPostPolicy()
+
+ if err := policy.SetBucket(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetKey(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+ logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetContentType(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+ logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetUserMetadata("", ""); err == nil {
+ logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
"SetUserMetadata did not fail for invalid conditions", err) + return + } + + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + + // Add CRC32C + checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) + policy.SetChecksum(checksum) + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + writer.WriteField(k, v) + } + + // Get a 33KB file to upload and test if set post policy works + filePath := getMintDataDirFilePath("datafile-33-kB") + if filePath == "" { + // Make a temp file with 33 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + + // add file to post request + f, err := os.Open(filePath) + defer f.Close() + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // make post request with correct form data + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "Http request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) + return + } + + // expected path should be absolute path of the object + var scheme string + if mustParseBool(os.Getenv(enableHTTPS)) { + scheme = "https://" + } else { + scheme = "http://" + } + + expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName + expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName + + if !strings.Contains(expectedLocation, "s3.amazonaws.com/") { + // Test when not against AWS S3. 
+ if val, ok := res.Header["Location"]; ok { + if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { + logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err) + return + } + } else { + logError(testName, function, args, startTime, "", "Location not found in header response", err) + return + } + } + want := checksum.Encoded() + if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests copy object +func testCopyObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(dst, src)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + // Set copy conditions. 
+ MatchETag: objInfo.ETag,
+ MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+ }
+ args["src"] = src
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName + "-copy",
+ Object: objectName + "-copy",
+ }
+
+ // Perform the Copy
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ // Destination object
+ readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err)
+ return
+ }
+
+ if err := crcMatchesName(r, "datafile-33-kB"); err != nil {
+ logError(testName, function, args, startTime, "", "data CRC check failed", err)
+ return
+ }
+ if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil {
+ logError(testName, function, args, startTime, "", "copy data CRC check failed", err)
+ return
+ }
+ // Close all the get readers before proceeding with CopyObject operations.
+ r.Close() + readerCopy.Close() + + // CopyObject again but with wrong conditions + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + NoMatchETag: objInfo.ETag, + } + + // Perform the Copy which should fail + _, err = c.CopyObject(context.Background(), dst, src) + if err == nil { + logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) + return + } + + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: objectName, + ReplaceMetadata: true, + UserMetadata: map[string]string{ + "Copy": "should be same", + }, + } + args["dst"] = dst + args["src"] = src + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) + return + } + + oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + + stOpts := minio.StatObjectOptions{} + stOpts.SetMatchETag(oi.ETag) + objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) + return + } + + if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { + logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-C get object ReaderSeeker interface methods. +func testSSECEncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ + ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-S3 get object ReaderSeeker interface methods. +func testSSES3EncryptedGetObjectReadSeekFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer func() { + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + }() + + // Generate 129MiB of data. 
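+	// Unlike SSE-C above, SSE-S3 (encrypt.NewSSE() below) only asks the
+	// server to encrypt with keys it manages itself, so reading the object
+	// back requires no encryption parameters at all.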
+ bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + buf, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Save the data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ContentType: "binary/octet-stream", + ServerSideEncryption: encrypt.NewSSE(), + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer r.Close() + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat object failed", err) + return + } + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) + return + } + + // This following function helps us to compare data from the reader after seek + // with the data from the original buffer + cmpData := func(r io.Reader, start, end int) { + if end-start == 0 { + return + } + buffer := bytes.NewBuffer([]byte{}) + if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "CopyN failed", err) + return + } + } + if !bytes.Equal(buf[start:end], buffer.Bytes()) { + logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) + return + } + } + + testCases := []struct { + offset int64 + whence int + pos int64 + err error + shouldCmp bool + start int + end int + }{ + // Start from offset 0, fetch data and compare + {0, 0, 0, nil, true, 0, 0}, + // Start from offset 2048, fetch data and compare + {2048, 0, 2048, nil, true, 2048, bufSize}, + // Start from offset larger than possible + {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, + // Move to offset 0 without comparing + {0, 0, 0, nil, false, 0, 0}, + // Move one step forward and compare + {1, 1, 1, nil, true, 1, bufSize}, + // Move larger than possible + {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, + // Provide negative offset with CUR_SEEK + {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, + // Test with whence SEEK_END and with positive offset + {1024, 2, 0, io.EOF, false, 0, 0}, + // Test with whence SEEK_END and with negative offset + {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, + // Test with whence SEEK_END and with large negative offset + {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, + // Test with invalid whence + {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, + } + + for i, testCase := range testCases { + // Perform seek operation + n, err := r.Seek(testCase.offset, testCase.whence) + if err != nil && testCase.err == nil { + // We expected success. + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err == nil && testCase.err != nil { + // We expected failure, but got success. 
+ logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + if err != nil && testCase.err != nil { + if err.Error() != testCase.err.Error() { + // We expect a specific error + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) + return + } + } + // Check the returned seek pos + if n != testCase.pos { + logError(testName, function, args, startTime, "", + fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) + return + } + // Compare only if shouldCmp is activated + if testCase.shouldCmp { + cmpData(r, testCase.start, testCase.end) + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-C get object ReaderAt interface methods. +func testSSECEncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
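+	// io.ReaderAt contract exercised by this test: ReadAt reads len(p) bytes
+	// at an absolute offset, neither using nor changing the reader's Seek
+	// position, and returns a non-nil error whenever it reads fewer bytes.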
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := io.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{
+		ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", int64(bufSize), st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
"+string(len(buf4))+", got "+string(m), err) + return + } + if !bytes.Equal(buf4, buf[offset:offset+512]) { + logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests SSE-S3 get object ReaderAt interface methods. +func testSSES3EncryptedGetObjectReadAtFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 129MiB of data. 
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := io.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{
+		ContentType:          "binary/octet-stream",
+		ServerSideEncryption: encrypt.NewSSE(),
+	})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	offset := int64(2048)
+
+	// read directly
+	buf1 := make([]byte, 512)
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	// Test readAt before stat is called such that objectInfo doesn't change.
+	m, err := r.ReadAt(buf1, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf1), m), err)
+		return
+	}
+	if !bytes.Equal(buf1, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", int64(bufSize), st.Size), err)
+		return
+	}
+
+	m, err = r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d, got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
same offset", err) + return + } + + buf5 := make([]byte, len(buf)) + // Read the whole object. + m, err = r.ReadAt(buf5, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + if m != len(buf5) { + logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) + return + } + if !bytes.Equal(buf, buf5) { + logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) + return + } + + buf6 := make([]byte, len(buf)+1) + // Read the whole object and beyond. + _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// testSSECEncryptionPutGet tests encryption with customer provided encryption keys +func testSSECEncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + logSuccess(testName, function, args, startTime) + + } + + logSuccess(testName, function, args, startTime) +} + +// TestEncryptionFPut tests encryption with customer specified encryption keys +func testSSECEncryptionFPut() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. 
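+	// TraceOn(w io.Writer) dumps each HTTP request/response to w; uncomment
+	// the line below when debugging a failing run against a live server.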
+ // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + const password = "correct horse battery staple" // https://xkcd.com/936/ + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + args["sse"] = sse + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + os.Remove(fileName) + } + + logSuccess(testName, function, args, startTime) +} + +// testSSES3EncryptionPutGet tests SSE-S3 encryption +func testSSES3EncryptionPutGet() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function 
:= "PutEncryptedObject(bucketName, objectName, reader, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Put encrypted data + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) + if err != nil { + logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) + return + } + + // Read the data back without any encryption headers + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + logSuccess(testName, function, args, startTime) + + } + + logSuccess(testName, function, args, startTime) +} + +// TestSSES3EncryptionFPut tests server side encryption +func testSSES3EncryptionFPut() { + // initialize logging params + 
startTime := time.Now() + testName := getFuncName() + function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "filePath": "", + "contentType": "", + "sse": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Object custom metadata + customContentType := "custom/contenttype" + args["metadata"] = customContentType + + testCases := []struct { + buf []byte + }{ + {buf: bytes.Repeat([]byte("F"), 0)}, + {buf: bytes.Repeat([]byte("F"), 1)}, + {buf: bytes.Repeat([]byte("F"), 15)}, + {buf: bytes.Repeat([]byte("F"), 16)}, + {buf: bytes.Repeat([]byte("F"), 17)}, + {buf: bytes.Repeat([]byte("F"), 31)}, + {buf: bytes.Repeat([]byte("F"), 32)}, + {buf: bytes.Repeat([]byte("F"), 33)}, + {buf: bytes.Repeat([]byte("F"), 1024)}, + {buf: bytes.Repeat([]byte("F"), 1024*2)}, + {buf: bytes.Repeat([]byte("F"), 1024*1024)}, + } + + for i, testCase := range testCases { + // Generate a random object name + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Secured object + sse := encrypt.NewSSE() + args["sse"] = sse + + // Generate a random file name. 
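+	// The scratch file is created in the current working directory and
+	// removed at the end of each iteration; a temp-file variant (sketch,
+	// not part of this change) would be:
+	//
+	//	f, err := os.CreateTemp("", "minio-go-test-*")
+	//	// write testCase.buf, FPutObject with f.Name(), then os.Remove(f.Name())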
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "file create failed", err) + return + } + _, err = file.Write(testCase.buf) + if err != nil { + logError(testName, function, args, startTime, "", "file write failed", err) + return + } + file.Close() + // Put encrypted data + if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { + logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) + return + } + + // Read the data back + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) + return + } + defer r.Close() + + // Compare the sent object with the received one + recvBuffer := bytes.NewBuffer([]byte{}) + if _, err = io.Copy(recvBuffer, r); err != nil { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) + return + } + if recvBuffer.Len() != len(testCase.buf) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) + return + } + if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { + logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) + return + } + + os.Remove(fileName) + } + + logSuccess(testName, function, args, startTime) +} + +func testBucketNotification() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketNotification(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + + if os.Getenv("NOTIFY_BUCKET") == "" || + os.Getenv("NOTIFY_SERVICE") == "" || + os.Getenv("NOTIFY_REGION") == "" || + os.Getenv("NOTIFY_ACCOUNTID") == "" || + os.Getenv("NOTIFY_RESOURCE") == "" { + logIgnored(testName, function, args, startTime, "Skipped notification test as it is not configured") + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. 
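+	// SetAppInfo appends "MinIO-go-FunctionalTest/<version>" to the client's
+	// User-Agent header so these requests are identifiable in server logs.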
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + bucketName := os.Getenv("NOTIFY_BUCKET") + args["bucketName"] = bucketName + + topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) + queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") + + topicConfig := notification.NewConfig(topicArn) + topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) + topicConfig.AddFilterSuffix("jpg") + + queueConfig := notification.NewConfig(queueArn) + queueConfig.AddEvents(notification.ObjectCreatedAll) + queueConfig.AddFilterPrefix("photos/") + + config := notification.Configuration{} + config.AddTopic(topicConfig) + + // Add the same topicConfig again, should have no effect + // because it is duplicated + config.AddTopic(topicConfig) + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Duplicate entry added", err) + return + } + + // Add and remove a queue config + config.AddQueue(queueConfig) + config.RemoveQueueByArn(queueArn) + + err = c.SetBucketNotification(context.Background(), bucketName, config) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) + return + } + + config, err = c.GetBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) + return + } + + if len(config.TopicConfigs) != 1 { + logError(testName, function, args, startTime, "", "Topic config is empty", err) + return + } + + if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { + logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) + return + } + + err = c.RemoveAllBucketNotification(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Tests comprehensive list of all methods. +func testFunctional() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "testFunctional()" + functionAll := "" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket. 
+ function = "MakeBucket(bucketName, region)" + functionAll = "MakeBucket(bucketName, region)" + args["bucketName"] = bucketName + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + + defer cleanupBucket(bucketName, c) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + // Generate a random file name. + fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + file, err := os.Create(fileName) + if err != nil { + logError(testName, function, args, startTime, "", "File creation failed", err) + return + } + for i := 0; i < 3; i++ { + buf := make([]byte, rand.Intn(1<<19)) + _, err = file.Write(buf) + if err != nil { + logError(testName, function, args, startTime, "", "File write failed", err) + return + } + } + file.Close() + + // Verify if bucket exits and you have access. + var exists bool + function = "BucketExists(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + exists, err = c.BucketExists(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "BucketExists failed", err) + return + } + if !exists { + logError(testName, function, args, startTime, "", "Could not find the bucket", err) + return + } + + // Asserting the default bucket policy. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + if nilPolicy != "" { + logError(testName, function, args, startTime, "", "policy should be set to nil", err) + return + } + + // Set the bucket policy to 'public readonly'. + function = "SetBucketPolicy(bucketName, readOnlyPolicy)" + functionAll += ", " + function + + readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readOnlyPolicy, + } + + err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readonly`. + function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public writeonly'. + function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" + functionAll += ", " + function + + writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": writeOnlyPolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `writeonly`. 
+ function = "GetBucketPolicy(ctx, bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // Make the bucket 'public read/write'. + function = "SetBucketPolicy(bucketName, readWritePolicy)" + functionAll += ", " + function + + readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` + + args = map[string]interface{}{ + "bucketName": bucketName, + "bucketPolicy": readWritePolicy, + } + err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + // should return policy `readwrite`. + function = "GetBucketPolicy(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + _, err = c.GetBucketPolicy(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) + return + } + + // List all buckets. + function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("f"), 1<<19) + + function = "PutObject(bucketName, objectName, reader, contentType)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-nolength", + "contentType": "binary/octet-stream", + } + + _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ + function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + objFound = false + isRecursive = true // Recursive is true. + function = "ListObjects()" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) + return + } + + incompObjNotFound := true + + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) + return + } + newReader.Close() + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FGetObject failed", err) + return + } + + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) + return + } + + // Generate presigned HEAD object url. 
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + transport := createHTTPTransport() + if err != nil { + logError(testName, function, args, startTime, "", "DefaultTransport failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: transport, + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) + return + } + + // Verify if presigned url works. + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + if resp.StatusCode != http.StatusOK { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) + return + } + if resp.Header.Get("ETag") == "" { + logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) + return + } + resp.Body.Close() + + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": "", + "expires": 3600 * time.Second, + } + _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) + if err == nil { + logError(testName, function, args, startTime, "", "PresignedGetObject success", err) + return + } + + // Generate presigned GET object url. + function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) + return + } + + // Verify if presigned url works. 
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprint(resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+
+	// Set request parameters.
+	reqParams := make(url.Values)
+	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName,
+		"expires":    3600 * time.Second,
+		"reqParams":  reqParams,
+	}
+	presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+		return
+	}
+
+	// Verify if presigned url works.
+	req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+		return
+	}
+
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	if resp.StatusCode != http.StatusOK {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+fmt.Sprint(resp.StatusCode), err)
+		return
+	}
+	newPresignedBytes, err = io.ReadAll(resp.Body)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+		return
+	}
+	resp.Body.Close()
+	if !bytes.Equal(newPresignedBytes, buf) {
+		logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
+		return
+	}
+	if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+		logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err)
+		return
+	}
+
+	function = "PresignedPutObject(bucketName, objectName, expires)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": "",
+		"expires":    3600 * time.Second,
+	}
+	_, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "PresignedPutObject succeeded for an empty object name, expected an error", err)
+		return
+	}
+
+	function = "PresignedPutObject(bucketName, objectName, expires)"
+	functionAll += ", " + function
+	args = map[string]interface{}{
+		"bucketName": bucketName,
+		"objectName": objectName + "-presigned",
+		"expires":    3600 * time.Second,
+	}
+	presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+		return
+	}
+
+	buf = bytes.Repeat([]byte("g"),
1<<19) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) + return + } + + newReadBytes, err = io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) + return + } + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + presignedURL, err := c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err != nil { + logError(testName, function, args, startTime, "", "Presigned failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + req.Header.Add("mysecret", "abcxxx") + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to Presigned URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presign-custom", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded custom-presigned object failed", err) + return + } + + newReadBytes, err = io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on custom-presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on custom-presigned object upload verification", err) + return + } + + function = "RemoveObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + args["objectName"] = objectName + "-f" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", 
"RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-nolength" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presigned" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + args["objectName"] = objectName + "-presign-custom" + err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveObject failed", err) + return + } + + function = "RemoveBucket(bucketName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + } + err = c.RemoveBucket(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + err = c.RemoveBucket(context.Background(), bucketName) + if err == nil { + logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) + return + } + if err.Error() != "The specified bucket does not exist" { + logError(testName, function, args, startTime, "", "RemoveBucket failed", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + logSuccess(testName, functionAll, args, startTime) +} + +// Test for validating GetObject Reader* methods functioning when the +// object is modified in the object store. +func testGetObjectModified() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(bucketName, objectName)" + args := map[string]interface{}{} + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Make a new bucket. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload an object. 
+ objectName := "myobject" + args["objectName"] = objectName + content := "helloworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) + + reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) + return + } + defer reader.Close() + + // Read a few bytes of the object. + b := make([]byte, 5) + n, err := reader.ReadAt(b, 0) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) + return + } + + // Upload different contents to the same object while object is being read. + newContent := "goodbyeworld" + _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) + return + } + + // Confirm that a Stat() call in between doesn't change the Object's cached etag. + _, err = reader.Stat() + expectedError := "At least one of the pre-conditions you specified did not hold" + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + // Read again only to find object contents have been modified since last read. + _, err = reader.ReadAt(b, int64(n)) + if err.Error() != expectedError { + logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test validates putObject to upload a file seeked at a given offset. +func testPutObjectUploadSeekedObject() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, fileToUpload, contentType)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "fileToUpload": "", + "contentType": "binary/octet-stream", + } + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Make a new bucket. 
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, c)
+
+	var tempfile *os.File
+
+	if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
+		tempfile, err = os.Open(fileName)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "File open failed", err)
+			return
+		}
+		args["fileToUpload"] = fileName
+	} else {
+		tempfile, err = os.CreateTemp("", "minio-go-upload-test-")
+		if err != nil {
+			logError(testName, function, args, startTime, "", "TempFile create failed", err)
+			return
+		}
+		args["fileToUpload"] = tempfile.Name()
+
+		// Generate 100kB data
+		if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
+			logError(testName, function, args, startTime, "", "File copy failed", err)
+			return
+		}
+
+		defer os.Remove(tempfile.Name())
+
+		// Seek back to the beginning of the file.
+		tempfile.Seek(0, 0)
+	}
+	length := 100 * humanize.KiByte
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	offset := length / 2
+	if _, err = tempfile.Seek(int64(offset), 0); err != nil {
+		logError(testName, function, args, startTime, "", "TempFile seek failed", err)
+		return
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	tempfile.Close()
+
+	obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer obj.Close()
+
+	n, err := obj.Seek(int64(offset), 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != int64(offset) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
+		return
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if st.Size != int64(length-offset) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid size returned, expected %d got %d", int64(length-offset), st.Size), err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketErrorV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
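+	// credentials.NewStaticV2 forces AWS Signature Version 2, exercising the
+	// legacy signing code path instead of the default Signature V4.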
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + region := "eu-west-1" + args["bucketName"] = bucketName + args["region"] = region + + // Make a new bucket in 'eu-west-1'. + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { + logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) + return + } + // Verify valid error response from server. + if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && + minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { + logError(testName, function, args, startTime, "", "Invalid error returned by server", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get object reader to not throw error on being closed twice. +func testGetObjectClosedTwiceV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "MakeBucket(bucketName, region)" + args := map[string]interface{}{ + "bucketName": "", + "region": "eu-west-1", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Generate 33K of data. 
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprint(bufSize)+" got "+fmt.Sprint(st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "Close failed", err)
+		return
+	}
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Tests FPutObject hidden contentType setting
+func testFPutObjectV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FPutObject(bucketName, objectName, fileName, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Make a temp file with 11*1024*1024 bytes of data.
+	file, err := os.CreateTemp(os.TempDir(), "FPutObjectTest")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+		return
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+	n, err := io.CopyN(file, r, 11*1024*1024)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Copy failed", err)
+		return
+	}
+	if n != int64(11*1024*1024) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprint(int64(11*1024*1024))+" got "+fmt.Sprint(n), err)
+		return
+	}
+
+	// Close the file pro-actively for windows.
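+	// On Windows an open file cannot be renamed or removed, so the handle is
+	// released here before the Rename to ".gtar" below.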
+	err = file.Close()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File close failed", err)
+		return
+	}
+
+	// Set base object name
+	objectName := bucketName + "FPutObject"
+	args["objectName"] = objectName
+	args["fileName"] = file.Name()
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+	args["objectName"] = objectName + "-Octet"
+	args["contentType"] = ""
+
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	// Add extension to temp file name
+	fileName := file.Name()
+	err = os.Rename(fileName, fileName+".gtar")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Rename failed", err)
+		return
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+	args["objectName"] = objectName + "-GTar"
+	args["contentType"] = ""
+	args["fileName"] = fileName + ".gtar"
+
+	_, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+
+	// Check headers and sizes
+	rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+
+	if rStandard.Size != 11*1024*1024 {
+		logError(testName, function, args, startTime, "", "Unexpected size", nil)
+		return
+	}
+
+	if rStandard.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
+		return
+	}
+
+	rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rOctet.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
+		return
+	}
+
+	if rOctet.Size != 11*1024*1024 {
+		logError(testName, function, args, startTime, "", "Unexpected size", nil)
+		return
+	}
+
+	rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rGTar.Size != 11*1024*1024 {
+		logError(testName, function, args, startTime, "", "Unexpected size", nil)
+		return
+	}
+	if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" && rGTar.ContentType != "application/x-tar" {
+		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err)
+		return
+	}
+
+	os.Remove(fileName + ".gtar")
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Tests bucket creation in different regions, including a bucket name
+// containing a period.
+func testMakeBucketRegionsV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-west-1'.
+	if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+		return
+	}
+
+	// Make a new bucket with '.' in its name, in 'us-west-2'. This
+	// request is internally staged into a path style instead of
+	// virtual host style.
+	if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil {
+		args["bucketName"] = bucketName + ".withperiod"
+		args["region"] = "us-west-2"
+		logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+		logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := io.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data.
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+fmt.Sprint(int64(bufSize))+" got "+fmt.Sprint(st.Size), err)
+		return
+	}
+
+	offset := int64(2048)
+	n, err := r.Seek(offset, 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != offset {
+		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprint(offset)+" got "+fmt.Sprint(n), err)
+		return
+	}
+	n, err = r.Seek(0, 1)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != offset {
+		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprint(offset)+" got "+fmt.Sprint(n), err)
+		return
+	}
+	_, err = r.Seek(offset, 2)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
+		return
+	}
+	n, err = r.Seek(-offset, 2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != st.Size-offset {
+		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprint(st.Size-offset)+" got "+fmt.Sprint(n), err)
+		return
+	}
+
+	var buffer1 bytes.Buffer
+	if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+	}
+	if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+		return
+	}
+
+	// Seek again and read again.
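+	// The whence values follow io.Seeker: 0 = io.SeekStart, 1 = io.SeekCurrent,
+	// 2 = io.SeekEnd, as exercised above.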
+	n, err = r.Seek(offset-1, 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != (offset - 1) {
+		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+fmt.Sprint(offset-1)+" got "+fmt.Sprint(n), err)
+		return
+	}
+
+	var buffer2 bytes.Buffer
+	if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+	}
+	// Verify now lesser bytes.
+	if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	buf, err := io.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer r.Close()
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprint(bufSize)+" got "+fmt.Sprint(st.Size), err)
+		return
+	}
+
+	offset := int64(2048)
+
+	// Read directly
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	m, err := r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprint(len(buf2))+" got "+fmt.Sprint(m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data mismatch with the original buffer at offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprint(len(buf3))+" got "+fmt.Sprint(m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data mismatch with the original buffer at offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprint(len(buf4))+" got "+fmt.Sprint(m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "ReadAt data mismatch with the original buffer at offset", err)
+		return
+	}
+
+	buf5 := make([]byte, bufSize)
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+fmt.Sprint(len(buf5))+" got "+fmt.Sprint(m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "Data read in GetObject does not match what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, bufSize+1)
+	// Read the whole object and beyond.
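+	// Per the io.ReaderAt contract, a read extending past the end of the
+	// object returns the available bytes together with io.EOF.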
+ _, err = r.ReadAt(buf6, 0) + if err != nil { + if err != io.EOF { + logError(testName, function, args, startTime, "", "ReadAt failed", err) + return + } + } + + logSuccess(testName, function, args, startTime) +} + +// Tests copy object +func testCopyObjectV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Make a new bucket in 'us-east-1' (destination bucket). + err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName+"-copy", c) + + // Generate 33K of data. + bufSize := dataFileMap["datafile-33-kB"] + reader := getDataReader("datafile-33-kB") + defer reader.Close() + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + // Check the various fields of source object against destination object. + objInfo, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + r.Close() + + // Copy Source + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: objectName, + MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), + MatchETag: objInfo.ETag, + } + args["source"] = src + + // Set copy conditions. 
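+	// MatchModifiedSince and MatchETag above translate into the
+	// x-amz-copy-source-if-modified-since and x-amz-copy-source-if-match
+	// headers on the server-side copy request.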
+	dst := minio.CopyDestOptions{
+		Bucket: bucketName + "-copy",
+		Object: objectName + "-copy",
+	}
+	args["destination"] = dst
+
+	// Perform the Copy
+	_, err = c.CopyObject(context.Background(), dst, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// Source object
+	r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Destination object
+	readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+fmt.Sprint(objInfoCopy.Size)+" got "+fmt.Sprint(objInfo.Size), err)
+		return
+	}
+
+	// Close all the readers.
+	r.Close()
+	readerCopy.Close()
+
+	// CopyObject again but with wrong conditions
+	src = minio.CopySrcOptions{
+		Bucket:               bucketName,
+		Object:               objectName,
+		MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC),
+		NoMatchETag:          objInfo.ETag,
+	}
+
+	// Perform the Copy which should fail
+	_, err = c.CopyObject(context.Background(), dst, src)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	// Test that more than 10K source objects cannot be
+	// concatenated.
+	srcArr := [10001]minio.CopySrcOptions{}
+	srcSlice := srcArr[:]
+	dst := minio.CopyDestOptions{
+		Bucket: bucketName,
+		Object: "object",
+	}
+
+	args["destination"] = dst
+	// Just explain about srcArr in args["sourceList"]
+	// to stop having 10,001 null headers logged
+	args["sourceList"] = "source array of 10,001 elements"
+	if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil {
+		logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+		return
+	} else if err.Error() != "There must be as least one and up to 10000 source objects." {
+		logError(testName, function, args, startTime, "", "Got unexpected error", err)
+		return
+	}
+
+	// Create a source with invalid offset spec and check that
+	// error is returned:
+	// 1. Create the source object.
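+	// Valid byte ranges for an object of size N are 0..N-1, so the
+	// Start=1/End=N spec built below deliberately reaches one byte past the
+	// end of the object.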
+ const badSrcSize = 5 * 1024 * 1024 + buf := bytes.Repeat([]byte("1"), badSrcSize) + _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + // 2. Set invalid range spec on the object (going beyond + // object size) + badSrc := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "badObject", + MatchRange: true, + Start: 1, + End: badSrcSize, + } + + // 3. ComposeObject call should fail. + if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { + logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) + return + } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { + logError(testName, function, args, startTime, "", "Got invalid error", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test expected error cases +func testComposeObjectErrorCasesV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{} + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + return + } + + testComposeObjectErrorCasesWrapper(c) +} + +func testComposeMultipleSources(c *minio.Client) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "ComposeObject(destination, sourceList)" + args := map[string]interface{}{ + "destination": "", + "sourceList": "", + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // Upload a small source object + const srcSize = 1024 * 1024 * 5 + buf := bytes.Repeat([]byte("1"), srcSize) + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // We will append 10 copies of the object. + srcs := []minio.CopySrcOptions{} + for i := 0; i < 10; i++ { + srcs = append(srcs, minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + }) + } + + // make the last part very small + srcs[9].MatchRange = true + + args["sourceList"] = srcs + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + } + args["destination"] = dst + + ui, err := c.ComposeObject(context.Background(), dst, srcs...) 
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+		return
+	}
+
+	if ui.Size != 9*srcSize+1 {
+		logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err)
+		return
+	}
+
+	objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+
+	if objProps.Size != 9*srcSize+1 {
+		logError(testName, function, args, startTime, "", "Size mismatched! Expected "+fmt.Sprint(9*srcSize+1)+" got "+fmt.Sprint(objProps.Size), err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test concatenating multiple objects (10 copies of a 5MiB source) with the V2 client.
+func testCompose10KSourcesV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+		return
+	}
+
+	testComposeMultipleSources(c)
+}
+
+func testEncryptedEmptyObject() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+		return
+	}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object"))
+
+	// 1. create an sse-c encrypted object to copy by uploading
+	const srcSize = 0
+	var buf []byte // Empty buffer
+	args["objectName"] = "object"
+	_, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	// 2. Test CopyObject for an empty object
+	src := minio.CopySrcOptions{
+		Bucket:     bucketName,
+		Object:     "object",
+		Encryption: sse,
+	}
+
+	dst := minio.CopyDestOptions{
+		Bucket:     bucketName,
+		Object:     "new-object",
+		Encryption: sse,
+	}
+
+	if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+		function = "CopyObject(dst, src)"
+		logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// 3.
Test Key rotation + newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: sse, + } + + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "new-object", + Encryption: newSSE, + } + + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + function = "CopyObject(dst, src)" + logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) + return + } + + // 4. Download the object. + reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + defer reader.Close() + + decBytes, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) + return + } + + delete(args, "objectName") + logSuccess(testName, function, args, startTime) +} + +func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { + // initialize logging params + startTime := time.Now() + testName := getFuncNameLoc(2) + function := "CopyObject(destination, source)" + args := map[string]interface{}{} + var srcEncryption, dstEncryption encrypt.ServerSide + + // Make a new bucket in 'us-east-1' (source bucket). + err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + // 1. create an sse-c encrypted object to copy by uploading + const srcSize = 1024 * 1024 + buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB + _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ + ServerSideEncryption: sseSrc, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + if sseSrc != nil && sseSrc.Type() != encrypt.S3 { + srcEncryption = sseSrc + } + + // 2. copy object and change encryption key + src := minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: srcEncryption, + } + args["source"] = src + + dst := minio.CopyDestOptions{ + Bucket: bucketName, + Object: "dstObject", + Encryption: sseDst, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + if sseDst != nil && sseDst.Type() != encrypt.S3 { + dstEncryption = sseDst + } + // 3. 
get copied object and check if content is equal + coreClient := minio.Core{c} + reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test key rotation for source object in-place. + var newSSE encrypt.ServerSide + if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { + newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key + } + if sseSrc != nil && sseSrc.Type() == encrypt.S3 { + newSSE = encrypt.NewSSE() + } + if newSSE != nil { + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObject", + Encryption: newSSE, + } + args["destination"] = dst + + _, err = c.CopyObject(context.Background(), dst, src) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed", err) + return + } + + // Get copied object and check if content is equal + reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + decBytes, err = io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + if !bytes.Equal(decBytes, buf) { + logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) + return + } + reader.Close() + + // Test in-place decryption. 
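+	// Copying the object onto itself with no destination encryption asks the
+	// server to decrypt with the supplied SSE-C key and store the result
+	// unencrypted (subject to any bucket-default encryption).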
+ dst = minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ }
+ args["destination"] = dst
+
+ src = minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ Encryption: newSSE,
+ }
+ args["source"] = src
+ _, err = c.CopyObject(context.Background(), dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err)
+ return
+ }
+ }
+
+ // Get copied decrypted object and check if content is equal
+ reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ defer reader.Close()
+
+ decBytes, err = io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(decBytes, buf) {
+ logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test copy of an unencrypted object to an SSE-C encrypted destination
+func testUnencryptedToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst)
+}
+
+// Test copy of an unencrypted object to an SSE-S3 encrypted destination
+func testUnencryptedToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ var sseSrc encrypt.ServerSide
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an unencrypted object to an unencrypted destination
+func testUnencryptedToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ var sseSrc, sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an SSE-C encrypted object to an SSE-C encrypted destination
+func testEncryptedSSECToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an SSE-C encrypted object to an SSE-S3 encrypted destination
+func testEncryptedSSECToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an SSE-C encrypted object to an unencrypted destination
+func testEncryptedSSECToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ var sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an SSE-S3 encrypted object to an SSE-C encrypted destination
+func testEncryptedSSES3ToSSECCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an SSE-S3 encrypted object to an SSE-S3 encrypted destination
+func testEncryptedSSES3ToSSES3CopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ sseDst := encrypt.NewSSE()
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test copy of an SSE-S3 encrypted object to an unencrypted destination
+func testEncryptedSSES3ToUnencryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.NewSSE()
+ var sseDst encrypt.ServerSide
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+// Test SSE-C encrypted copy object using a V2-signature client
+func testEncryptedCopyObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err)
+ return
+ }
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject"))
+ sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject"))
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst)
+}
+
+func testDecryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object"
+ if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName))
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{
+ ServerSideEncryption: encryption,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ // encrypt.SSECopy wraps the SSE-C key into the copy-source headers so the
+ // server can decrypt the source; the destination carries no encryption, so
+ // the copied object is stored in plaintext.
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: objectName,
+ Encryption: encrypt.SSECopy(encryption),
+ }
+ args["source"] = src
+
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "decrypted-" + objectName,
+ }
+ args["destination"] = dst
+
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+ if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+func testSSECMultipartEncryptedToSSECCopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 6MB of data + buf := bytes.Repeat([]byte("abcdef"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + // Upload a 6MB object using multipart mechanism + uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + var completeParts []minio.CompletePart + + part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, + bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, + minio.PutObjectPartOptions{SSE: srcencryption}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, + bytes.NewReader(buf[5*1024*1024:]), 1024*1024, + minio.PutObjectPartOptions{SSE: srcencryption}, + ) + if err != nil { + logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) + return + } + completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
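+ // The SSE headers are passed to CopyObjectPart as a plain metadata map:
+ // encrypt.SSECopy marshals the copy-source SSE-C key headers and
+ // dstencryption marshals the destination SSE-C key headers. A startOffset
+ // of 0 with length -1 copies the entire source object, so parts 1 and 2
+ // are full copies and part 3 is a single byte, matching the size
+ // assertion further below.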
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = objInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (6*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 6*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 6*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) + return + } + + getOpts.SetRange(6*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 6*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:6*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) + return + } + if getBuf[6*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation +func testSSECEncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
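+ // Same header construction as the multipart variant above; the
+ // x-amz-copy-source-if-match header pins the source to the ETag recorded
+ // at upload, so the copy fails if the object changed in the meantime.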
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy +func testSSECEncryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcencryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + var dstencryption encrypt.ServerSide + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
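+ // Only the copy-source SSE-C headers are marshaled here: the destination
+ // is unencrypted, so the server decrypts each part during the copy and
+ // stores the result in plaintext.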
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy +func testSSECEncryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + ServerSideEncryption: srcencryption, + } + + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
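+ // Both header sets are needed here: the SSE-C copy-source key to read the
+ // source and the SSE-S3 header for the destination. The copied object is
+ // then readable without client keys, which is why the StatObject and
+ // GetObject calls below pass empty options.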
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part +func testUnencryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
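+ // The source is plaintext, so no copy-source key headers are required;
+ // only the destination SSE-C key is marshaled into the copy headers.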
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy +func testUnencryptedToUnencryptedCopyPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + putmetadata := map[string]string{ + "Content-Type": "binary/octet-stream", + } + opts := minio.PutObjectOptions{ + UserMetadata: putmetadata, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
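+ // Neither side is encrypted, so the header-marshaling loop below iterates
+ // over an empty http.Header and adds nothing; it presumably remains only
+ // for symmetry with the encrypted variants of this test.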
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy +func testUnencryptedToSSES3CopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.NewSSE() + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
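+ // Only the destination SSE-S3 header is marshaled; the server manages the
+ // key, so the object is later read back with empty GetObjectOptions.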
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+} + +// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part +func testSSES3EncryptedToSSECCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcEncryption := encrypt.NewSSE() + opts := minio.PutObjectOptions{ + UserMetadata: map[string]string{ + "Content-Type": "binary/octet-stream", + }, + ServerSideEncryption: srcEncryption, + } + uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) + return + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + return + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
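+ // Note: encrypt.DefaultPBKDF derives the SSE-C key from the password and a
+ // bucket+object-scoped salt; marshalling it below injects the SSE-C headers
+ // that each part-copy request against the encrypted destination has to carry.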
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to unencrypted copy
+func testSSES3EncryptedToUnencryptedCopyPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of
+ // `objectName` concatenated, followed by first byte of
+ // `objectName`.
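+ // Note: the destination is unencrypted here, so no encryption options are
+ // marshalled into the part-copy metadata below; the loop over the empty
+ // header is effectively a no-op and only the copy-source ETag precondition
+ // is set.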
+ metadata := make(map[string]string) + header := make(http.Header) + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+}
+
+// Test Core CopyObjectPart implementation for SSE-S3 encrypted to SSE-S3 encrypted copy
+func testSSES3EncryptedToSSES3CopyObjectPart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObjectPart(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ client, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Instantiate new core client object.
+ c := minio.Core{client}
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, client)
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ srcEncryption := encrypt.NewSSE()
+ opts := minio.PutObjectOptions{
+ UserMetadata: map[string]string{
+ "Content-Type": "binary/octet-stream",
+ },
+ ServerSideEncryption: srcEncryption,
+ }
+
+ uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject call failed", err)
+ return
+ }
+ if st.Size != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err)
+ return
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+ dstencryption := encrypt.NewSSE()
+
+ uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err)
+ return
+ }
+
+ // Content of the destination object will be two copies of
+ // `objectName` concatenated, followed by first byte of
+ // `objectName`.
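+ // Note: for SSE-S3, Marshal emits the x-amz-server-side-encryption: AES256
+ // header; the server holds the key, so unlike SSE-C the destination can be
+ // read back below without any encryption options.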
+ metadata := make(map[string]string) + header := make(http.Header) + dstencryption.Marshal(header) + + for k, v := range header { + metadata[k] = v[0] + } + + metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag + + // First of three parts + fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + return + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + return + } + + // Stat the object and check its length matches + objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + return + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + return + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf := make([]byte, 5*1024*1024) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + return + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + return + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = readFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + return + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + return + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + return + } + + logSuccess(testName, function, args, startTime) + + // Do not need to remove destBucketName its same as bucketName. 
+}
+
+func testUserMetadataCopying() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testUserMetadataCopyingWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ h.Add(k, vs[0])
+ }
+ }
+ return h
+ }
+
+ // 1. Create an object with user metadata to copy, by uploading it.
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(http.Header)
+ metadata.Set("x-amz-meta-myheader", "myvalue")
+ m := make(map[string]string)
+ m["x-amz-meta-myheader"] = "myvalue"
+ _, err = c.PutObject(context.Background(), bucketName, "srcObject",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject with metadata failed", err)
+ return
+ }
+ if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 2. create source
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObject",
+ }
+
+ // 2.1 create destination with metadata set
+ dst1 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-1",
+ UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
+ ReplaceMetadata: true,
+ }
+
+ // 3. Check that copying to an object with metadata set resets
+ // the headers on the copy.
+ args["source"] = src
+ args["destination"] = dst1
+ _, err = c.CopyObject(context.Background(), dst1, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ expectedHeaders := make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 4. Create a destination with no metadata set and the same source.
+ dst2 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-2",
+ }
+
+ // 5. Check that copying to an object with no metadata set,
+ // copies metadata.
+ args["source"] = src
+ args["destination"] = dst2
+ _, err = c.CopyObject(context.Background(), dst2, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ expectedHeaders = metadata
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 6. Compose a pair of sources.
+ dst3 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-3",
+ ReplaceMetadata: true,
+ }
+
+ function = "ComposeObject(destination, sources)"
+ args["source"] = []minio.CopySrcOptions{src, src}
+ args["destination"] = dst3
+ _, err = c.ComposeObject(context.Background(), dst3, src, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Check that no headers are copied in this case
+ if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ // 7. Compose a pair of sources with dest user metadata set.
+ dst4 := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "dstObject-4",
+ UserMetadata: map[string]string{"notmyheader": "notmyvalue"},
+ ReplaceMetadata: true,
+ }
+
+ function = "ComposeObject(destination, sources)"
+ args["source"] = []minio.CopySrcOptions{src, src}
+ args["destination"] = dst4
+ _, err = c.ComposeObject(context.Background(), dst4, src, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+ return
+ }
+
+ // Check that only the destination-supplied metadata is set in this case
+ expectedHeaders = make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testUserMetadataCopyingV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testStorageClassMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
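+ // Expected behavior, as assumed by the checks below: REDUCED_REDUNDANCY is
+ // echoed back via the x-amz-storage-class response header, STANDARD (the
+ // default) is usually omitted, and gateway deployments may omit the header
+ // entirely, which the comparisons tolerate.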
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClass")
+
+ // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) {
+ logError(testName, function, args, startTime, "", "Metadata match failed", err)
+ return
+ }
+
+ metadata = make(http.Header)
+ metadata.Set("x-amz-storage-class", "STANDARD")
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
+ logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testStorageClassInvalidMetadataPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassInvalidMetadataPutObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+func testStorageClassMetadataCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testStorageClassMetadataCopyObject()"
+ args := map[string]interface{}{}
+ testName := getFuncName()
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err)
+ return
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{})
+ args["bucket"] = bucketName
+ args["object"] = object
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ metadata := make(http.Header)
+ metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
+
+ emptyMetadata := make(http.Header)
+
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize)
+
+ // Put an object with RRS Storage class
+ _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass",
+ bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ // Make server side copy of object uploaded in previous step
+ src := minio.CopySrcOptions{
+ Bucket: bucketName,
+ Object: "srcObjectRRSClass",
+ }
+ dst := minio.CopyDestOptions{
+ Bucket: bucketName,
+ Object: "srcObjectRRSClassCopy",
+ }
+ if _, err = c.CopyObject(context.Background(), dst, src); err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err)
+ return
+ }
+
+ // Get the returned metadata
+ returnedMeta := fetchMeta("srcObjectRRSClassCopy")
+
+ // The response metadata should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways)
+ if !reflect.DeepEqual(metadata, returnedMeta) &&
!reflect.DeepEqual(emptyMetadata, returnedMeta) { + logError(testName, function, args, startTime, "", "Metadata match failed", err) + return + } + + metadata = make(http.Header) + metadata.Set("x-amz-storage-class", "STANDARD") + + // Put an object with Standard Storage class + _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", + bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Make server side copy of object uploaded in previous step + src = minio.CopySrcOptions{ + Bucket: bucketName, + Object: "srcObjectSSClass", + } + dst = minio.CopyDestOptions{ + Bucket: bucketName, + Object: "srcObjectSSClassCopy", + } + if _, err = c.CopyObject(context.Background(), dst, src); err != nil { + logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) + return + } + // Fetch the meta data of copied object + if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { + logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test put object with size -1 byte object. +func testPutObjectNoLengthV2() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader, size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "size": -1, + "opts": "", + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := bucketName + "unique" + args["objectName"] = objectName + + bufSize := dataFileMap["datafile-129-MB"] + reader := getDataReader("datafile-129-MB") + defer reader.Close() + args["size"] = bufSize + + // Upload an object. 
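+ // Note: passing a size of -1 below makes PutObject fall back to a streaming
+ // multipart upload of unknown length, reading the 129MB data file part by part.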
+ _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d, got %d", bufSize, st.Size), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Issues are revealed by trying to upload multiple files of unknown size
+ // sequentially (on 4GB machines)
+ for i := 1; i <= 4; i++ {
+ // Simulate that we could be receiving byte slices of data that we want
+ // to upload as a file
+ rpipe, wpipe := io.Pipe()
+ defer rpipe.Close()
+ go func() {
+ b := []byte("test")
+ wpipe.Write(b)
+ wpipe.Close()
+ }()
+
+ // Upload the object.
+ objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+ args["objectName"] = objectName
+
+ ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+ return
+ }
+
+ if ui.Size != 4 {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4, got %d", ui.Size), nil)
+ return
+ }
+
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err)
+ return
+ }
+
+ if st.Size != int64(4) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 4, got %d", st.Size), err)
+ return
+ }
+
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test put object with 0 byte object.
+func testPutObject0ByteV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": 0,
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ objectName := bucketName + "unique"
+ args["objectName"] = objectName
+ args["opts"] = minio.PutObjectOptions{}
+
+ // Upload an object.
+ _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if st.Size != 0 {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0, got %d", st.Size), err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test expected error cases
+func testComposeObjectErrorCases() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ testComposeObjectErrorCasesWrapper(c)
+}
+
+// Test concatenating 10K source objects (V4)
+func testCompose10KSources() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ComposeObject(destination, sourceList)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
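+ // Note: the 10K in the test name mirrors the S3 multipart limit of 10,000
+ // parts, which appears to be the upper bound on the number of sources
+ // ComposeObject can concatenate in one call.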
+ testComposeMultipleSources(c)
+}
+
+// Tests comprehensive list of all methods.
+func testFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "testFunctionalV2()"
+ functionAll := ""
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Transport: createHTTPTransport(),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing to debug.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
+ // Make a new bucket.
+ function = "MakeBucket(bucketName, location)"
+ functionAll = "MakeBucket(bucketName, location)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "location": location,
+ }
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ }
+ file.Close()
+
+ // Verify if bucket exists and you have access.
+ var exists bool
+ function = "BucketExists(bucketName)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ exists, err = c.BucketExists(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
+ }
+ if !exists {
+ logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+ return
+ }
+
+ // Make the bucket 'public read/write'.
+ function = "SetBucketPolicy(bucketName, bucketPolicy)"
+ functionAll += ", " + function
+
+ readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}`
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "bucketPolicy": readWritePolicy,
+ }
+ err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+
+ // List all buckets.
+ function = "ListBuckets()" + functionAll += ", " + function + args = nil + buckets, err := c.ListBuckets(context.Background()) + if len(buckets) == 0 { + logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) + return + } + if err != nil { + logError(testName, function, args, startTime, "", "ListBuckets failed", err) + return + } + + // Verify if previously created bucket is listed in list buckets. + bucketFound := false + for _, bucket := range buckets { + if bucket.Name == bucketName { + bucketFound = true + } + } + + // If bucket not found error out. + if !bucketFound { + logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err) + return + } + + objectName := bucketName + "unique" + + // Generate data + buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) + + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "contentType": "", + } + _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) + return + } + + objectNameNoLength := objectName + "-nolength" + args["objectName"] = objectNameNoLength + _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + st, err = c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + if st.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) + return + } + + // Instantiate a done channel to close all listing. + doneCh := make(chan struct{}) + defer close(doneCh) + + objFound := false + isRecursive := true // Recursive is true. 
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { + if obj.Key == objectName { + objFound = true + break + } + } + if !objFound { + logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) + return + } + + incompObjNotFound := true + function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "isRecursive": isRecursive, + } + for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { + if objIncompl.Key != "" { + incompObjNotFound = false + break + } + } + if !incompObjNotFound { + logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) + return + } + + function = "GetObject(bucketName, objectName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + } + newReader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + newReadBytes, err := io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch", err) + return + } + + function = "FGetObject(bucketName, objectName, fileName)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "fileName": fileName + "-f", + } + err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "FgetObject failed", err) + return + } + + // Generate presigned HEAD object url. + function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName, + "expires": 3600 * time.Second, + } + presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) + return + } + + httpClient := &http.Client{ + // Setting a sensible time out of 30secs to wait for response + // headers. Request is pro-actively canceled after 30secs + // with no response. + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + + req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) + return + } + + // Verify if presigned url works. 
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedHeadObject URL returns status %d", resp.StatusCode), err)
+ return
+ }
+ if resp.Header.Get("ETag") == "" {
+ logError(testName, function, args, startTime, "", "Got empty ETag", err)
+ return
+ }
+ resp.Body.Close()
+
+ // Generate presigned GET object url.
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ functionAll += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err := io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ // Generate presigned GET object url.
+ args["reqParams"] = reqParams
+ presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err)
+ return
+ }
+
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("PresignedGetObject URL returns status %d", resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err = io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+ // Verify content disposition.
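+ // Note: response-content-disposition is a signed request parameter that
+ // overrides the returned Content-Disposition for this download only; the
+ // stored object itself is unchanged.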
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { + logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) + return + } + + function = "PresignedPutObject(bucketName, objectName, expires)" + functionAll += ", " + function + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + "expires": 3600 * time.Second, + } + presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) + return + } + + // Generate data more than 32K + buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) + + req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + resp, err = httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) + return + } + + // Download the uploaded object to verify + args = map[string]interface{}{ + "bucketName": bucketName, + "objectName": objectName + "-presigned", + } + newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject of uploaded presigned object failed", err) + return + } + + newReadBytes, err = io.ReadAll(newReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed during get on presigned put object", err) + return + } + newReader.Close() + + if !bytes.Equal(newReadBytes, buf) { + logError(testName, function, args, startTime, "", "Bytes mismatch on presigned object upload verification", err) + return + } + + function = "PresignHeader(method, bucketName, objectName, expires, reqParams, extraHeaders)" + functionAll += ", " + function + presignExtraHeaders := map[string][]string{ + "mysecret": {"abcxxx"}, + } + args = map[string]interface{}{ + "method": "PUT", + "bucketName": bucketName, + "objectName": objectName + "-presign-custom", + "expires": 3600 * time.Second, + "extraHeaders": presignExtraHeaders, + } + _, err = c.PresignHeader(context.Background(), "PUT", bucketName, objectName+"-presign-custom", 3600*time.Second, nil, presignExtraHeaders) + if err == nil { + logError(testName, function, args, startTime, "", "Presigned with extra headers succeeded", err) + return + } + + os.Remove(fileName) + os.Remove(fileName + "-f") + logSuccess(testName, functionAll, args, startTime) +} + +// Test get object with GetObject with context +func testGetObjectContext() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObject(ctx, bucketName, objectName)" + args := map[string]interface{}{ + "ctx": "", + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	// GetObject is lazy: no request is sent until the object is first read,
+	// so the canceled context is expected to surface on Stat() below rather
+	// than on the GetObject call itself.
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match: want %d, got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test get object with FGetObject with a user provided context
+func testFGetObjectContext() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
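+	// Unlike GetObject, FGetObject is eager: it performs the download when
+	// called, so a 1ns-deadline context makes the call itself return an error,
+	// while a generous deadline lets it write the file. A rough sketch of the
+	// pattern being exercised (paths here are illustrative only):
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	//	defer cancel()
+	//	if err := c.FGetObject(ctx, bucket, object, "/tmp/out", minio.GetObjectOptions{}); err == nil {
+	//		// unexpected: the deadline should have canceled the download
+	//	}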
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	reader := getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err)
+		return
+	}
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test ranged GetObject requests against various offsets of a large object
+func testGetObjectRanges() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(ctx, bucketName, objectName, opts)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	rng := rand.NewSource(time.Now().UnixNano())
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rng, "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rng, "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// Read the data back at various offsets. SetRange is inclusive on both
+	// ends, matching HTTP `Range: bytes=start-end` semantics, hence the +1 in
+	// the expected length below.
+	tests := []struct {
+		start int64
+		end   int64
+	}{
+		{
+			start: 1024,
+			end:   1024 + 1<<20,
+		},
+		{
+			start: 20e6,
+			end:   20e6 + 10000,
+		},
+		{
+			start: 40e6,
+			end:   40e6 + 10000,
+		},
+		{
+			start: 60e6,
+			end:   60e6 + 10000,
+		},
+		{
+			start: 80e6,
+			end:   80e6 + 10000,
+		},
+		{
+			start: 120e6,
+			end:   int64(bufSize),
+		},
+	}
+	for _, test := range tests {
+		wantRC := getDataReader("datafile-129-MB")
+		io.CopyN(io.Discard, wantRC, test.start)
+		want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1))
+		opts := minio.GetObjectOptions{}
+		opts.SetRange(test.start, test.end)
+		args["opts"] = fmt.Sprintf("%+v", test)
+		obj, err := c.GetObject(ctx, bucketName, objectName, opts)
+		if err != nil {
+			logError(testName, function, args, startTime, "", "GetObject with range failed", err)
+			return
+		}
+		err = crcMatches(obj, want)
+		if err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err)
+			return
+		}
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test get object ACLs with GetObjectACL with a custom context
+func testGetObjectACLContext() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObjectACL(ctx, bucketName, objectName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
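+	// GetObjectACL is fully supported only on AWS S3. The assertions below set
+	// a canned ACL via the X-Amz-Acl metadata header and, when the endpoint is
+	// not s3.amazonaws.com, fall back to a reduced check that the server
+	// reports the default "private" ACL.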
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	reader := getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add metadata to set a canned ACL.
+	metaData := map[string]string{
+		"X-Amz-Acl": "public-read-write",
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName,
+		objectName, reader, int64(bufSize),
+		minio.PutObjectOptions{
+			ContentType:  "binary/octet-stream",
+			UserMetadata: metaData,
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the ACL back
+	objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
+		return
+	}
+
+	s, ok := objectInfo.Metadata["X-Amz-Acl"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	// Do a very limited testing if this is not AWS S3
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		if s[0] != "private" {
+			logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"private\" but got "+fmt.Sprintf("%q", s[0]), nil)
+			return
+		}
+
+		logSuccess(testName, function, args, startTime)
+		return
+	}
+
+	if s[0] != "public-read-write" {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got "+fmt.Sprintf("%q", s[0]), nil)
+		return
+	}
+
+	bufSize = dataFileMap["datafile-1-MB"]
+	reader2 := getDataReader("datafile-1-MB")
+	defer reader2.Close()
+	// Save the data
+	objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Add metadata to set explicit access grants.
+	metaData = map[string]string{
+		"X-Amz-Grant-Read":  "id=fooread@minio.go",
+		"X-Amz-Grant-Write": "id=foowrite@minio.go",
+	}
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+	args["ctx"] = ctx
+	defer cancel()
+
+	// Read the ACL back
+	objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName)
+	if getObjectACLErr != nil {
+		logError(testName, function, args, startTime, "", "GetObjectACL failed", getObjectACLErr)
+		return
+	}
+
+	if len(objectInfo.Metadata) != 3 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil)
+		return
+	}
+
+	s, ok = objectInfo.Metadata["X-Amz-Grant-Read"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	if s[0] != "fooread@minio.go" {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil)
+		return
+	}
+
+	s, ok = objectInfo.Metadata["X-Amz-Grant-Write"]
+	if !ok {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil)
+		return
+	}
+
+	if len(s) != 1 {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil)
+		return
+	}
+
+	if s[0] != "foowrite@minio.go" {
+		logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test validates putObject with context to see if request cancellation is honored for V2.
+func testPutObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(ctx, bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"size":       "",
+		"opts":       "",
+	}
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Make a new bucket.
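+	// Same context-cancellation coverage as the V4 PutObject test, but signed
+	// with AWS Signature Version 2 (credentials.NewStaticV2 above).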
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	defer cleanupBucket(bucketName, c)
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+	args["objectName"] = objectName
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	args["ctx"] = ctx
+	args["size"] = bufSize
+	defer cancel()
+
+	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err)
+		return
+	}
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	args["ctx"] = ctx
+
+	defer cancel()
+	reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+	_, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test get object with GetObject with custom context
+func testGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(ctx, bucketName, objectName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-33-kB"]
+	reader := getDataReader("datafile-33-kB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	cancel()
+
+	// As in the V4 test, GetObject is lazy, so the canceled context is
+	// expected to surface on Stat() rather than on the call itself.
+	r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err)
+		return
+	}
+	if _, err = r.Stat(); err == nil {
+		logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err)
+		return
+	}
+	r.Close()
+
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "object Stat call failed", err)
+		return
+	}
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	if err := r.Close(); err != nil {
+		logError(testName, function, args, startTime, "", "object Close() call failed", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test get object with FGetObject with custom context
+func testFGetObjectContextV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FGetObject(ctx, bucketName, objectName, fileName)"
+	args := map[string]interface{}{
+		"ctx":        "",
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	bufSize := dataFileMap["datafile-1-MB"]
+	reader := getDataReader("datafile-1-MB")
+	defer reader.Close()
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+	args["ctx"] = ctx
+	defer cancel()
+
+	fileName := "tempfile-context"
+	args["fileName"] = fileName
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err)
+		return
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+	defer cancel()
+
+	// Read the data back
+	err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err)
+		return
+	}
+
+	if err = os.Remove(fileName + "-fcontext"); err != nil {
+		logError(testName, function, args, startTime, "", "Remove file failed", err)
+		return
+	}
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// Test list objects, both V1 and V2
+func testListObjects() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)"
+	args := map[string]interface{}{
+		"bucketName":   "",
+		"objectPrefix": "",
+		"recursive":    "true",
+	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
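+	// The listing assertions below run three ways: ListObjects with UseV1, the
+	// default V2 listing, and V2 with WithMetadata. The object names include a
+	// space and a '%' on purpose, to exercise URL encoding in list responses.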
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	defer cleanupBucket(bucketName, c)
+
+	testObjects := []struct {
+		name         string
+		storageClass string
+	}{
+		// Special characters
+		{"foo bar", "STANDARD"},
+		{"foo-%", "STANDARD"},
+		{"random-object-1", "STANDARD"},
+		{"random-object-2", "REDUCED_REDUNDANCY"},
+	}
+
+	for i, object := range testObjects {
+		bufSize := dataFileMap["datafile-33-kB"]
+		reader := getDataReader("datafile-33-kB")
+		defer reader.Close()
+		_, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize),
+			minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass})
+		if err != nil {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err)
+			return
+		}
+	}
+
+	testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) {
+		var objCursor int
+
+		// check for object name and storage-class from listing object result
+		for objInfo := range listFn(context.Background(), bucket, opts) {
+			if objInfo.Err != nil {
+				logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", objInfo.Err)
+				return
+			}
+			if objInfo.Key != testObjects[objCursor].name {
+				logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", nil)
+				return
+			}
+			if objInfo.StorageClass != testObjects[objCursor].storageClass {
+				// Ignored as Gateways (Azure/GCS etc) won't return storage class
+				logIgnored(testName, function, args, startTime, "ListObjects doesn't return expected storage class")
+			}
+			objCursor++
+		}
+
+		if objCursor != len(testObjects) {
+			logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("unexpected object count"))
+			return
+		}
+	}
+
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true})
+	testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true})
+
+	logSuccess(testName, function, args, startTime)
+}
+
+// testCors is runnable against S3 itself.
+// Just provide the env var MINIO_GO_TEST_BUCKET_CORS with a bucket that is public and WILL BE DELETED.
+// Recreate this manually each time. The minio-go SDK does not support calling
+// SetPublicBucket (put-public-access-block) on S3, otherwise we could script the whole thing.
+func testCors() {
+	ctx := context.Background()
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "SetBucketCors(bucketName, cors)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"cors":       "",
+	}
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds:     credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure:    mustParseBool(os.Getenv(enableHTTPS)),
+		})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Create or reuse a bucket that will get cors settings applied to it and deleted when done + bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS") + if bucketName == "" { + bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + } + args["bucketName"] = bucketName + defer cleanupBucket(bucketName, c) + + publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}` + err = c.SetBucketPolicy(ctx, bucketName, publicPolicy) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + return + } + + // Upload an object for testing. + objectContents := `some-text-file-contents` + reader := strings.NewReader(objectContents) + bufSize := int64(len(objectContents)) + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + return + } + bucketURL := c.EndpointURL().String() + "/" + bucketName + "/" + objectURL := bucketURL + objectName + + httpClient := &http.Client{ + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + + errStrAccessForbidden := `AccessForbiddenCORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted` + testCases := []struct { + name string + + // Cors rules to apply + applyCorsRules []cors.Rule + + // Outbound request info + method string + url string + headers map[string]string + + // Wanted response + wantStatus int + wantHeaders map[string]string + wantBodyContains string + }{ + { + name: "apply bucket rules", + applyCorsRules: []cors.Rule{ + { + AllowedOrigin: []string{"https"}, // S3 documents 'https' origin, but it does not actually work, see test below. 
+ AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + }, + { + AllowedOrigin: []string{"http://www.example1.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + ExposeHeader: []string{"x-amz-server-side-encryption", "x-amz-request-id"}, + MaxAgeSeconds: 3600, + }, + { + AllowedOrigin: []string{"http://www.example2.com"}, + AllowedMethod: []string{"POST"}, + AllowedHeader: []string{"X-My-Special-Header"}, + ExposeHeader: []string{"X-AMZ-Request-ID"}, + }, + { + AllowedOrigin: []string{"http://www.example3.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"X-Example-3-Special-Header"}, + MaxAgeSeconds: 10, + }, + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{"GET"}, + AllowedHeader: []string{"*"}, + ExposeHeader: []string{"x-amz-request-id", "X-AMZ-server-side-encryption"}, + MaxAgeSeconds: 3600, + }, + { + AllowedOrigin: []string{"http://multiplemethodstest.com"}, + AllowedMethod: []string{"POST", "PUT", "DELETE"}, + AllowedHeader: []string{"x-abc-*", "x-def-*"}, + }, + { + AllowedOrigin: []string{"http://UPPERCASEEXAMPLE.com"}, + AllowedMethod: []string{"DELETE"}, + }, + { + AllowedOrigin: []string{"https://*"}, + AllowedMethod: []string{"DELETE"}, + AllowedHeader: []string{"x-abc-*", "x-def-*"}, + }, + }, + }, + { + name: "preflight to object url matches example1 rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "", + }, + }, + { + name: "preflight to bucket url matches example1 rule", + method: http.MethodOptions, + url: bucketURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + }, + }, + { + name: "preflight matches example2 rule with header given", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example2.com", + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "X-My-Special-Header", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example2.com", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "x-my-special-header", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Content-Length": "0", + }, + }, + { + name: "preflight matches example2 rule with no header given", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example2.com", 
+ "Access-Control-Request-Method": "POST", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example2.com", + "Access-Control-Allow-Methods": "POST", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Content-Length": "0", + }, + }, + { + name: "preflight matches wildcard origin rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.couldbeanything.com", + "Access-Control-Request-Method": "GET", + "Access-Control-Request-Headers": "x-custom-header,x-other-custom-header", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "x-custom-header,x-other-custom-header", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + }, + }, + { + name: "preflight does not match any rule", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.couldbeanything.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight does not match example1 rule because of method", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "POST", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "s3 processes cors rules even when request is not preflight if cors headers present test get", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Request-Method": "PUT", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + // "Access-Control-Allow-Methods": "PUT", + // "Access-Control-Max-Age": "3600", + }, + }, + { + name: "s3 processes cors rules even when request is not preflight if cors headers present test put", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "GET", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Origin": "*", + "Access-Control-Expose-Headers": "x-amz-request-id,x-amz-server-side-encryption", + // S3 additionally sets the following headers here, MinIO follows fetch spec and does not: + // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + // "Access-Control-Allow-Methods": "PUT", + // "Access-Control-Max-Age": "3600", + }, + }, + { + name: "s3 processes cors rules even when request is not preflight but there is no rule match", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Headers": "x-another-header,x-could-be-anything", + 
"Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "get request matches wildcard origin rule and returns cors headers", + method: http.MethodGet, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Origin": "*", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-request-id,X-AMZ-server-side-encryption", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "GET", + }, + }, + { + name: "head request does not match rule and returns no cors headers", + method: http.MethodHead, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.nomatchingdomainfound.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put request with origin does not match rule and returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.nomatchingdomainfound.com", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put request with no origin does not match rule and returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{}, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request with wildcard origin does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.notsecureexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight for delete request with wildcard https origin matches secureexample", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "https://www.secureexample.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request matches secureexample with wildcard https origin and request headers", + method: 
http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "https://www.secureexample.com", + "Access-Control-Allow-Headers": "x-abc-1,x-abc-second,x-def-1", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight for delete request matches secureexample rejected because request header does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.secureexample.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1,x-does-not-match", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight with https origin is documented by s3 as matching but it does not match", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "https://www.securebutdoesnotmatch.com", + "Access-Control-Request-Method": "PUT", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "put no origin no match returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{}, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put with origin match example1 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "put with origin and header match example1 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "x-could-be-anything": "myvalue", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "3600", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "put no match found returns no cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.unmatchingdomain.com", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "", + "Access-Control-Allow-Methods": "", + 
"Access-Control-Allow-Origin": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "put with origin match example3 returns cors headers", + method: http.MethodPut, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example3.com", + "X-My-Special-Header": "myvalue", + }, + wantStatus: http.StatusOK, + + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://www.example3.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Max-Age": "10", + // "Access-Control-Allow-Methods": "PUT", + }, + }, + { + name: "preflight matches example1 rule headers case is incorrect", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + // Fetch standard guarantees that these are sent lowercase, here we test what happens when they are not. + "Access-Control-Request-Headers": "X-Another-Header,X-Could-Be-Anything", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + }, + }, + { + name: "preflight matches example1 rule headers are not sorted", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.example1.com", + "Access-Control-Request-Method": "PUT", + // Fetch standard guarantees that these are sorted, test what happens when they are not. 
+ "Access-Control-Request-Headers": "a-customer-header,b-should-be-last", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Origin": "http://www.example1.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "a-customer-header,b-should-be-last", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "3600", + "Content-Length": "0", + // S3 returns the following headers, MinIO follows fetch spec and does not: + // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id", + }, + }, + { + name: "preflight with case sensitivity in origin matches uppercase", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "DELETE", + "Access-Control-Allow-Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Allow-Headers": "", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + }, + }, + { + name: "preflight with case sensitivity in origin does not match when lowercase", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://uppercaseexample.com", + "Access-Control-Request-Method": "DELETE", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight match upper case with unknown header but no header restrictions", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://UPPERCASEEXAMPLE.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-unknown-1", + }, + wantStatus: http.StatusForbidden, + wantBodyContains: errStrAccessForbidden, + }, + { + name: "preflight for delete request matches multiplemethodstest.com origin and request headers", + method: http.MethodOptions, + url: objectURL, + headers: map[string]string{ + "Origin": "http://multiplemethodstest.com", + "Access-Control-Request-Method": "DELETE", + "Access-Control-Request-Headers": "x-abc-1", + }, + wantStatus: http.StatusOK, + wantHeaders: map[string]string{ + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Origin": "http://multiplemethodstest.com", + "Access-Control-Allow-Headers": "x-abc-1", + "Access-Control-Expose-Headers": "", + "Access-Control-Max-Age": "", + // S3 returns POST, PUT, DELETE here, MinIO does not as spec does not require it. 
+ // "Access-Control-Allow-Methods": "DELETE", + }, + }, + { + name: "delete request goes ahead because cors is only for browsers and does not block on the server side", + method: http.MethodDelete, + url: objectURL, + headers: map[string]string{ + "Origin": "http://www.justrandom.com", + }, + wantStatus: http.StatusNoContent, + }, + } + + for i, test := range testCases { + testName := fmt.Sprintf("%s_%d_%s", testName, i+1, strings.ReplaceAll(test.name, " ", "_")) + + // Apply the CORS rules + if test.applyCorsRules != nil { + corsConfig := &cors.Config{ + CORSRules: test.applyCorsRules, + } + err = c.SetBucketCors(ctx, bucketName, corsConfig) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + return + } + } + + // Make request + if test.method != "" && test.url != "" { + req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request creation failed", err) + return + } + req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion) + + for k, v := range test.headers { + req.Header.Set(k, v) + } + resp, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + defer resp.Body.Close() + + // Check returned status code + if resp.StatusCode != test.wantStatus { + errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode) + logError(testName, function, args, startTime, "", errStr, nil) + return + } + + // Check returned body + if test.wantBodyContains != "" { + body, err := io.ReadAll(resp.Body) + if err != nil { + logError(testName, function, args, startTime, "", "Failed to read response body", err) + return + } + if !strings.Contains(string(body), test.wantBodyContains) { + errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body)) + logError(testName, function, args, startTime, "", errStr, nil) + return + } + } + + // Check returned response headers + for k, v := range test.wantHeaders { + gotVal := resp.Header.Get(k) + if k == "Access-Control-Expose-Headers" { + // MinIO returns this in canonical form, S3 does not. + gotVal = strings.ToLower(gotVal) + v = strings.ToLower(v) + } + // Remove all spaces, S3 adds spaces after CSV values in headers, MinIO does not. + gotVal = strings.ReplaceAll(gotVal, " ", "") + if gotVal != v { + errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal) + logError(testName, function, args, startTime, "", errStr, nil) + return + } + } + } + logSuccess(testName, function, args, startTime) + } + logSuccess(testName, function, args, startTime) +} + +func testCorsSetGetDelete() { + ctx := context.Background() + startTime := time.Now() + testName := getFuncName() + function := "SetBucketCors(bucketName, cors)" + args := map[string]interface{}{ + "bucketName": "", + "cors": "", + } + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + defer cleanupBucket(bucketName, c) + + // Set the CORS rules on the new bucket + corsRules := []cors.Rule{ + { + AllowedOrigin: []string{"http://www.example1.com"}, + AllowedMethod: []string{"PUT"}, + AllowedHeader: []string{"*"}, + }, + { + AllowedOrigin: []string{"http://www.example2.com"}, + AllowedMethod: []string{"POST"}, + AllowedHeader: []string{"X-My-Special-Header"}, + }, + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{"GET"}, + AllowedHeader: []string{"*"}, + }, + } + corsConfig := cors.NewConfig(corsRules) + err = c.SetBucketCors(ctx, bucketName, corsConfig) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + return + } + + // Get the rules and check they match what we set + gotCorsConfig, err := c.GetBucketCors(ctx, bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketCors failed", err) + return + } + if !reflect.DeepEqual(corsConfig, gotCorsConfig) { + msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig) + logError(testName, function, args, startTime, "", msg, nil) + return + } + + // Delete the rules + err = c.SetBucketCors(ctx, bucketName, nil) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err) + return + } + + // Get the rules and check they are now empty + gotCorsConfig, err = c.GetBucketCors(ctx, bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketCors failed", err) + return + } + if gotCorsConfig != nil { + logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test deleting multiple objects with object retention set in Governance mode +func testRemoveObjects() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveObjects(bucketName, objectsCh, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectPrefix": "", + "recursive": "true", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Make a new bucket. 
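+	// Objects under Governance retention cannot be removed without bypassing:
+	// the first RemoveObjects pass below is expected to report errors, and the
+	// second pass with RemoveObjectsOptions{GovernanceBypass: true} must succeed.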
+	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	bufSize := dataFileMap["datafile-129-MB"]
+	reader := getDataReader("datafile-129-MB")
+	defer reader.Close()
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
+	// Replace with a smaller object...
+	bufSize = dataFileMap["datafile-10-kB"]
+	reader = getDataReader("datafile-10-kB")
+	defer reader.Close()
+
+	_, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error uploading object", err)
+		return
+	}
+
+	t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC)
+	m := minio.RetentionMode(minio.Governance)
+	opts := minio.PutObjectRetentionOptions{
+		GovernanceBypass: false,
+		RetainUntilDate:  &t,
+		Mode:             &m,
+	}
+	err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Error setting retention", err)
+		return
+	}
+
+	objectsCh := make(chan minio.ObjectInfo)
+	// Send object names that are needed to be removed to objectsCh
+	go func() {
+		defer close(objectsCh)
+		// List all objects from a bucket-name with a matching prefix.
+		for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) {
+			if object.Err != nil {
+				logError(testName, function, args, startTime, "", "Error listing objects", object.Err)
+				return
+			}
+			objectsCh <- object
+		}
+	}()
+
+	for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) {
+		// An error is expected here because retention is set on the object
+		// and RemoveObjects is called without GovernanceBypass.
+		if rErr.Err == nil {
+			logError(testName, function, args, startTime, "", "Expected error during deletion", nil)
+			return
+		}
+	}
+
+	objectsCh1 := make(chan minio.ObjectInfo)
+
+	// Send object names that are needed to be removed to objectsCh1
+	go func() {
+		defer close(objectsCh1)
+		// List all objects from a bucket-name with a matching prefix.
+ for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { + if object.Err != nil { + logError(testName, function, args, startTime, "", "Error listing objects", object.Err) + return + } + objectsCh1 <- object + } + }() + + opts1 := minio.RemoveObjectsOptions{ + GovernanceBypass: true, + } + + for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { + // Error is not expected here because Retention is set on the object + // and RemoveObjects is called with Bypass Governance + logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test get bucket tags +func testGetBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetBucketTagging(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server failed", err) + return + } + + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test setting tags for bucket +func testSetBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "SetBucketTagging(bucketName, tags)" + args := map[string]interface{}{ + "bucketName": "", + "tags": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + tag := randString(60, rand.NewSource(time.Now().UnixNano()), "") + expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + t, err := tags.MapToBucketTags(map[string]string{ + tag: expectedValue, + }) + args["tags"] = t.String() + if err != nil { + logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err) + return + } + + err = c.SetBucketTagging(context.Background(), bucketName, t) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketTagging failed", err) + return + } + + tagging, err := c.GetBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketTagging failed", err) + return + } + + if tagging.ToMap()[tag] != expectedValue { + msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue) + logError(testName, function, args, startTime, "", msg, err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Test removing bucket tags +func testRemoveBucketTagging() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "RemoveBucketTagging(bucketName)" + args := map[string]interface{}{ + "bucketName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. 
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + tag := randString(60, rand.NewSource(time.Now().UnixNano()), "") + expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + t, err := tags.MapToBucketTags(map[string]string{ + tag: expectedValue, + }) + if err != nil { + logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err) + return + } + + err = c.SetBucketTagging(context.Background(), bucketName, t) + if err != nil { + logError(testName, function, args, startTime, "", "SetBucketTagging failed", err) + return + } + + tagging, err := c.GetBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "GetBucketTagging failed", err) + return + } + + if tagging.ToMap()[tag] != expectedValue { + msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue) + logError(testName, function, args, startTime, "", msg, err) + return + } + + err = c.RemoveBucketTagging(context.Background(), bucketName) + if err != nil { + logError(testName, function, args, startTime, "", "RemoveBucketTagging failed", err) + return + } + + _, err = c.GetBucketTagging(context.Background(), bucketName) + if minio.ToErrorResponse(err).Code != "NoSuchTagSet" { + logError(testName, function, args, startTime, "", "Invalid error from server", err) + return + } + + // Delete all objects and buckets + if err = cleanupVersionedBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "CleanupBucket failed", err) + return + } + + logSuccess(testName, function, args, startTime) +} + +// Convert string to bool and always return false if any error +func mustParseBool(str string) bool { + b, err := strconv.ParseBool(str) + if err != nil { + return false + } + return b +} + +func main() { + slog.SetDefault(slog.New(slog.NewJSONHandler( + os.Stdout, + &slog.HandlerOptions{ + Level: slog.LevelInfo, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.MessageKey || a.Value.String() == "" { + return slog.Attr{} + } + + return a + }, + }, + ))) + + tls := mustParseBool(os.Getenv(enableHTTPS)) + kms := mustParseBool(os.Getenv(enableKMS)) + if os.Getenv(enableKMS) == "" { + // Default to KMS tests. 
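+		// mustParseBool (defined above) returns false for both "false" and an
+		// unset variable, so KMS tests are defaulted on only when the variable
+		// is truly absent.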
+ kms = true + } + + // execute tests + if isFullMode() { + testCorsSetGetDelete() + testCors() + testListMultipartUpload() + testGetObjectAttributes() + testGetObjectAttributesErrorCases() + testMakeBucketErrorV2() + testGetObjectClosedTwiceV2() + testFPutObjectV2() + testMakeBucketRegionsV2() + testGetObjectReadSeekFunctionalV2() + testGetObjectReadAtFunctionalV2() + testGetObjectRanges() + testCopyObjectV2() + testFunctionalV2() + testComposeObjectErrorCasesV2() + testCompose10KSourcesV2() + testUserMetadataCopyingV2() + testPutObjectWithChecksums() + testPutObjectWithTrailingChecksums() + testPutMultipartObjectWithChecksums(false) + testPutMultipartObjectWithChecksums(true) + testPutObject0ByteV2() + testPutObjectNoLengthV2() + testPutObjectsUnknownV2() + testGetObjectContextV2() + testFPutObjectContextV2() + testFGetObjectContextV2() + testPutObjectContextV2() + testPutObjectWithVersioning() + testMakeBucketError() + testMakeBucketRegions() + testPutObjectWithMetadata() + testPutObjectReadAt() + testPutObjectStreaming() + testGetObjectSeekEnd() + testGetObjectClosedTwice() + testGetObjectS3Zip() + testRemoveMultipleObjects() + testRemoveMultipleObjectsWithResult() + testFPutObjectMultipart() + testFPutObject() + testGetObjectReadSeekFunctional() + testGetObjectReadAtFunctional() + testGetObjectReadAtWhenEOFWasReached() + testPresignedPostPolicy() + testCopyObject() + testComposeObjectErrorCases() + testCompose10KSources() + testUserMetadataCopying() + testBucketNotification() + testFunctional() + testGetObjectModified() + testPutObjectUploadSeekedObject() + testGetObjectContext() + testFPutObjectContext() + testFGetObjectContext() + testGetObjectACLContext() + testPutObjectContext() + testStorageClassMetadataPutObject() + testStorageClassInvalidMetadataPutObject() + testStorageClassMetadataCopyObject() + testPutObjectWithContentLanguage() + testListObjects() + testRemoveObjects() + testListObjectVersions() + testStatObjectWithVersioning() + testGetObjectWithVersioning() + testCopyObjectWithVersioning() + testConcurrentCopyObjectWithVersioning() + testComposeObjectWithVersioning() + testRemoveObjectWithVersioning() + testRemoveObjectsWithVersioning() + testObjectTaggingWithVersioning() + testTrailingChecksums() + testPutObjectWithAutomaticChecksums() + testGetBucketTagging() + testSetBucketTagging() + testRemoveBucketTagging() + + // SSE-C tests will only work over TLS connection. 
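+	// SSE-C sends the customer-supplied encryption key in request headers,
+	// so servers reject it over plaintext HTTP.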
+ if tls { + testGetObjectAttributesSSECEncryption() + testSSECEncryptionPutGet() + testSSECEncryptionFPut() + testSSECEncryptedGetObjectReadAtFunctional() + testSSECEncryptedGetObjectReadSeekFunctional() + testEncryptedCopyObjectV2() + testEncryptedSSECToSSECCopyObject() + testEncryptedSSECToUnencryptedCopyObject() + testUnencryptedToSSECCopyObject() + testUnencryptedToUnencryptedCopyObject() + testEncryptedEmptyObject() + testDecryptedCopyObject() + testSSECEncryptedToSSECCopyObjectPart() + testSSECMultipartEncryptedToSSECCopyObjectPart() + testSSECEncryptedToUnencryptedCopyPart() + testUnencryptedToSSECCopyObjectPart() + testUnencryptedToUnencryptedCopyPart() + testEncryptedSSECToSSES3CopyObject() + testEncryptedSSES3ToSSECCopyObject() + testSSECEncryptedToSSES3CopyObjectPart() + testSSES3EncryptedToSSECCopyObjectPart() + } + + // KMS tests + if kms { + testSSES3EncryptionPutGet() + testSSES3EncryptionFPut() + testSSES3EncryptedGetObjectReadAtFunctional() + testSSES3EncryptedGetObjectReadSeekFunctional() + testEncryptedSSES3ToSSES3CopyObject() + testEncryptedSSES3ToUnencryptedCopyObject() + testUnencryptedToSSES3CopyObject() + testUnencryptedToSSES3CopyObjectPart() + testSSES3EncryptedToUnencryptedCopyPart() + testSSES3EncryptedToSSES3CopyObjectPart() + } + } else { + testFunctional() + testFunctionalV2() + } +} diff --git a/vendor/github.com/minio/minio-go/v7/hook-reader.go b/vendor/github.com/minio/minio-go/v7/hook-reader.go new file mode 100644 index 000000000..07bc7dbcf --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/hook-reader.go @@ -0,0 +1,101 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "io" + "sync" +) + +// hookReader hooks additional reader in the source stream. It is +// useful for making progress bars. Second reader is appropriately +// notified about the exact number of bytes read from the primary +// source on each Read operation. +type hookReader struct { + mu sync.RWMutex + source io.Reader + hook io.Reader +} + +// Seek implements io.Seeker. Seeks source first, and if necessary +// seeks hook if Seek method is appropriately found. +func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { + hr.mu.Lock() + defer hr.mu.Unlock() + + // Verify for source has embedded Seeker, use it. + sourceSeeker, ok := hr.source.(io.Seeker) + if ok { + n, err = sourceSeeker.Seek(offset, whence) + if err != nil { + return 0, err + } + } + + if hr.hook != nil { + // Verify if hook has embedded Seeker, use it. + hookSeeker, ok := hr.hook.(io.Seeker) + if ok { + var m int64 + m, err = hookSeeker.Seek(offset, whence) + if err != nil { + return 0, err + } + if n != m { + return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n) + } + } + } + + return n, nil +} + +// Read implements io.Reader. 
Always reads from the source, the return +// value 'n' number of bytes are reported through the hook. Returns +// error for all non io.EOF conditions. +func (hr *hookReader) Read(b []byte) (n int, err error) { + hr.mu.RLock() + defer hr.mu.RUnlock() + + n, err = hr.source.Read(b) + if err != nil && err != io.EOF { + return n, err + } + if hr.hook != nil { + // Progress the hook with the total read bytes from the source. + if _, herr := hr.hook.Read(b[:n]); herr != nil { + if herr != io.EOF { + return n, herr + } + } + } + return n, err +} + +// newHook returns a io.ReadSeeker which implements hookReader that +// reports the data read from the source to the hook. +func newHook(source, hook io.Reader) io.Reader { + if hook == nil { + return &hookReader{source: source} + } + return &hookReader{ + source: source, + hook: hook, + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go new file mode 100644 index 000000000..e71864ee9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go @@ -0,0 +1,91 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cors + +import ( + "encoding/xml" + "fmt" + "io" + "strings" + + "github.com/dustin/go-humanize" +) + +const defaultXMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" + +// Config is the container for a CORS configuration for a bucket. +type Config struct { + XMLNS string `xml:"xmlns,attr,omitempty"` + XMLName xml.Name `xml:"CORSConfiguration"` + CORSRules []Rule `xml:"CORSRule"` +} + +// Rule is a single rule in a CORS configuration. +type Rule struct { + AllowedHeader []string `xml:"AllowedHeader,omitempty"` + AllowedMethod []string `xml:"AllowedMethod,omitempty"` + AllowedOrigin []string `xml:"AllowedOrigin,omitempty"` + ExposeHeader []string `xml:"ExposeHeader,omitempty"` + ID string `xml:"ID,omitempty"` + MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"` +} + +// NewConfig creates a new CORS configuration with the given rules. +func NewConfig(rules []Rule) *Config { + return &Config{ + XMLNS: defaultXMLNS, + XMLName: xml.Name{ + Local: "CORSConfiguration", + Space: defaultXMLNS, + }, + CORSRules: rules, + } +} + +// ParseBucketCorsConfig parses a CORS configuration in XML from an io.Reader. 
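+//
+// A minimal usage sketch (the reader contents are illustrative):
+//
+//	cfg, err := cors.ParseBucketCorsConfig(strings.NewReader(corsXML))
+//	if err != nil {
+//		// handle the decode error
+//	}
+//	_ = cfg.CORSRules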
+func ParseBucketCorsConfig(reader io.Reader) (*Config, error) { + var c Config + + // Max size of cors document is 64KiB according to https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html + // This limiter is just for safety so has a max of 128KiB + err := xml.NewDecoder(io.LimitReader(reader, 128*humanize.KiByte)).Decode(&c) + if err != nil { + return nil, fmt.Errorf("decoding xml: %w", err) + } + if c.XMLNS == "" { + c.XMLNS = defaultXMLNS + } + for i, rule := range c.CORSRules { + for j, method := range rule.AllowedMethod { + c.CORSRules[i].AllowedMethod[j] = strings.ToUpper(method) + } + } + return &c, nil +} + +// ToXML marshals the CORS configuration to XML. +func (c Config) ToXML() ([]byte, error) { + if c.XMLNS == "" { + c.XMLNS = defaultXMLNS + } + data, err := xml.Marshal(&c) + if err != nil { + return nil, fmt.Errorf("marshaling xml: %w", err) + } + return append([]byte(xml.Header), data...), nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go new file mode 100644 index 000000000..d245bc07a --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -0,0 +1,243 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "encoding/xml" + "errors" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/signer" +) + +// AssumeRoleResponse contains the result of successful AssumeRole request. +type AssumeRoleResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"` + + Result AssumeRoleResult `xml:"AssumeRoleResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// AssumeRoleResult - Contains the response to a successful AssumeRole +// request, including temporary credentials that can be used to make +// MinIO API requests. +type AssumeRoleResult struct { + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. 
+ Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize int `xml:",omitempty"` +} + +// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if +// those credentials are expired. +type STSAssumeRole struct { + Expiry + + // Required http Client to use when connecting to MinIO STS service. + Client *http.Client + + // STS endpoint to fetch STS credentials. + STSEndpoint string + + // various options for this request. + Options STSAssumeRoleOptions +} + +// STSAssumeRoleOptions collection of various input options +// to obtain AssumeRole credentials. +type STSAssumeRoleOptions struct { + // Mandatory inputs. + AccessKey string + SecretKey string + + SessionToken string // Optional if the first request is made with temporary credentials. + Policy string // Optional to assign a policy to the assumed role + + Location string // Optional commonly needed with AWS STS. + DurationSeconds int // Optional defaults to 1 hour. + + // Optional only valid if using with AWS STS + RoleARN string + RoleSessionName string + ExternalID string +} + +// NewSTSAssumeRole returns a pointer to a new +// Credentials object wrapping the STSAssumeRole. +func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if opts.AccessKey == "" || opts.SecretKey == "" { + return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") + } + return New(&STSAssumeRole{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + Options: opts, + }), nil +} + +const defaultDurationSeconds = 3600 + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. 
+ // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { + v := url.Values{} + v.Set("Action", "AssumeRole") + v.Set("Version", STSVersion) + if opts.RoleARN != "" { + v.Set("RoleArn", opts.RoleARN) + } + if opts.RoleSessionName != "" { + v.Set("RoleSessionName", opts.RoleSessionName) + } + if opts.DurationSeconds > defaultDurationSeconds { + v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) + } else { + v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) + } + if opts.Policy != "" { + v.Set("Policy", opts.Policy) + } + if opts.ExternalID != "" { + v.Set("ExternalId", opts.ExternalID) + } + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleResponse{}, err + } + u.Path = "/" + + postBody := strings.NewReader(v.Encode()) + hash := sha256.New() + if _, err = io.Copy(hash, postBody); err != nil { + return AssumeRoleResponse{}, err + } + postBody.Seek(0, 0) + + req, err := http.NewRequest(http.MethodPost, u.String(), postBody) + if err != nil { + return AssumeRoleResponse{}, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) + if opts.SessionToken != "" { + req.Header.Set("X-Amz-Security-Token", opts.SessionToken) + } + req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleResponse{}, err + } + defer closeResponse(resp) + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return AssumeRoleResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleResponse{}, errResp + } + + a := AssumeRoleResponse{} + if _, err = xmlDecodeAndBody(resp.Body, &a); err != nil { + return AssumeRoleResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSAssumeRole) Retrieve() (Value, error) { + a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go new file mode 100644 index 000000000..ddccfb173 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -0,0 +1,88 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve a valid credentials Value, ChainProvider's
+// Retrieve() will return the no-credentials value.
+//
+// If a Provider is found which returns a valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() is true.
+//
+//	creds := credentials.NewChainCredentials(
+//		[]credentials.Provider{
+//			&credentials.EnvAWS{},
+//			&credentials.EnvMinio{},
+//		})
+//
+//	// Usage of ChainCredentials.
+//	mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+type Chain struct {
+	Providers []Provider
+	curr      Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return New(&Chain{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or no credentials (anonymous)
+// if no credentials provider returned any value.
+//
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+	for _, p := range c.Providers {
+		creds, _ := p.Retrieve()
+		// Always prioritize non-anonymous providers, if any.
+		if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
+			continue
+		}
+		c.curr = p
+		return creds, nil
+	}
+	// At this point we have exhausted all the providers and
+	// are left without any credentials; return anonymous.
+	return Value{
+		SignerType: SignatureAnonymous,
+	}, nil
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
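+//
+// Note that Retrieve above never returns an error, so IsExpired is the
+// only signal that causes the provider chain to be walked again.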
+func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample new file mode 100644 index 000000000..d793c9e0e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.min.io", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go new file mode 100644 index 000000000..68f9b3815 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -0,0 +1,196 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "sync" + "time" +) + +const ( + // STSVersion sts version string + STSVersion = "2011-06-15" + + // How much duration to slash from the given expiration duration + defaultExpiryWindow = 0.8 +) + +// A Value is the S3 credentials value for individual credential fields. +type Value struct { + // S3 Access key ID + AccessKeyID string + + // S3 Secret Access Key + SecretAccessKey string + + // S3 Session Token + SessionToken string + + // Expiration of this credentials - null means no expiration associated + Expiration time.Time + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. 
+// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + cut := window + if cut < 0 { + expireIn := expiration.Sub(e.CurrentTime()) + cut = time.Duration(float64(expireIn) * (1 - defaultExpiryWindow)) + } + e.expiration = expiration.Add(-cut) +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for synchronous safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + if c == nil { + return Value{}, nil + } + + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
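+// Callers must already hold the Credentials mutex, as Get and IsExpired
+// above do before calling it.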
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json new file mode 100644 index 000000000..afbfad559 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.json @@ -0,0 +1,7 @@ +{ + "Version": 1, + "SessionToken": "token", + "AccessKeyId": "accessKey", + "SecretAccessKey": "secret", + "Expiration": "9999-04-27T16:02:25.000Z" +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample new file mode 100644 index 000000000..e2dc1bfec --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample @@ -0,0 +1,15 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret + +[with_process] +credential_process = /bin/cat credentials.json diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go new file mode 100644 index 000000000..fbfb10549 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go @@ -0,0 +1,60 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// # Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. 
To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+package credentials
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
new file mode 100644
index 000000000..b6e60d0e1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Session Token: AWS_SESSION_TOKEN.
type EnvAWS struct {
+	retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+	return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	signerType := SignatureV4
+	if id == "" || secret == "" {
+		signerType = SignatureAnonymous
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		SignerType:      signerType,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
new file mode 100644
index 000000000..5bfeab140
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go
@@ -0,0 +1,68 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used, in order of preference:
+//
+// * Access Key ID: MINIO_ROOT_USER, falling back to MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_ROOT_PASSWORD, falling back to MINIO_SECRET_KEY.
+type EnvMinio struct {
+	retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+	return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("MINIO_ROOT_USER")
+	secret := os.Getenv("MINIO_ROOT_PASSWORD")
+
+	signerType := SignatureV4
+	if id == "" || secret == "" {
+		id = os.Getenv("MINIO_ACCESS_KEY")
+		secret = os.Getenv("MINIO_SECRET_KEY")
+		if id == "" || secret == "" {
+			signerType = SignatureAnonymous
+		}
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SignerType:      signerType,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
new file mode 100644
index 000000000..07a9c2f09
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/error_response.go
@@ -0,0 +1,95 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2021 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorResponse - Is the typed error returned.
+// ErrorResponse struct should be comparable since it is compared inside
+// golang http API (https://github.com/golang/go/issues/29768)
+type ErrorResponse struct {
+	XMLName  xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ ErrorResponse" json:"-"`
+	STSError struct {
+		Type    string `xml:"Type"`
+		Code    string `xml:"Code"`
+		Message string `xml:"Message"`
+	} `xml:"Error"`
+	RequestID string `xml:"RequestId"`
+}
+
+// Error - Is the typed error returned by all API operations.
+type Error struct {
+	XMLName    xml.Name `xml:"Error" json:"-"`
+	Code       string
+	Message    string
+	BucketName string
+	Key        string
+	Resource   string
+	RequestID  string `xml:"RequestId"`
+	HostID     string `xml:"HostId"`
+
+	// Region where the bucket is located. This header is returned
+	// only in HEAD bucket and ListObjects response.
+	Region string
+
+	// Captures the server string returned in response header.
+	Server string
+
+	// Underlying HTTP status code for the returned error
+	StatusCode int `xml:"-" json:"-"`
+}
+
+// Error - Returns S3 error string.
+func (e Error) Error() string { + if e.Message == "" { + return fmt.Sprintf("Error response code %s.", e.Code) + } + return e.Message +} + +// Error - Returns STS error string. +func (e ErrorResponse) Error() string { + if e.STSError.Message == "" { + return fmt.Sprintf("Error response code %s.", e.STSError.Code) + } + return e.STSError.Message +} + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// xmlDecodeAndBody reads the whole body up to 1MB and +// tries to XML decode it into v. +// The body that was read and any error from reading or decoding is returned. +func xmlDecodeAndBody(bodyReader io.Reader, v interface{}) ([]byte, error) { + // read the whole body (up to 1MB) + const maxBodyLength = 1 << 20 + body, err := io.ReadAll(io.LimitReader(bodyReader, maxBodyLength)) + if err != nil { + return nil, err + } + return bytes.TrimSpace(body), xmlDecoder(bytes.NewReader(body), v) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go new file mode 100644 index 000000000..541e1a72f --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -0,0 +1,158 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "errors" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/go-ini/ini" +) + +// A externalProcessCredentials stores the output of a credential_process +type externalProcessCredentials struct { + Version int + SessionToken string + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + Expiration time.Time +} + +// A FileAWSCredentials retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type FileAWSCredentials struct { + Expiry + + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileAWSCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. 
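+//
+// For example (the empty arguments fall back to the defaults described
+// on FileAWSCredentials):
+//
+//	creds := credentials.NewFileAWSCredentials("", "default")
+//	value, err := creds.Get()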
+func NewFileAWSCredentials(filename, profile string) *Credentials { + return New(&FileAWSCredentials{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileAWSCredentials) Retrieve() (Value, error) { + if p.Filename == "" { + p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.Filename == "" { + homeDir, err := os.UserHomeDir() + if err != nil { + return Value{}, err + } + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + } + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + if p.Profile == "" { + p.Profile = "default" + } + } + + p.retrieved = false + + iniProfile, err := loadProfile(p.Filename, p.Profile) + if err != nil { + return Value{}, err + } + + // Default to empty string if not found. + id := iniProfile.Key("aws_access_key_id") + // Default to empty string if not found. + secret := iniProfile.Key("aws_secret_access_key") + // Default to empty string if not found. + token := iniProfile.Key("aws_session_token") + + // If credential_process is defined, obtain credentials by executing + // the external process + credentialProcess := strings.TrimSpace(iniProfile.Key("credential_process").String()) + if credentialProcess != "" { + args := strings.Fields(credentialProcess) + if len(args) <= 1 { + return Value{}, errors.New("invalid credential process args") + } + cmd := exec.Command(args[0], args[1:]...) + out, err := cmd.Output() + if err != nil { + return Value{}, err + } + var externalProcessCredentials externalProcessCredentials + err = json.Unmarshal([]byte(out), &externalProcessCredentials) + if err != nil { + return Value{}, err + } + p.retrieved = true + p.SetExpiration(externalProcessCredentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: externalProcessCredentials.AccessKeyID, + SecretAccessKey: externalProcessCredentials.SecretAccessKey, + SessionToken: externalProcessCredentials.SessionToken, + Expiration: externalProcessCredentials.Expiration, + SignerType: SignatureV4, + }, nil + } + p.retrieved = true + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + SignerType: SignatureV4, + }, nil +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (*ini.Section, error) { + config, err := ini.Load(filename) + if err != nil { + return nil, err + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return nil, err + } + return iniProfile, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go new file mode 100644 index 000000000..750e26ffa --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -0,0 +1,137 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "os" + "path/filepath" + "runtime" + + "github.com/goccy/go-json" +) + +// A FileMinioClient retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Configuration file example: $HOME/.mc/config.json +type FileMinioClient struct { + // Path to the shared credentials file. + // + // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.mc/config.json" + // Windows: "%USERALIAS%\mc\config.json" + Filename string + + // MinIO Alias to extract credentials from the shared credentials file. If empty + // will default to environment variable "MINIO_ALIAS" or "s3" if + // environment variable is also not set. + Alias string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileMinioClient returns a pointer to a new Credentials object +// wrapping the Alias file provider. +func NewFileMinioClient(filename, alias string) *Credentials { + return New(&FileMinioClient{ + Filename: filename, + Alias: alias, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileMinioClient) Retrieve() (Value, error) { + if p.Filename == "" { + if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { + p.Filename = value + } else { + homeDir, err := os.UserHomeDir() + if err != nil { + return Value{}, err + } + p.Filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.Filename = filepath.Join(homeDir, "mc", "config.json") + } + } + } + + if p.Alias == "" { + p.Alias = os.Getenv("MINIO_ALIAS") + if p.Alias == "" { + p.Alias = "s3" + } + } + + p.retrieved = false + + hostCfg, err := loadAlias(p.Filename, p.Alias) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return Value{ + AccessKeyID: hostCfg.AccessKey, + SecretAccessKey: hostCfg.SecretKey, + SignerType: parseSignatureType(hostCfg.API), + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileMinioClient) IsExpired() bool { + return !p.retrieved +} + +// hostConfig configuration of a host. +type hostConfig struct { + URL string `json:"url"` + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + API string `json:"api"` +} + +// config config version. +type config struct { + Version string `json:"version"` + Hosts map[string]hostConfig `json:"hosts"` + Aliases map[string]hostConfig `json:"aliases"` +} + +// loadAliass loads from the file pointed to by shared credentials filename for alias. +// The credentials retrieved from the alias will be returned or error. Error will be +// returned if it fails to read from the file. 
+func loadAlias(filename, alias string) (hostConfig, error) {
+	cfg := &config{}
+	configBytes, err := os.ReadFile(filename)
+	if err != nil {
+		return hostConfig{}, err
+	}
+	if err = json.Unmarshal(configBytes, cfg); err != nil {
+		return hostConfig{}, err
+	}
+
+	if cfg.Version == "10" {
+		return cfg.Aliases[alias], nil
+	}
+
+	return cfg.Hosts[alias], nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
new file mode 100644
index 000000000..ea4b3ef93
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go
@@ -0,0 +1,456 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/goccy/go-json"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow allows the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so race conditions with expiring credentials do not cause
+// requests to fail unexpectedly due to ExpiredTokenException exceptions.
+// DefaultExpiryWindow can be used as parameter to (*Expiry).SetExpiration.
+// When used, a token refresh is triggered once 80% of the time until the
+// actual expiration has elapsed.
+const DefaultExpiryWindow = -1
+
+// An IAM retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+type IAM struct {
+	Expiry
+
+	// Required http Client to use when connecting to IAM metadata service.
+	Client *http.Client
+
+	// Custom endpoint to fetch IAM role credentials.
+	Endpoint string
+
+	// Region is a configurable custom region for STS.
+	Region string
+
+	// Support for container authorization token https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html
+	Container struct {
+		AuthorizationToken     string
+		AuthorizationTokenFile string
+		CredentialsFullURI     string
+		CredentialsRelativeURI string
+	}
+
+	// EKS based k8s RBAC authorization - https://docs.aws.amazon.com/eks/latest/userguide/pod-configuration.html
+	EKSIdentity struct {
+		TokenFile       string
+		RoleARN         string
+		RoleSessionName string
+	}
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+	DefaultIAMRoleEndpoint      = "http://169.254.169.254"
+	DefaultECSRoleEndpoint      = "http://169.254.170.2"
+	DefaultSTSRoleEndpoint      = "https://sts.amazonaws.com"
+	DefaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
+	TokenRequestTTLHeader       = "X-aws-ec2-metadata-token-ttl-seconds"
+	TokenPath                   = "/latest/api/token"
+	TokenTTL                    = "21600"
+	TokenRequestHeader          = "X-aws-ec2-metadata-token"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
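+//
+// Illustrative sketch (assumes the Credentials.Get accessor from this
+// package; an empty endpoint falls back to the defaults described above):
+//
+//	creds := NewIAM("")
+//	value, err := creds.Get()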
+func NewIAM(endpoint string) *Credentials {
+	return New(&IAM{
+		Client: &http.Client{
+			Transport: http.DefaultTransport,
+		},
+		Endpoint: endpoint,
+	})
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error is returned if the request fails or the desired credentials
+// cannot be extracted.
+func (m *IAM) Retrieve() (Value, error) {
+	token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN")
+	if token == "" {
+		token = m.Container.AuthorizationToken
+	}
+
+	tokenFile := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE")
+	if tokenFile == "" {
+		tokenFile = m.Container.AuthorizationTokenFile
+	}
+
+	relativeURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+	if relativeURI == "" {
+		relativeURI = m.Container.CredentialsRelativeURI
+	}
+
+	fullURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
+	if fullURI == "" {
+		fullURI = m.Container.CredentialsFullURI
+	}
+
+	identityFile := os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
+	if identityFile == "" {
+		identityFile = m.EKSIdentity.TokenFile
+	}
+
+	roleArn := os.Getenv("AWS_ROLE_ARN")
+	if roleArn == "" {
+		roleArn = m.EKSIdentity.RoleARN
+	}
+
+	roleSessionName := os.Getenv("AWS_ROLE_SESSION_NAME")
+	if roleSessionName == "" {
+		roleSessionName = m.EKSIdentity.RoleSessionName
+	}
+
+	region := os.Getenv("AWS_REGION")
+	if region == "" {
+		region = m.Region
+	}
+
+	var roleCreds ec2RoleCredRespBody
+	var err error
+
+	endpoint := m.Endpoint
+	switch {
+	case identityFile != "":
+		if len(endpoint) == 0 {
+			if region != "" {
+				if strings.HasPrefix(region, "cn-") {
+					endpoint = "https://sts." + region + ".amazonaws.com.cn"
+				} else {
+					endpoint = "https://sts." + region + ".amazonaws.com"
+				}
+			} else {
+				endpoint = DefaultSTSRoleEndpoint
+			}
+		}
+
+		creds := &STSWebIdentity{
+			Client:      m.Client,
+			STSEndpoint: endpoint,
+			GetWebIDTokenExpiry: func() (*WebIdentityToken, error) {
+				token, err := os.ReadFile(identityFile)
+				if err != nil {
+					return nil, err
+				}
+
+				return &WebIdentityToken{Token: string(token)}, nil
+			},
+			RoleARN:         roleArn,
+			roleSessionName: roleSessionName,
+		}
+
+		stsWebIdentityCreds, err := creds.Retrieve()
+		if err == nil {
+			m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
+		}
+		return stsWebIdentityCreds, err
+
+	case relativeURI != "":
+		if len(endpoint) == 0 {
+			endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI)
+		}
+
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+	case tokenFile != "" && fullURI != "":
+		endpoint = fullURI
+		roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile)
+
+	case fullURI != "":
+		if len(endpoint) == 0 {
+			endpoint = fullURI
+			var ok bool
+			if ok, err = isLoopback(endpoint); !ok {
+				if err == nil {
+					err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
+				}
+				break
+			}
+		}
+
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token)
+
+	default:
+		roleCreds, err = getCredentials(m.Client, endpoint)
+	}
+
+	if err != nil {
+		return Value{}, err
+	}
+	// The expiry window is DefaultExpiryWindow: the credentials refresh once
+	// 80% of their lifetime has elapsed.
+	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+	return Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		Expiration:      roleCreds.Expiration,
+		SignerType:      SignatureV4,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
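+//
+// Illustrative response body (decoded with case-insensitive field matching;
+// all values are placeholders):
+//
+//	{"Code":"Success","AccessKeyId":"...","SecretAccessKey":"...","Token":"...","Expiration":"2024-01-01T00:00:00Z"}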
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+
+	// Unused params.
+	LastUpdated time.Time
+	Type        string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	u.Path = DefaultIAMSecurityCredsPath
+	return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if the
+// request cannot be made or the response cannot be read.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) {
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	if token != "" {
+		req.Header.Add(TokenRequestHeader, token)
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return nil, errors.New(resp.Status)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Body)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return credsList, nil
+}
+
+func getEcsTaskCredentials(client *http.Client, endpoint, token string) (ec2RoleCredRespBody, error) {
+	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
+	if err != nil {
+		return ec2RoleCredRespBody{}, err
+	}
+
+	if token != "" {
+		req.Header.Set("Authorization", token)
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return ec2RoleCredRespBody{}, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return ec2RoleCredRespBody{}, errors.New(resp.Status)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{}, err
+	}
+
+	return respCreds, nil
+}
+
+func getEKSPodIdentityCredentials(client *http.Client, endpoint string, tokenFile string) (ec2RoleCredRespBody, error) {
+	if tokenFile != "" {
+		bytes, err := os.ReadFile(tokenFile)
+		if err != nil {
+			return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: failed to read token file:%s", err)
+		}
+		token := string(bytes)
+		return getEcsTaskCredentials(client, endpoint, token)
+	}
+	return ec2RoleCredRespBody{}, fmt.Errorf("getEKSPodIdentityCredentials: no tokenFile found")
+}
+
+func fetchIMDSToken(client *http.Client, endpoint string) (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint+TokenPath, nil)
+	if err != nil {
+		return "", err
+	}
+	req.Header.Add(TokenRequestTTLHeader, TokenTTL)
+	resp, err := client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	data, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return "", errors.New(resp.Status)
+	}
+	return string(data), nil
+}
+
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
+// +// If the credentials cannot be found, or there is an error +// reading the response an error will be returned. +func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + if endpoint == "" { + endpoint = DefaultIAMRoleEndpoint + } + + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + token, err := fetchIMDSToken(client, endpoint) + if err != nil { + // Return only errors for valid situations, if the IMDSv2 is not enabled + // we will not be able to get the token, in such a situation we have + // to rely on IMDSv1 behavior as a fallback, this check ensures that. + // Refer https://github.com/minio/minio-go/issues/1866 + if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { + return ec2RoleCredRespBody{}, err + } + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + u, err := getIAMRoleURL(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + roleNames, err := listRoleNames(client, u, token) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if len(roleNames) == 0 { + return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // - An instance profile can contain only one IAM role. This limit cannot be increased. + roleName := roleNames[0] + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // The following command retrieves the security credentials for an + // IAM role named `s3access`. + // + // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access + // + u.Path = path.Join(u.Path, roleName) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + if token != "" { + req.Header.Add(TokenRequestHeader, token) + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} + +// isLoopback identifies if a uri's host is on a loopback address +func isLoopback(uri string) (bool, error) { + u, err := url.Parse(uri) + if err != nil { + return false, err + } + + host := u.Hostname() + if len(host) == 0 { + return false, fmt.Errorf("can't parse host from uri: %s", uri) + } + + ips, err := net.LookupHost(host) + if err != nil { + return false, err + } + for _, ip := range ips { + if !net.ParseIP(ip).IsLoopback() { + return false, nil + } + } + + return true, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go new file mode 100644 index 000000000..b79433305 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature_type.go @@ -0,0 +1,77 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is the type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+	// SignatureDefault is always set to v4.
+	SignatureDefault SignatureType = iota
+	SignatureV4
+	SignatureV2
+	SignatureV4Streaming
+	SignatureAnonymous // Anonymous signature signifies no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+	return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+	return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+	return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+	return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type;
+// strings returned here are case insensitive.
+func (s SignatureType) String() string {
+	if s.IsV2() {
+		return "S3v2"
+	} else if s.IsV4() {
+		return "S3v4"
+	} else if s.IsStreamingV4() {
+		return "S3v4Streaming"
+	}
+	return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+	if strings.EqualFold(str, "S3v4") {
+		return SignatureV4
+	} else if strings.EqualFold(str, "S3v2") {
+		return SignatureV2
+	} else if strings.EqualFold(str, "S3v4Streaming") {
+		return SignatureV4Streaming
+	}
+	return SignatureAnonymous
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
new file mode 100644
index 000000000..7dde00b0a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+	Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider, with the signature
+// set to v2. If access and secret are not specified, then regardless
+// of the signature type set, Value returns as anonymous.
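+//
+// Illustrative sketch (placeholder keys; empty access or secret keys yield
+// anonymous credentials, as described above):
+//
+//	creds := NewStaticV2("ACCESS-KEY", "SECRET-KEY", "")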
+func NewStaticV2(id, secret, token string) *Credentials {
+	return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2 with similar considerations.
+func NewStaticV4(id, secret, token string) *Credentials {
+	return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+	return New(&Static{
+		Value: Value{
+			AccessKeyID:     id,
+			SecretAccessKey: secret,
+			SessionToken:    token,
+			SignerType:      signerType,
+		},
+	})
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		// Anonymous is not an error
+		return Value{SignerType: SignatureAnonymous}, nil
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
new file mode 100644
index 000000000..62bfbb6b0
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,183 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// AssumedRoleUser - The identifiers for the temporary security credentials that
+// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+	Arn           string
+	AssumedRoleID string `xml:"AssumeRoleId"`
+}
+
+// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
+type AssumeRoleWithClientGrantsResponse struct {
+	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
+	Result           ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
+// request, including temporary credentials that can be used to make MinIO API requests.
+type ClientGrantsResult struct {
+	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+	Audience        string          `xml:",omitempty"`
+	Credentials     struct {
+		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	} `xml:",omitempty"`
+	PackedPolicySize             int    `xml:",omitempty"`
+	Provider                     string `xml:",omitempty"`
+	SubjectFromClientGrantsToken string `xml:",omitempty"`
+}
+
+// ClientGrantsToken - client grants token with expiry.
+type ClientGrantsToken struct {
+	Token  string
+	Expiry int
+}
+
+// An STSClientGrants retrieves credentials from the MinIO service, and keeps
+// track if those credentials are expired.
+type STSClientGrants struct {
+	Expiry
+
+	// Required http Client to use when connecting to MinIO STS service.
+	Client *http.Client
+
+	// MinIO endpoint to fetch STS credentials.
+	STSEndpoint string
+
+	// GetClientGrantsTokenExpiry is a function to retrieve tokens from the
+	// IDP. It should return two values: an accessToken, which is a
+	// self-contained access token (JWT), and the expiry associated with
+	// this token. This is a customer provided function and is mandatory.
+	GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
+}
+
+// NewSTSClientGrants returns a pointer to a new
+// Credentials object wrapping the STSClientGrants.
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
+	if stsEndpoint == "" {
+		return nil, errors.New("STS endpoint cannot be empty")
+	}
+	if getClientGrantsTokenExpiry == nil {
+		return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
+	}
+	return New(&STSClientGrants{
+		Client: &http.Client{
+			Transport: http.DefaultTransport,
+		},
+		STSEndpoint:                stsEndpoint,
+		GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
+	}), nil
+}
+
+func getClientGrantsCredentials(clnt *http.Client, endpoint string,
+	getClientGrantsTokenExpiry func() (*ClientGrantsToken, error),
+) (AssumeRoleWithClientGrantsResponse, error) {
+	accessToken, err := getClientGrantsTokenExpiry()
+	if err != nil {
+		return AssumeRoleWithClientGrantsResponse{}, err
+	}
+
+	v := url.Values{}
+	v.Set("Action", "AssumeRoleWithClientGrants")
+	v.Set("Token", accessToken.Token)
+	v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
+	v.Set("Version", STSVersion)
+
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return AssumeRoleWithClientGrantsResponse{}, err
+	}
+
+	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+	if err != nil {
+		return AssumeRoleWithClientGrantsResponse{}, err
+	}
+
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	resp, err := clnt.Do(req)
+	if err != nil {
+		return AssumeRoleWithClientGrantsResponse{}, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		var errResp ErrorResponse
+		buf, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return AssumeRoleWithClientGrantsResponse{}, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return AssumeRoleWithClientGrantsResponse{}, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+
errResp.STSError.Message = s3Err.Message + } + return AssumeRoleWithClientGrantsResponse{}, errResp + } + + a := AssumeRoleWithClientGrantsResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithClientGrantsResponse{}, err + } + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSClientGrants) Retrieve() (Value, error) { + a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go new file mode 100644 index 000000000..75e1a77d3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -0,0 +1,147 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "time" +) + +// CustomTokenResult - Contains temporary creds and user metadata. +type CustomTokenResult struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId"` + SecretKey string `xml:"SecretAccessKey"` + Expiration time.Time `xml:"Expiration"` + SessionToken string `xml:"SessionToken"` + } `xml:",omitempty"` + + AssumedUser string `xml:",omitempty"` +} + +// AssumeRoleWithCustomTokenResponse contains the result of a successful +// AssumeRoleWithCustomToken request. +type AssumeRoleWithCustomTokenResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCustomTokenResponse" json:"-"` + Result CustomTokenResult `xml:"AssumeRoleWithCustomTokenResult"` + Metadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// CustomTokenIdentity - satisfies the Provider interface, and retrieves +// credentials from MinIO using the AssumeRoleWithCustomToken STS API. +type CustomTokenIdentity struct { + Expiry + + Client *http.Client + + // MinIO server STS endpoint to fetch STS credentials. + STSEndpoint string + + // The custom token to use with the request. + Token string + + // RoleArn associated with the identity + RoleArn string + + // RequestedExpiry is to set the validity of the generated credentials + // (this value bounded by server). + RequestedExpiry time.Duration +} + +// Retrieve - to satisfy Provider interface; fetches credentials from MinIO. 
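+//
+// Illustrative shape of the STS call made below (query-encoded; values are
+// placeholders):
+//
+//	POST <STSEndpoint>?Action=AssumeRoleWithCustomToken&Version=<STSVersion>&RoleArn=<arn>&Token=<token>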
+func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { + u, err := url.Parse(c.STSEndpoint) + if err != nil { + return value, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithCustomToken") + v.Set("Version", STSVersion) + v.Set("RoleArn", c.RoleArn) + v.Set("Token", c.Token) + if c.RequestedExpiry != 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", int(c.RequestedExpiry.Seconds()))) + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return value, err + } + + resp, err := c.Client.Do(req) + if err != nil { + return value, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return value, errors.New(resp.Status) + } + + r := AssumeRoleWithCustomTokenResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { + return + } + + cr := r.Result.Credentials + c.SetExpiration(cr.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: cr.AccessKey, + SecretAccessKey: cr.SecretKey, + SessionToken: cr.SessionToken, + Expiration: cr.Expiration, + SignerType: SignatureV4, + }, nil +} + +// NewCustomTokenCredentials - returns credentials using the +// AssumeRoleWithCustomToken STS API. +func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) { + c := CustomTokenIdentity{ + Client: &http.Client{Transport: http.DefaultTransport}, + STSEndpoint: stsEndpoint, + Token: token, + RoleArn: roleArn, + } + for _, optFunc := range optFuncs { + optFunc(&c) + } + return New(&c), nil +} + +// CustomTokenOpt is a function type to configure the custom-token based +// credentials using NewCustomTokenCredentials. +type CustomTokenOpt func(*CustomTokenIdentity) + +// CustomTokenValidityOpt sets the validity duration of the requested +// credentials. This value is ignored if the server enforces a lower validity +// period. +func CustomTokenValidityOpt(d time.Duration) CustomTokenOpt { + return func(c *CustomTokenIdentity) { + c.RequestedExpiry = d + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go new file mode 100644 index 000000000..b8df289f2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -0,0 +1,190 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// AssumeRoleWithLDAPResponse contains the result of a successful
+// AssumeRoleWithLDAPIdentity request.
+type AssumeRoleWithLDAPResponse struct {
+	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
+	Result           LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// LDAPIdentityResult - contains credentials for a successful
+// AssumeRoleWithLDAPIdentity request.
+type LDAPIdentityResult struct {
+	Credentials struct {
+		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	} `xml:",omitempty"`
+
+	SubjectFromToken string `xml:",omitempty"`
+}
+
+// LDAPIdentity retrieves credentials from MinIO.
+type LDAPIdentity struct {
+	Expiry
+
+	// Required http Client to use when connecting to MinIO STS service.
+	Client *http.Client
+
+	// Exported STS endpoint to fetch STS credentials.
+	STSEndpoint string
+
+	// LDAP username/password used to fetch LDAP STS credentials.
+	LDAPUsername, LDAPPassword string
+
+	// Session policy to apply to the generated credentials. Leave empty to
+	// use the full access policy available to the user.
+	Policy string
+
+	// RequestedExpiry is the configured expiry duration for credentials
+	// requested from LDAP.
+	RequestedExpiry time.Duration
+}
+
+// NewLDAPIdentity returns a new credentials object that uses LDAP
+// Identity.
+func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) {
+	l := LDAPIdentity{
+		Client:       &http.Client{Transport: http.DefaultTransport},
+		STSEndpoint:  stsEndpoint,
+		LDAPUsername: ldapUsername,
+		LDAPPassword: ldapPassword,
+	}
+	for _, optFunc := range optFuncs {
+		optFunc(&l)
+	}
+	return New(&l), nil
+}
+
+// LDAPIdentityOpt is a function type used to configure the LDAPIdentity
+// instance.
+type LDAPIdentityOpt func(*LDAPIdentity)
+
+// LDAPIdentityPolicyOpt sets the session policy for requested credentials.
+func LDAPIdentityPolicyOpt(policy string) LDAPIdentityOpt {
+	return func(k *LDAPIdentity) {
+		k.Policy = policy
+	}
+}
+
+// LDAPIdentityExpiryOpt sets the expiry duration for requested credentials.
+func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt {
+	return func(k *LDAPIdentity) {
+		k.RequestedExpiry = d
+	}
+}
+
+// NewLDAPIdentityWithSessionPolicy returns a new credentials object that uses
+// LDAP Identity with a specified session policy. The `policy` parameter must be
+// a JSON string specifying the policy document.
+//
+// Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead.
+func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) {
+	return New(&LDAPIdentity{
+		Client:       &http.Client{Transport: http.DefaultTransport},
+		STSEndpoint:  stsEndpoint,
+		LDAPUsername: ldapUsername,
+		LDAPPassword: ldapPassword,
+		Policy:       policy,
+	}), nil
+}
+
+// Retrieve gets the credential by calling the MinIO STS API for
+// LDAP on the configured stsEndpoint.
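+//
+// Illustrative construction (endpoint and credentials are placeholders):
+//
+//	creds, err := NewLDAPIdentity("https://minio.local:9000",
+//		"ldap-user", "ldap-secret", LDAPIdentityExpiryOpt(time.Hour))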
+func (k *LDAPIdentity) Retrieve() (value Value, err error) {
+	u, err := url.Parse(k.STSEndpoint)
+	if err != nil {
+		return value, err
+	}
+
+	v := url.Values{}
+	v.Set("Action", "AssumeRoleWithLDAPIdentity")
+	v.Set("Version", STSVersion)
+	v.Set("LDAPUsername", k.LDAPUsername)
+	v.Set("LDAPPassword", k.LDAPPassword)
+	if k.Policy != "" {
+		v.Set("Policy", k.Policy)
+	}
+	if k.RequestedExpiry != 0 {
+		v.Set("DurationSeconds", fmt.Sprintf("%d", int(k.RequestedExpiry.Seconds())))
+	}
+
+	req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
+	if err != nil {
+		return value, err
+	}
+
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	resp, err := k.Client.Do(req)
+	if err != nil {
+		return value, err
+	}
+
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		var errResp ErrorResponse
+		buf, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return value, err
+		}
+		_, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp)
+		if err != nil {
+			var s3Err Error
+			if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil {
+				return value, err
+			}
+			errResp.RequestID = s3Err.RequestID
+			errResp.STSError.Code = s3Err.Code
+			errResp.STSError.Message = s3Err.Message
+		}
+		return value, errResp
+	}
+
+	r := AssumeRoleWithLDAPResponse{}
+	if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
+		return
+	}
+
+	cr := r.Result.Credentials
+	k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
+	return Value{
+		AccessKeyID:     cr.AccessKey,
+		SecretAccessKey: cr.SecretKey,
+		SessionToken:    cr.SessionToken,
+		Expiration:      cr.Expiration,
+		SignerType:      SignatureV4,
+	}, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
new file mode 100644
index 000000000..10083502d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go
@@ -0,0 +1,212 @@
+// MinIO Go Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2021 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/xml"
+	"errors"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// CertificateIdentityOption is an optional AssumeRoleWithCertificate
+// parameter - e.g. a custom HTTP transport configuration or S3 credential
+// lifetime.
+type CertificateIdentityOption func(*STSCertificateIdentity)
+
+// CertificateIdentityWithTransport returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given http.RoundTripper.
+func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption {
+	return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t })
+}
+
+// CertificateIdentityWithExpiry returns a CertificateIdentityOption that
+// customizes the STSCertificateIdentity with the given lifetime.
+//
+// Fetched S3 credentials will have the given lifetime if the STS server
+// allows such credentials.
+func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOption {
+	return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.S3CredentialLivetime = livetime })
+}
+
+// An STSCertificateIdentity retrieves S3 credentials from the MinIO STS API and
+// rotates those credentials once they expire.
+type STSCertificateIdentity struct {
+	Expiry
+
+	// STSEndpoint is the base URL endpoint of the STS API.
+	// For example, https://minio.local:9000
+	STSEndpoint string
+
+	// S3CredentialLivetime is the duration temp. S3 access
+	// credentials should be valid.
+	//
+	// It represents the access credential lifetime requested
+	// by the client. The STS server may choose to issue
+	// temp. S3 credentials that have a different - usually
+	// shorter - lifetime.
+	//
+	// The default lifetime is one hour.
+	S3CredentialLivetime time.Duration
+
+	// Client is the HTTP client used to authenticate and fetch
+	// S3 credentials.
+	//
+	// A custom TLS client configuration can be specified by
+	// using a custom http.Transport:
+	//   Client: http.Client {
+	//       Transport: &http.Transport{
+	//           TLSClientConfig: &tls.Config{},
+	//       },
+	//   }
+	Client http.Client
+}
+
+var _ Provider = (*STSWebIdentity)(nil) // compiler check
+
+// NewSTSCertificateIdentity returns an STSCertificateIdentity that authenticates
+// to the given STS endpoint with the given TLS certificate and retrieves and
+// rotates S3 credentials.
+func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) {
+	if endpoint == "" {
+		return nil, errors.New("STS endpoint cannot be empty")
+	}
+	if _, err := url.Parse(endpoint); err != nil {
+		return nil, err
+	}
+	identity := &STSCertificateIdentity{
+		STSEndpoint: endpoint,
+		Client: http.Client{
+			Transport: &http.Transport{
+				Proxy: http.ProxyFromEnvironment,
+				DialContext: (&net.Dialer{
+					Timeout:   30 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).DialContext,
+				ForceAttemptHTTP2:     true,
+				MaxIdleConns:          100,
+				IdleConnTimeout:       90 * time.Second,
+				TLSHandshakeTimeout:   10 * time.Second,
+				ExpectContinueTimeout: 5 * time.Second,
+				TLSClientConfig: &tls.Config{
+					Certificates: []tls.Certificate{certificate},
+				},
+			},
+		},
+	}
+	for _, option := range options {
+		option(identity)
+	}
+	return New(identity), nil
+}
+
+// Retrieve fetches a new set of S3 credentials from the configured
+// STS API endpoint.
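+//
+// Illustrative setup (paths and endpoint are placeholders; loading the
+// client certificate is up to the caller):
+//
+//	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
+//	creds, err := NewSTSCertificateIdentity("https://minio.local:9000", cert,
+//		CertificateIdentityWithExpiry(time.Hour))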
+func (i *STSCertificateIdentity) Retrieve() (Value, error) { + endpointURL, err := url.Parse(i.STSEndpoint) + if err != nil { + return Value{}, err + } + livetime := i.S3CredentialLivetime + if livetime == 0 { + livetime = 1 * time.Hour + } + + queryValues := url.Values{} + queryValues.Set("Action", "AssumeRoleWithCertificate") + queryValues.Set("Version", STSVersion) + endpointURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest(http.MethodPost, endpointURL.String(), nil) + if err != nil { + return Value{}, err + } + if req.Form == nil { + req.Form = url.Values{} + } + req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) + + resp, err := i.Client.Do(req) + if err != nil { + return Value{}, err + } + if resp.Body != nil { + defer resp.Body.Close() + } + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return Value{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return Value{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return Value{}, errResp + } + + const MaxSize = 10 * 1 << 20 + var body io.Reader = resp.Body + if resp.ContentLength > 0 && resp.ContentLength < MaxSize { + body = io.LimitReader(body, resp.ContentLength) + } else { + body = io.LimitReader(body, MaxSize) + } + + var response assumeRoleWithCertificateResponse + if err = xml.NewDecoder(body).Decode(&response); err != nil { + return Value{}, err + } + i.SetExpiration(response.Result.Credentials.Expiration, DefaultExpiryWindow) + return Value{ + AccessKeyID: response.Result.Credentials.AccessKey, + SecretAccessKey: response.Result.Credentials.SecretKey, + SessionToken: response.Result.Credentials.SessionToken, + Expiration: response.Result.Credentials.Expiration, + SignerType: SignatureDefault, + }, nil +} + +// Expiration returns the expiration time of the current S3 credentials. +func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } + +type assumeRoleWithCertificateResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithCertificateResponse" json:"-"` + Result struct { + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:"Credentials" json:"credentials,omitempty"` + } `xml:"AssumeRoleWithCertificateResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go new file mode 100644 index 000000000..596d95152 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -0,0 +1,206 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2019-2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// AssumeRoleWithWebIdentityResponse contains the result of a successful AssumeRoleWithWebIdentity request.
+type AssumeRoleWithWebIdentityResponse struct {
+	XMLName          xml.Name          `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
+	Result           WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
+// request, including temporary credentials that can be used to make MinIO API requests.
+type WebIdentityResult struct {
+	AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
+	Audience        string          `xml:",omitempty"`
+	Credentials     struct {
+		AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
+		SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
+		Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
+		SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
+	} `xml:",omitempty"`
+	PackedPolicySize            int    `xml:",omitempty"`
+	Provider                    string `xml:",omitempty"`
+	SubjectFromWebIdentityToken string `xml:",omitempty"`
+}
+
+// WebIdentityToken - web identity token with expiry.
+type WebIdentityToken struct {
+	Token       string
+	AccessToken string
+	Expiry      int
+}
+
+// An STSWebIdentity retrieves credentials from the MinIO service, and keeps
+// track if those credentials are expired.
+type STSWebIdentity struct {
+	Expiry
+
+	// Required http Client to use when connecting to MinIO STS service.
+	Client *http.Client
+
+	// Exported STS endpoint to fetch STS credentials.
+	STSEndpoint string
+
+	// GetWebIDTokenExpiry is an exported function that returns ID tokens
+	// from the IDP. It should return two values: an ID token, which is a
+	// self-contained ID token (JWT), and the expiry associated with this
+	// token.
+	// This is a customer provided function and is mandatory.
+	GetWebIDTokenExpiry func() (*WebIdentityToken, error)
+
+	// RoleARN is the Amazon Resource Name (ARN) of the role that the caller is
+	// assuming.
+	RoleARN string
+
+	// roleSessionName is the identifier for the assumed role session.
+	roleSessionName string
+}
+
+// NewSTSWebIdentity returns a pointer to a new
+// Credentials object wrapping the STSWebIdentity.
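+//
+// Illustrative sketch reading a token from disk (the path is a placeholder):
+//
+//	creds, err := NewSTSWebIdentity("https://minio.local:9000",
+//		func() (*WebIdentityToken, error) {
+//			b, err := os.ReadFile("/path/to/web-identity-token")
+//			if err != nil {
+//				return nil, err
+//			}
+//			return &WebIdentityToken{Token: string(b)}, nil
+//		})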
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getWebIDTokenExpiry == nil { + return nil, errors.New("Web ID token and expiry retrieval function should be defined") + } + return New(&STSWebIdentity{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + STSEndpoint: stsEndpoint, + GetWebIDTokenExpiry: getWebIDTokenExpiry, + }), nil +} + +func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, + getWebIDTokenExpiry func() (*WebIdentityToken, error), +) (AssumeRoleWithWebIdentityResponse, error) { + idToken, err := getWebIDTokenExpiry() + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithWebIdentity") + if len(roleARN) > 0 { + v.Set("RoleArn", roleARN) + + if len(roleSessionName) == 0 { + roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) + } + v.Set("RoleSessionName", roleSessionName) + } + v.Set("WebIdentityToken", idToken.Token) + if idToken.AccessToken != "" { + // Usually set when server is using extended userInfo endpoint. + v.Set("WebIdentityAccessToken", idToken.AccessToken) + } + if idToken.Expiry > 0 { + v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) + } + v.Set("Version", STSVersion) + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var errResp ErrorResponse + buf, err := io.ReadAll(resp.Body) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + _, err = xmlDecodeAndBody(bytes.NewReader(buf), &errResp) + if err != nil { + var s3Err Error + if _, err = xmlDecodeAndBody(bytes.NewReader(buf), &s3Err); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + errResp.RequestID = s3Err.RequestID + errResp.STSError.Code = s3Err.Code + errResp.STSError.Message = s3Err.Message + } + return AssumeRoleWithWebIdentityResponse{}, errResp + } + + a := AssumeRoleWithWebIdentityResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + return a, nil +} + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSWebIdentity) Retrieve() (Value, error) { + a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.GetWebIDTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. 
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + Expiration: a.Result.Credentials.Expiration, + SignerType: SignatureV4, + }, nil +} + +// Expiration returns the expiration time of the credentials +func (m *STSWebIdentity) Expiration() time.Time { + return m.expiration +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go new file mode 100644 index 000000000..6db26c036 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_disabled.go @@ -0,0 +1,24 @@ +//go:build !fips +// +build !fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = false diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go new file mode 100644 index 000000000..640258242 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/fips_enabled.go @@ -0,0 +1,24 @@ +//go:build fips +// +build fips + +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +// FIPS is true if 'fips' build tag was specified. +const FIPS = true diff --git a/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go new file mode 100644 index 000000000..c40e40a1c --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go @@ -0,0 +1,197 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "errors" + "net/http" + + "github.com/goccy/go-json" + "golang.org/x/crypto/argon2" +) + +const ( + // SseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. + SseGenericHeader = "X-Amz-Server-Side-Encryption" + + // SseKmsKeyID is the AWS SSE-KMS key id. + SseKmsKeyID = SseGenericHeader + "-Aws-Kms-Key-Id" + // SseEncryptionContext is the AWS SSE-KMS Encryption Context data. + SseEncryptionContext = SseGenericHeader + "-Context" + + // SseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. + SseCustomerAlgorithm = SseGenericHeader + "-Customer-Algorithm" + // SseCustomerKey is the AWS SSE-C encryption key HTTP header key. + SseCustomerKey = SseGenericHeader + "-Customer-Key" + // SseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. + SseCustomerKeyMD5 = SseGenericHeader + "-Customer-Key-MD5" + + // SseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. + SseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + // SseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. + SseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + // SseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. + SseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" +) + +// PBKDF creates a SSE-C key from the provided password and salt. +// PBKDF is a password-based key derivation function +// which can be used to derive a high-entropy cryptographic +// key from a low-entropy password and a salt. +type PBKDF func(password, salt []byte) ServerSide + +// DefaultPBKDF is the default PBKDF. It uses Argon2id with the +// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). +var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { + sse := ssec{} + copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) + return sse +} + +// Type is the server-side-encryption method. It represents one of +// the following encryption methods: +// - SSE-C: server-side-encryption with customer provided keys +// - KMS: server-side-encryption with managed keys +// - S3: server-side-encryption using S3 storage encryption +type Type string + +const ( + // SSEC represents server-side-encryption with customer provided keys + SSEC Type = "SSE-C" + // KMS represents server-side-encryption with managed keys + KMS Type = "KMS" + // S3 represents server-side-encryption using S3 storage encryption + S3 Type = "S3" +) + +// ServerSide is a form of S3 server-side-encryption. +type ServerSide interface { + // Type returns the server-side-encryption method. + Type() Type + + // Marshal adds encryption headers to the provided HTTP headers. + // It marks an HTTP request as server-side-encryption request + // and inserts the required data into the headers. + Marshal(h http.Header) +} + +// NewSSE returns a server-side-encryption using S3 storage encryption. +// Using SSE-S3 the server will encrypt the object with server-managed keys. +func NewSSE() ServerSide { return s3{} } + +// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. 
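+//
+// Illustrative call (the key ID and context are placeholders):
+//
+//	sse, err := NewSSEKMS("my-kms-key-id", map[string]string{"purpose": "backup"})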
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+	if context == nil {
+		return kms{key: keyID, hasContext: false}, nil
+	}
+	serializedContext, err := json.Marshal(context)
+	if err != nil {
+		return nil, err
+	}
+	return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
+
+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.
+func NewSSEC(key []byte) (ServerSide, error) {
+	if len(key) != 32 {
+		return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
+	}
+	sse := ssec{}
+	copy(sse[:], key)
+	return sse, nil
+}
+
+// SSE transforms an SSE-C copy encryption into an SSE-C encryption.
+// It is the inverse of SSECopy(...).
+//
+// If the provided sse is not an SSE-C copy encryption, SSE returns
+// sse unmodified.
+func SSE(sse ServerSide) ServerSide {
+	if sse == nil || sse.Type() != SSEC {
+		return sse
+	}
+	if sse, ok := sse.(ssecCopy); ok {
+		return ssec(sse)
+	}
+	return sse
+}
+
+// SSECopy transforms an SSE-C encryption into an SSE-C copy
+// encryption. This is required for SSE-C key rotation or an SSE-C
+// copy where the source and the destination should be encrypted.
+//
+// If the provided sse is not an SSE-C encryption, SSECopy returns
+// sse unmodified.
+func SSECopy(sse ServerSide) ServerSide {
+	if sse == nil || sse.Type() != SSEC {
+		return sse
+	}
+	if sse, ok := sse.(ssec); ok {
+		return ssecCopy(sse)
+	}
+	return sse
+}
+
+type ssec [32]byte
+
+func (s ssec) Type() Type { return SSEC }
+
+func (s ssec) Marshal(h http.Header) {
+	keyMD5 := md5.Sum(s[:])
+	h.Set(SseCustomerAlgorithm, "AES256")
+	h.Set(SseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+	h.Set(SseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type ssecCopy [32]byte
+
+func (s ssecCopy) Type() Type { return SSEC }
+
+func (s ssecCopy) Marshal(h http.Header) {
+	keyMD5 := md5.Sum(s[:])
+	h.Set(SseCopyCustomerAlgorithm, "AES256")
+	h.Set(SseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
+	h.Set(SseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
+}
+
+type s3 struct{}
+
+func (s s3) Type() Type { return S3 }
+
+func (s s3) Marshal(h http.Header) { h.Set(SseGenericHeader, "AES256") }
+
+type kms struct {
+	key        string
+	context    []byte
+	hasContext bool
+}
+
+func (s kms) Type() Type { return KMS }
+
+func (s kms) Marshal(h http.Header) {
+	h.Set(SseGenericHeader, "aws:kms")
+	if s.key != "" {
+		h.Set(SseKmsKeyID, s.key)
+	}
+	if s.hasContext {
+		h.Set(SseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
new file mode 100644
index 000000000..e706b57de
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -0,0 +1,516 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +// Package lifecycle contains all the lifecycle related data types and marshallers. +package lifecycle + +import ( + "encoding/json" + "encoding/xml" + "errors" + "time" +) + +var errMissingStorageClass = errors.New("storage-class cannot be empty") + +// AbortIncompleteMultipartUpload structure, not supported yet on MinIO +type AbortIncompleteMultipartUpload struct { + XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` + DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { + return n.DaysAfterInitiation == ExpirationDays(0) +} + +// MarshalXML if days after initiation is set to non-zero value +func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.IsDaysNull() { + return nil + } + type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload + return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) +} + +// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. +// Upon expiration, server permanently deletes the noncurrent object versions. +// Set this lifecycle configuration action on a bucket that has versioning enabled +// (or suspended) to request server delete noncurrent object versions at a +// specific period in the object's lifetime. +type NoncurrentVersionExpiration struct { + XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` + NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` +} + +// MarshalXML if n is non-empty, i.e has a non-zero NoncurrentDays or NewerNoncurrentVersions. +func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.isNull() { + return nil + } + type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration + return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionExpiration) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +func (n NoncurrentVersionExpiration) isNull() bool { + return n.IsDaysNull() && n.NewerNoncurrentVersions == 0 +} + +// NoncurrentVersionTransition structure, set this action to request server to +// transition noncurrent object versions to different set storage classes +// at a specific period in the object's lifetime. 
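A quick illustration of how the noncurrent-version actions above are typically populated (an editorial sketch, not part of the vendored file; the rule ID, day counts, and storage class are hypothetical):

	rule := lifecycle.Rule{
		ID:     "noncurrent-cleanup",
		Status: "Enabled",
		NoncurrentVersionExpiration: lifecycle.NoncurrentVersionExpiration{
			NoncurrentDays: lifecycle.ExpirationDays(30),
		},
		NoncurrentVersionTransition: lifecycle.NoncurrentVersionTransition{
			NoncurrentDays: lifecycle.ExpirationDays(7),
			StorageClass:   "GLACIER",
		},
	}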
+type NoncurrentVersionTransition struct { + XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + NoncurrentDays ExpirationDays `xml:"NoncurrentDays" json:"NoncurrentDays"` + NewerNoncurrentVersions int `xml:"NewerNoncurrentVersions,omitempty" json:"NewerNoncurrentVersions,omitempty"` +} + +// IsDaysNull returns true if days field is null +func (n NoncurrentVersionTransition) IsDaysNull() bool { + return n.NoncurrentDays == ExpirationDays(0) +} + +// IsStorageClassEmpty returns true if storage class field is empty +func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { + return n.StorageClass == "" +} + +func (n NoncurrentVersionTransition) isNull() bool { + return n.StorageClass == "" +} + +// UnmarshalJSON implements json.Unmarshaler for NoncurrentVersionTransition +func (n *NoncurrentVersionTransition) UnmarshalJSON(b []byte) error { + type noncurrentVersionTransition NoncurrentVersionTransition + var nt noncurrentVersionTransition + err := json.Unmarshal(b, &nt) + if err != nil { + return err + } + + if nt.StorageClass == "" { + return errMissingStorageClass + } + *n = NoncurrentVersionTransition(nt) + return nil +} + +// MarshalXML is extended to leave out empty +// <NoncurrentVersionTransition> tags +func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if n.isNull() { + return nil + } + type noncurrentVersionTransitionWrapper NoncurrentVersionTransition + return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) +} + +// Tag structure - key/value pair representing an object tag to apply lifecycle configuration +type Tag struct { + XMLName xml.Name `xml:"Tag,omitempty" json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Transition structure - transition details of lifecycle configuration +type Transition struct { + XMLName xml.Name `xml:"Transition" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` + Days ExpirationDays `xml:"Days" json:"Days"` +} + +// UnmarshalJSON returns an error if storage-class is empty. +func (t *Transition) UnmarshalJSON(b []byte) error { + type transition Transition + var tr transition + err := json.Unmarshal(b, &tr) + if err != nil { + return err + } + + if tr.StorageClass == "" { + return errMissingStorageClass + } + *t = Transition(tr) + return nil +} + +// MarshalJSON customizes json encoding by omitting empty values +func (t Transition) MarshalJSON() ([]byte, error) { + if t.IsNull() { + return nil, nil + } + type transition struct { + Date *ExpirationDate `json:"Date,omitempty"` + StorageClass string `json:"StorageClass,omitempty"` + Days *ExpirationDays `json:"Days"` + } + + newt := transition{ + StorageClass: t.StorageClass, + } + + if !t.IsDateNull() { + newt.Date = &t.Date + } else { + newt.Days = &t.Days + } + return json.Marshal(newt) +} + +// IsDaysNull returns true if days field is null +func (t Transition) IsDaysNull() bool { + return t.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (t Transition) IsDateNull() bool { + return t.Date.Time.IsZero() +} + +// IsNull returns true if no storage-class is set.
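For instance (editor's sketch; the day count and storage class are hypothetical), a transition that moves objects to a colder storage class after 90 days:

	t := lifecycle.Transition{
		Days:         lifecycle.ExpirationDays(90),
		StorageClass: "GLACIER",
	}
	// t.IsNull() == false; with an empty StorageClass, MarshalXML and
	// MarshalJSON suppress the element entirely.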
+func (t Transition) IsNull() bool { + return t.StorageClass == "" +} + +// MarshalXML marshals Transition only if it is non-null +func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if t.IsNull() { + return nil + } + type transitionWrapper Transition + return en.EncodeElement(transitionWrapper(t), startElement) +} + +// And - the And rule for LifecycleTag, to be used in LifecycleRuleFilter +type And struct { + XMLName xml.Name `xml:"And" json:"-"` + Prefix string `xml:"Prefix" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag" json:"Tags,omitempty"` + ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` + ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` +} + +// IsEmpty returns true if all And fields are empty +func (a And) IsEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" && + a.ObjectSizeLessThan == 0 && a.ObjectSizeGreaterThan == 0 +} + +// Filter will be used in selecting rule(s) for lifecycle configuration +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` + ObjectSizeLessThan int64 `xml:"ObjectSizeLessThan,omitempty" json:"ObjectSizeLessThan,omitempty"` + ObjectSizeGreaterThan int64 `xml:"ObjectSizeGreaterThan,omitempty" json:"ObjectSizeGreaterThan,omitempty"` +} + +// IsNull returns true if all Filter fields are empty. +func (f Filter) IsNull() bool { + return f.Tag.IsEmpty() && f.And.IsEmpty() && f.Prefix == "" && + f.ObjectSizeLessThan == 0 && f.ObjectSizeGreaterThan == 0 +} + +// MarshalJSON customizes json encoding by removing empty values. +func (f Filter) MarshalJSON() ([]byte, error) { + type filter struct { + And *And `json:"And,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Tag *Tag `json:"Tag,omitempty"` + ObjectSizeLessThan int64 `json:"ObjectSizeLessThan,omitempty"` + ObjectSizeGreaterThan int64 `json:"ObjectSizeGreaterThan,omitempty"` + } + + newf := filter{ + Prefix: f.Prefix, + } + if !f.Tag.IsEmpty() { + newf.Tag = &f.Tag + } + if !f.And.IsEmpty() { + newf.And = &f.And + } + newf.ObjectSizeLessThan = f.ObjectSizeLessThan + newf.ObjectSizeGreaterThan = f.ObjectSizeGreaterThan + return json.Marshal(newf) +} + +// MarshalXML - produces the xml representation of the Filter struct +// only one of Prefix, And and Tag should be present in the output.
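A sketch of that precedence (editorial illustration, not part of the vendored file; the prefixes and tags are hypothetical):

	f := lifecycle.Filter{Prefix: "logs/"}
	// encodes as <Filter><Prefix>logs/</Prefix></Filter>

	f = lifecycle.Filter{And: lifecycle.And{
		Prefix: "logs/",
		Tags:   []lifecycle.Tag{{Key: "env", Value: "prod"}},
	}}
	// encodes as <Filter><And>...</And></Filter>; the top-level Prefix and
	// Tag are then omitted because And takes precedence.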
+func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if err := e.EncodeToken(start); err != nil { + return err + } + + switch { + case !f.And.IsEmpty(): + if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { + return err + } + case !f.Tag.IsEmpty(): + if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { + return err + } + default: + if f.ObjectSizeLessThan > 0 { + if err := e.EncodeElement(f.ObjectSizeLessThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeLessThan"}}); err != nil { + return err + } + break + } + if f.ObjectSizeGreaterThan > 0 { + if err := e.EncodeElement(f.ObjectSizeGreaterThan, xml.StartElement{Name: xml.Name{Local: "ObjectSizeGreaterThan"}}); err != nil { + return err + } + break + } + // Print empty Prefix field only when everything else is empty + if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +// ExpirationDays is an integer type used to unmarshal Days in Expiration +type ExpirationDays int + +// MarshalXML encodes number of days to expire if it is non-zero and +// omits the element otherwise +func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDays == 0 { + return nil + } + return e.EncodeElement(int(eDays), startElement) +} + +// ExpirationDate is an embedded type containing time.Time to unmarshal +// Date in Expiration +type ExpirationDate struct { + time.Time +} + +// MarshalXML encodes expiration date if it is non-zero and omits +// the element otherwise +func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if eDate.Time.IsZero() { + return nil + } + return e.EncodeElement(eDate.Format(time.RFC3339), startElement) +} + +// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. +type ExpireDeleteMarker ExpirationBoolean + +// IsEnabled returns true if the auto delete-marker expiration is enabled +func (e ExpireDeleteMarker) IsEnabled() bool { + return bool(e) +} + +// ExpirationBoolean represents an XML version of 'bool' type +type ExpirationBoolean bool + +// MarshalXML encodes delete marker boolean into an XML form. +func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { + if !b { + return nil + } + type booleanWrapper ExpirationBoolean + return e.EncodeElement(booleanWrapper(b), startElement) +} + +// IsEnabled returns true if the expiration boolean is enabled +func (b ExpirationBoolean) IsEnabled() bool { + return bool(b) +} + +// Expiration structure - expiration details of lifecycle configuration +type Expiration struct { + XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` + Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` + Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"` + DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"` +} + +// MarshalJSON customizes json encoding by removing empty day/date specification.
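For example (editor's note; the day count is hypothetical):

	e := lifecycle.Expiration{Days: lifecycle.ExpirationDays(365)}
	// marshals as {"Days":365}; a zero Date is dropped, and vice versa when
	// only Date is set.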
+func (e Expiration) MarshalJSON() ([]byte, error) { + type expiration struct { + Date *ExpirationDate `json:"Date,omitempty"` + Days *ExpirationDays `json:"Days,omitempty"` + DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"` + DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"` + } + + newexp := expiration{ + DeleteMarker: e.DeleteMarker, + DeleteAll: e.DeleteAll, + } + if !e.IsDaysNull() { + newexp.Days = &e.Days + } + if !e.IsDateNull() { + newexp.Date = &e.Date + } + return json.Marshal(newexp) +} + +// IsDaysNull returns true if days field is null +func (e Expiration) IsDaysNull() bool { + return e.Days == ExpirationDays(0) +} + +// IsDateNull returns true if date field is null +func (e Expiration) IsDateNull() bool { + return e.Date.Time.IsZero() +} + +// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled +func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { + return e.DeleteMarker.IsEnabled() +} + +// IsNull returns true if the date and days fields are null and neither +// delete-marker nor all-versions expiration is enabled +func (e Expiration) IsNull() bool { + return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() && !e.DeleteAll.IsEnabled() +} + +// MarshalXML marshals Expiration only if it is non-null +func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { + if e.IsNull() { + return nil + } + type expirationWrapper Expiration + return en.EncodeElement(expirationWrapper(e), startElement) +} + +// DelMarkerExpiration represents DelMarkerExpiration actions element in an ILM policy +type DelMarkerExpiration struct { + XMLName xml.Name `xml:"DelMarkerExpiration" json:"-"` + Days int `xml:"Days,omitempty" json:"Days,omitempty"` +} + +// IsNull returns true if Days isn't specified and false otherwise.
+func (de DelMarkerExpiration) IsNull() bool { + return de.Days == 0 +} + +// MarshalXML avoids serializing an empty DelMarkerExpiration element +func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if de.IsNull() { + return nil + } + type delMarkerExp DelMarkerExpiration + return enc.EncodeElement(delMarkerExp(de), start) +} + +// MarshalJSON customizes json encoding by omitting empty values +func (r Rule) MarshalJSON() ([]byte, error) { + type rule struct { + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration *Expiration `json:"Expiration,omitempty"` + DelMarkerExpiration *DelMarkerExpiration `json:"DelMarkerExpiration,omitempty"` + ID string `json:"ID"` + RuleFilter *Filter `json:"Filter,omitempty"` + NoncurrentVersionExpiration *NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition *NoncurrentVersionTransition `json:"NoncurrentVersionTransition,omitempty"` + Prefix string `json:"Prefix,omitempty"` + Status string `json:"Status"` + Transition *Transition `json:"Transition,omitempty"` + } + newr := rule{ + Prefix: r.Prefix, + Status: r.Status, + ID: r.ID, + } + + if !r.RuleFilter.IsNull() { + newr.RuleFilter = &r.RuleFilter + } + if !r.AbortIncompleteMultipartUpload.IsDaysNull() { + newr.AbortIncompleteMultipartUpload = &r.AbortIncompleteMultipartUpload + } + if !r.Expiration.IsNull() { + newr.Expiration = &r.Expiration + } + if !r.DelMarkerExpiration.IsNull() { + newr.DelMarkerExpiration = &r.DelMarkerExpiration + } + if !r.Transition.IsNull() { + newr.Transition = &r.Transition + } + if !r.NoncurrentVersionExpiration.isNull() { + newr.NoncurrentVersionExpiration = &r.NoncurrentVersionExpiration + } + if !r.NoncurrentVersionTransition.isNull() { + newr.NoncurrentVersionTransition = &r.NoncurrentVersionTransition + } + + return json.Marshal(newr) +} + +// Rule represents a single rule in lifecycle configuration +type Rule struct { + XMLName xml.Name `xml:"Rule,omitempty" json:"-"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"` + DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty" json:"DelMarkerExpiration,omitempty"` + ID string `xml:"ID" json:"ID"` + RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` + NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Status string `xml:"Status" json:"Status"` + Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"` +} + +// Configuration is a collection of Rule objects. +type Configuration struct { + XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"` + Rules []Rule `xml:"Rule"` +} + +// Empty checks if the lifecycle configuration is empty +func (c *Configuration) Empty() bool { + if c == nil { + return true + } + return len(c.Rules) == 0 +} + +// NewConfiguration initializes a fresh lifecycle configuration +// for manipulation, such as setting and removing lifecycle rules +// and filters.
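Putting the pieces together, a minimal end-to-end sketch (editorial illustration; it assumes a v7 *minio.Client named minioClient with its SetBucketLifecycle method, and the bucket name and rule values are hypothetical):

	cfg := lifecycle.NewConfiguration()
	cfg.Rules = []lifecycle.Rule{{
		ID:         "expire-logs",
		Status:     "Enabled",
		RuleFilter: lifecycle.Filter{Prefix: "logs/"},
		Expiration: lifecycle.Expiration{Days: lifecycle.ExpirationDays(30)},
	}}
	// err := minioClient.SetBucketLifecycle(context.Background(), "mybucket", cfg)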
+func NewConfiguration() *Configuration { + return &Configuration{} +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go new file mode 100644 index 000000000..126661a9e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go @@ -0,0 +1,78 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notification + +// identity represents the user ID; this is a compliance field. +type identity struct { + PrincipalID string `json:"principalId"` +} + +// event bucket metadata. +type bucketMeta struct { + Name string `json:"name"` + OwnerIdentity identity `json:"ownerIdentity"` + ARN string `json:"arn"` +} + +// event object metadata. +type objectMeta struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + ContentType string `json:"contentType,omitempty"` + UserMetadata map[string]string `json:"userMetadata,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +// event server specific metadata. +type eventMeta struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket bucketMeta `json:"bucket"` + Object objectMeta `json:"object"` +} + +// sourceInfo represents information on the client that +// triggered the event notification. +type sourceInfo struct { + Host string `json:"host"` + Port string `json:"port"` + UserAgent string `json:"userAgent"` +} + +// Event represents an Amazon S3 bucket notification event. +type Event struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName string `json:"eventName"` + UserIdentity identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 eventMeta `json:"s3"` + Source sourceInfo `json:"source"` +} + +// Info - represents the collection of notification events; it additionally +// reports any errors encountered while listening on bucket notifications. +type Info struct { + Records []Event + Err error +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go new file mode 100644 index 000000000..151ca21e8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go @@ -0,0 +1,441 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notification + +import ( + "encoding/xml" + "errors" + "fmt" + "strings" + + "github.com/minio/minio-go/v7/pkg/set" +) + +// EventType is an S3 notification event associated with the bucket notification configuration +type EventType string + +// The role of all event types is described in: +// +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +const ( + ObjectCreatedAll EventType = "s3:ObjectCreated:*" + ObjectCreatedPut EventType = "s3:ObjectCreated:Put" + ObjectCreatedPost EventType = "s3:ObjectCreated:Post" + ObjectCreatedCopy EventType = "s3:ObjectCreated:Copy" + ObjectCreatedDeleteTagging EventType = "s3:ObjectCreated:DeleteTagging" + ObjectCreatedCompleteMultipartUpload EventType = "s3:ObjectCreated:CompleteMultipartUpload" + ObjectCreatedPutLegalHold EventType = "s3:ObjectCreated:PutLegalHold" + ObjectCreatedPutRetention EventType = "s3:ObjectCreated:PutRetention" + ObjectCreatedPutTagging EventType = "s3:ObjectCreated:PutTagging" + ObjectAccessedGet EventType = "s3:ObjectAccessed:Get" + ObjectAccessedHead EventType = "s3:ObjectAccessed:Head" + ObjectAccessedGetRetention EventType = "s3:ObjectAccessed:GetRetention" + ObjectAccessedGetLegalHold EventType = "s3:ObjectAccessed:GetLegalHold" + ObjectAccessedAll EventType = "s3:ObjectAccessed:*" + ObjectRemovedAll EventType = "s3:ObjectRemoved:*" + ObjectRemovedDelete EventType = "s3:ObjectRemoved:Delete" + ObjectRemovedDeleteMarkerCreated EventType = "s3:ObjectRemoved:DeleteMarkerCreated" + ILMDelMarkerExpirationDelete EventType = "s3:LifecycleDelMarkerExpiration:Delete" + ObjectReducedRedundancyLostObject EventType = "s3:ReducedRedundancyLostObject" + ObjectTransitionAll EventType = "s3:ObjectTransition:*" + ObjectTransitionFailed EventType = "s3:ObjectTransition:Failed" + ObjectTransitionComplete EventType = "s3:ObjectTransition:Complete" + ObjectTransitionPost EventType = "s3:ObjectRestore:Post" + ObjectTransitionCompleted EventType = "s3:ObjectRestore:Completed" + ObjectReplicationAll EventType = "s3:Replication:*" + ObjectReplicationOperationCompletedReplication EventType = "s3:Replication:OperationCompletedReplication" + ObjectReplicationOperationFailedReplication EventType = "s3:Replication:OperationFailedReplication" + ObjectReplicationOperationMissedThreshold EventType = "s3:Replication:OperationMissedThreshold" + ObjectReplicationOperationNotTracked EventType = "s3:Replication:OperationNotTracked" + ObjectReplicationOperationReplicatedAfterThreshold EventType = "s3:Replication:OperationReplicatedAfterThreshold" + ObjectScannerManyVersions EventType = "s3:Scanner:ManyVersions" + ObjectScannerBigPrefix EventType = "s3:Scanner:BigPrefix" + ObjectScannerAll EventType = "s3:Scanner:*" + BucketCreatedAll EventType = "s3:BucketCreated:*" + BucketRemovedAll EventType = "s3:BucketRemoved:*" +) + +// FilterRule - child of S3Key, a tag in the notification xml which +// carries suffix/prefix filters +type FilterRule struct { + Name string `xml:"Name"` + Value string `xml:"Value"` +} + +// S3Key - child of Filter, a tag in the
notification xml which +// carries suffix/prefix filters +type S3Key struct { + FilterRules []FilterRule `xml:"FilterRule,omitempty"` +} + +// Filter - a tag in the notification xml structure which carries +// suffix/prefix filters +type Filter struct { + S3Key S3Key `xml:"S3Key,omitempty"` +} + +// Arn - holds ARN information that will be sent to the web service, +// ARN description can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +type Arn struct { + Partition string + Service string + Region string + AccountID string + Resource string +} + +// NewArn creates a new ARN based on the given partition, service, region, account id and resource +func NewArn(partition, service, region, accountID, resource string) Arn { + return Arn{ + Partition: partition, + Service: service, + Region: region, + AccountID: accountID, + Resource: resource, + } +} + +var ( + // ErrInvalidArnPrefix is returned when ARN string format does not start with 'arn' + ErrInvalidArnPrefix = errors.New("invalid ARN format, must start with 'arn:'") + // ErrInvalidArnFormat is returned when ARN string format is not valid + ErrInvalidArnFormat = errors.New("invalid ARN format, must be 'arn:<partition>:<service>:<region>:<accountID>:<resource>'") +) + +// NewArnFromString parses the string representation of an ARN into an Arn object. +// Returns an error if the string format is incorrect. +func NewArnFromString(arn string) (Arn, error) { + parts := strings.Split(arn, ":") + if len(parts) != 6 { + return Arn{}, ErrInvalidArnFormat + } + if parts[0] != "arn" { + return Arn{}, ErrInvalidArnPrefix + } + + return NewArn(parts[1], parts[2], parts[3], parts[4], parts[5]), nil +} + +// String returns the string format of the ARN +func (arn Arn) String() string { + return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource +} + +// Config - represents one single notification configuration +// such as topic, queue or lambda configuration. +type Config struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []EventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` +} + +// NewConfig creates one notification config and sets the given ARN +func NewConfig(arn Arn) Config { + return Config{Arn: arn, Filter: &Filter{}} +} + +// AddEvents adds one or more events to the current notification config +func (t *Config) AddEvents(events ...EventType) { + t.Events = append(t.Events, events...)
+} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *Config) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *Config) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// EqualEventTypeList tells whether a and b contain the same events +func EqualEventTypeList(a, b []EventType) bool { + if len(a) != len(b) { + return false + } + setA := set.NewStringSet() + for _, i := range a { + setA.Add(string(i)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(string(i)) + } + + return setA.Difference(setB).IsEmpty() +} + +// EqualFilterRuleList tells whether a and b contain the same filters +func EqualFilterRuleList(a, b []FilterRule) bool { + if len(a) != len(b) { + return false + } + + setA := set.NewStringSet() + for _, i := range a { + setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + setB := set.NewStringSet() + for _, i := range b { + setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) + } + + return setA.Difference(setB).IsEmpty() +} + +// Equal returns whether this `Config` is equal to another defined by the passed parameters +func (t *Config) Equal(events []EventType, prefix, suffix string) bool { + if t == nil { + return false + } + + // Compare events + passEvents := EqualEventTypeList(t.Events, events) + + // Compare filters + var newFilterRules []FilterRule + if prefix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) + } + if suffix != "" { + newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) + } + + var currentFilterRules []FilterRule + if t.Filter != nil { + currentFilterRules = t.Filter.S3Key.FilterRules + } + + passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) + return passEvents && passFilters +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + Config + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + Config + Queue string `xml:"Queue"` +} + +// LambdaConfig carries one single cloudfunction notification configuration +type LambdaConfig struct { + Config + Lambda string `xml:"CloudFunction"` +} + +// Configuration - the struct that represents the whole XML to be sent to the web service +type Configuration struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` + TopicConfigs []TopicConfig `xml:"TopicConfiguration"` + QueueConfigs []QueueConfig `xml:"QueueConfiguration"` +} + +// AddTopic 
adds a given topic config to the general bucket notification config +func (b *Configuration) AddTopic(topicConfig Config) bool { + newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} + for _, n := range b.TopicConfigs { + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true +} + +// AddQueue adds a given queue config to the general bucket notification config +func (b *Configuration) AddQueue(queueConfig Config) bool { + newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} + for _, n := range b.QueueConfigs { + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) + return true +} + +// AddLambda adds a given lambda config to the general bucket notification config +func (b *Configuration) AddLambda(lambdaConfig Config) bool { + newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} + for _, n := range b.LambdaConfigs { + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true +} + +// RemoveTopicByArn removes all topic configurations that match the exact specified ARN +func (b *Configuration) RemoveTopicByArn(arn Arn) { + var topics []TopicConfig + for _, topic := range b.TopicConfigs { + if topic.Topic != arn.String() { + topics = append(topics, topic) + } + } + b.TopicConfigs = topics +} + +// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete +var ErrNoConfigMatch = errors.New("no notification configuration matched") + +// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that matches the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.TopicConfigs { + // if it matches events and filters, mark the index for deletion + if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
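+ // the append above removes element removeIndex from the slice in place (the standard two-slice deletion idiom)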
+ return nil + } + return ErrNoConfigMatch +} + +// RemoveQueueByArn removes all queue configurations that match the exact specified ARN +func (b *Configuration) RemoveQueueByArn(arn Arn) { + var queues []QueueConfig + for _, queue := range b.QueueConfigs { + if queue.Queue != arn.String() { + queues = append(queues, queue) + } + } + b.QueueConfigs = queues +} + +// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that matches the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.QueueConfigs { + // if it matches events and filters, mark the index for deletion + if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} + +// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN +func (b *Configuration) RemoveLambdaByArn(arn Arn) { + var lambdas []LambdaConfig + for _, lambda := range b.LambdaConfigs { + if lambda.Lambda != arn.String() { + lambdas = append(lambdas, lambda) + } + } + b.LambdaConfigs = lambdas +} + +// RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that matches the exact specified ARN, events, prefix and suffix +func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { + removeIndex := -1 + for i, v := range b.LambdaConfigs { + // if it matches events and filters, mark the index for deletion + if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { + removeIndex = i + break // since we have at most one matching config + } + } + if removeIndex >= 0 { + b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) + return nil + } + return ErrNoConfigMatch +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go new file mode 100644 index 000000000..65a2f75e9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -0,0 +1,974 @@ +/* + * MinIO Client (C) 2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package replication + +import ( + "bytes" + "encoding/xml" + "fmt" + "math" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/rs/xid" +) + +var errInvalidFilter = fmt.Errorf("invalid filter") + +// OptionType specifies operation to be performed on config +type OptionType string + +const ( + // AddOption specifies addition of rule to config + AddOption OptionType = "Add" + // SetOption specifies modification of existing rule to config + SetOption OptionType = "Set" + + // RemoveOption specifies rule options are for removing a rule + RemoveOption OptionType = "Remove" + // ImportOption is for getting current config + ImportOption OptionType = "Import" +) + +// Options represents options to set a replication configuration rule +type Options struct { + Op OptionType + RoleArn string + ID string + Prefix string + RuleStatus string + Priority string + TagString string + StorageClass string + DestBucket string + IsTagSet bool + IsSCSet bool + ReplicateDeletes string // replicate versioned deletes + ReplicateDeleteMarkers string // replicate soft deletes + ReplicaSync string // replicate replica metadata modifications + ExistingObjectReplicate string +} + +// Tags returns a slice of tags for a rule +func (opts Options) Tags() ([]Tag, error) { + var tagList []Tag + tagTokens := strings.Split(opts.TagString, "&") + for _, tok := range tagTokens { + if tok == "" { + break + } + kv := strings.SplitN(tok, "=", 2) + if len(kv) != 2 { + return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs") + } + tagList = append(tagList, Tag{ + Key: kv[0], + Value: kv[1], + }) + } + return tagList, nil +} + +// Config - replication configuration specified in +// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type Config struct { + XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` + Rules []Rule `xml:"Rule" json:"Rules"` + Role string `xml:"Role" json:"Role"` +} + +// Empty returns true if config is not set +func (c *Config) Empty() bool { + return len(c.Rules) == 0 +} + +// AddRule adds a new rule to existing replication config. If a rule exists with the +// same ID, then the rule is replaced. +func (c *Config) AddRule(opts Options) error { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + var compatSw bool // true if RoleArn is used with new mc client and older minio version prior to multisite + if opts.RoleArn != "" { + tokens := strings.Split(opts.RoleArn, ":") + if len(tokens) != 6 { + return fmt.Errorf("invalid format for replication Role Arn: %v", opts.RoleArn) + } + switch { + case strings.HasPrefix(opts.RoleArn, "arn:minio:replication") && len(c.Rules) == 0: + c.Role = opts.RoleArn + compatSw = true + case strings.HasPrefix(opts.RoleArn, "arn:aws:iam"): + c.Role = opts.RoleArn + default: + return fmt.Errorf("RoleArn invalid for AWS replication configuration: %v", opts.RoleArn) + } + } + + var status Status + // toggle rule status for edit option + switch opts.RuleStatus { + case "enable": + status = Enabled + case "disable": + status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + + tags, err := opts.Tags() + if err != nil { + return err + } + andVal := And{ + Tags: tags, + } + filter := Filter{Prefix: opts.Prefix} + // only a single tag is set. 
+ if opts.Prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || opts.Prefix != "" { + filter.And = andVal + filter.And.Prefix = opts.Prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + if opts.ID == "" { + opts.ID = xid.New().String() + } + + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { + if len(btokens) == 1 && compatSw { + destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) + } else { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + } + dmStatus := Disabled + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + dmStatus = Enabled + case "disable": + dmStatus = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") + } + } + + vDeleteStatus := Disabled + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + vDeleteStatus = Enabled + case "disable": + vDeleteStatus = Disabled + default: + return fmt.Errorf("ReplicateDeletes should be either enable|disable") + } + } + var replicaSync Status + // replica sync is by default Enabled, unless specified. + switch opts.ReplicaSync { + case "enable", "": + replicaSync = Enabled + case "disable": + replicaSync = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + + var existingStatus Status + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + existingStatus = Enabled + case "disable", "": + existingStatus = Disabled + default: + return fmt.Errorf("existingObjectReplicate should be either enable|disable") + } + } + newRule := Rule{ + ID: opts.ID, + Priority: priority, + Status: status, + Filter: filter, + Destination: Destination{ + Bucket: destBucket, + StorageClass: opts.StorageClass, + }, + DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, + DeleteReplication: DeleteReplication{Status: vDeleteStatus}, + // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow + // automatic failover as the expectation in this case is that replica and source should be identical. + // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html + SourceSelectionCriteria: SourceSelectionCriteria{ + ReplicaModifications: ReplicaModifications{ + Status: replicaSync, + }, + }, + // By default disable existing object replication unless selected + ExistingObjectReplication: ExistingObjectReplication{ + Status: existingStatus, + }, + } + + // validate rule after overlaying priority for pre-existing rule being disabled. + if err := newRule.Validate(); err != nil { + return err + } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for MinIO configuration + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && !compatSw { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + + for _, rule := range c.Rules { + if rule.Priority == newRule.Priority { + return fmt.Errorf("priority must be unique. 
Replication configuration already has a rule with this priority") + } + if rule.ID == newRule.ID { + return fmt.Errorf("a rule exists with this ID") + } + } + + c.Rules = append(c.Rules, newRule) + return nil +} + +// EditRule modifies an existing rule in replication config +func (c *Config) EditRule(opts Options) error { + if opts.ID == "" { + return fmt.Errorf("rule ID missing") + } + // if replication config uses RoleArn, migrate this to the destination element as target ARN for remote bucket for non AWS. + if c.Role != "" && !strings.HasPrefix(c.Role, "arn:aws:iam") && len(c.Rules) > 1 { + for i := range c.Rules { + c.Rules[i].Destination.Bucket = c.Role + } + c.Role = "" + } + + rIdx := -1 + var newRule Rule + for i, rule := range c.Rules { + if rule.ID == opts.ID { + rIdx = i + newRule = rule + break + } + } + if rIdx < 0 { + return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) + } + prefixChg := opts.Prefix != newRule.Prefix() + if opts.IsTagSet || prefixChg { + prefix := newRule.Prefix() + if prefix != opts.Prefix { + prefix = opts.Prefix + } + tags := []Tag{newRule.Filter.Tag} + if len(newRule.Filter.And.Tags) != 0 { + tags = newRule.Filter.And.Tags + } + var err error + if opts.IsTagSet { + tags, err = opts.Tags() + if err != nil { + return err + } + } + andVal := And{ + Tags: tags, + } + + filter := Filter{Prefix: prefix} + // only a single tag is set. + if prefix == "" && len(tags) == 1 { + filter.Tag = tags[0] + } + // both prefix and tag are present + if len(andVal.Tags) > 1 || prefix != "" { + filter.And = andVal + filter.And.Prefix = prefix + filter.Prefix = "" + filter.Tag = Tag{} + } + newRule.Filter = filter + } + + // toggle rule status for edit option + if opts.RuleStatus != "" { + switch opts.RuleStatus { + case "enable": + newRule.Status = Enabled + case "disable": + newRule.Status = Disabled + default: + return fmt.Errorf("rule state should be either [enable|disable]") + } + } + // set DeleteMarkerReplication rule status for edit option + if opts.ReplicateDeleteMarkers != "" { + switch opts.ReplicateDeleteMarkers { + case "enable": + newRule.DeleteMarkerReplication.Status = Enabled + case "disable": + newRule.DeleteMarkerReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") + } + } + + // set DeleteReplication rule status for edit option. 
This is a MinIO specific + // option to replicate versioned deletes + if opts.ReplicateDeletes != "" { + switch opts.ReplicateDeletes { + case "enable": + newRule.DeleteReplication.Status = Enabled + case "disable": + newRule.DeleteReplication.Status = Disabled + default: + return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]") + } + } + + if opts.ReplicaSync != "" { + switch opts.ReplicaSync { + case "enable", "": + newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled + case "disable": + newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled + default: + return fmt.Errorf("replica metadata sync should be either [enable|disable]") + } + } + + if opts.ExistingObjectReplicate != "" { + switch opts.ExistingObjectReplicate { + case "enable": + newRule.ExistingObjectReplication.Status = Enabled + case "disable": + newRule.ExistingObjectReplication.Status = Disabled + default: + return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]") + } + } + if opts.IsSCSet { + newRule.Destination.StorageClass = opts.StorageClass + } + if opts.Priority != "" { + priority, err := strconv.Atoi(opts.Priority) + if err != nil { + return err + } + newRule.Priority = priority + } + if opts.DestBucket != "" { + destBucket := opts.DestBucket + // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html + if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { + return fmt.Errorf("destination bucket needs to be in Arn format") + } + newRule.Destination.Bucket = destBucket + } + // validate rule + if err := newRule.Validate(); err != nil { + return err + } + // ensure priority and destination bucket restrictions are not violated + for idx, rule := range c.Rules { + if rule.Priority == newRule.Priority && rIdx != idx { + return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") + } + if rule.Destination.Bucket != newRule.Destination.Bucket && rule.ID == newRule.ID { + if c.Role == newRule.Destination.Bucket { + continue + } + return fmt.Errorf("invalid destination bucket for this rule") + } + } + + c.Rules[rIdx] = newRule + return nil +} + +// RemoveRule removes a rule from replication config. +func (c *Config) RemoveRule(opts Options) error { + var newRules []Rule + ruleFound := false + for _, rule := range c.Rules { + if rule.ID != opts.ID { + newRules = append(newRules, rule) + continue + } + ruleFound = true + } + if !ruleFound { + return fmt.Errorf("Rule with ID %s not found", opts.ID) + } + if len(newRules) == 0 { + return fmt.Errorf("replication configuration should have at least one rule") + } + c.Rules = newRules + return nil +} + +// Rule - a rule for replication configuration. 
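As a usage sketch for the rule machinery above (editorial illustration, not part of the vendored file; the rule ID, prefix, and destination ARN are hypothetical):

	var cfg replication.Config
	err := cfg.AddRule(replication.Options{
		ID:         "repl-docs",
		Prefix:     "docs/",
		RuleStatus: "enable",
		Priority:   "1",
		DestBucket: "arn:minio:replication:us-east-1:site-b:destbucket",
	})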
+type Rule struct { + XMLName xml.Name `xml:"Rule" json:"-"` + ID string `xml:"ID,omitempty"` + Status Status `xml:"Status"` + Priority int `xml:"Priority"` + DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"` + DeleteReplication DeleteReplication `xml:"DeleteReplication"` + Destination Destination `xml:"Destination"` + Filter Filter `xml:"Filter" json:"Filter"` + SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` + ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` +} + +// Validate validates the rule for correctness +func (r Rule) Validate() error { + if err := r.validateID(); err != nil { + return err + } + if err := r.validateStatus(); err != nil { + return err + } + if err := r.validateFilter(); err != nil { + return err + } + + if r.Priority < 0 && r.Status == Enabled { + return fmt.Errorf("priority must be set for the rule") + } + + return r.ExistingObjectReplication.Validate() +} + +// validateID - checks if ID is valid or not. +func (r Rule) validateID() error { + // cannot be longer than 255 characters + if len(r.ID) > 255 { + return fmt.Errorf("ID must not be longer than 255 characters") + } + return nil +} + +// validateStatus - checks if status is valid or not. +func (r Rule) validateStatus() error { + // Status can't be empty + if len(r.Status) == 0 { + return fmt.Errorf("status cannot be empty") + } + + // Status must be one of Enabled or Disabled + if r.Status != Enabled && r.Status != Disabled { + return fmt.Errorf("status must be set to either Enabled or Disabled") + } + return nil +} + +func (r Rule) validateFilter() error { + return r.Filter.Validate() +} + +// Prefix - a rule can either have prefix under <filter></filter> or under +// <filter><and></and></filter>. This method returns the prefix from the +// location where it is available +func (r Rule) Prefix() string { + if r.Filter.Prefix != "" { + return r.Filter.Prefix + } + return r.Filter.And.Prefix +} + +// Tags - a rule can either have tag under <filter></filter> or under +// <filter><and></and></filter>. This method returns all the tags from the +// rule in the format tag1=value1&tag2=value2 +func (r Rule) Tags() string { + ts := []Tag{r.Filter.Tag} + if len(r.Filter.And.Tags) != 0 { + ts = r.Filter.And.Tags + } + + var buf bytes.Buffer + for _, t := range ts { + if buf.Len() > 0 { + buf.WriteString("&") + } + buf.WriteString(t.String()) + } + return buf.String() +} + +// Filter - a filter for a replication configuration Rule. +type Filter struct { + XMLName xml.Name `xml:"Filter" json:"-"` + Prefix string `json:"Prefix,omitempty"` + And And `xml:"And,omitempty" json:"And,omitempty"` + Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// Validate - validates the filter element +func (f Filter) Validate() error { + // A Filter must have exactly one of Prefix, Tag, or And specified. + if !f.And.isEmpty() { + if f.Prefix != "" { + return errInvalidFilter + } + if !f.Tag.IsEmpty() { + return errInvalidFilter + } + } + if f.Prefix != "" { + if !f.Tag.IsEmpty() { + return errInvalidFilter + } + } + if !f.Tag.IsEmpty() { + if err := f.Tag.Validate(); err != nil { + return err + } + } + return nil +} + +// Tag - a tag for a replication configuration Rule filter.
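For example (editor's note; key and value are hypothetical):

	t := replication.Tag{Key: "env", Value: "prod"}
	// t.String() == "env=prod"; Rule.Tags() joins multiple tags with "&".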
+type Tag struct { + XMLName xml.Name `json:"-"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + Value string `xml:"Value,omitempty" json:"Value,omitempty"` +} + +func (tag Tag) String() string { + if tag.IsEmpty() { + return "" + } + return tag.Key + "=" + tag.Value +} + +// IsEmpty returns whether this tag is empty or not. +func (tag Tag) IsEmpty() bool { + return tag.Key == "" +} + +// Validate checks this tag. +func (tag Tag) Validate() error { + if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { + return fmt.Errorf("invalid Tag Key") + } + + if utf8.RuneCountInString(tag.Value) > 256 { + return fmt.Errorf("invalid Tag Value") + } + return nil +} + +// Destination - destination in ReplicationConfiguration. +type Destination struct { + XMLName xml.Name `xml:"Destination" json:"-"` + Bucket string `xml:"Bucket" json:"Bucket"` + StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` +} + +// And - a tag to combine a prefix and multiple tags for replication configuration rule. +type And struct { + XMLName xml.Name `xml:"And,omitempty" json:"-"` + Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` + Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` +} + +// isEmpty returns true if Tags field is null +func (a And) isEmpty() bool { + return len(a.Tags) == 0 && a.Prefix == "" +} + +// Status represents Enabled/Disabled status +type Status string + +// Supported status types +const ( + Enabled Status = "Enabled" + Disabled Status = "Disabled" +) + +// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html +type DeleteMarkerReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteMarkerReplication is not set +func (d DeleteMarkerReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// DeleteReplication - whether versioned deletes are replicated - this +// is a MinIO specific extension +type DeleteReplication struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if DeleteReplication is not set +func (d DeleteReplication) IsEmpty() bool { + return len(d.Status) == 0 +} + +// ReplicaModifications specifies if replica modification sync is enabled +type ReplicaModifications struct { + Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default +} + +// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. +type SourceSelectionCriteria struct { + ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` +} + +// IsValid - checks whether SourceSelectionCriteria is valid or not. 
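A short illustration (editorial sketch):

	s := replication.SourceSelectionCriteria{
		ReplicaModifications: replication.ReplicaModifications{Status: replication.Enabled},
	}
	// s.IsValid() == true; a zero-value SourceSelectionCriteria passes
	// Validate unchanged, while any other Status is rejected.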
+func (s SourceSelectionCriteria) IsValid() bool { + return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled +} + +// Validate source selection criteria +func (s SourceSelectionCriteria) Validate() error { + if (s == SourceSelectionCriteria{}) { + return nil + } + if !s.IsValid() { + return fmt.Errorf("invalid ReplicaModification status") + } + return nil +} + +// ExistingObjectReplication - whether existing object replication is enabled +type ExistingObjectReplication struct { + Status Status `xml:"Status"` // should be set to "Disabled" by default +} + +// IsEmpty returns true if ExistingObjectReplication is not set +func (e ExistingObjectReplication) IsEmpty() bool { + return len(e.Status) == 0 +} + +// Validate checks that the status, when set, is either Enabled or Disabled. +func (e ExistingObjectReplication) Validate() error { + if e.IsEmpty() { + return nil + } + if e.Status != Disabled && e.Status != Enabled { + return fmt.Errorf("invalid ExistingObjectReplication status") + } + return nil +} + +// TargetMetrics represents inline replication metrics +// such as pending, failed and completed bytes in total for a bucket remote target +type TargetMetrics struct { + // Completed count + ReplicatedCount uint64 `json:"replicationCount,omitempty"` + // Completed size in bytes + ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` + // Bandwidth limit in bytes/sec for this target + BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"` + // Current bandwidth used in bytes/sec for this target + CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"` + // errors seen in replication in last minute, hour and total + Failed TimedErrStats `json:"failed,omitempty"` + // Deprecated fields + // Pending size in bytes + PendingSize uint64 `json:"pendingReplicationSize,omitempty"` + // Total Replica size in bytes + ReplicaSize uint64 `json:"replicaSize,omitempty"` + // Failed size in bytes + FailedSize uint64 `json:"failedReplicationSize,omitempty"` + // Total number of pending operations including metadata updates + PendingCount uint64 `json:"pendingReplicationCount,omitempty"` + // Total number of failed operations including metadata updates + FailedCount uint64 `json:"failedReplicationCount,omitempty"` +} + +// Metrics represents inline replication metrics for a bucket.
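For instance (editorial sketch; it assumes encoding/json and fmt are imported and that data holds a metrics JSON document, e.g. from a MinIO admin API response):

	var m replication.Metrics
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}
	fmt.Println(m.ReplicatedSize, m.Errors.Totals.Count)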
+type Metrics struct { + Stats map[string]TargetMetrics + // Completed size in bytes across targets + ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` + // Total Replica size in bytes across targets + ReplicaSize uint64 `json:"replicaSize,omitempty"` + // Total Replica counts + ReplicaCount int64 `json:"replicaCount,omitempty"` + // Total Replicated count + ReplicatedCount int64 `json:"replicationCount,omitempty"` + // errors seen in replication in last minute, hour and total + Errors TimedErrStats `json:"failed,omitempty"` + // Total number of entries that are queued for replication + QStats InQueueMetric `json:"queued"` + // Deprecated fields + // Total Pending size in bytes across targets + PendingSize uint64 `json:"pendingReplicationSize,omitempty"` + // Failed size in bytes across targets + FailedSize uint64 `json:"failedReplicationSize,omitempty"` + // Total number of pending operations including metadata updates across targets + PendingCount uint64 `json:"pendingReplicationCount,omitempty"` + // Total number of failed operations including metadata updates across targets + FailedCount uint64 `json:"failedReplicationCount,omitempty"` +} + +// RStat - has count and bytes for replication metrics +type RStat struct { + Count float64 `json:"count"` + Bytes int64 `json:"bytes"` +} + +// Add two RStat +func (r RStat) Add(r1 RStat) RStat { + return RStat{ + Count: r.Count + r1.Count, + Bytes: r.Bytes + r1.Bytes, + } +} + +// TimedErrStats holds error stats for a time period +type TimedErrStats struct { + LastMinute RStat `json:"lastMinute"` + LastHour RStat `json:"lastHour"` + Totals RStat `json:"totals"` +} + +// Add two TimedErrStats +func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats { + return TimedErrStats{ + LastMinute: te.LastMinute.Add(o.LastMinute), + LastHour: te.LastHour.Add(o.LastHour), + Totals: te.Totals.Add(o.Totals), + } +} + +// ResyncTargetsInfo provides replication target information to resync replicated data. +type ResyncTargetsInfo struct { + Targets []ResyncTarget `json:"target,omitempty"` +} + +// ResyncTarget provides the replica resources and resetID to initiate resync replication. +type ResyncTarget struct { + Arn string `json:"arn"` + ResetID string `json:"resetid"` + StartTime time.Time `json:"startTime,omitempty"` + EndTime time.Time `json:"endTime,omitempty"` + // Status of resync operation + ResyncStatus string `json:"resyncStatus,omitempty"` + // Completed size in bytes + ReplicatedSize int64 `json:"completedReplicationSize,omitempty"` + // Failed size in bytes + FailedSize int64 `json:"failedReplicationSize,omitempty"` + // Total number of failed operations + FailedCount int64 `json:"failedReplicationCount,omitempty"` + // Total number of completed operations + ReplicatedCount int64 `json:"replicationCount,omitempty"` + // Last bucket/object replicated. 
+ Bucket string `json:"bucket,omitempty"` + Object string `json:"object,omitempty"` +} + +// XferStats holds transfer rate info for uploads/sec +type XferStats struct { + AvgRate float64 `json:"avgRate"` + PeakRate float64 `json:"peakRate"` + CurrRate float64 `json:"currRate"` +} + +// Merge two XferStats +func (x *XferStats) Merge(x1 XferStats) { + x.AvgRate += x1.AvgRate + x.PeakRate += x1.PeakRate + x.CurrRate += x1.CurrRate +} + +// QStat holds count and bytes for objects in replication queue +type QStat struct { + Count float64 `json:"count"` + Bytes float64 `json:"bytes"` +} + +// Add 2 QStat entries +func (q *QStat) Add(q1 QStat) { + q.Count += q1.Count + q.Bytes += q1.Bytes +} + +// InQueueMetric holds stats for objects in replication queue +type InQueueMetric struct { + Curr QStat `json:"curr" msg:"cq"` + Avg QStat `json:"avg" msg:"aq"` + Max QStat `json:"peak" msg:"pq"` +} + +// MetricName name of replication metric +type MetricName string + +const ( + // Large is a metric name for large objects >=128MiB + Large MetricName = "Large" + // Small is a metric name for objects <128MiB size + Small MetricName = "Small" + // Total is a metric name for total objects + Total MetricName = "Total" +) + +// WorkerStat has stats on number of replication workers +type WorkerStat struct { + Curr int32 `json:"curr"` + Avg float32 `json:"avg"` + Max int32 `json:"max"` +} + +// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes +// and number of entries that failed replication after 3 retries +type ReplMRFStats struct { + LastFailedCount uint64 `json:"failedCount_last5min"` + // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start. + TotalDroppedCount uint64 `json:"droppedCount_since_uptime"` + // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start. 
+ TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"` +} + +// ReplQNodeStats holds stats for a node in replication queue +type ReplQNodeStats struct { + NodeName string `json:"nodeName"` + Uptime int64 `json:"uptime"` + Workers WorkerStat `json:"activeWorkers"` + + XferStats map[MetricName]XferStats `json:"transferSummary"` + TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"` + + QStats InQueueMetric `json:"queueStats"` + MRFStats ReplMRFStats `json:"mrfStats"` +} + +// ReplQueueStats holds stats for replication queue across nodes +type ReplQueueStats struct { + Nodes []ReplQNodeStats `json:"nodes"` +} + +// Workers returns cluster-level worker stats: Curr and Avg are averaged across nodes, while Max is the maximum seen on any node. +func (q ReplQueueStats) Workers() (tot WorkerStat) { + for _, node := range q.Nodes { + tot.Avg += node.Workers.Avg + tot.Curr += node.Workers.Curr + if tot.Max < node.Workers.Max { + tot.Max = node.Workers.Max + } + } + if len(q.Nodes) > 0 { + tot.Avg /= float32(len(q.Nodes)) + tot.Curr /= int32(len(q.Nodes)) + } + return tot +} + +// qStatSummary returns cluster level stats for objects in replication queue +func (q ReplQueueStats) qStatSummary() InQueueMetric { + m := InQueueMetric{} + for _, v := range q.Nodes { + m.Avg.Add(v.QStats.Avg) + m.Curr.Add(v.QStats.Curr) + if m.Max.Count < v.QStats.Max.Count { + m.Max.Add(v.QStats.Max) + } + } + return m +} + +// ReplQStats holds stats for objects in replication queue +type ReplQStats struct { + Uptime int64 `json:"uptime"` + Workers WorkerStat `json:"workers"` + + XferStats map[MetricName]XferStats `json:"xferStats"` + TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"` + + QStats InQueueMetric `json:"qStats"` + MRFStats ReplMRFStats `json:"mrfStats"` +} + +// QStats returns cluster level stats for objects in replication queue +func (q ReplQueueStats) QStats() (r ReplQStats) { + r.QStats = q.qStatSummary() + r.XferStats = make(map[MetricName]XferStats) + r.TgtXferStats = make(map[string]map[MetricName]XferStats) + r.Workers = q.Workers() + + for _, node := range q.Nodes { + for arn := range node.TgtXferStats { + xmap, ok := node.TgtXferStats[arn] + if !ok { + xmap = make(map[MetricName]XferStats) + } + for m, v := range xmap { + st, ok := r.XferStats[m] + if !ok { + st = XferStats{} + } + st.AvgRate += v.AvgRate + st.CurrRate += v.CurrRate + st.PeakRate = math.Max(st.PeakRate, v.PeakRate) + if _, ok := r.TgtXferStats[arn]; !ok { + r.TgtXferStats[arn] = make(map[MetricName]XferStats) + } + r.TgtXferStats[arn][m] = st + } + } + for k, v := range node.XferStats { + st, ok := r.XferStats[k] + if !ok { + st = XferStats{} + } + st.AvgRate += v.AvgRate + st.CurrRate += v.CurrRate + st.PeakRate = math.Max(st.PeakRate, v.PeakRate) + r.XferStats[k] = st + } + r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount + r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount + r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes + r.Uptime += node.Uptime + } + if len(q.Nodes) > 0 { + r.Uptime /= int64(len(q.Nodes)) // average uptime + } + return +}
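A brief sketch of how these aggregates are consumed (hypothetical caller code, not part of the library):

	func clusterQueueSummary(nodes []ReplQNodeStats) ReplQStats {
		// QStats sums transfer rates across nodes, keeps the peak rate
		// per metric, and averages uptime over the number of nodes.
		return ReplQueueStats{Nodes: nodes}.QStats()
	}

+// MetricsV2 represents replication metrics for a bucket.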
+type MetricsV2 struct { + Uptime int64 `json:"uptime"` + CurrentStats Metrics `json:"currStats"` + QueueStats ReplQueueStats `json:"queueStats"` +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go new file mode 100644 index 000000000..0e63ce2f7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go @@ -0,0 +1,411 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2020 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3utils + +import ( + "bytes" + "encoding/hex" + "errors" + "net" + "net/url" + "regexp" + "sort" + "strings" + "unicode/utf8" +) + +// Sentinel URL is the default url value which is invalid. +var sentinelURL = url.URL{} + +// IsValidDomain validates if input string is a valid domain name. +func IsValidDomain(host string) bool { + // See RFC 1035, RFC 3696. + host = strings.TrimSpace(host) + if len(host) == 0 || len(host) > 255 { + return false + } + // host cannot start or end with "-" + if host[len(host)-1:] == "-" || host[:1] == "-" { + return false + } + // host cannot start or end with "_" + if host[len(host)-1:] == "_" || host[:1] == "_" { + return false + } + // host cannot start with a "." + if host[:1] == "." { + return false + } + // All non alphanumeric characters are invalid. + if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") { + return false + } + // No need to regexp match, since the list is non-exhaustive. + // We let it be valid and fail later. + return true +} + +// IsValidIP parses input string for ip address validity. +func IsValidIP(ip string) bool { + return net.ParseIP(ip) != nil +} + +// IsVirtualHostSupported - verifies if bucketName can be part of +// virtual host. Currently only Amazon S3 and Google Cloud Storage +// would support this. +func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool { + if endpointURL == sentinelURL { + return false + } + // bucketName can be valid but '.' in the hostname will fail SSL + // certificate validation. So do not use host-style for such buckets. + if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") { + return false + } + // Return true for all other cases + return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL) +} + +// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + +// amazonS3HostHyphen - regex to determine if an endpoint is Amazon S3 hyphenated style region. +var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`) + +// amazonS3HostDualStack - regex to determine if an endpoint is Amazon S3 dualstack style region. +var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`) + +// amazonS3HostFIPS - regex to determine if an endpoint is Amazon S3 FIPS style region. +var amazonS3HostFIPS = regexp.MustCompile(`^s3-fips.(.*?).amazonaws.com$`) + +// amazonS3HostFIPSDualStack - regex to determine if an endpoint is Amazon S3 FIPS dualstack style region. +var amazonS3HostFIPSDualStack = regexp.MustCompile(`^s3-fips.dualstack.(.*?).amazonaws.com$`) + +// amazonS3HostDot - regex to determine if an endpoint is Amazon S3 Dot style region. +var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`) + +// amazonS3ChinaHost - regex to determine if the endpoint is China region Standard style. +var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`) + +// amazonS3ChinaHostDualStack - regex to determine if the endpoint is China region dualstack style. +var amazonS3ChinaHostDualStack = regexp.MustCompile(`^s3.dualstack.(cn.*?).amazonaws.com.cn$`) + +// amazonS3HostPrivateLink - regex to determine if an endpoint is an Amazon S3 PrivateLink interface endpoint. +var amazonS3HostPrivateLink = regexp.MustCompile(`^(?:bucket|accesspoint).vpce-.*?.s3.(.*?).vpce.amazonaws.com$`) + +// GetRegionFromURL - returns a region from url host. +func GetRegionFromURL(endpointURL url.URL) string { + if endpointURL == sentinelURL { + return "" + } + + // FIPS dualstack is matched first so that the non-greedy FIPS regex + // does not capture a dualstack URL. + parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + + return "" +} + +// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint. +func IsAliyunOSSEndpoint(endpointURL url.URL) bool { + return strings.HasSuffix(endpointURL.Host, "aliyuncs.com") +} + +// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. +func IsAmazonEndpoint(endpointURL url.URL) bool { + if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { + return true + } + return GetRegionFromURL(endpointURL) != "" +}
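A small sketch of how the endpoint matchers compose (hypothetical endpoint value; standard library plus the functions above):

	func regionOf(endpoint string) string {
		u, err := url.Parse(endpoint) // e.g. "https://s3.dualstack.eu-west-1.amazonaws.com"
		if err != nil || !IsAmazonEndpoint(*u) {
			return ""
		}
		return GetRegionFromURL(*u) // "eu-west-1" for the example above
	}

+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.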
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" || + IsAmazonFIPSGovCloudEndpoint(endpointURL)) +} + +// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud. +func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-") +} + +// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com") +} + +// IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint +// See https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html. +func IsAmazonPrivateLinkEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return amazonS3HostPrivateLink.MatchString(endpointURL.Host) +} + +// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. +func IsGoogleEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Hostname() == "storage.googleapis.com" +} + +// Expects ascii encoded strings - from output of urlEncodePath +func percentEncodeSlash(s string) string { + return strings.ReplaceAll(s, "/", "%2F") +} + +// QueryEncode - encodes query values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func QueryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(EncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// TagDecode - decodes canonical tag into map of key and value. +func TagDecode(ctag string) map[string]string { + if ctag == "" { + return map[string]string{} + } + tags := strings.Split(ctag, "&") + tagMap := make(map[string]string, len(tags)) + var err error + for _, tag := range tags { + kvs := strings.SplitN(tag, "=", 2) + if len(kvs) == 0 { + return map[string]string{} + } + if len(kvs) == 1 { + return map[string]string{} + } + tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) + if err != nil { + continue + } + } + return tagMap +} + +// TagEncode - encodes tag values in their URL encoded form. 
In + // addition to the percent encoding performed by urlEncodePath() used + // here, it also percent encodes '/' (forward slash) +func TagEncode(tags map[string]string) string { + if tags == nil { + return "" + } + values := url.Values{} + for k, v := range tags { + values[k] = []string{v} + } + return QueryEncode(values) +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encodes a string's UTF-8 byte representation into percent-encoded escape sequences +// +// This is necessary since the regular url.Parse() and url.Encode() functions do not handle UTF-8 well: +// non-English characters cannot be parsed reliably due to the way url.Encode() is written +// +// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname strings.Builder + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname.WriteRune(s) + continue + default: + l := utf8.RuneLen(s) + if l < 0 { + // if utf8 cannot convert, return the same string as is + return pathName + } + u := make([]byte, l) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname.WriteString("%" + strings.ToUpper(hex)) + } + } + } + return encodedPathname.String() +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be shorter than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be longer than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +}
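A short usage sketch (illustrative values; package-local calls only):

	func exampleEncodeAndValidate() error {
		// Characters outside the RFC 3986 unreserved set are percent-encoded,
		// e.g. "a b/ç.txt" encodes to "a%20b/%C3%A7.txt".
		_ = EncodePath("a b/ç.txt")
		// Bucket names must be 3-63 characters, must not contain "..",
		// and must not look like an IP address.
		return CheckValidBucketName("my-bucket")
	}

+// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version.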
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be longer than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go new file mode 100644 index 000000000..c265ce572 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go @@ -0,0 +1,195 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "fmt" + "sort" + + "github.com/goccy/go-json" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +// ToSlice - returns StringSet as a sorted string slice. +func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds a string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes a string from the set. It does nothing if the string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if a string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns a new set containing each value that passes the match function. +// A 'matchFn' should accept an element of the set as its first argument and +// 'matchString' as its second argument, and should return true to include +// the element in the output set; otherwise the element is ignored. +func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// An 'applyFn' should accept an element of the set as its argument and +// return the processed string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. + if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. + for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []interface{}{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(fmt.Sprintf("%v", s)) + } + } else { + var s interface{} + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(fmt.Sprintf("%v", s)) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. +func CopyStringSet(set StringSet) StringSet { + nset := NewStringSet() + for k, v := range set { + nset[k] = v + } + return nset +}
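A compact usage sketch of the set operations above (illustrative values only; ToSlice returns sorted elements):

	func exampleSets() {
		a := CreateStringSet("red", "green")
		b := CreateStringSet("green", "blue")
		fmt.Println(a.Intersection(b).ToSlice()) // [green]
		fmt.Println(a.Union(b).ToSlice())        // [blue green red]
		fmt.Println(a.Difference(b).ToSlice())   // [red]
	}

diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go new file mode 100644 index 000000000..77540e2d8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming-unsigned-trailer.go @@ -0,0 +1,224 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2022 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.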
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// getUnsignedChunkLength - calculates the length of chunk metadata +func getUnsignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + crlfLen + + chunkDataSize + + crlfLen +} + +// getUSStreamLength - calculates the length of the overall stream (data + metadata) +func getUSStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getUnsignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getUnsignedChunkLength(remainingBytes) + } + streamLen += getUnsignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += crlfLen + } + + return streamLen +} + +// prepareUSStreamingRequest - prepares a request with the appropriate +// headers for an unsigned streaming (aws-chunked) upload. +func prepareUSStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + req.TransferEncoding = []string{"aws-chunked"} + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with the chunk metadata for each chunk included. + req.ContentLength = getUSStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) +} + +// StreamingUSReader implements unsigned chunked upload as a reader on +// top of req.Body's ReadCloser: chunk header;data;... repeated +type StreamingUSReader struct { + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds the current chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header +} + +// writeChunk - writes a chunk of chunkLen size, read from s.baseReadCloser, to the output buffer. +func (s *StreamingUSReader) writeChunk(chunkLen int, addCrLf bool) { + s.buf.WriteString(strconv.FormatInt(int64(chunkLen), 16) + "\r\n") + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addTrailer - adds an unsigned trailer with the provided headers to +// the output. +func (s *StreamingUSReader) addTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...)
+ } + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// StreamingUnsignedV4 - provides unsigned chunked upload by implementing io.Reader. +func StreamingUnsignedV4(req *http.Request, sessionToken string, dataLen int64, reqTime time.Time) *http.Request { + // Set headers needed for the unsigned streaming upload. + prepareUSStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingUSReader{ + baseReadCloser: req.Body, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove the trailer from the request headers; the reader emits it instead. + req.Trailer = nil + } + + req.Body = stReader + + return req +} + +// Read - performs the chunked upload, providing an io.Reader interface. +func (s *StreamingUSReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with the next chunk when it has fewer + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Write the chunk to s.buf. + s.writeChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Write the final zero-length chunk to s.buf. + s.writeChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingUSReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go new file mode 100644 index 000000000..1c2f1dc9d --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go @@ -0,0 +1,403 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + md5simd "github.com/minio/md5-simd" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingSignTrailerAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + streamingTrailerHdr = "AWS4-HMAC-SHA256-TRAILER" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF + trailerKVSeparator = ":" + trailerSignature = "x-amz-trailer-signature" +) + +// Request headers to be ignored while calculating seed signature for +// a request. +var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64, trailers http.Header) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + if len(trailers) > 0 { + for name, placeholder := range trailers { + if len(placeholder) > 0 { + streamLen += int64(len(name) + len(trailerKVSeparator) + len(placeholder[0]) + 1) + } + } + streamLen += int64(len(trailerSignature)+len(trailerKVSeparator)) + signatureStrLen + crlfLen + crlfLen + } + + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + emptySHA256, + chunkChecksum, + } + + return strings.Join(stringToSignParts, "\n") +} + +// buildTrailerChunkStringToSign - returns the string to sign given chunk data +// and previous signature. 
+func buildTrailerChunkStringToSign(t time.Time, region, previousSig, chunkChecksum string) string { + stringToSignParts := []string{ + streamingTrailerHdr, + t.Format(iso8601DateFormat), + getScope(region, t, ServiceTypeS3), + previousSig, + chunkChecksum, + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + if len(req.Trailer) == 0 { + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + } else { + req.Header.Set("X-Amz-Content-Sha256", streamingSignTrailerAlgorithm) + for k := range req.Trailer { + req.Header.Add("X-Amz-Trailer", strings.ToLower(k)) + } + req.TransferEncoding = []string{"aws-chunked"} + } + + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize), req.Trailer) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g. string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildChunkSignature(chunkCheckSum string, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildChunkStringToSign(reqTime, region, + previousSignature, chunkCheckSum) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// buildTrailerChunkSignature - returns the trailer chunk signature for a given trailer and previous signature. +func buildTrailerChunkSignature(chunkChecksum string, reqTime time.Time, region, + previousSignature, secretAccessKey string, +) string { + chunkStringToSign := buildTrailerChunkStringToSign(reqTime, region, + previousSignature, chunkChecksum) + signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) + return getSignature(signingKey, chunkStringToSign) +} + +// setSeedSignature - computes and stores the seed signature for a given request. +func (s *StreamingReader) setSeedSignature(req *http.Request) { + // Get canonical request + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) + + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) + + // Calculate signature. + s.seedSignature = getSignature(signingKey, stringToSign) +}
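For orientation, the signature chaining the reader below implements (a restatement of the helpers above, following the AWS streaming signature reference):

	// seed   = SigV4 signature over the request headers  (setSeedSignature)
	// sig[1] = signature over (seed,   SHA-256(chunk 1)) (buildChunkSignature)
	// sig[2] = signature over (sig[1], SHA-256(chunk 2))
	// ...
	// sig[n] = signature over the final zero-length chunk, optionally
	//          followed by a signed trailer (buildTrailerChunkSignature)

+// StreamingReader implements chunked upload signature as a reader on +// top of req.Body's ReadCloser: chunk header;data;... repeated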
+type StreamingReader struct { + accessKeyID string + secretAccessKey string + sessionToken string + region string + prevSignature string + seedSignature string + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + reqTime time.Time + chunkNum int + totalChunks int + lastChunkSize int + trailer http.Header + sh256 md5simd.Hasher +} + +// signChunk - signs a chunk read from s.baseReadCloser of chunkLen size. +func (s *StreamingReader) signChunk(chunkLen int, addCrLf bool) { + // Compute chunk signature for next header + s.sh256.Reset() + s.sh256.Write(s.chunkBuf[:chunkLen]) + chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + + signature := buildChunkSignature(chunkChecksum, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + // Write chunk header into streaming buffer + chunkHdr := buildChunkHeader(int64(chunkLen), signature) + s.buf.Write(chunkHdr) + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + if addCrLf { + s.buf.Write([]byte("\r\n")) + } + + // Reset chunkBufLen for next chunk read. + s.chunkBufLen = 0 + s.chunkNum++ +} + +// addSignedTrailer - adds a trailer with the provided headers, +// then signs a chunk and adds it to output. +func (s *StreamingReader) addSignedTrailer(h http.Header) { + olen := len(s.chunkBuf) + s.chunkBuf = s.chunkBuf[:0] + for k, v := range h { + s.chunkBuf = append(s.chunkBuf, []byte(strings.ToLower(k)+trailerKVSeparator+v[0]+"\n")...) + } + + s.sh256.Reset() + s.sh256.Write(s.chunkBuf) + chunkChecksum := hex.EncodeToString(s.sh256.Sum(nil)) + // Compute chunk signature + signature := buildTrailerChunkSignature(chunkChecksum, s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + s.buf.Write(s.chunkBuf) + s.buf.WriteString("\r\n" + trailerSignature + trailerKVSeparator + signature + "\r\n\r\n") + + // Reset chunkBufLen for next chunk read. + s.chunkBuf = s.chunkBuf[:olen] + s.chunkBufLen = 0 + s.chunkNum++ +} + +// setStreamingAuthHeader - builds and sets authorization header value +// for streaming signature. +func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { + credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3) + authParts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), + "Signature=" + s.seedSignature, + } + + // Set authorization header. + auth := strings.Join(authParts, ",") + req.Header.Set("Authorization", auth) +} + +// StreamingSignV4 - provides chunked upload signatureV4 support by +// implementing io.Reader. +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, + region string, dataLen int64, reqTime time.Time, sh256 md5simd.Hasher, +) *http.Request { + // Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = io.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + sessionToken: sessionToken, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + sh256: sh256, + } + if len(req.Trailer) > 0 { + stReader.trailer = req.Trailer + // Remove the trailer from the request headers; the reader signs and emits it instead. + req.Trailer = nil + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - performs chunk upload signing, providing an +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with the next chunk when it has fewer + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen, true) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) + } + + // Sign the chunk and write it to s.buf. + s.signChunk(0, len(s.trailer) == 0) + if len(s.trailer) > 0 { + // Trailer must be set now. + s.addSignedTrailer(s.trailer) + } + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + if s.sh256 != nil { + s.sh256.Close() + s.sh256 = nil + } + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go new file mode 100644 index 000000000..fa4f8c91e --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go @@ -0,0 +1,319 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Encode input URL path to URL encoded path. +func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { + if virtualHost { + reqHost := getHostAddr(req) + dotPos := strings.Index(reqHost, ".") + if dotPos > -1 { + bucketName := reqHost[:dotPos] + path = "/" + bucketName + path += req.URL.Path + path = s3utils.EncodePath(path) + return + } + } + path = s3utils.EncodePath(req.URL.Path) + return +} + +// PreSignV2 - presign the request in following style. +// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { + // Presign is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + d := time.Now().UTC() + // Find epoch expires when the request will expire. + epochExpires := d.Unix() + expires + + // Add expires header if not present. + if expiresStr := req.Header.Get("Expires"); expiresStr == "" { + req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) + } + + // Get presigned string to sign. + stringToSign := preStringToSignV2(req, virtualHost) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Calculate signature. + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + + query := req.URL.Query() + // Handle specially for Google Cloud Storage. + if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { + query.Set("GoogleAccessId", accessKeyID) + } else { + query.Set("AWSAccessKeyId", accessKeyID) + } + + // Fill in Expires for presigned query. + query.Set("Expires", strconv.FormatInt(epochExpires, 10)) + + // Encode query and save. + req.URL.RawQuery = s3utils.QueryEncode(query) + + // Save signature finally. + req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) + + // Return. + return &req +} + +// PostPresignSignatureV2 - presigned signature for PostPolicy +// request. 
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(policyBase64)) + signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) + return signature +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ "/" + Bucket ] + +// <HTTP-Request-URI, from the protocol name up to the query string> + +// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = <described below> + +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + d := time.Now().UTC() + + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := stringToSignV2(req, virtualHost) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. + authHeader := new(bytes.Buffer) + authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header. + req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writePreSignV2Headers - write preSign v2 required headers. +func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Expires") + "\n") +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func stringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writeSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +}
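For reference, a worked illustration of the V2 string to sign (values from the classic AWS documentation example, not produced by this code): a GET of /johnsmith/photos/puppy.jpg dated "Tue, 27 Mar 2007 19:36:42 +0000", with no Content-Md5, Content-Type, or x-amz-* headers, yields:

	GET\n
	\n
	\n
	Tue, 27 Mar 2007 19:36:42 +0000\n
	/johnsmith/photos/puppy.jpg

+// writeSignV2Headers - write signV2 required headers.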
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Date") + "\n") +} + +// writeCanonicalizedHeaders - write canonicalized headers. +func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { + var protoHeaders []string + vals := make(map[string][]string) + for k, vv := range req.Header { + // All the AMZ headers should be lowercase + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-amz") { + protoHeaders = append(protoHeaders, lk) + vals[lk] = vv + } + } + sort.Strings(protoHeaders) + for _, k := range protoHeaders { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } +} + +// AWS S3 Signature V2 calculation rule is given here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// +// This list should be kept alphabetically sorted, do not hastily edit. +var resourceList = []string{ + "acl", + "cors", + "delete", + "encryption", + "legal-hold", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "replication", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "retention", + "select", + "select-type", + "tagging", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// From the Amazon docs: +// +// CanonicalizedResource = [ "/" + Bucket ] + +// <HTTP-Request-URI, from the protocol name up to the query string> + +// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { + // Save request URL. + requestURL := req.URL + // Get encoded URL path. + buf.WriteString(encodeURL2Path(&req, virtualHost)) + if requestURL.RawQuery != "" { + var n int + vals, _ := url.ParseQuery(requestURL.RawQuery) + // Verify if any sub resource queries are present, if yes + // canonicalize them. + for _, resource := range resourceList { + if vv, ok := vals[resource]; ok && len(vv) > 0 { + n++ + // First element + switch n { + case 1: + buf.WriteByte('?') + // The rest + default: + buf.WriteByte('&') + } + buf.WriteString(resource) + // Request parameters + if len(vv[0]) > 0 { + buf.WriteByte('=') + buf.WriteString(vv[0]) + } + } + } + } +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go new file mode 100644 index 000000000..ffd251451 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go @@ -0,0 +1,351 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signer + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +// Different service types +const ( + ServiceTypeS3 = "s3" + ServiceTypeSTS = "sts" +) + +// Excerpts from @lsegal - +// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +// +// * User-Agent +// This is ignored from signing because signing this causes problems with generating pre-signed +// URLs (that are executed by other agents) or when customers pass requests through proxies, which +// may modify the user-agent. +// +// * Authorization +// Is skipped for obvious reasons. +// +// * Accept-Encoding +// Some S3 servers like Hitachi Content Platform do not honor this header for signature +// calculation. +var v4IgnoredHeaders = map[string]bool{ + "Accept-Encoding": true, + "Authorization": true, + "User-Agent": true, +} + +// getSigningKey derives the HMAC signing key used to calculate the final signature. +func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte(serviceType)) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature returns the final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generates a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time, serviceType string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + serviceType, + "aws4_request", + }, "/") + return scope +} + +// GetCredential generates a credential string. +func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string { + scope := getScope(location, t, serviceType) + return accessKeyID + "/" + scope +} + +// getHashedPayload gets the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +}
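A worked illustration of scope and credential formatting (the access key is the standard AWS documentation placeholder):

	// For accessKeyID "AKIAIOSFODNN7EXAMPLE", region "us-east-1",
	// service "s3" on 2013-05-24, GetCredential returns:
	//   AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request

+// getCanonicalHeaders generates a list of request headers for +// signature.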
+// getCanonicalHeaders generates a list of request headers for
+// signature.
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	vals := make(map[string][]string)
+	for k, vv := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // ignored header
+		}
+		headers = append(headers, strings.ToLower(k))
+		vals[strings.ToLower(k)] = vv
+	}
+	if !headerExists("host", headers) {
+		headers = append(headers, "host")
+	}
+	sort.Strings(headers)
+
+	var buf bytes.Buffer
+	// Save all the headers in canonical form <header>:<value> newline
+	// separated for each header.
+	for _, k := range headers {
+		buf.WriteString(k)
+		buf.WriteByte(':')
+		switch {
+		case k == "host":
+			buf.WriteString(getHostAddr(&req))
+			buf.WriteByte('\n')
+		default:
+			for idx, v := range vals[k] {
+				if idx > 0 {
+					buf.WriteByte(',')
+				}
+				buf.WriteString(signV4TrimAll(v))
+			}
+			buf.WriteByte('\n')
+		}
+	}
+	return buf.String()
+}
+
+func headerExists(key string, headers []string) bool {
+	for _, k := range headers {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// getSignedHeaders generates all signed request headers,
+// i.e. a lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+	var headers []string
+	for k := range req.Header {
+		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+			continue // Ignored header found continue.
+		}
+		headers = append(headers, strings.ToLower(k))
+	}
+	if !headerExists("host", headers) {
+		headers = append(headers, "host")
+	}
+	sort.Strings(headers)
+	return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generates a canonical request of style:
+//
+// canonicalRequest =
+//
+//	<HTTPMethod>\n
+//	<CanonicalURI>\n
+//	<CanonicalQueryString>\n
+//	<CanonicalHeaders>\n
+//	<SignedHeaders>\n
+//	<HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
+	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
+	canonicalRequest := strings.Join([]string{
+		req.Method,
+		s3utils.EncodePath(req.URL.Path),
+		req.URL.RawQuery,
+		getCanonicalHeaders(req, ignoredHeaders),
+		getSignedHeaders(req, ignoredHeaders),
+		hashedPayload,
+	}, "\n")
+	return canonicalRequest
+}
+
+// getStringToSignV4 builds the string to sign based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
+	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+	stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
+	stringToSign += hex.EncodeToString(sum256([]byte(canonicalRequest)))
+	return stringToSign
+}
+
+// PreSignV4 presigns the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+	// Presign is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Get credential string.
+	credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+	// Set URL query.
+	query := req.URL.Query()
+	query.Set("X-Amz-Algorithm", signV4Algorithm)
+	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+	query.Set("X-Amz-SignedHeaders", signedHeaders)
+	query.Set("X-Amz-Credential", credential)
+	// Set session token if available.
+	if sessionToken != "" {
+		query.Set("X-Amz-Security-Token", sessionToken)
+	}
+	req.URL.RawQuery = query.Encode()
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// Add signature header to RawQuery.
+	req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+	return &req
+}
+
+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+	// Get signing key.
+	signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
+	// Calculate signature.
+	signature := getSignature(signingkey, policyBase64)
+	return signature
+}
+
+// SignV4STS - signature v4 for STS request.
+func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+	return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS, nil)
+}
+
+// Internal function called for different service types.
+func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string, trailer http.Header) *http.Request {
+	// Signature calculation is not needed for anonymous credentials.
+	if accessKeyID == "" || secretAccessKey == "" {
+		return &req
+	}
+
+	// Initial time.
+	t := time.Now().UTC()
+
+	// Set x-amz-date.
+	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+	// Set session token if available.
+	if sessionToken != "" {
+		req.Header.Set("X-Amz-Security-Token", sessionToken)
+	}
+
+	if len(trailer) > 0 {
+		for k := range trailer {
+			req.Header.Add("X-Amz-Trailer", strings.ToLower(k))
+		}
+
+		req.Header.Set("Content-Encoding", "aws-chunked")
+		req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(req.ContentLength, 10))
+	}
+
+	hashedPayload := getHashedPayload(req)
+	if serviceType == ServiceTypeSTS {
+		// The content sha256 header is not sent with the request,
+		// but the sha256 of the payload is still expected for the
+		// signature in an STS service type request.
+		req.Header.Del("X-Amz-Content-Sha256")
+	}
+
+	// Get canonical request.
+	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
+
+	// Get string to sign from canonical request.
+	stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
+
+	// Get hmac signing key.
+	signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
+
+	// Get credential string.
+	credential := GetCredential(accessKeyID, location, t, serviceType)
+
+	// Get all signed headers.
+	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+	// Calculate signature.
+	signature := getSignature(signingKey, stringToSign)
+
+	// If regular request, construct the final authorization header.
+	parts := []string{
+		signV4Algorithm + " Credential=" + credential,
+		"SignedHeaders=" + signedHeaders,
+		"Signature=" + signature,
+	}
+
+	// Set authorization header.
+	auth := strings.Join(parts, ", ")
+	req.Header.Set("Authorization", auth)
+
+	if len(trailer) > 0 {
+		// Use custom chunked encoding.
+		req.Trailer = trailer
+		return StreamingUnsignedV4(&req, sessionToken, req.ContentLength, time.Now().UTC())
+	}
+	return &req
+}
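
Putting the pieces together, the exported entry points take an http.Request by value and return a signed copy. A minimal usage sketch (endpoint, credentials, and the hardcoded empty-payload SHA-256 are placeholders; real calls normally go through the minio Client, which fills these in):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7/pkg/signer"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://play.min.io/mybucket/myobject", nil)
	// SHA-256 of an empty payload; a real caller hashes the actual body.
	req.Header.Set("X-Amz-Content-Sha256",
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	// Placeholder credentials and region.
	signed := signer.SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1")
	fmt.Println(signed.Header.Get("Authorization"))
}
```
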
+// SignV4 signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
+	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, nil)
+}
+
+// SignV4Trailer signs the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+func SignV4Trailer(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, trailer http.Header) *http.Request {
+	return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3, trailer)
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
new file mode 100644
index 000000000..87c993989
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go
@@ -0,0 +1,62 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signer
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"net/http"
+	"strings"
+)
+
+// unsignedPayload - value to be set to the X-Amz-Content-Sha256 header when
+// the request payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculates the sha256 sum of an input byte array.
+func sum256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// sumHMAC calculates the hmac of data keyed by key.
+func sumHMAC(key, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+// getHostAddr returns the host header if available, otherwise returns the host from the URL.
+func getHostAddr(req *http.Request) string {
+	host := req.Header.Get("host")
+	if host != "" && req.Host != host {
+		return host
+	}
+	if req.Host != "" {
+		return req.Host
+	}
+	return req.URL.Host
+}
+
+// Trim leading and trailing spaces and replace sequential spaces with one space, following TrimAll()
+// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+func signV4TrimAll(input string) string {
+	// Compress adjacent spaces (a space is determined by
+	// unicode.IsSpace() internally here) to one space and return
+	return strings.Join(strings.Fields(input), " ")
+}
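
One subtlety worth calling out: because of signV4TrimAll, header values containing runs of whitespace are signed in collapsed form, matching AWS's TrimAll rule. A tiny illustration of the strings.Fields/Join trick it relies on:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	input := "  a   b\t c  "
	// Same expression as signV4TrimAll above: prints "a b c".
	fmt.Printf("%q\n", strings.Join(strings.Fields(input), " "))
}
```
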
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
new file mode 100644
index 000000000..b5fb9565a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go
@@ -0,0 +1,66 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sse
+
+import "encoding/xml"
+
+// ApplySSEByDefault defines the default encryption configuration, KMS or SSE. To activate
+// KMS, SSEAlgorithm needs to be set to "aws:kms".
+// MinIO currently does not support KMS.
+type ApplySSEByDefault struct {
+	KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
+	SSEAlgorithm   string `xml:"SSEAlgorithm"`
+}
+
+// Rule layer encapsulates default encryption configuration
+type Rule struct {
+	Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
+}
+
+// Configuration is the default encryption configuration structure
+type Configuration struct {
+	XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
+	Rules   []Rule   `xml:"Rule"`
+}
+
+// NewConfigurationSSES3 initializes a new SSE-S3 configuration
+func NewConfigurationSSES3() *Configuration {
+	return &Configuration{
+		Rules: []Rule{
+			{
+				Apply: ApplySSEByDefault{
+					SSEAlgorithm: "AES256",
+				},
+			},
+		},
+	}
+}
+
+// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
+func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
+	return &Configuration{
+		Rules: []Rule{
+			{
+				Apply: ApplySSEByDefault{
+					KmsMasterKeyID: kmsMasterKey,
+					SSEAlgorithm:   "aws:kms",
+				},
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
new file mode 100644
index 000000000..7a84a6f34
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -0,0 +1,413 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2020-2022 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tags
+
+import (
+	"encoding/xml"
+	"io"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error contains a tag-specific error.
+type Error interface {
+	error
+	Code() string
+}
+
+type errTag struct {
+	code    string
+	message string
+}
+
+// Code returns the error code.
+func (err errTag) Code() string {
+	return err.code
+}
+
+// Error returns the error message.
+func (err errTag) Error() string {
+	return err.message
+}
+
+var (
+	errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
+	errTooManyTags       = &errTag{"BadRequest", "Tags cannot be more than 50"}
+	errInvalidTagKey     = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
+	errInvalidTagValue   = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
+	errDuplicateTagKey   = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
+)
+
+// Tag comes with limitations as per
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+const (
+	maxKeyLength      = 128
+	maxValueLength    = 256
+	maxObjectTagCount = 10
+	maxTagCount       = 50
+)
+
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
+// Borrowed from the article above; based on testing of various ASCII characters, the following regex
+// is supported by AWS S3 for both tag keys and values.
+var validTagKeyValue = regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`)
+
+func checkKey(key string) error {
+	if len(key) == 0 {
+		return errInvalidTagKey
+	}
+
+	if utf8.RuneCountInString(key) > maxKeyLength || !validTagKeyValue.MatchString(key) {
+		return errInvalidTagKey
+	}
+
+	return nil
+}
+
+func checkValue(value string) error {
+	if value != "" {
+		if utf8.RuneCountInString(value) > maxValueLength || !validTagKeyValue.MatchString(value) {
+			return errInvalidTagValue
+		}
+	}
+
+	return nil
+}
+
+// Tag denotes key and value.
+type Tag struct {
+	Key   string `xml:"Key"`
+	Value string `xml:"Value"`
+}
+
+func (tag Tag) String() string {
+	return tag.Key + "=" + tag.Value
+}
+
+// IsEmpty returns whether this tag is empty or not.
+func (tag Tag) IsEmpty() bool {
+	return tag.Key == ""
+}
+
+// Validate checks this tag.
+func (tag Tag) Validate() error {
+	if err := checkKey(tag.Key); err != nil {
+		return err
+	}
+
+	return checkValue(tag.Value)
+}
+
+// MarshalXML encodes to XML data.
+func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	if err := tag.Validate(); err != nil {
+		return err
+	}
+
+	type subTag Tag // to avoid recursively calling MarshalXML()
+	return e.EncodeElement(subTag(tag), start)
+}
+
+// UnmarshalXML decodes XML data to tag.
+func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	type subTag Tag // to avoid recursively calling UnmarshalXML()
+	var st subTag
+	if err := d.DecodeElement(&st, &start); err != nil {
+		return err
+	}
+
+	if err := Tag(st).Validate(); err != nil {
+		return err
+	}
+
+	*tag = Tag(st)
+	return nil
+}
+
+// tagSet represents a list of unique tags.
+type tagSet struct { + tagMap map[string]string + isObject bool +} + +func (tags tagSet) String() string { + if len(tags.tagMap) == 0 { + return "" + } + var buf strings.Builder + keys := make([]string, 0, len(tags.tagMap)) + for k := range tags.tagMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + keyEscaped := url.QueryEscape(k) + valueEscaped := url.QueryEscape(tags.tagMap[k]) + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(keyEscaped) + buf.WriteByte('=') + buf.WriteString(valueEscaped) + } + return buf.String() +} + +func (tags *tagSet) remove(key string) { + delete(tags.tagMap, key) +} + +func (tags *tagSet) set(key, value string, failOnExist bool) error { + if failOnExist { + if _, found := tags.tagMap[key]; found { + return errDuplicateTagKey + } + } + + if err := checkKey(key); err != nil { + return err + } + + if err := checkValue(value); err != nil { + return err + } + + if tags.isObject { + if len(tags.tagMap) == maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tags.tagMap) == maxTagCount { + return errTooManyTags + } + + tags.tagMap[key] = value + return nil +} + +func (tags tagSet) count() int { + return len(tags.tagMap) +} + +func (tags tagSet) toMap() map[string]string { + m := make(map[string]string, len(tags.tagMap)) + for key, value := range tags.tagMap { + m[key] = value + } + return m +} + +// MarshalXML encodes to XML data. +func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + tagList.Tags = make([]Tag, 0, len(tags.tagMap)) + for key, value := range tags.tagMap { + tagList.Tags = append(tagList.Tags, Tag{key, value}) + } + + return e.EncodeElement(tagList, start) +} + +// UnmarshalXML decodes XML data to tag list. +func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tagList := struct { + Tags []Tag `xml:"Tag"` + }{} + + if err := d.DecodeElement(&tagList, &start); err != nil { + return err + } + + if tags.isObject { + if len(tagList.Tags) > maxObjectTagCount { + return errTooManyObjectTags + } + } else if len(tagList.Tags) > maxTagCount { + return errTooManyTags + } + + m := make(map[string]string, len(tagList.Tags)) + for _, tag := range tagList.Tags { + if _, found := m[tag.Key]; found { + return errDuplicateTagKey + } + + m[tag.Key] = tag.Value + } + + tags.tagMap = m + return nil +} + +type tagging struct { + XMLName xml.Name `xml:"Tagging"` + TagSet *tagSet `xml:"TagSet"` +} + +// Tags is list of tags of XML request/response as per +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody +type Tags tagging + +func (tags Tags) String() string { + return tags.TagSet.String() +} + +// Remove removes a tag by its key. +func (tags *Tags) Remove(key string) { + tags.TagSet.remove(key) +} + +// Set sets new tag. +func (tags *Tags) Set(key, value string) error { + return tags.TagSet.set(key, value, false) +} + +// Count - return number of tags accounted for +func (tags Tags) Count() int { + return tags.TagSet.count() +} + +// ToMap returns copy of tags. +func (tags Tags) ToMap() map[string]string { + return tags.TagSet.toMap() +} + +// MapToObjectTags converts an input map of key and value into +// *Tags data structure with validation. 
+func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
+	return NewTags(tagMap, true)
+}
+
+// MapToBucketTags converts an input map of key and value into
+// *Tags data structure with validation.
+func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
+	return NewTags(tagMap, false)
+}
+
+// NewTags creates Tags from tagMap. If isObject is set, it validates for object tags.
+func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
+	tagging := &Tags{
+		TagSet: &tagSet{
+			tagMap:   make(map[string]string),
+			isObject: isObject,
+		},
+	}
+
+	for key, value := range tagMap {
+		if err := tagging.TagSet.set(key, value, true); err != nil {
+			return nil, err
+		}
+	}
+
+	return tagging, nil
+}
+
+func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
+	tagging := &Tags{
+		TagSet: &tagSet{
+			tagMap:   make(map[string]string),
+			isObject: isObject,
+		},
+	}
+
+	if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
+		return nil, err
+	}
+
+	return tagging, nil
+}
+
+// ParseBucketXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
+func ParseBucketXML(reader io.Reader) (*Tags, error) {
+	return unmarshalXML(reader, false)
+}
+
+// ParseObjectXML decodes XML data of tags in reader specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
+func ParseObjectXML(reader io.Reader) (*Tags, error) {
+	return unmarshalXML(reader, true)
+}
+
+// stringsCut slices s around the first instance of sep,
+// returning the text before and after sep.
+// The found result reports whether sep appears in s.
+// If sep does not appear in s, cut returns s, "", false.
+func stringsCut(s, sep string) (before, after string, found bool) {
+	if i := strings.Index(s, sep); i >= 0 {
+		return s[:i], s[i+len(sep):], true
+	}
+	return s, "", false
+}
+
+func (tags *tagSet) parseTags(tgs string) (err error) {
+	for tgs != "" {
+		var key string
+		key, tgs, _ = stringsCut(tgs, "&")
+		if key == "" {
+			continue
+		}
+		key, value, _ := stringsCut(key, "=")
+		key, err1 := url.QueryUnescape(key)
+		if err1 != nil {
+			if err == nil {
+				err = err1
+			}
+			continue
+		}
+		value, err1 = url.QueryUnescape(value)
+		if err1 != nil {
+			if err == nil {
+				err = err1
+			}
+			continue
+		}
+		if err = tags.set(key, value, true); err != nil {
+			return err
+		}
+	}
+	return err
+}
+
+// Parse decodes an HTTP query formatted string into tags, which are validated according to isObject.
+// A query formatted string is like "key1=value1&key2=value2".
+func Parse(s string, isObject bool) (*Tags, error) {
+	tagging := &Tags{
+		TagSet: &tagSet{
+			tagMap:   make(map[string]string),
+			isObject: isObject,
+		},
+	}
+
+	if err := tagging.TagSet.parseTags(s); err != nil {
+		return nil, err
+	}
+
+	return tagging, nil
+}
+
+// ParseObjectTags decodes an HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
+func ParseObjectTags(s string) (*Tags, error) {
+	return Parse(s, true)
+}
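
Taken together, the exported surface of this package converts between maps, XML, and the HTTP-query encoding. A small round-trip sketch (tag keys and values are made up):

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// Build object tags from a map; isObject=true enforces the 10-tag limit.
	t, err := tags.NewTags(map[string]string{"project": "alpha", "env": "dev"}, true)
	if err != nil {
		log.Fatal(err)
	}

	encoded := t.String() // query encoding with sorted keys, e.g. "env=dev&project=alpha"

	// Parse the query-encoded form back into validated tags.
	parsed, err := tags.Parse(encoded, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.ToMap())
}
```
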
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
new file mode 100644
index 000000000..19687e027
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -0,0 +1,408 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2023 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/v7/pkg/encrypt"
+	"github.com/minio/minio-go/v7/pkg/tags"
+)
+
+// expirationDateFormat date format for expiration key in json policy.
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+
+// policyCondition explanation:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html
+//
+// Example:
+//
+//	policyCondition {
+//	    matchType: "$eq",
+//	    key: "$Content-Type",
+//	    value: "image/png",
+//	}
+type policyCondition struct {
+	matchType string
+	condition string
+	value     string
+}
+
+// PostPolicy - Provides strict static type conversion and validation
+// for Amazon S3's POST policy JSON string.
+type PostPolicy struct {
+	// Expiration date and time of the POST policy.
+	expiration time.Time
+	// Collection of different policy conditions.
+	conditions []policyCondition
+	// ContentLengthRange minimum and maximum allowable size for the
+	// uploaded content.
+	contentLengthRange struct {
+		min int64
+		max int64
+	}
+
+	// Post form data.
+	formData map[string]string
+}
+
+// NewPostPolicy - Instantiate new post policy.
+func NewPostPolicy() *PostPolicy {
+	p := &PostPolicy{}
+	p.conditions = make([]policyCondition, 0)
+	p.formData = make(map[string]string)
+	return p
+}
+
+// SetExpires - Sets expiration time for the new policy.
+func (p *PostPolicy) SetExpires(t time.Time) error {
+	if t.IsZero() {
+		return errInvalidArgument("No expiry time set.")
+	}
+	p.expiration = t
+	return nil
+}
+
+// SetKey - Sets an object name for the policy-based upload.
+func (p *PostPolicy) SetKey(key string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Object name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$key",
+		value:     key,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = key
+	return nil
+}
+
+// SetKeyStartsWith - Sets an object name that a policy-based upload
+// can start with.
+// Can use an empty value ("") to allow any key.
+func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error {
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: "$key",
+		value:     keyStartsWith,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["key"] = keyStartsWith
+	return nil
+}
+
+// SetBucket - Sets the bucket to which objects will be uploaded.
+func (p *PostPolicy) SetBucket(bucketName string) error {
+	if strings.TrimSpace(bucketName) == "" || bucketName == "" {
+		return errInvalidArgument("Bucket name is empty.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$bucket",
+		value:     bucketName,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["bucket"] = bucketName
+	return nil
+}
+
+// SetCondition - Sets condition for credentials, date and algorithm
+func (p *PostPolicy) SetCondition(matchType, condition, value string) error {
+	if strings.TrimSpace(value) == "" || value == "" {
+		return errInvalidArgument("No value specified for condition")
+	}
+
+	policyCond := policyCondition{
+		matchType: matchType,
+		condition: "$" + condition,
+		value:     value,
+	}
+	if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" {
+		if err := p.addNewPolicy(policyCond); err != nil {
+			return err
+		}
+		p.formData[condition] = value
+		return nil
+	}
+	return errInvalidArgument("Invalid condition in policy")
+}
+
+// SetTagging - Sets tagging for the object for this policy-based upload.
+func (p *PostPolicy) SetTagging(tagging string) error {
+	if strings.TrimSpace(tagging) == "" || tagging == "" {
+		return errInvalidArgument("No tagging specified.")
+	}
+	_, err := tags.ParseObjectXML(strings.NewReader(tagging))
+	if err != nil {
+		return errors.New("The XML you provided was not well-formed or did not validate against our published schema.") //nolint
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$tagging",
+		value:     tagging,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["tagging"] = tagging
+	return nil
+}
+
+// SetContentType - Sets content-type of the object for this policy-based
+// upload.
+func (p *PostPolicy) SetContentType(contentType string) error {
+	if strings.TrimSpace(contentType) == "" || contentType == "" {
+		return errInvalidArgument("No content type specified.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$Content-Type",
+		value:     contentType,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["Content-Type"] = contentType
+	return nil
+}
+
+// SetContentTypeStartsWith - Sets a prefix that the content-type of the object
+// for this policy-based upload can start with.
+// Can use an empty value ("") to allow any content-type.
+func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error {
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: "$Content-Type",
+		value:     contentTypeStartsWith,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["Content-Type"] = contentTypeStartsWith
+	return nil
+}
+
+// SetContentDisposition - Sets content-disposition of the object for this policy-based upload.
+func (p *PostPolicy) SetContentDisposition(contentDisposition string) error {
+	if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" {
+		return errInvalidArgument("No content disposition specified.")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$Content-Disposition",
+		value:     contentDisposition,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["Content-Disposition"] = contentDisposition
+	return nil
+}
+
+// SetContentLengthRange - Sets new min and max content-length
+// condition for all incoming uploads.
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
+	if min > max {
+		return errInvalidArgument("Minimum limit is larger than maximum limit.")
+	}
+	if min < 0 {
+		return errInvalidArgument("Minimum limit cannot be negative.")
+	}
+	if max <= 0 {
+		return errInvalidArgument("Maximum limit cannot be non-positive.")
+	}
+	p.contentLengthRange.min = min
+	p.contentLengthRange.max = max
+	return nil
+}
+
+// SetSuccessActionRedirect - Sets the redirect success URL of the object for this policy-based
+// upload.
+func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error {
+	if strings.TrimSpace(redirect) == "" || redirect == "" {
+		return errInvalidArgument("Redirect is empty")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$success_action_redirect",
+		value:     redirect,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["success_action_redirect"] = redirect
+	return nil
+}
+
+// SetSuccessStatusAction - Sets the success status code of the object for this policy-based
+// upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+	if strings.TrimSpace(status) == "" || status == "" {
+		return errInvalidArgument("Status is empty")
+	}
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: "$success_action_status",
+		value:     status,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData["success_action_status"] = status
+	return nil
+}
+
+// SetUserMetadata - Sets user metadata as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key, value string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	if strings.TrimSpace(value) == "" || value == "" {
+		return errInvalidArgument("Value is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-meta-%s", key)
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
+// SetUserMetadataStartsWith - Sets a prefix that the user metadata value for the
+// given key must start with.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
+	if strings.TrimSpace(key) == "" || key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-meta-%s", key)
+	policyCond := policyCondition{
+		matchType: "starts-with",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
+// SetChecksum sets the checksum of the request.
+func (p *PostPolicy) SetChecksum(c Checksum) {
+	if c.IsSet() {
+		p.formData[amzChecksumAlgo] = c.Type.String()
+		p.formData[c.Type.Key()] = c.Encoded()
+	}
+}
+
+// SetEncryption - sets encryption headers for POST API
+func (p *PostPolicy) SetEncryption(sse encrypt.ServerSide) {
+	if sse == nil {
+		return
+	}
+	h := http.Header{}
+	sse.Marshal(h)
+	for k, v := range h {
+		p.formData[k] = v[0]
+	}
+}
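
For orientation, a typical caller chains several of the setters above and then serializes the policy; String() returns the JSON document that base64() later encodes for the POST form. A sketch with placeholder bucket and object names:

```go
package main

import (
	"fmt"
	"log"
	"time"

	minio "github.com/minio/minio-go/v7"
)

func main() {
	policy := minio.NewPostPolicy()
	if err := policy.SetBucket("mybucket"); err != nil {
		log.Fatal(err)
	}
	if err := policy.SetKey("uploads/photo.jpg"); err != nil {
		log.Fatal(err)
	}
	if err := policy.SetExpires(time.Now().UTC().Add(15 * time.Minute)); err != nil {
		log.Fatal(err)
	}
	// Restrict uploads to between 1 byte and 10 MiB.
	if err := policy.SetContentLengthRange(1, 10*1024*1024); err != nil {
		log.Fatal(err)
	}
	fmt.Println(policy.String()) // the policy document as JSON
}
```
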
+// SetUserData - Sets user data as a key/value pair.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserData(key, value string) error {
+	if key == "" {
+		return errInvalidArgument("Key is empty")
+	}
+	if value == "" {
+		return errInvalidArgument("Value is empty")
+	}
+	headerName := fmt.Sprintf("x-amz-%s", key)
+	policyCond := policyCondition{
+		matchType: "eq",
+		condition: fmt.Sprintf("$%s", headerName),
+		value:     value,
+	}
+	if err := p.addNewPolicy(policyCond); err != nil {
+		return err
+	}
+	p.formData[headerName] = value
+	return nil
+}
+
+// addNewPolicy - internal helper to validate adding new policies.
+// Can use starts-with with an empty value ("") to allow any content within a form field.
+func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
+	if policyCond.matchType == "" || policyCond.condition == "" {
+		return errInvalidArgument("Policy fields are empty.")
+	}
+	if policyCond.matchType != "starts-with" && policyCond.value == "" {
+		return errInvalidArgument("Policy value is empty.")
+	}
+	p.conditions = append(p.conditions, policyCond)
+	return nil
+}
+
+// String returns the policy as a JSON-formatted string.
+func (p PostPolicy) String() string {
+	return string(p.marshalJSON())
+}
+
+// marshalJSON - Provides marshaled JSON in bytes.
+func (p PostPolicy) marshalJSON() []byte {
+	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+	var conditionsStr string
+	conditions := []string{}
+	for _, po := range p.conditions {
+		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+	}
+	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+			p.contentLengthRange.min, p.contentLengthRange.max))
+	}
+	if len(conditions) > 0 {
+		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+	}
+	retStr := "{"
+	retStr = retStr + expirationStr + ","
+	retStr += conditionsStr
+	retStr += "}"
+	return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's marshaled JSON.
+func (p PostPolicy) base64() string {
+	return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go
new file mode 100644
index 000000000..bfeea95f3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go
@@ -0,0 +1,69 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+	attemptCh := make(chan int)
+
+	// normalize jitter to the range [0, 1.0]
+	if jitter < NoJitter {
+		jitter = NoJitter
+	}
+	if jitter > MaxJitter {
+		jitter = MaxJitter
+	}
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// 1<<uint(attempt) below could overflow, so limit the value of attempt
+		maxAttempt := 30
+		if attempt > maxAttempt {
+			attempt = maxAttempt
+		}
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		var nextBackoff int
+		for {
+			select {
+			// Attempt starts.
+			case attemptCh <- nextBackoff:
+				nextBackoff++
+			case <-doneCh:
+				// Stop the routine.
+				return
+			}
+			time.Sleep(exponentialBackoffWait(nextBackoff))
+		}
+	}()
+	return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
new file mode 100644
index 000000000..d15eb5901
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -0,0 +1,150 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"crypto/x509"
+	"errors"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 200 * time.Millisecond
+var DefaultRetryUnit = 200 * time.Millisecond
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+var DefaultRetryCap = time.Second
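
Both retry timers implement capped exponential backoff with jitter: sleep = min(cap, unit * 2^attempt), reduced by a random fraction of up to jitter*sleep. A standalone sketch of the same arithmetic using the defaults above (the fixed RNG seed is only for reproducibility):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	unit := 200 * time.Millisecond // DefaultRetryUnit
	cap := time.Second             // DefaultRetryCap
	jitter := 1.0                  // MaxJitter
	rng := rand.New(rand.NewSource(42))

	for attempt := 0; attempt < 5; attempt++ {
		// sleep = min(cap, unit * 2^attempt)
		sleep := unit * time.Duration(1<<uint(attempt))
		if sleep > cap {
			sleep = cap
		}
		// subtract a random fraction of up to jitter*sleep
		sleep -= time.Duration(rng.Float64() * float64(sleep) * jitter)
		fmt.Printf("attempt %d: sleep %v\n", attempt, sleep)
	}
}
```
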
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
+	attemptCh := make(chan int)
+
+	// computes the exponential backoff duration according to
+	// https://www.awsarchitectureblog.com/2015/03/backoff.html
+	exponentialBackoffWait := func(attempt int) time.Duration {
+		// normalize jitter to the range [0, 1.0]
+		if jitter < NoJitter {
+			jitter = NoJitter
+		}
+		if jitter > MaxJitter {
+			jitter = MaxJitter
+		}
+
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
+		}
+		if jitter != NoJitter {
+			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+		}
+		return sleep
+	}
+
+	go func() {
+		defer close(attemptCh)
+		for i := 0; i < maxRetry; i++ {
+			select {
+			case attemptCh <- i + 1:
+			case <-ctx.Done():
+				return
+			}
+
+			select {
+			case <-time.After(exponentialBackoffWait(i)):
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+	return attemptCh
+}
+
+// List of AWS S3 error codes which are retryable.
+var retryableS3Codes = map[string]struct{}{
+	"RequestError":          {},
+	"RequestTimeout":        {},
+	"Throttling":            {},
+	"ThrottlingException":   {},
+	"RequestLimitExceeded":  {},
+	"RequestThrottled":      {},
+	"InternalError":         {},
+	"ExpiredToken":          {},
+	"ExpiredTokenException": {},
+	"SlowDown":              {},
+	// Add more AWS S3 codes here.
+}
+
+// isS3CodeRetryable - is s3 error code retryable.
+func isS3CodeRetryable(s3Code string) (ok bool) {
+	_, ok = retryableS3Codes[s3Code]
+	return ok
+}
+
+// List of HTTP status codes which are retryable.
+var retryableHTTPStatusCodes = map[int]struct{}{
+	429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet
+	499: {}, // client closed request, retry. A non-standard status code introduced by nginx.
+	http.StatusInternalServerError: {},
+	http.StatusBadGateway:          {},
+	http.StatusServiceUnavailable:  {},
+	http.StatusGatewayTimeout:      {},
+	520: {}, // It is used by Cloudflare as a catch-all response for when the origin server sends something unexpected.
+	// Add more HTTP status codes here.
+}
+
+// isHTTPStatusRetryable - is HTTP error code retryable.
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
+	_, ok = retryableHTTPStatusCodes[httpStatusCode]
+	return ok
+}
+
+// For now, all http Do() requests are retryable except some well-defined errors.
+func isRequestErrorRetryable(ctx context.Context, err error) bool {
+	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+		// Retry if internal timeout in the HTTP call.
+		return ctx.Err() == nil
+	}
+	if ue, ok := err.(*url.Error); ok {
+		e := ue.Unwrap()
+		switch e.(type) {
+		// x509: certificate signed by unknown authority
+		case x509.UnknownAuthorityError:
+			return false
+		}
+		switch e.Error() {
+		case "http: server gave HTTP response to HTTPS client":
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
new file mode 100644
index 000000000..01cee8a19
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/s3-endpoints.go
@@ -0,0 +1,175 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2024 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +type awsS3Endpoint struct { + endpoint string + dualstackEndpoint string +} + +// awsS3EndpointMap Amazon S3 endpoint map. +var awsS3EndpointMap = map[string]awsS3Endpoint{ + "us-east-1": { + "s3.us-east-1.amazonaws.com", + "s3.dualstack.us-east-1.amazonaws.com", + }, + "us-east-2": { + "s3.us-east-2.amazonaws.com", + "s3.dualstack.us-east-2.amazonaws.com", + }, + "us-west-2": { + "s3.us-west-2.amazonaws.com", + "s3.dualstack.us-west-2.amazonaws.com", + }, + "us-west-1": { + "s3.us-west-1.amazonaws.com", + "s3.dualstack.us-west-1.amazonaws.com", + }, + "ca-central-1": { + "s3.ca-central-1.amazonaws.com", + "s3.dualstack.ca-central-1.amazonaws.com", + }, + "ca-west-1": { + "s3.ca-west-1.amazonaws.com", + "s3.dualstack.ca-west-1.amazonaws.com", + }, + "eu-west-1": { + "s3.eu-west-1.amazonaws.com", + "s3.dualstack.eu-west-1.amazonaws.com", + }, + "eu-west-2": { + "s3.eu-west-2.amazonaws.com", + "s3.dualstack.eu-west-2.amazonaws.com", + }, + "eu-west-3": { + "s3.eu-west-3.amazonaws.com", + "s3.dualstack.eu-west-3.amazonaws.com", + }, + "eu-central-1": { + "s3.eu-central-1.amazonaws.com", + "s3.dualstack.eu-central-1.amazonaws.com", + }, + "eu-central-2": { + "s3.eu-central-2.amazonaws.com", + "s3.dualstack.eu-central-2.amazonaws.com", + }, + "eu-north-1": { + "s3.eu-north-1.amazonaws.com", + "s3.dualstack.eu-north-1.amazonaws.com", + }, + "eu-south-1": { + "s3.eu-south-1.amazonaws.com", + "s3.dualstack.eu-south-1.amazonaws.com", + }, + "eu-south-2": { + "s3.eu-south-2.amazonaws.com", + "s3.dualstack.eu-south-2.amazonaws.com", + }, + "ap-east-1": { + "s3.ap-east-1.amazonaws.com", + "s3.dualstack.ap-east-1.amazonaws.com", + }, + "ap-south-1": { + "s3.ap-south-1.amazonaws.com", + "s3.dualstack.ap-south-1.amazonaws.com", + }, + "ap-south-2": { + "s3.ap-south-2.amazonaws.com", + "s3.dualstack.ap-south-2.amazonaws.com", + }, + "ap-southeast-1": { + "s3.ap-southeast-1.amazonaws.com", + "s3.dualstack.ap-southeast-1.amazonaws.com", + }, + "ap-southeast-2": { + "s3.ap-southeast-2.amazonaws.com", + "s3.dualstack.ap-southeast-2.amazonaws.com", + }, + "ap-southeast-3": { + "s3.ap-southeast-3.amazonaws.com", + "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + "ap-southeast-4": { + "s3.ap-southeast-4.amazonaws.com", + "s3.dualstack.ap-southeast-4.amazonaws.com", + }, + "ap-northeast-1": { + "s3.ap-northeast-1.amazonaws.com", + "s3.dualstack.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": { + "s3.ap-northeast-2.amazonaws.com", + "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + "ap-northeast-3": { + "s3.ap-northeast-3.amazonaws.com", + "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + "af-south-1": { + "s3.af-south-1.amazonaws.com", + "s3.dualstack.af-south-1.amazonaws.com", + }, + "me-central-1": { + "s3.me-central-1.amazonaws.com", + "s3.dualstack.me-central-1.amazonaws.com", + }, + "me-south-1": { + "s3.me-south-1.amazonaws.com", + "s3.dualstack.me-south-1.amazonaws.com", + }, + "sa-east-1": { + "s3.sa-east-1.amazonaws.com", + "s3.dualstack.sa-east-1.amazonaws.com", + }, + "us-gov-west-1": { + "s3.us-gov-west-1.amazonaws.com", + 
"s3.dualstack.us-gov-west-1.amazonaws.com", + }, + "us-gov-east-1": { + "s3.us-gov-east-1.amazonaws.com", + "s3.dualstack.us-gov-east-1.amazonaws.com", + }, + "cn-north-1": { + "s3.cn-north-1.amazonaws.com.cn", + "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + "cn-northwest-1": { + "s3.cn-northwest-1.amazonaws.com.cn", + "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + "il-central-1": { + "s3.il-central-1.amazonaws.com", + "s3.dualstack.il-central-1.amazonaws.com", + }, +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string, useDualstack bool) (endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.us-east-1.amazonaws.com' endpoint. + if useDualstack { + return "s3.dualstack.us-east-1.amazonaws.com" + } + return "s3.us-east-1.amazonaws.com" + } + if useDualstack { + return s3Endpoint.dualstackEndpoint + } + return s3Endpoint.endpoint +} diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go new file mode 100644 index 000000000..f7fad19f6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/s3-error.go @@ -0,0 +1,62 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. 
+	"NotImplemented":                    "A header you provided implies functionality that is not implemented",
+	"PreconditionFailed":                "At least one of the pre-conditions you specified did not hold",
+	"RequestTimeTooSkewed":              "The difference between the request time and the server's time is too large.",
+	"SignatureDoesNotMatch":             "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+	"MethodNotAllowed":                  "The specified method is not allowed against this resource.",
+	"InvalidPart":                       "One or more of the specified parts could not be found.",
+	"InvalidPartOrder":                  "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+	"InvalidObjectState":                "The operation is not valid for the current state of the object.",
+	"AuthorizationHeaderMalformed":      "The authorization header is malformed; the region is wrong.",
+	"MalformedPOSTRequest":              "The body of your POST request is not well-formed multipart/form-data.",
+	"BucketNotEmpty":                    "The bucket you tried to delete is not empty",
+	"AllAccessDisabled":                 "All access to this bucket has been disabled.",
+	"MalformedPolicy":                   "Policy has invalid resource.",
+	"MissingFields":                     "Missing fields in request.",
+	"AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+	"MalformedDate":                     "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+	"BucketAlreadyOwnedByYou":           "Your previous request to create the named bucket succeeded and you already own it.",
+	"InvalidDuration":                   "Duration provided in the request is invalid.",
+	"XAmzContentSHA256Mismatch":         "The provided 'x-amz-content-sha256' header does not match what was computed.",
+	"NoSuchCORSConfiguration":           "The specified bucket does not have a CORS configuration.",
+	// Add new API errors here.
+}
diff --git a/vendor/github.com/minio/minio-go/v7/transport.go b/vendor/github.com/minio/minio-go/v7/transport.go
new file mode 100644
index 000000000..1bff66462
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/transport.go
@@ -0,0 +1,83 @@
+//go:build go1.7 || go1.8
+// +build go1.7 go1.8
+
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017-2018 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net"
+	"net/http"
+	"os"
+	"time"
+)
+
+// mustGetSystemCertPool - returns system CAs or an empty pool in case of error (or on Windows).
+func mustGetSystemCertPool() *x509.CertPool {
+	pool, err := x509.SystemCertPool()
+	if err != nil {
+		return x509.NewCertPool()
+	}
+	return pool
+}
+
+// DefaultTransport - this default transport is similar to
+// http.DefaultTransport but with the additional param DisableCompression
+// set to true, to avoid decompressing content with 'gzip' encoding.
+var DefaultTransport = func(secure bool) (*http.Transport, error) {
+	tr := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		MaxIdleConns:          256,
+		MaxIdleConnsPerHost:   16,
+		ResponseHeaderTimeout: time.Minute,
+		IdleConnTimeout:       time.Minute,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 10 * time.Second,
+		// Set this value so that the underlying transport round-tripper
+		// doesn't try to auto decode the body of objects with
+		// content-encoding set to `gzip`.
+		//
+		// Refer:
+		//    https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+		DisableCompression: true,
+	}
+
+	if secure {
+		tr.TLSClientConfig = &tls.Config{
+			// Can't use SSLv3 because of POODLE and BEAST
+			// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+			// Can't use TLSv1.1 because of RC4 cipher usage
+			MinVersion: tls.VersionTLS12,
+		}
+		if f := os.Getenv("SSL_CERT_FILE"); f != "" {
+			rootCAs := mustGetSystemCertPool()
+			data, err := os.ReadFile(f)
+			if err == nil {
+				rootCAs.AppendCertsFromPEM(data)
+			}
+			tr.TLSClientConfig.RootCAs = rootCAs
+		}
+	}
+	return tr, nil
+}
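
Since DefaultTransport is an exported factory, callers can tweak the returned transport before handing it to the client constructor via the minio-go v7 Options.Transport field. A sketch (endpoint and credentials are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	tr, err := minio.DefaultTransport(true)
	if err != nil {
		log.Fatal(err)
	}
	tr.MaxIdleConnsPerHost = 32 // example tweak on top of the defaults

	client, err := minio.New("play.min.io", &minio.Options{
		Creds:     credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:    true,
		Transport: tr,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual
}
```
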
diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go
new file mode 100644
index 000000000..a5beb371f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/utils.go
@@ -0,0 +1,700 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"crypto/md5"
+	"crypto/sha256"
+	"crypto/tls"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	md5simd "github.com/minio/md5-simd"
+	"github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+func trimEtag(etag string) string {
+	etag = strings.TrimPrefix(etag, "\"")
+	return strings.TrimSuffix(etag, "\"")
+}
+
+var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`)
+
+func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) {
+	if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 {
+		expTime, err := parseRFC7231Time(matches[1])
+		if err != nil {
+			return time.Time{}, ""
+		}
+		return expTime, matches[2]
+	}
+	return time.Time{}, ""
+}
+
+var restoreRegex = regexp.MustCompile(`ongoing-request="(.*?)"(, expiry-date="(.*?)")?`)
+
+func amzRestoreToStruct(restore string) (ongoing bool, expTime time.Time, err error) {
+	matches := restoreRegex.FindStringSubmatch(restore)
+	if len(matches) != 4 {
+		return false, time.Time{}, errors.New("unexpected restore header")
+	}
+	ongoing, err = strconv.ParseBool(matches[1])
+	if err != nil {
+		return false, time.Time{}, err
+	}
+	if matches[3] != "" {
+		expTime, err = parseRFC7231Time(matches[3])
+		if err != nil {
+			return false, time.Time{}, err
+		}
+	}
+	return
+}
+
+// xmlDecoder provides the decoded value from xml.
+func xmlDecoder(body io.Reader, v interface{}) error {
+	d := xml.NewDecoder(body)
+	return d.Decode(v)
+}
+
+// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
+func sum256Hex(data []byte) string {
+	hash := newSHA256Hasher()
+	defer hash.Close()
+	hash.Write(data)
+	return hex.EncodeToString(hash.Sum(nil))
+}
+
+// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
+func sumMD5Base64(data []byte) string {
+	hash := newMd5Hasher()
+	defer hash.Close()
+	hash.Write(data)
+	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
+}
+
+// getEndpointURL - construct a new endpoint.
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
+	// If secure is false, use 'http' scheme.
+	scheme := "https"
+	if !secure {
+		scheme = "http"
+	}
+
+	// Construct a secured endpoint URL.
+	endpointURLStr := scheme + "://" + endpoint
+	endpointURL, err := url.Parse(endpointURLStr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate incoming endpoint URL.
+	if err := isValidEndpointURL(*endpointURL); err != nil {
+		return nil, err
+	}
+	return endpointURL, nil
+}
+
+// closeResponse closes a non-nil response with any response Body;
+// a convenient wrapper to drain any remaining data on the response body.
+//
+// Subsequently this allows golang http RoundTripper
+// to re-use the same connection for future requests.
+func closeResponse(resp *http.Response) {
+	// Callers should close resp.Body when done reading from it.
+	// If resp.Body is not closed, the Client's underlying RoundTripper
+	// (typically Transport) may not be able to re-use a persistent TCP
+	// connection to the server for a subsequent "keep-alive" request.
+	if resp != nil && resp.Body != nil {
+		// Drain any remaining Body and then close the connection.
+		// Without this, the same connection cannot be re-used
+		// for future requests.
+ // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } +} + +var ( + // Hex encoded string of nil sha256sum bytes. + emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sentinel URL is the default url value which is invalid. + sentinelURL = url.URL{} +) + +// Verify if input endpoint URL is valid. +func isValidEndpointURL(endpointURL url.URL) error { + if endpointURL == sentinelURL { + return errInvalidArgument("Endpoint url cannot be empty.") + } + if endpointURL.Path != "/" && endpointURL.Path != "" { + return errInvalidArgument("Endpoint url cannot have fully qualified paths.") + } + host := endpointURL.Hostname() + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { + msg := "Endpoint: " + endpointURL.Host + " does not follow ip address or domain name standards." + return errInvalidArgument(msg) + } + + if strings.Contains(host, ".s3.amazonaws.com") { + if !s3utils.IsAmazonEndpoint(endpointURL) { + return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") + } + } + if strings.Contains(host, ".googleapis.com") { + if !s3utils.IsGoogleEndpoint(endpointURL) { + return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") + } + } + return nil +} + +// Verify if input expires value is valid. +func isValidExpiry(expires time.Duration) error { + expireSeconds := int64(expires / time.Second) + if expireSeconds < 1 { + return errInvalidArgument("Expires cannot be lesser than 1 second.") + } + if expireSeconds > 604800 { + return errInvalidArgument("Expires cannot be greater than 7 days.") + } + return nil +} + +// Extract only necessary metadata header key/values by +// filtering them out with a list of custom header keys. +func extractObjMetadata(header http.Header) http.Header { + preserveKeys := []string{ + "Content-Type", + "Cache-Control", + "Content-Encoding", + "Content-Language", + "Content-Disposition", + "X-Amz-Storage-Class", + "X-Amz-Object-Lock-Mode", + "X-Amz-Object-Lock-Retain-Until-Date", + "X-Amz-Object-Lock-Legal-Hold", + "X-Amz-Website-Redirect-Location", + "X-Amz-Server-Side-Encryption", + "X-Amz-Tagging-Count", + "X-Amz-Meta-", + // Add new headers to be preserved. + // if you add new headers here, please extend + // PutObjectOptions{} to preserve them + // upon upload as well. + } + filteredHeader := make(http.Header) + for k, v := range header { + var found bool + for _, prefix := range preserveKeys { + if !strings.HasPrefix(k, prefix) { + continue + } + found = true + break + } + if found { + filteredHeader[k] = v + } + } + return filteredHeader +} + +const ( + // RFC 7231#section-7.1.1.1 timetamp format. 
e.g Tue, 29 Apr 2014 18:30:38 GMT + rfc822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" +) + +func parseTime(t string, formats ...string) (time.Time, error) { + for _, format := range formats { + tt, err := time.Parse(format, t) + if err == nil { + return tt, nil + } + } + return time.Time{}, fmt.Errorf("unable to parse %s in any of the input formats: %s", t, formats) +} + +func parseRFC7231Time(lastModified string) (time.Time, error) { + return parseTime(lastModified, rfc822TimeFormat, rfc822TimeFormatSingleDigitDay, rfc822TimeFormatSingleDigitDayTwoDigitYear) +} + +// ToObjectInfo converts http header values into ObjectInfo type, +// extracts metadata and fills in all the necessary fields in ObjectInfo. +func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, error) { + var err error + // Trim off the odd double quotes from ETag in the beginning and end. + etag := trimEtag(h.Get("ETag")) + + // Parse content length is exists + var size int64 = -1 + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + size, err = strconv.ParseInt(contentLengthStr, 10, 64) + if err != nil { + // Content-Length is not valid + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + } + + // Parse Last-Modified has http time format. + mtime, err := parseRFC7231Time(h.Get("Last-Modified")) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), + BucketName: bucketName, + Key: objectName, + RequestID: h.Get("x-amz-request-id"), + HostID: h.Get("x-amz-id-2"), + Region: h.Get("x-amz-bucket-region"), + } + } + + // Fetch content type if any present. 
+	contentType := strings.TrimSpace(h.Get("Content-Type"))
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+
+	expiryStr := h.Get("Expires")
+	var expiry time.Time
+	if expiryStr != "" {
+		expiry, err = parseRFC7231Time(expiryStr)
+		if err != nil {
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    fmt.Sprintf("'Expiry' is not in supported format: %v", err),
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  h.Get("x-amz-request-id"),
+				HostID:     h.Get("x-amz-id-2"),
+				Region:     h.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	metadata := extractObjMetadata(h)
+	userMetadata := make(map[string]string)
+	for k, v := range metadata {
+		if strings.HasPrefix(k, "X-Amz-Meta-") {
+			userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0]
+		}
+	}
+	userTags := s3utils.TagDecode(h.Get(amzTaggingHeader))
+
+	var tagCount int
+	if count := h.Get(amzTaggingCount); count != "" {
+		tagCount, err = strconv.Atoi(count)
+		if err != nil {
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err),
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  h.Get("x-amz-request-id"),
+				HostID:     h.Get("x-amz-id-2"),
+				Region:     h.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	// Nil if not found
+	var restore *RestoreInfo
+	if restoreHdr := h.Get(amzRestore); restoreHdr != "" {
+		ongoing, expTime, err := amzRestoreToStruct(restoreHdr)
+		if err != nil {
+			return ObjectInfo{}, err
+		}
+		restore = &RestoreInfo{OngoingRestore: ongoing, ExpiryTime: expTime}
+	}
+
+	// extract lifecycle expiry date and rule ID
+	expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration))
+
+	deleteMarker := h.Get(amzDeleteMarker) == "true"
+
+	// Save object metadata info.
+	return ObjectInfo{
+		ETag:              etag,
+		Key:               objectName,
+		Size:              size,
+		LastModified:      mtime,
+		ContentType:       contentType,
+		Expires:           expiry,
+		VersionID:         h.Get(amzVersionID),
+		IsDeleteMarker:    deleteMarker,
+		ReplicationStatus: h.Get(amzReplicationStatus),
+		Expiration:        expTime,
+		ExpirationRuleID:  ruleID,
+		// Extract only the relevant header keys describing the object.
+		// The following function filters out a standard set of keys
+		// which are not part of object metadata.
+		Metadata:     metadata,
+		UserMetadata: userMetadata,
+		UserTags:     userTags,
+		UserTagCount: tagCount,
+		Restore:      restore,
+
+		// Checksum values
+		ChecksumCRC32:  h.Get("x-amz-checksum-crc32"),
+		ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"),
+		ChecksumSHA1:   h.Get("x-amz-checksum-sha1"),
+		ChecksumSHA256: h.Get("x-amz-checksum-sha256"),
+	}, nil
+}
+
+var readFull = func(r io.Reader, buf []byte) (n int, err error) {
+	// ReadFull reads exactly len(buf) bytes from r into buf.
+	// It returns the number of bytes copied and an error if
+	// fewer bytes were read. The error is EOF only if no bytes
+	// were read. If an EOF happens after reading some but not
+	// all the bytes, ReadFull returns ErrUnexpectedEOF.
+	// On return, n == len(buf) if and only if err == nil.
+	// If r returns an error having read at least len(buf) bytes,
+	// the error is dropped.
+	for n < len(buf) && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		// Some spurious io.Readers return io.ErrUnexpectedEOF
+		// when nn == 0. This behavior is undocumented, so we
+		// deliberately avoid the io.ReadFull implementation,
+		// which would force custom handling on callers. Instead
+		// we modify the original io.ReadFull logic to treat
+		// io.ErrUnexpectedEOF with nn == 0 as a plain io.EOF.
+		if err == io.ErrUnexpectedEOF && nn == 0 {
+			err = io.EOF
+		}
+		n += nn
+	}
+	if n >= len(buf) {
+		err = nil
+	} else if n > 0 && err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// regCred matches the credential string in an HTTP header.
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regSign matches the signature string in an HTTP header.
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+	if !strings.HasPrefix(origAuth, signV4Algorithm) {
+		// Set a temporary redacted auth
+		return "AWS **REDACTED**:**REDACTED**"
+	}
+
+	// Signature V4 authorization header.
+
+	// Strip out accessKeyID from:
+	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+	// Strip out 256-bit signature from: Signature=<256-bit signature>
+	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// getDefaultLocation returns the location based on the input URL `u`.
+// If a region override is provided, the location always defaults to
+// regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+	if regionOverride != "" {
+		return regionOverride
+	}
+	region := s3utils.GetRegionFromURL(u)
+	if region == "" {
+		region = "us-east-1"
+	}
+	return region
+}
+
+var supportedHeaders = map[string]bool{
+	"content-type":                        true,
+	"cache-control":                       true,
+	"content-encoding":                    true,
+	"content-disposition":                 true,
+	"content-language":                    true,
+	"x-amz-website-redirect-location":     true,
+	"x-amz-object-lock-mode":              true,
+	"x-amz-metadata-directive":            true,
+	"x-amz-object-lock-retain-until-date": true,
+	"expires":                             true,
+	"x-amz-replication-status":            true,
+	// Add more supported headers here.
+	// Must be lower case.
+}
+
+// isStorageClassHeader returns true if the header is a supported storage class header
+func isStorageClassHeader(headerKey string) bool {
+	return strings.EqualFold(amzStorageClass, headerKey)
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header
+func isStandardHeader(headerKey string) bool {
+	return supportedHeaders[strings.ToLower(headerKey)]
+}
+
+// sseHeaders is list of server side encryption headers
+var sseHeaders = map[string]bool{
+	"x-amz-server-side-encryption":                    true,
+	"x-amz-server-side-encryption-aws-kms-key-id":     true,
+	"x-amz-server-side-encryption-context":            true,
+	"x-amz-server-side-encryption-customer-algorithm": true,
+	"x-amz-server-side-encryption-customer-key":       true,
+	"x-amz-server-side-encryption-customer-key-md5":   true,
+	// Add more supported headers here.
+	// Must be lower case.
+}
+
+// isSSEHeader returns true if header is a server side encryption header.
+func isSSEHeader(headerKey string) bool {
+	return sseHeaders[strings.ToLower(headerKey)]
+}
+
+// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+
+	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) || strings.HasPrefix(key, "x-amz-checksum-")
+}
+
+// isMinioHeader returns true if header is x-minio- header.
+func isMinioHeader(headerKey string) bool {
+	return strings.HasPrefix(strings.ToLower(headerKey), "x-minio-")
+}
+
+// supportedQueryValues is a list of query strings that can be passed in when using GetObject.
+var supportedQueryValues = map[string]bool{
+	"attributes":                   true,
+	"partNumber":                   true,
+	"versionId":                    true,
+	"response-cache-control":       true,
+	"response-content-disposition": true,
+	"response-content-encoding":    true,
+	"response-content-language":    true,
+	"response-content-type":        true,
+	"response-expires":             true,
+}
+
+// isStandardQueryValue will return true when the passed in query string parameter is supported rather than customized.
+func isStandardQueryValue(qsKey string) bool {
+	return supportedQueryValues[qsKey]
+}
+
+// Per documentation at https://docs.aws.amazon.com/AmazonS3/latest/userguide/LogFormat.html#LogFormatCustom, the
+// set of query params starting with "x-" are ignored by S3.
+const allowedCustomQueryPrefix = "x-"
+
+func isCustomQueryValue(qsKey string) bool {
+	return strings.HasPrefix(qsKey, allowedCustomQueryPrefix)
+}
+
+var (
+	md5Pool    = sync.Pool{New: func() interface{} { return md5.New() }}
+	sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }}
+)
+
+func newMd5Hasher() md5simd.Hasher {
+	return &hashWrapper{Hash: md5Pool.Get().(hash.Hash), isMD5: true}
+}
+
+func newSHA256Hasher() md5simd.Hasher {
+	return &hashWrapper{Hash: sha256Pool.Get().(hash.Hash), isSHA256: true}
+}
+
+// hashWrapper implements the md5simd.Hasher interface.
+type hashWrapper struct {
+	hash.Hash
+	isMD5    bool
+	isSHA256 bool
+}
+
+// Close will put the hasher back into the pool.
+func (m *hashWrapper) Close() {
+	if m.isMD5 && m.Hash != nil {
+		m.Reset()
+		md5Pool.Put(m.Hash)
+	}
+	if m.isSHA256 && m.Hash != nil {
+		m.Reset()
+		sha256Pool.Put(m.Hash)
+	}
+	m.Hash = nil
+}
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+	letterIdxBits = 6                    // 6 bits to represent a letter index
+	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
+)
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+	b := make([]byte, n)
+	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
+	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIdxMax
+		}
+		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+			b[i] = letterBytes[idx]
+			i--
+		}
+		cache >>= letterIdxBits
+		remain--
+	}
+	return prefix + string(b[0:30-len(prefix)])
+}
+
+// IsNetworkOrHostDown - if there was a network error or if the host is down.
+// expectTimeouts indicates that *context* timeouts are expected and do not
+// indicate a downed host. Other timeouts still return down.
+func IsNetworkOrHostDown(err error, expectTimeouts bool) bool {
+	if err == nil {
+		return false
+	}
+
+	if errors.Is(err, context.Canceled) {
+		return false
+	}
+
+	if expectTimeouts && errors.Is(err, context.DeadlineExceeded) {
+		return false
+	}
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return true
+	}
+
+	// We need to figure out if the error is either a timeout
+	// or a non-temporary error.
+	urlErr := &url.Error{}
+	if errors.As(err, &urlErr) {
+		switch urlErr.Err.(type) {
+		case *net.DNSError, *net.OpError, net.UnknownNetworkError, *tls.CertificateVerificationError:
+			return true
+		}
+	}
+	var e net.Error
+	if errors.As(err, &e) {
+		if e.Timeout() {
+			return true
+		}
+	}
+
+	// Fallback to other mechanisms.
+	switch {
+	case strings.Contains(err.Error(), "Connection closed by foreign host"):
+		return true
+	case strings.Contains(err.Error(), "TLS handshake timeout"):
+		// If error is - tlsHandshakeTimeoutError.
+		return true
+	case strings.Contains(err.Error(), "i/o timeout"):
+		// If error is - tcp timeoutError.
+ return true + case strings.Contains(err.Error(), "connection timed out"): + // If err is a net.Dial timeout. + return true + case strings.Contains(err.Error(), "connection refused"): + // If err is connection refused + return true + case strings.Contains(err.Error(), "server gave HTTP response to HTTPS client"): + // If err is TLS client is used with HTTP server + return true + case strings.Contains(err.Error(), "Client sent an HTTP request to an HTTPS server"): + // If err is plain-text Client is used with a HTTPS server + return true + case strings.Contains(strings.ToLower(err.Error()), "503 service unavailable"): + // Denial errors + return true + } + return false +} + +// newHashReaderWrapper will hash all reads done through r. +// When r returns io.EOF the done function will be called with the sum. +func newHashReaderWrapper(r io.Reader, h hash.Hash, done func(hash []byte)) *hashReaderWrapper { + return &hashReaderWrapper{ + r: r, + h: h, + done: done, + } +} + +type hashReaderWrapper struct { + r io.Reader + h hash.Hash + done func(hash []byte) +} + +// Read implements the io.Reader interface. +func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { + n, err = h.r.Read(p) + if n > 0 { + n2, err := h.h.Write(p[:n]) + if err != nil { + return 0, err + } + if n2 != n { + return 0, io.ErrShortWrite + } + } + if err == io.EOF { + // Call back + h.done(h.h.Sum(nil)) + } + return n, err +} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 000000000..c75823490 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,96 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. 
[GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..0018dc7d9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. 
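+
+As a quick, illustrative sketch (the `Person` struct and the map literal below
+are invented for this example and are not part of the upstream docs), typical
+usage looks like:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// Person is a hypothetical target struct for this sketch.
+type Person struct {
+	Name string
+	Age  int
+}
+
+func main() {
+	// A generic map, e.g. the result of unmarshalling JSON whose
+	// shape wasn't known ahead of time.
+	input := map[string]interface{}{
+		"name": "Mitchell",
+		"age":  91,
+	}
+
+	// Decode matches map keys to struct fields case-insensitively
+	// and fills in the target struct.
+	var p Person
+	if err := mapstructure.Decode(input, &p); err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", p) // {Name:Mitchell Age:91}
+}
+```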
+ +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..3a754ca72 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. 
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..1efb22ac3 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1540 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. 
+// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. 
+// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). 
+ ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. 
+func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. 
+		if d.config.ZeroFields {
+			outVal.Set(reflect.Zero(outVal.Type()))
+
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+		}
+		return nil
+	}
+
+	if !inputVal.IsValid() {
+		// If the input value is invalid, then we just set the value
+		// to be the zero value.
+		outVal.Set(reflect.Zero(outVal.Type()))
+		if d.config.Metadata != nil && name != "" {
+			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+		}
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the input.
+		var err error
+		input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	outputKind := getKind(outVal)
+	addMetaKey := true
+	switch outputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		addMetaKey, err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if addMetaKey && d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		elem := val.Elem()
+
+		// If we can't address this element, then it's not writable. Instead,
+		// we make a copy of the value (which is a pointer and therefore
+		// writable), decode into that, and replace the whole value.
+		copied := false
+		if !elem.CanAddr() {
+			copied = true
+
+			// Make *T
+			copy := reflect.New(elem.Type())
+
+			// *T = elem
+			copy.Elem().Set(elem)
+
+			// Set elem so we decode into it
+			elem = copy
+		}
+
+		// Decode. If we have an error then return. We also return right
+		// away if we're not a copy because that means we decoded directly.
+		if err := d.decode(name, data, elem); err != nil || !copied {
+			return err
+		}
+
+		// If we're a copy, we need to set the final result
+		val.Set(elem.Elem())
+		return nil
+	}
+
+	dataVal := reflect.ValueOf(data)
+
+	// If the input data is a pointer, and the assigned type is the dereference
+	// of that exact pointer, then indirect it so that we can assign it.
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot 
parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. 
+ v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
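+ // For example, with val of type *int: reflect.New allocates a zeroed int,
+ // the input is decoded into it, and val is then set to point at the result.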
+ valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
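+ // Pre-sizing to dataVal.Len() lets the loop below decode straight into
+ // valSlice.Index(i); the append path only runs when reusing a shorter,
+ // pre-existing slice.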
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. 
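+ // That is: struct -> map[string]interface{} -> struct, reusing
+ // decodeMapFromStruct and decodeStructFromMap below.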
+ + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. 
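+ // Config-level squash only applies to anonymous (embedded) struct fields;
+ // a "squash" tag on the field can also enable it further down.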
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
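+ // The "remain" field is expected to be map-typed (for example
+ // map[string]interface{}), so the leftover keys decode into it directly.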
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go new file mode 100644 index 000000000..c4510a695 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/health.go @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2022 IBM, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package operational + +import ( + "net" + "net/http" + "time" + + "github.com/heptiolabs/healthcheck" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/server" + log "github.com/sirupsen/logrus" +) + +func NewHealthServer(opts *config.Options, isAlive healthcheck.Check, isReady healthcheck.Check) *http.Server { + handler := healthcheck.NewHandler() + address := net.JoinHostPort(opts.Health.Address, opts.Health.Port) + handler.AddLivenessCheck("PipelineCheck", isAlive) + handler.AddReadinessCheck("PipelineCheck", isReady) + + server := server.Default(&http.Server{ + Handler: handler, + Addr: address, + }) + + go func() { + for { + err := server.ListenAndServe() + log.Errorf("http.ListenAndServe error %v", err) + time.Sleep(60 * time.Second) + } + }() + + return server +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go new file mode 100644 index 000000000..d0008aa3f --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/metrics.go @@ -0,0 +1,282 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package operational + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +type MetricDefinition struct { + Name string + Help string + Type metricType + Labels []string +} + +type metricType string + +const TypeCounter metricType = "counter" +const TypeGauge metricType = "gauge" +const TypeHistogram metricType = "histogram" +const TypeSummary metricType = "summary" + +var allMetrics = []MetricDefinition{} + +func DefineMetric(name, help string, t metricType, labels ...string) MetricDefinition { + def := MetricDefinition{ + Name: name, + Help: help, + Type: t, + Labels: labels, + } + allMetrics = append(allMetrics, def) + return def +} + +var ( + recordsWritten = DefineMetric( + "records_written", + "Number of output records written", + TypeCounter, + "stage", + ) + stageInQueueSize = DefineMetric( + "stage_in_queue_size", + "Pipeline stage input queue size (number of elements in queue)", + TypeGauge, + "stage", + ) + stageOutQueueSize = DefineMetric( + "stage_out_queue_size", + "Pipeline stage output queue size (number of elements in queue)", + TypeGauge, + "stage", + ) + stageDuration = DefineMetric( + "stage_duration_ms", + "Pipeline stage duration in milliseconds", + TypeHistogram, + "stage", + ) + indexerHit = DefineMetric( + "secondary_network_indexer_hit", + "Counter of hits per secondary network index for Kubernetes enrichment", + TypeCounter, + "kind", + "namespace", + "network", + "warning", + ) +) + +func (def *MetricDefinition) mapLabels(labels []string) prometheus.Labels { + if len(labels) != len(def.Labels) { + logrus.Errorf("Could not map labels, length differ in def %s [%v / %v]", def.Name, def.Labels, labels) + } + labelsMap := prometheus.Labels{} + for i, label := range labels { + labelsMap[def.Labels[i]] = label + } + return labelsMap +} + +func verifyMetricType(def *MetricDefinition, t metricType) { + if def.Type != t { + logrus.Panicf("operational metric %q is of type %q but is being registered as %q", def.Name, def.Type, t) + } +} + +type Metrics struct { + settings *config.MetricsSettings + stageDurationHisto *prometheus.HistogramVec +} + +func NewMetrics(settings *config.MetricsSettings) *Metrics { + return &Metrics{settings: settings} +} + +// register will register against the default registry. 
May panic or not depending on settings +func (o *Metrics) register(c prometheus.Collector, name string) { + err := prometheus.DefaultRegisterer.Register(c) + if err != nil { + var castErr prometheus.AlreadyRegisteredError + if errors.As(err, &castErr) { + logrus.Warningf("metrics registration error [%s]: %v", name, err) + } else if o.settings.NoPanic { + logrus.Errorf("metrics registration error [%s]: %v", name, err) + } else { + logrus.Panicf("metrics registration error [%s]: %v", name, err) + } + } +} + +func (o *Metrics) NewCounter(def *MetricDefinition, labels ...string) prometheus.Counter { + verifyMetricType(def, TypeCounter) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewCounter(prometheus.CounterOpts{ + Name: fullName, + Help: def.Help, + ConstLabels: def.mapLabels(labels), + }) + o.register(c, fullName) + return c +} + +func (o *Metrics) NewCounterVec(def *MetricDefinition) *prometheus.CounterVec { + verifyMetricType(def, TypeCounter) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: fullName, + Help: def.Help, + }, def.Labels) + o.register(c, fullName) + return c +} + +func (o *Metrics) NewGauge(def *MetricDefinition, labels ...string) prometheus.Gauge { + verifyMetricType(def, TypeGauge) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: fullName, + Help: def.Help, + ConstLabels: def.mapLabels(labels), + }) + o.register(c, fullName) + return c +} + +func (o *Metrics) NewGaugeVec(def *MetricDefinition) *prometheus.GaugeVec { + verifyMetricType(def, TypeGauge) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: fullName, + Help: def.Help, + }, def.Labels) + o.register(c, fullName) + return c +} + +func (o *Metrics) NewGaugeFunc(def *MetricDefinition, f func() float64, labels ...string) { + verifyMetricType(def, TypeGauge) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: fullName, + Help: def.Help, + ConstLabels: def.mapLabels(labels), + }, f) + o.register(c, fullName) +} + +func (o *Metrics) NewHistogram(def *MetricDefinition, buckets []float64, labels ...string) prometheus.Histogram { + verifyMetricType(def, TypeHistogram) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: fullName, + Help: def.Help, + Buckets: buckets, + ConstLabels: def.mapLabels(labels), + }) + o.register(c, fullName) + return c +} + +func (o *Metrics) NewHistogramVec(def *MetricDefinition, buckets []float64) *prometheus.HistogramVec { + verifyMetricType(def, TypeHistogram) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: fullName, + Help: def.Help, + Buckets: buckets, + }, def.Labels) + o.register(c, fullName) + return c +} + +func (o *Metrics) NewSummary(def *MetricDefinition, labels ...string) prometheus.Summary { + verifyMetricType(def, TypeSummary) + fullName := o.settings.Prefix + def.Name + c := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: fullName, + Help: def.Help, + ConstLabels: def.mapLabels(labels), + // arbitrary objectives for now + Objectives: map[float64]float64{ + 0.5: 0.02, + 0.95: 0.01, + }, + }) + o.register(c, fullName) + return c +} + +func (o *Metrics) CreateRecordsWrittenCounter(stage string) prometheus.Counter { + return o.NewCounter(&recordsWritten, stage) +} + +func (o *Metrics) CreateInQueueSizeGauge(stage string, f func() int) { + 
o.NewGaugeFunc(&stageInQueueSize, func() float64 { return float64(f()) }, stage) +} + +func (o *Metrics) CreateOutQueueSizeGauge(stage string, f func() int) { + o.NewGaugeFunc(&stageOutQueueSize, func() float64 { return float64(f()) }, stage) +} + +func (o *Metrics) GetOrCreateStageDurationHisto() *prometheus.HistogramVec { + if o.stageDurationHisto == nil { + o.stageDurationHisto = o.NewHistogramVec(&stageDuration, []float64{.001, .01, .1, 1, 10, 100, 1000, 10000}) + } + return o.stageDurationHisto +} + +func (o *Metrics) CreateIndexerHitCounter() *prometheus.CounterVec { + return o.NewCounterVec(&indexerHit) +} + +func GetDocumentation() string { + doc := "" + sort.Slice(allMetrics, func(i, j int) bool { + return allMetrics[i].Name < allMetrics[j].Name + }) + for _, opts := range allMetrics { + doc += fmt.Sprintf( + ` +### %s +| **Name** | %s | +|:---|:---| +| **Description** | %s | +| **Type** | %s | +| **Labels** | %s | + +`, + opts.Name, + opts.Name, + opts.Help, + opts.Type, + strings.Join(opts.Labels, ", "), + ) + } + + return doc +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/timer.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/timer.go new file mode 100644 index 000000000..6862326b8 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/operational/timer.go @@ -0,0 +1,51 @@ +package operational + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type Timer struct { + startTime *time.Time + observer prometheus.Observer +} + +func NewTimer(o prometheus.Observer) *Timer { + return &Timer{ + observer: o, + } +} + +// Start starts or restarts the timer, regardless if a previous call occurred without being observed first +func (t *Timer) Start() time.Time { + now := time.Now() + t.startTime = &now + return now +} + +// StartOnce starts the timer just the first time. Subsequent calls will be ignored, +// until the timer is observed +func (t *Timer) StartOnce() time.Time { + if t.startTime == nil { + now := time.Now() + t.startTime = &now + } + return *t.startTime +} + +func (t *Timer) ObserveMilliseconds() { + t.observe(func(d time.Duration) float64 { return float64(d.Milliseconds()) }) +} + +func (t *Timer) ObserveSeconds() { + t.observe(time.Duration.Seconds) +} + +func (t *Timer) observe(f func(d time.Duration) float64) { + if t.startTime != nil { + duration := time.Since(*t.startTime) + t.observer.Observe(f(duration)) + t.startTime = nil + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go new file mode 100644 index 000000000..d85511e61 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode.go @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package decode
+
+import (
+ "fmt"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/netobserv-ebpf-agent/pkg/decode"
+)
+
+type Decoder interface {
+ Decode(in []byte) (config.GenericMap, error)
+}
+
+func GetDecoder(params api.Decoder) (Decoder, error) {
+ switch params.Type {
+ case api.DecoderJSON:
+ return NewDecodeJSON()
+ case api.DecoderProtobuf:
+ return decode.NewProtobuf()
+ }
+ panic(fmt.Sprintf("`decode` type %s not defined", params.Type))
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
new file mode 100644
index 000000000..c3b7481ab
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode/decode_json.go
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package decode
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+//nolint:revive
+type DecodeJSON struct {
+}
+
+// Decode decodes a JSON-encoded input line into a single flow entry
+func (c *DecodeJSON) Decode(line []byte) (config.GenericMap, error) {
+
+ if log.IsLevelEnabled(log.DebugLevel) {
+ log.Debugf("decodeJSON: line = %v", string(line))
+ }
+ var decodedLine map[string]interface{}
+ if err := json.Unmarshal(line, &decodedLine); err != nil {
+ return nil, err
+ }
+ decodedLine2 := make(config.GenericMap, len(decodedLine))
+ // flows directly ingested by flp-transformer won't have this field, so we need to add it
+ // here. If the received line already contains the field, it will be overridden later
+ decodedLine2["TimeReceived"] = time.Now().Unix()
+ for k, v := range decodedLine {
+ if v == nil {
+ continue
+ }
+ decodedLine2[k] = v
+ }
+ return decodedLine2, nil
+}
+
+// NewDecodeJSON creates a new JSON decoder
+func NewDecodeJSON() (Decoder, error) {
+ log.Debugf("entering NewDecodeJSON")
+ return &DecodeJSON{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode.go
new file mode 100644
index 000000000..e754ed1d4
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ log "github.com/sirupsen/logrus"
+)
+
+type encodeNone struct {
+ prevRecord config.GenericMap
+}
+
+type Encoder interface {
+ Encode(in config.GenericMap)
+ Update(config.StageParam)
+}
+
+// Encode stores the flow as the most recent record, without transforming it
+func (t *encodeNone) Encode(in config.GenericMap) {
+ t.prevRecord = in
+}
+
+func (t *encodeNone) Update(_ config.StageParam) {
+ log.Warn("Encode None, update not supported")
+}
+
+// NewEncodeNone creates a new no-op encoder
+func NewEncodeNone() (Encoder, error) {
+ log.Debugf("entering NewEncodeNone")
+ return &encodeNone{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
new file mode 100644
index 000000000..2ead32be6
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_kafka.go
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils"
+ "github.com/prometheus/client_golang/prometheus"
+ kafkago "github.com/segmentio/kafka-go"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+const (
+ defaultReadTimeoutSeconds = int64(10)
+ defaultWriteTimeoutSeconds = int64(10)
+)
+
+type kafkaWriteMessage interface {
+ WriteMessages(ctx context.Context, msgs ...kafkago.Message) error
+}
+
+type encodeKafka struct {
+ kafkaParams api.EncodeKafka
+ kafkaWriter kafkaWriteMessage
+ recordsWritten prometheus.Counter
+}
+
+// Encode writes entries to kafka topic
+func (r *encodeKafka) Encode(entry config.GenericMap) {
+ var entryByteArray []byte
+ var err error
+ entryByteArray, err = json.Marshal(entry)
+ if err != nil {
+ log.Errorf("encodeKafka error: %v", err)
+ return
+ }
+ msg := kafkago.Message{
+ Value: entryByteArray,
+ }
+ err = r.kafkaWriter.WriteMessages(context.Background(), msg)
+ if err != nil {
+ log.Errorf("encodeKafka error: %v", err)
+ } else {
+ r.recordsWritten.Inc()
+ }
+}
+
+func (r *encodeKafka) Update(_ config.StageParam) {
+ log.Warn("Encode Kafka, update not supported")
+}
+
+// NewEncodeKafka creates a new writer to kafka
+func NewEncodeKafka(opMetrics *operational.Metrics, params config.StageParam) (Encoder, error) {
+ log.Debugf("entering NewEncodeKafka")
+ config := api.EncodeKafka{}
+ if params.Encode != nil && params.Encode.Kafka != nil {
+ config = *params.Encode.Kafka
+ }
+
+ var balancer kafkago.Balancer
+ switch config.Balancer {
+ case api.KafkaRoundRobin:
+ balancer = &kafkago.RoundRobin{}
+ case api.KafkaLeastBytes:
+ balancer = &kafkago.LeastBytes{}
+ case api.KafkaHash:
+ balancer = &kafkago.Hash{}
+ case api.KafkaCrc32:
+ balancer = &kafkago.CRC32Balancer{}
+ case api.KafkaMurmur2:
+ balancer = &kafkago.Murmur2Balancer{}
+ default:
+ balancer = nil
+ }
+
+ readTimeoutSecs := defaultReadTimeoutSeconds
+ if config.ReadTimeout != 0 {
+ readTimeoutSecs = config.ReadTimeout
+ }
+
+ writeTimeoutSecs := defaultWriteTimeoutSeconds
+ if config.WriteTimeout != 0 {
+ writeTimeoutSecs = config.WriteTimeout
+ }
+
+ transport := kafkago.Transport{}
+ if config.TLS != nil {
+ log.Infof("Using TLS configuration: %v", config.TLS)
+ tlsConfig, err := config.TLS.Build()
+ if err != nil {
+ return nil, err
+ }
+ transport.TLS = tlsConfig
+ }
+
+ if config.SASL != nil {
+ m, err := utils.SetupSASLMechanism(config.SASL)
+ if err != nil {
+ return nil, err
+ }
+ transport.SASL = m
+ }
+
+ // configure the writer to the kafka server
+ kafkaWriter := kafkago.Writer{
+ Addr: kafkago.TCP(config.Address),
+ Topic: config.Topic,
+ Balancer: balancer,
+ ReadTimeout: time.Duration(readTimeoutSecs) * time.Second,
+ WriteTimeout: time.Duration(writeTimeoutSecs) * time.Second,
+ BatchSize: config.BatchSize,
+ BatchBytes: config.BatchBytes,
+ // Temporary fix; maybe we should implement a batching system:
+ // https://github.com/segmentio/kafka-go/issues/326#issuecomment-519375403
+ BatchTimeout: time.Nanosecond,
+ Transport: &transport,
+ }
+
+ return &encodeKafka{
+ kafkaParams: config,
+ kafkaWriter: &kafkaWriter,
+ recordsWritten: opMetrics.CreateRecordsWrittenCounter(params.Name),
+ }, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
new file mode 100644
index 000000000..dbf974991
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_prom.go
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encode
+
+import (
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/netobserv/flowlogs-pipeline/pkg/api"
+ "github.com/netobserv/flowlogs-pipeline/pkg/config"
+ "github.com/netobserv/flowlogs-pipeline/pkg/operational"
+ "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics"
+ promserver "github.com/netobserv/flowlogs-pipeline/pkg/prometheus"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+var plog = logrus.WithField("component", "encode.Prometheus")
+
+const defaultExpiryTime = time.Duration(2 * time.Minute)
+
+// nolint:revive
+type EncodeProm struct {
+ cfg *api.PromEncode
+ registerer prometheus.Registerer
+ metricCommon *MetricsCommonStruct
+ updateChan chan config.StageParam
+ server *promserver.PromServer
+ regName string
+}
+
+func (e *EncodeProm) Gatherer() prometheus.Gatherer {
+ return e.server
+}
+
+// Encode encodes a metric before being stored; the heavy work is done by the MetricCommonEncode
+func (e *EncodeProm) Encode(metricRecord config.GenericMap) {
+ plog.Tracef("entering Encode. metricRecord = %v", metricRecord)
metricRecord = %v", metricRecord) + e.metricCommon.MetricCommonEncode(e, metricRecord) + e.checkConfUpdate() +} + +func (e *EncodeProm) ProcessCounter(m interface{}, labels map[string]string, value float64) error { + counter := m.(*prometheus.CounterVec) + mm, err := counter.GetMetricWith(labels) + if err != nil { + return err + } + mm.Add(value) + return nil +} + +func (e *EncodeProm) ProcessGauge(m interface{}, labels map[string]string, value float64, _ string) error { + gauge := m.(*prometheus.GaugeVec) + mm, err := gauge.GetMetricWith(labels) + if err != nil { + return err + } + mm.Set(value) + return nil +} + +func (e *EncodeProm) ProcessHist(m interface{}, labels map[string]string, value float64) error { + hist := m.(*prometheus.HistogramVec) + mm, err := hist.GetMetricWith(labels) + if err != nil { + return err + } + mm.Observe(value) + return nil +} + +func (e *EncodeProm) ProcessAggHist(m interface{}, labels map[string]string, values []float64) error { + hist := m.(*prometheus.HistogramVec) + mm, err := hist.GetMetricWith(labels) + if err != nil { + return err + } + for _, v := range values { + mm.Observe(v) + } + return nil +} + +func (e *EncodeProm) GetChacheEntry(entryLabels map[string]string, m interface{}) interface{} { + switch mv := m.(type) { + case *prometheus.CounterVec: + return func() { mv.Delete(entryLabels) } + case *prometheus.GaugeVec: + return func() { mv.Delete(entryLabels) } + case *prometheus.HistogramVec: + return func() { mv.Delete(entryLabels) } + } + return nil +} + +// callback function from lru cleanup +func (e *EncodeProm) Cleanup(cleanupFunc interface{}) { + cleanupFunc.(func())() +} + +func (e *EncodeProm) addCounter(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector { + counter := prometheus.NewCounterVec(prometheus.CounterOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels()) + e.metricCommon.AddCounter(fullMetricName, counter, mInfo) + return counter +} + +func (e *EncodeProm) addGauge(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector { + gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels()) + e.metricCommon.AddGauge(fullMetricName, gauge, mInfo) + return gauge +} + +func (e *EncodeProm) addHistogram(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector { + histogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels()) + e.metricCommon.AddHist(fullMetricName, histogram, mInfo) + return histogram +} + +func (e *EncodeProm) addAgghistogram(fullMetricName string, mInfo *metrics.Preprocessed) prometheus.Collector { + agghistogram := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: fullMetricName, Help: ""}, mInfo.TargetLabels()) + e.metricCommon.AddAggHist(fullMetricName, agghistogram, mInfo) + return agghistogram +} + +func (e *EncodeProm) unregisterMetric(c interface{}) { + if c, ok := c.(prometheus.Collector); ok { + e.registerer.Unregister(c) + } +} + +func (e *EncodeProm) cleanDeletedGeneric(newCfg api.PromEncode, metrics map[string]mInfoStruct) { + for fullName, m := range metrics { + if !strings.HasPrefix(fullName, newCfg.Prefix) { + if c, ok := m.genericMetric.(prometheus.Collector); ok { + e.registerer.Unregister(c) + } + e.unregisterMetric(m.genericMetric) + delete(metrics, fullName) + } + metricName := strings.TrimPrefix(fullName, newCfg.Prefix) + found := false + for i := range newCfg.Metrics { + if metricName == newCfg.Metrics[i].Name { + found = true + 
+ break
+ }
+ }
+ if !found {
+ e.unregisterMetric(m.genericMetric)
+ delete(metrics, fullName)
+ }
+ }
+}
+
+func (e *EncodeProm) cleanDeletedMetrics(newCfg api.PromEncode) {
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.counters)
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.gauges)
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.histos)
+ e.cleanDeletedGeneric(newCfg, e.metricCommon.aggHistos)
+}
+
+// returns true if a registry restart is needed
+func (e *EncodeProm) checkMetricUpdate(prefix string, apiItem *api.MetricsItem, store map[string]mInfoStruct, createMetric func(string, *metrics.Preprocessed) prometheus.Collector) bool {
+ fullMetricName := prefix + apiItem.Name
+ plog.Debugf("Checking metric: %s", fullMetricName)
+ mInfo := metrics.Preprocess(apiItem)
+ if oldMetric, ok := store[fullMetricName]; ok {
+ if !reflect.DeepEqual(mInfo.TargetLabels(), oldMetric.info.TargetLabels()) {
+ plog.Debug("Changes detected in labels")
+ return true
+ }
+ if !reflect.DeepEqual(mInfo.MetricsItem, oldMetric.info.MetricsItem) {
+ plog.Debug("Changes detected: unregistering and replacing")
+ e.unregisterMetric(oldMetric.genericMetric)
+ c := createMetric(fullMetricName, mInfo)
+ err := e.registerer.Register(c)
+ if err != nil {
+ plog.Errorf("error in prometheus.Register: %v", err)
+ }
+ } else {
+ plog.Debug("No changes found")
+ }
+ } else {
+ plog.Debug("New metric")
+ c := createMetric(fullMetricName, mInfo)
+ err := e.registerer.Register(c)
+ if err != nil {
+ plog.Errorf("error in prometheus.Register: %v", err)
+ }
+ }
+ return false
+}
+
+func (e *EncodeProm) checkConfUpdate() {
+ select {
+ case stage := <-e.updateChan:
+ cfg := api.PromEncode{}
+ if stage.Encode != nil && stage.Encode.Prom != nil {
+ cfg = *stage.Encode.Prom
+ }
+ plog.Infof("Received config update: %v", cfg)
+
+ e.cleanDeletedMetrics(cfg)
+
+ needNewRegistry := false
+ for i := range cfg.Metrics {
+ switch cfg.Metrics[i].Type {
+ case api.MetricCounter:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.counters, e.addCounter)
+ case api.MetricGauge:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.gauges, e.addGauge)
+ case api.MetricHistogram:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.histos, e.addHistogram)
+ case api.MetricAggHistogram:
+ needNewRegistry = e.checkMetricUpdate(cfg.Prefix, &cfg.Metrics[i], e.metricCommon.aggHistos, e.addAgghistogram)
+ default:
+ plog.Errorf("invalid metric type = %v, skipping", cfg.Metrics[i].Type)
+ continue
+ }
+ if needNewRegistry {
+ break
+ }
+ }
+ e.cfg = &cfg
+ if needNewRegistry {
+ // cf https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.0/prometheus#Registerer.Unregister
+ plog.Info("Changes detected on labels: need registry reset.")
+ e.resetRegistry()
+ break
+ }
+ default:
+ // Nothing to do
+ return
+ }
+}
+
+func (e *EncodeProm) resetRegistry() {
+ e.metricCommon.cleanupInfoStructs()
+ reg := prometheus.NewRegistry()
+ e.registerer = reg
+ for i := range e.cfg.Metrics {
+ mCfg := &e.cfg.Metrics[i]
+ fullMetricName := e.cfg.Prefix + mCfg.Name
+ mInfo := metrics.Preprocess(mCfg)
+ plog.Debugf("Create metric: %s, Labels: %v", fullMetricName, mInfo.TargetLabels())
+ var m prometheus.Collector
+ switch mCfg.Type {
+ case api.MetricCounter:
+ m = e.addCounter(fullMetricName, mInfo)
+ case api.MetricGauge:
+ m = e.addGauge(fullMetricName, mInfo)
+ case api.MetricHistogram:
+ m = e.addHistogram(fullMetricName, mInfo)
+ case api.MetricAggHistogram:
+ m = e.addAgghistogram(fullMetricName, mInfo)
+ default:
+ plog.Errorf("invalid metric type = %v, skipping", mCfg.Type)
+ continue
+ }
+ if m != nil {
+ err := e.registerer.Register(m)
+ if err != nil {
+ plog.Errorf("error in prometheus.Register: %v", err)
+ }
+ }
+ }
+ e.server.SetRegistry(e.regName, reg)
+}
+
+func NewEncodeProm(opMetrics *operational.Metrics, params config.StageParam) (Encoder, error) {
+ cfg := api.PromEncode{}
+ if params.Encode != nil && params.Encode.Prom != nil {
+ cfg = *params.Encode.Prom
+ }
+
+ expiryTime := cfg.ExpiryTime
+ if expiryTime.Duration == 0 {
+ expiryTime.Duration = defaultExpiryTime
+ }
+ plog.Debugf("expiryTime = %v", expiryTime)
+
+ registry := prometheus.NewRegistry()
+
+ w := &EncodeProm{
+ cfg: &cfg,
+ registerer: registry,
+ updateChan: make(chan config.StageParam),
+ server: promserver.SharedServer,
+ regName: params.Name,
+ }
+
+ if cfg.PromConnectionInfo != nil {
+ // Start new server
+ w.server = promserver.StartServerAsync(cfg.PromConnectionInfo, params.Name, registry)
+ }
+
+ metricCommon := NewMetricsCommonStruct(opMetrics, cfg.MaxMetrics, params.Name, expiryTime, w.Cleanup)
+ w.metricCommon = metricCommon
+
+ // Init metrics
+ w.resetRegistry()
+
+ return w, nil
+}
+
+func (e *EncodeProm) Update(config config.StageParam) {
+ e.updateChan <- config
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
new file mode 100644
index 000000000..a1b476c77
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/encode_s3.go
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package encode + +import ( + "bytes" + "encoding/json" + "fmt" + "sync" + "time" + + minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +const ( + flpS3Version = "v0.1" + defaultBatchSize = 10 +) + +var ( + defaultTimeOut = api.Duration{Duration: 60 * time.Second} +) + +type encodeS3 struct { + s3Params api.EncodeS3 + s3Writer s3WriteEntries + recordsWritten prometheus.Counter + pendingEntries []config.GenericMap + mutex *sync.Mutex + expiryTime time.Time + exitChan <-chan struct{} + streamID string + intervalStartTime time.Time + sequenceNumber int64 +} + +type s3WriteEntries interface { + putObject(bucket string, objectName string, object map[string]interface{}) error +} + +type encodeS3Writer struct { + s3Client *minio.Client + s3Params *api.EncodeS3 +} + +// The mutex must be held when calling writeObject +func (s *encodeS3) writeObject() error { + nLogs := len(s.pendingEntries) + if nLogs > s.s3Params.BatchSize { + nLogs = s.s3Params.BatchSize + } + now := time.Now() + object := s.GenerateStoreHeader(s.pendingEntries[0:nLogs], s.intervalStartTime, now) + year := fmt.Sprintf("%04d", now.Year()) + month := fmt.Sprintf("%02d", now.Month()) + day := fmt.Sprintf("%02d", now.Day()) + hour := fmt.Sprintf("%02d", now.Hour()) + seq := fmt.Sprintf("%08d", s.sequenceNumber) + objectName := s.s3Params.Account + "/year=" + year + "/month=" + month + "/day=" + day + "/hour=" + hour + "/stream-id=" + s.streamID + "/" + seq + log.Debugf("S3 writeObject: objectName = %s", objectName) + log.Debugf("S3 writeObject: object = %v", object) + s.pendingEntries = s.pendingEntries[nLogs:] + s.intervalStartTime = now + s.expiryTime = now.Add(s.s3Params.WriteTimeout.Duration) + s.sequenceNumber++ + // send object to object store + err := s.s3Writer.putObject(s.s3Params.Bucket, objectName, object) + if err != nil { + log.Errorf("error in writing object: %v", err) + } + return err +} + +func (s *encodeS3) GenerateStoreHeader(flows []config.GenericMap, startTime time.Time, endTime time.Time) map[string]interface{} { + object := make(map[string]interface{}) + // copy user defined keys from config to object header + for key, value := range s.s3Params.ObjectHeaderParameters { + object[key] = value + } + object["version"] = flpS3Version + object["capture_start_time"] = startTime.Format(time.RFC3339) + object["capture_end_time"] = endTime.Format(time.RFC3339) + object["number_of_flow_logs"] = len(flows) + object["flow_logs"] = flows + + return object +} + +func (s *encodeS3) Update(_ config.StageParam) { + log.Warn("Encode S3 Writer, update not supported") +} + +func (s *encodeS3) createObjectTimeoutLoop() { + log.Debugf("entering createObjectTimeoutLoop") + ticker := time.NewTicker(s.s3Params.WriteTimeout.Duration) + for { + select { + case <-s.exitChan: + log.Debugf("exiting createObjectTimeoutLoop because of signal") + return + case <-ticker.C: + now := time.Now() + log.Debugf("time now = %v, expiryTime = %v", now, s.expiryTime) + s.mutex.Lock() + _ = s.writeObject() + s.mutex.Unlock() + } + } +} + +// Encode queues entries to be sent to object store +func (s *encodeS3) Encode(entry config.GenericMap) { + log.Debugf("Encode S3, 
entry = %v", entry) + s.mutex.Lock() + defer s.mutex.Unlock() + s.pendingEntries = append(s.pendingEntries, entry) + s.recordsWritten.Inc() + if len(s.pendingEntries) >= s.s3Params.BatchSize { + _ = s.writeObject() + } +} + +// NewEncodeS3 creates a new writer to S3 +func NewEncodeS3(opMetrics *operational.Metrics, params config.StageParam) (Encoder, error) { + configParams := api.EncodeS3{} + if params.Encode != nil && params.Encode.S3 != nil { + configParams = *params.Encode.S3 + } + log.Debugf("NewEncodeS3, config = %v", configParams) + s3Writer := &encodeS3Writer{ + s3Params: &configParams, + } + if configParams.WriteTimeout.Duration == time.Duration(0) { + configParams.WriteTimeout = defaultTimeOut + } + if configParams.BatchSize == 0 { + configParams.BatchSize = defaultBatchSize + } + + s := &encodeS3{ + s3Params: configParams, + s3Writer: s3Writer, + recordsWritten: opMetrics.CreateRecordsWrittenCounter(params.Name), + pendingEntries: make([]config.GenericMap, 0), + expiryTime: time.Now().Add(configParams.WriteTimeout.Duration), + exitChan: utils.ExitChannel(), + streamID: time.Now().Format(time.RFC3339), + intervalStartTime: time.Now(), + mutex: &sync.Mutex{}, + } + go s.createObjectTimeoutLoop() + return s, nil +} + +func (e *encodeS3Writer) connectS3(config *api.EncodeS3) (*minio.Client, error) { + // Initialize s3 client object. + minioOptions := minio.Options{ + Creds: credentials.NewStaticV4(config.AccessKeyID, config.SecretAccessKey, ""), + Secure: config.Secure, + } + s3Client, err := minio.New(config.Endpoint, &minioOptions) + if err != nil { + log.Errorf("Error when creating S3 client: %v", err) + return nil, err + } + + found, err := s3Client.BucketExists(context.Background(), config.Bucket) + if err != nil { + log.Errorf("Error accessing S3 bucket: %v", err) + return nil, err + } + if found { + log.Infof("S3 Bucket %s found", config.Bucket) + } + log.Debugf("s3Client = %#v", s3Client) // s3Client is now setup + return s3Client, nil +} + +func (e *encodeS3Writer) putObject(bucket string, objectName string, object map[string]interface{}) error { + if e.s3Client == nil { + s3Client, err := e.connectS3(e.s3Params) + if s3Client == nil { + return err + } + e.s3Client = s3Client + } + b := new(bytes.Buffer) + err := json.NewEncoder(b).Encode(object) + if err != nil { + log.Errorf("error encoding object: %v", err) + return err + } + log.Debugf("encoded object = %v", b) + // TBD: add necessary headers such as authorization (token), gzip, md5, etc + uploadInfo, err := e.s3Client.PutObject(context.Background(), bucket, objectName, b, int64(b.Len()), minio.PutObjectOptions{ContentType: "application/octet-stream"}) + log.Debugf("uploadInfo = %v", uploadInfo) + return err +} + +func (e *encodeS3Writer) Update(_ config.StageParam) { + log.Warn("Encode S3 Writer, update not supported") +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/filtering.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/filtering.go new file mode 100644 index 000000000..885d4ae2a --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/filtering.go @@ -0,0 +1,28 @@ +package metrics + +import "github.com/netobserv/flowlogs-pipeline/pkg/config" + +func (p *Preprocessed) ApplyFilters(flow config.GenericMap, flatParts []config.GenericMap) (bool, []config.GenericMap) { + filteredParts := flatParts + for _, filter := range p.filters { + if filter.useFlat { + filteredParts = filter.filterFlatParts(filteredParts) + 
if len(filteredParts) == 0 { + return false, nil + } + } else if !filter.predicate(flow) { + return false, nil + } + } + return true, filteredParts +} + +func (pf *preprocessedFilter) filterFlatParts(flatParts []config.GenericMap) []config.GenericMap { + var filteredParts []config.GenericMap + for _, part := range flatParts { + if pf.predicate(part) { + filteredParts = append(filteredParts, part) + } + } + return filteredParts +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/flattening.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/flattening.go new file mode 100644 index 000000000..e9f27ebbd --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/flattening.go @@ -0,0 +1,88 @@ +package metrics + +import ( + "github.com/netobserv/flowlogs-pipeline/pkg/config" +) + +func (p *Preprocessed) GenerateFlatParts(flow config.GenericMap) []config.GenericMap { + if len(p.MetricsItem.Flatten) == 0 { + return nil + } + // Want to generate sub-flows from {A=foo, B=[{B1=x, B2=y},{B1=z}], C=[foo,bar]} + // => {B>B1=x, B>B2=y, C=foo}, {B>B1=z, C=foo}, {B>B1=x, B>B2=y, C=bar}, {B>B1=z, C=bar} + var partsPerLabel [][]config.GenericMap + for _, fl := range p.MetricsItem.Flatten { + if anyVal, ok := flow[fl]; ok { + // Intermediate step to get: + // [{B>B1=x, B>B2=y}, {B>B1=z}], [C=foo, C=bar] + var partsForLabel []config.GenericMap + switch v := anyVal.(type) { + case []any: + prefix := fl + ">" + for _, vv := range v { + switch vvv := vv.(type) { + case config.GenericMap: + partsForLabel = append(partsForLabel, flattenNested(prefix, vvv)) + default: + partsForLabel = append(partsForLabel, config.GenericMap{fl: vv}) + } + } + case []config.GenericMap: + prefix := fl + ">" + for _, vv := range v { + partsForLabel = append(partsForLabel, flattenNested(prefix, vv)) + } + case []string: + for _, vv := range v { + partsForLabel = append(partsForLabel, config.GenericMap{fl: vv}) + } + } + if len(partsForLabel) > 0 { + partsPerLabel = append(partsPerLabel, partsForLabel) + } + } + } + return distribute(partsPerLabel) +} + +func distribute(allUnflat [][]config.GenericMap) []config.GenericMap { + // turn + // [{B>B1=x, B>B2=y}, {B>B1=z}], [{C=foo}, {C=bar}] + // into + // [{B>B1=x, B>B2=y, C=foo}, {B>B1=z, C=foo}, {B>B1=x, B>B2=y, C=bar}, {B>B1=z, C=bar}] + totalCard := 1 + for _, part := range allUnflat { + if len(part) > 1 { + totalCard *= len(part) + } + } + ret := make([]config.GenericMap, totalCard) + indexes := make([]int, len(allUnflat)) + for c := range ret { + ret[c] = config.GenericMap{} + incIndex := false + for i, part := range allUnflat { + index := indexes[i] + for k, v := range part[index] { + ret[c][k] = v + } + if !incIndex { + if index+1 == len(part) { + indexes[i] = 0 + } else { + indexes[i] = index + 1 + incIndex = true + } + } + } + } + return ret +} + +func flattenNested(prefix string, nested config.GenericMap) config.GenericMap { + subFlow := config.GenericMap{} + for k, v := range nested { + subFlow[prefix+k] = v + } + return subFlow +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/preprocess.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/preprocess.go new file mode 100644 index 000000000..5aabc8311 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics/preprocess.go @@ -0,0 +1,91 @@ +package metrics + +import ( + "regexp" + "strings" + + 
"github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/utils/filters" +) + +type Preprocessed struct { + *api.MetricsItem + filters []preprocessedFilter + MappedLabels []MappedLabel + FlattenedLabels []MappedLabel +} + +type MappedLabel struct { + Source string + Target string +} + +type preprocessedFilter struct { + predicate filters.Predicate + useFlat bool +} + +func (p *Preprocessed) TargetLabels() []string { + var targetLabels []string + for _, l := range p.FlattenedLabels { + targetLabels = append(targetLabels, l.Target) + } + for _, l := range p.MappedLabels { + targetLabels = append(targetLabels, l.Target) + } + return targetLabels +} + +func filterToPredicate(filter api.MetricsFilter) filters.Predicate { + switch filter.Type { + case api.MetricFilterEqual: + return filters.Equal(filter.Key, filter.Value, true) + case api.MetricFilterNotEqual: + return filters.NotEqual(filter.Key, filter.Value, true) + case api.MetricFilterPresence: + return filters.Presence(filter.Key) + case api.MetricFilterAbsence: + return filters.Absence(filter.Key) + case api.MetricFilterRegex: + r, _ := regexp.Compile(filter.Value) + return filters.Regex(filter.Key, r) + case api.MetricFilterNotRegex: + r, _ := regexp.Compile(filter.Value) + return filters.NotRegex(filter.Key, r) + } + // Default = Exact + return filters.Equal(filter.Key, filter.Value, true) +} + +func Preprocess(def *api.MetricsItem) *Preprocessed { + mi := Preprocessed{ + MetricsItem: def, + } + for _, l := range def.Labels { + ml := MappedLabel{Source: l, Target: l} + if as := def.Remap[l]; as != "" { + ml.Target = as + } + if mi.isFlattened(l) { + mi.FlattenedLabels = append(mi.FlattenedLabels, ml) + } else { + mi.MappedLabels = append(mi.MappedLabels, ml) + } + } + for _, f := range def.Filters { + mi.filters = append(mi.filters, preprocessedFilter{ + predicate: filterToPredicate(f), + useFlat: mi.isFlattened(f.Key), + }) + } + return &mi +} + +func (p *Preprocessed) isFlattened(fieldPath string) bool { + for _, flat := range p.Flatten { + if fieldPath == flat || strings.HasPrefix(fieldPath, flat+">") { + return true + } + } + return false +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go new file mode 100644 index 000000000..d420f1af8 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics_common.go @@ -0,0 +1,353 @@ +/* + * Copyright (C) 2024 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package encode + +import ( + "strings" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics" + putils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" +) + +type mInfoStruct struct { + genericMetric interface{} // can be a counter, gauge, or histogram pointer + info *metrics.Preprocessed +} + +type MetricsCommonStruct struct { + gauges map[string]mInfoStruct + counters map[string]mInfoStruct + histos map[string]mInfoStruct + aggHistos map[string]mInfoStruct + mCache *putils.TimedCache + mChacheLenMetric prometheus.Gauge + metricsProcessed prometheus.Counter + metricsDropped prometheus.Counter + errorsCounter *prometheus.CounterVec + expiryTime time.Duration + exitChan <-chan struct{} +} + +type MetricsCommonInterface interface { + GetChacheEntry(entryLabels map[string]string, m interface{}) interface{} + ProcessCounter(m interface{}, labels map[string]string, value float64) error + ProcessGauge(m interface{}, labels map[string]string, value float64, key string) error + ProcessHist(m interface{}, labels map[string]string, value float64) error + ProcessAggHist(m interface{}, labels map[string]string, value []float64) error +} + +var ( + metricsProcessed = operational.DefineMetric( + "metrics_processed", + "Number of metrics processed", + operational.TypeCounter, + "stage", + ) + metricsDropped = operational.DefineMetric( + "metrics_dropped", + "Number of metrics dropped", + operational.TypeCounter, + "stage", + ) + encodePromErrors = operational.DefineMetric( + "encode_prom_errors", + "Total errors during metrics generation", + operational.TypeCounter, + "error", "metric", "key", + ) + mChacheLen = operational.DefineMetric( + "encode_prom_metrics_reported", + "Total number of prometheus metrics reported by this stage", + operational.TypeGauge, + "stage", + ) +) + +func (m *MetricsCommonStruct) AddCounter(name string, g interface{}, info *metrics.Preprocessed) { + mStruct := mInfoStruct{genericMetric: g, info: info} + m.counters[name] = mStruct +} + +func (m *MetricsCommonStruct) AddGauge(name string, g interface{}, info *metrics.Preprocessed) { + mStruct := mInfoStruct{genericMetric: g, info: info} + m.gauges[name] = mStruct +} + +func (m *MetricsCommonStruct) AddHist(name string, g interface{}, info *metrics.Preprocessed) { + mStruct := mInfoStruct{genericMetric: g, info: info} + m.histos[name] = mStruct +} + +func (m *MetricsCommonStruct) AddAggHist(name string, g interface{}, info *metrics.Preprocessed) { + mStruct := mInfoStruct{genericMetric: g, info: info} + m.aggHistos[name] = mStruct +} + +func (m *MetricsCommonStruct) MetricCommonEncode(mci MetricsCommonInterface, metricRecord config.GenericMap) { + log.Tracef("entering MetricCommonEncode. 
metricRecord = %v", metricRecord) + + // Process counters + for _, mInfo := range m.counters { + labelSets, value := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric) + if labelSets == nil { + continue + } + for _, labels := range labelSets { + err := mci.ProcessCounter(mInfo.genericMetric, labels.lMap, value) + if err != nil { + log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err) + m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc() + continue + } + m.metricsProcessed.Inc() + } + } + + // Process gauges + for _, mInfo := range m.gauges { + labelSets, value := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric) + if labelSets == nil { + continue + } + for _, labels := range labelSets { + err := mci.ProcessGauge(mInfo.genericMetric, labels.lMap, value, labels.key) + if err != nil { + log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err) + m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc() + continue + } + m.metricsProcessed.Inc() + } + } + + // Process histograms + for _, mInfo := range m.histos { + labelSets, value := m.prepareMetric(mci, metricRecord, mInfo.info, mInfo.genericMetric) + if labelSets == nil { + continue + } + for _, labels := range labelSets { + err := mci.ProcessHist(mInfo.genericMetric, labels.lMap, value) + if err != nil { + log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err) + m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc() + continue + } + m.metricsProcessed.Inc() + } + } + + // Process pre-aggregated histograms + for _, mInfo := range m.aggHistos { + labelSets, values := m.prepareAggHisto(mci, metricRecord, mInfo.info, mInfo.genericMetric) + if labelSets == nil { + continue + } + for _, labels := range labelSets { + err := mci.ProcessAggHist(mInfo.genericMetric, labels.lMap, values) + if err != nil { + log.Errorf("labels registering error on %s: %v", mInfo.info.Name, err) + m.errorsCounter.WithLabelValues("LabelsRegisteringError", mInfo.info.Name, "").Inc() + continue + } + m.metricsProcessed.Inc() + } + } +} + +func (m *MetricsCommonStruct) prepareMetric(mci MetricsCommonInterface, flow config.GenericMap, info *metrics.Preprocessed, mv interface{}) ([]labelsKeyAndMap, float64) { + flatParts := info.GenerateFlatParts(flow) + ok, flatParts := info.ApplyFilters(flow, flatParts) + if !ok { + return nil, 0 + } + + val := m.extractGenericValue(flow, info) + if val == nil { + return nil, 0 + } + floatVal, err := utils.ConvertToFloat64(val) + if err != nil { + m.errorsCounter.WithLabelValues("ValueConversionError", info.Name, info.ValueKey).Inc() + return nil, 0 + } + if info.ValueScale != 0 { + floatVal /= info.ValueScale + } + + labelSets := extractLabels(flow, flatParts, info) + var lkms []labelsKeyAndMap + for _, ls := range labelSets { + // Update entry for expiry mechanism (the entry itself is its own cleanup function) + lkm := ls.toKeyAndMap(info) + lkms = append(lkms, lkm) + cacheEntry := mci.GetChacheEntry(lkm.lMap, mv) + ok := m.mCache.UpdateCacheEntry(lkm.key, cacheEntry) + if !ok { + m.metricsDropped.Inc() + return nil, 0 + } + } + return lkms, floatVal +} + +func (m *MetricsCommonStruct) prepareAggHisto(mci MetricsCommonInterface, flow config.GenericMap, info *metrics.Preprocessed, mc interface{}) ([]labelsKeyAndMap, []float64) { + flatParts := info.GenerateFlatParts(flow) + ok, flatParts := info.ApplyFilters(flow, flatParts) + if !ok { + return nil, nil + } + + val := 
m.extractGenericValue(flow, info) + if val == nil { + return nil, nil + } + values, ok := val.([]float64) + if !ok { + m.errorsCounter.WithLabelValues("HistoValueConversionError", info.Name, info.ValueKey).Inc() + return nil, nil + } + + labelSets := extractLabels(flow, flatParts, info) + var lkms []labelsKeyAndMap + for _, ls := range labelSets { + // Update entry for expiry mechanism (the entry itself is its own cleanup function) + lkm := ls.toKeyAndMap(info) + lkms = append(lkms, lkm) + cacheEntry := mci.GetChacheEntry(lkm.lMap, mc) + ok := m.mCache.UpdateCacheEntry(lkm.key, cacheEntry) + if !ok { + m.metricsDropped.Inc() + return nil, nil + } + } + return lkms, values +} + +func (m *MetricsCommonStruct) extractGenericValue(flow config.GenericMap, info *metrics.Preprocessed) interface{} { + if info.ValueKey == "" { + // No value key means it's a records / flows counter (1 flow = 1 increment), so just return 1 + return 1 + } + val, found := flow[info.ValueKey] + if !found { + // No value might mean 0 for counters, to keep storage lightweight - it can safely be ignored + return nil + } + return val +} + +type label struct { + key string + value string +} + +type labelSet []label + +type labelsKeyAndMap struct { + key string + lMap map[string]string +} + +func (l labelSet) toKeyAndMap(info *metrics.Preprocessed) labelsKeyAndMap { + key := strings.Builder{} + key.WriteString(info.Name) + key.WriteRune('|') + m := map[string]string{} + for _, kv := range l { + key.WriteString(kv.value) + key.WriteRune('|') + m[kv.key] = kv.value + } + return labelsKeyAndMap{key: key.String(), lMap: m} +} + +// extractLabels takes the flow and a single metric definition as input. +// It returns the flat labels maps (label names and values). +// Most of the time it will return a single map; it may return several of them when the parsed flow fields are lists (e.g. "interfaces"). +func extractLabels(flow config.GenericMap, flatParts []config.GenericMap, info *metrics.Preprocessed) []labelSet { + common := newLabelSet(flow, info.MappedLabels) + if len(flatParts) == 0 { + return []labelSet{common} + } + var all []labelSet + for _, fp := range flatParts { + ls := newLabelSet(fp, info.FlattenedLabels) + ls = append(ls, common...) 
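+ // each flattened part contributes its own label set, merged with the labels common to the whole flow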
+ all = append(all, ls) + } + return all +} + +func newLabelSet(part config.GenericMap, labels []metrics.MappedLabel) labelSet { + var ls labelSet + for _, t := range labels { + label := label{key: t.Target, value: ""} + if v, ok := part[t.Source]; ok { + label.value = utils.ConvertToString(v) + } + ls = append(ls, label) + } + return ls +} + +func (m *MetricsCommonStruct) cleanupExpiredEntriesLoop(callback putils.CacheCallback) { + ticker := time.NewTicker(m.expiryTime) + for { + select { + case <-m.exitChan: + log.Debugf("exiting cleanupExpiredEntriesLoop because of signal") + return + case <-ticker.C: + m.mCache.CleanupExpiredEntries(m.expiryTime, callback) + } + } +} + +func (m *MetricsCommonStruct) cleanupInfoStructs() { + m.gauges = map[string]mInfoStruct{} + m.counters = map[string]mInfoStruct{} + m.histos = map[string]mInfoStruct{} + m.aggHistos = map[string]mInfoStruct{} +} + +func NewMetricsCommonStruct(opMetrics *operational.Metrics, maxCacheEntries int, name string, expiryTime api.Duration, callback putils.CacheCallback) *MetricsCommonStruct { + mChacheLenMetric := opMetrics.NewGauge(&mChacheLen, name) + m := &MetricsCommonStruct{ + mCache: putils.NewTimedCache(maxCacheEntries, mChacheLenMetric), + mChacheLenMetric: mChacheLenMetric, + metricsProcessed: opMetrics.NewCounter(&metricsProcessed, name), + metricsDropped: opMetrics.NewCounter(&metricsDropped, name), + errorsCounter: opMetrics.NewCounterVec(&encodePromErrors), + expiryTime: expiryTime.Duration, + exitChan: putils.ExitChannel(), + gauges: map[string]mInfoStruct{}, + counters: map[string]mInfoStruct{}, + histos: map[string]mInfoStruct{}, + aggHistos: map[string]mInfoStruct{}, + } + go m.cleanupExpiredEntriesLoop(callback) + return m +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go new file mode 100644 index 000000000..1eee38912 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlplogs.go @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package opentelemetry + +import ( + "context" + + sdklog "github.com/agoda-com/opentelemetry-logs-go/sdk/logs" + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode" + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/sdk/resource" +) + +type EncodeOtlpLogs struct { + cfg api.EncodeOtlpLogs + ctx context.Context + res *resource.Resource + lp *sdklog.LoggerProvider +} + +// Encode encodes a log entry to be exported +func (e *EncodeOtlpLogs) Encode(entry config.GenericMap) { + log.Tracef("entering EncodeOtlpLogs. 
entry = %v", entry) + e.LogWrite(entry) +} + +func (e *EncodeOtlpLogs) Update(_ config.StageParam) { + log.Warn("EncodeOtlpLogs, update not supported") +} + +func NewEncodeOtlpLogs(_ *operational.Metrics, params config.StageParam) (encode.Encoder, error) { + log.Tracef("entering NewEncodeOtlpLogs \n") + cfg := api.EncodeOtlpLogs{} + if params.Encode != nil && params.Encode.OtlpLogs != nil { + cfg = *params.Encode.OtlpLogs + } + log.Debugf("NewEncodeOtlpLogs cfg = %v \n", cfg) + + ctx := context.Background() + res := newResource() + + lp, err := NewOtlpLoggerProvider(ctx, params, res) + if err != nil { + return nil, err + } + + w := &EncodeOtlpLogs{ + cfg: cfg, + ctx: ctx, + res: res, + lp: lp, + } + return w, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go new file mode 100644 index 000000000..92200cece --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlpmetrics.go @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package opentelemetry + +import ( + "context" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics" + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" +) + +const defaultExpiryTime = time.Duration(2 * time.Minute) +const flpMeterName = "flp_meter" + +type EncodeOtlpMetrics struct { + cfg api.EncodeOtlpMetrics + ctx context.Context + res *resource.Resource + mp *sdkmetric.MeterProvider + meter metric.Meter + metricCommon *encode.MetricsCommonStruct +} + +func (e *EncodeOtlpMetrics) Update(_ config.StageParam) { + log.Warn("EncodeOtlpMetrics, update not supported") +} + +// Encode encodes a metric to be exported +func (e *EncodeOtlpMetrics) Encode(metricRecord config.GenericMap) { + log.Tracef("entering EncodeOtlpMetrics. 
entry = %v", metricRecord) + e.metricCommon.MetricCommonEncode(e, metricRecord) +} + +func (e *EncodeOtlpMetrics) ProcessCounter(m interface{}, labels map[string]string, value float64) error { + counter := m.(metric.Float64Counter) + // set attributes using the labels + attributes := obtainAttributesFromLabels(labels) + counter.Add(e.ctx, value, metric.WithAttributes(attributes...)) + return nil +} + +func (e *EncodeOtlpMetrics) ProcessGauge(m interface{}, labels map[string]string, value float64, key string) error { + obs := m.(Float64Gauge) + // set attributes using the labels + attributes := obtainAttributesFromLabels(labels) + obs.Set(key, value, attributes) + return nil +} + +func (e *EncodeOtlpMetrics) ProcessHist(m interface{}, labels map[string]string, value float64) error { + histo := m.(metric.Float64Histogram) + // set attributes using the labels + attributes := obtainAttributesFromLabels(labels) + histo.Record(e.ctx, value, metric.WithAttributes(attributes...)) + return nil +} + +func (e *EncodeOtlpMetrics) ProcessAggHist(m interface{}, labels map[string]string, values []float64) error { + histo := m.(metric.Float64Histogram) + // set attributes using the labels + attributes := obtainAttributesFromLabels(labels) + for _, v := range values { + histo.Record(e.ctx, v, metric.WithAttributes(attributes...)) + } + return nil +} + +func (e *EncodeOtlpMetrics) GetChacheEntry(entryLabels map[string]string, _ interface{}) interface{} { + return entryLabels +} + +func NewEncodeOtlpMetrics(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) { + log.Tracef("entering NewEncodeOtlpMetrics \n") + cfg := api.EncodeOtlpMetrics{} + if params.Encode != nil && params.Encode.OtlpMetrics != nil { + cfg = *params.Encode.OtlpMetrics + } + log.Debugf("NewEncodeOtlpMetrics cfg = %v \n", cfg) + + ctx := context.Background() + res := newResource() + + mp, err := NewOtlpMetricsProvider(ctx, params, res) + if err != nil { + return nil, err + } + meter := mp.Meter( + flpMeterName, + ) + + expiryTime := cfg.ExpiryTime + if expiryTime.Duration == 0 { + expiryTime.Duration = defaultExpiryTime + } + + meterFactory := otel.Meter(flpMeterName) + + w := &EncodeOtlpMetrics{ + cfg: cfg, + ctx: ctx, + res: res, + mp: mp, + meter: meterFactory, + } + + metricCommon := encode.NewMetricsCommonStruct(opMetrics, 0, params.Name, expiryTime, nil) + w.metricCommon = metricCommon + + for i := range cfg.Metrics { + mCfg := &cfg.Metrics[i] + fullMetricName := cfg.Prefix + mCfg.Name + log.Debugf("fullMetricName = %v", fullMetricName) + log.Debugf("Labels = %v", mCfg.Labels) + mInfo := metrics.Preprocess(mCfg) + switch mCfg.Type { + case api.MetricCounter: + counter, err := meter.Float64Counter(fullMetricName) + if err != nil { + log.Errorf("error during counter creation: %v", err) + return nil, err + } + metricCommon.AddCounter(fullMetricName, counter, mInfo) + case api.MetricGauge: + // at implementation time, only asynchronous gauges are supported by otel in golang + obs := Float64Gauge{observations: make(map[string]Float64GaugeEntry)} + gauge, err := meterFactory.Float64ObservableGauge( + fullMetricName, + metric.WithFloat64Callback(obs.Callback), + ) + if err != nil { + log.Errorf("error during gauge creation: %v", err) + return nil, err + } + metricCommon.AddGauge(fullMetricName, gauge, mInfo) + case api.MetricHistogram: + var histo metric.Float64Histogram + if len(mCfg.Buckets) == 0 { + histo, err = meter.Float64Histogram(fullMetricName) + } else { + histo, err = 
meter.Float64Histogram(fullMetricName, + metric.WithExplicitBucketBoundaries(mCfg.Buckets...), + ) + + } + if err != nil { + log.Errorf("error during histogram creation: %v", err) + return nil, err + } + metricCommon.AddHist(fullMetricName, histo, mInfo) + case api.MetricAggHistogram: + fallthrough + default: + log.Errorf("invalid metric type = %v, skipping", mCfg.Type) + continue + } + } + + return w, nil +} + +// At present, the Go SDK only supports asynchronous gauges, so the following types emulate a synchronous Set() operation on top of an observable gauge + +type Float64GaugeEntry struct { + attributes []attribute.KeyValue + value float64 +} + +type Float64Gauge struct { + observations map[string]Float64GaugeEntry +} + +// Callback implements the callback function for the underlying asynchronous gauge. +// It observes the current state of all previous Set() calls. +func (f *Float64Gauge) Callback(_ context.Context, o metric.Float64Observer) error { + for _, fEntry := range f.observations { + o.Observe(fEntry.value, metric.WithAttributes(fEntry.attributes...)) + } + // re-initialize the observed items + f.observations = make(map[string]Float64GaugeEntry) + return nil +} + +func (f *Float64Gauge) Set(key string, val float64, attrs []attribute.KeyValue) { + f.observations[key] = Float64GaugeEntry{ + value: val, + attributes: attrs, + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go new file mode 100644 index 000000000..cd5e62c19 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/encode_otlptrace.go @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package opentelemetry + +import ( + "context" + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode" + log "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +const ( + flpTracerName = "flp_tracer" + flpEncodeSpanName = "flp_encode" +) + +type EncodeOtlpTrace struct { + cfg api.EncodeOtlpTraces + ctx context.Context + res *resource.Resource + tp *sdktrace.TracerProvider +} + +// Encode encodes a flow entry as a trace to be exported +func (e *EncodeOtlpTrace) Encode(entry config.GenericMap) { + log.Tracef("entering EncodeOtlpTrace. entry = %v", entry) + tr := e.tp.Tracer(flpTracerName) + ll := len(e.cfg.SpanSplitter) + + // create parent span + newCtx, span0 := tr.Start(e.ctx, flpEncodeSpanName) + attributes := obtainAttributesFromEntry(entry) + span0.SetAttributes(*attributes...) 
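+ // the parent span carries every field of the entry; per-prefix child spans are created below when SpanSplitter is configured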
+ defer span0.End() + if ll == 0 { + return + } + // for each item in SpanSplitter, make a separate entry for each listed item + // do not include fields that belong exclusively to other items + ss := e.cfg.SpanSplitter + records := make([]config.GenericMap, ll) + keepItem := make([]bool, ll) + for i := 0; i < ll; i++ { + records[i] = make(config.GenericMap) + } +OUTER: + for key, value := range entry { + for i := 0; i < ll; i++ { + if strings.HasPrefix(key, ss[i]) { + trimmed := strings.TrimPrefix(key, ss[i]) + records[i][trimmed] = value + keepItem[i] = true + continue OUTER + } + } + // if we reach here, the field did not have any of the prefixes. + // copy it into each of the records + for i := 0; i < ll; i++ { + records[i][key] = value + } + } + // only create child spans for records that have a field directly related to their item + for i := 0; i < ll; i++ { + if keepItem[i] { + _, span := tr.Start(newCtx, ss[i]) + attributes := obtainAttributesFromEntry(records[i]) + span.SetAttributes(*attributes...) + span.End() + } + } +} + +func (e *EncodeOtlpTrace) Update(_ config.StageParam) { + log.Warn("EncodeOtlpTrace, update not supported") +} + +func NewEncodeOtlpTraces(_ *operational.Metrics, params config.StageParam) (encode.Encoder, error) { + log.Tracef("entering NewEncodeOtlpTraces \n") + cfg := api.EncodeOtlpTraces{} + if params.Encode != nil && params.Encode.OtlpTraces != nil { + cfg = *params.Encode.OtlpTraces + } + log.Debugf("NewEncodeOtlpTraces cfg = %v \n", cfg) + + ctx := context.Background() + res := newResource() + + tp, err := NewOtlpTracerProvider(ctx, params, res) + if err != nil { + return nil, err + } + + w := &EncodeOtlpTrace{ + cfg: cfg, + ctx: ctx, + res: res, + tp: tp, + } + return w, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go new file mode 100644 index 000000000..2aebf1463 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry/opentelemetry.go @@ -0,0 +1,353 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package opentelemetry + +import ( + "context" + "encoding/json" + "fmt" + "time" + + otel2 "github.com/agoda-com/opentelemetry-logs-go" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc" + "github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp" + "github.com/agoda-com/opentelemetry-logs-go/logs" + sdklog2 "github.com/agoda-com/opentelemetry-logs-go/sdk/logs" + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + "google.golang.org/grpc/credentials" +) + +// Note: +// As of the writing of this module, go.opentelemetry.io does not provide interfaces for logs. +// We therefore temporarily use agoda-com/opentelemetry-logs-go for logs. +// When go.opentelemetry.io provides interfaces for logs, the code here should be updated to use those interfaces. + +const ( + flpOtlpLoggerName = "flp-otlp-logger" + defaultTimeInterval = time.Duration(20 * time.Second) + flpOtlpResourceVersion = "v0.1.0" + flpOtlpResourceName = "netobserv-otlp" + grpcType = "grpc" + httpType = "http" +) + +func NewOtlpTracerProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdktrace.TracerProvider, error) { + cfg := api.EncodeOtlpTraces{} + if params.Encode != nil && params.Encode.OtlpTraces != nil { + cfg = *params.Encode.OtlpTraces + } + if cfg.OtlpConnectionInfo == nil { + return nil, fmt.Errorf("otlptraces missing connection info") + } + addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port) + var err error + var traceProvider *sdktrace.TracerProvider + var traceExporter *otlptrace.Exporter + if cfg.ConnectionType == grpcType { + var expOption otlptracegrpc.Option + var tlsOption otlptracegrpc.Option + tlsOption = otlptracegrpc.WithInsecure() + if cfg.TLS != nil { + tlsConfig, err := cfg.OtlpConnectionInfo.TLS.Build() + if err != nil { + return nil, err + } + tlsOption = otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig)) + } + expOption = otlptracegrpc.WithEndpoint(addr) + traceExporter, err = otlptracegrpc.New(ctx, + expOption, + tlsOption, + otlptracegrpc.WithHeaders(cfg.Headers)) + if err != nil { + return nil, err + } + } else if cfg.ConnectionType == httpType { + var expOption otlptracehttp.Option + var tlsOption otlptracehttp.Option + tlsOption = otlptracehttp.WithInsecure() + if cfg.TLS != nil { + tlsConfig, err := cfg.TLS.Build() + if err != nil { + return nil, err + } + tlsOption = otlptracehttp.WithTLSClientConfig(tlsConfig) + } + expOption = otlptracehttp.WithEndpoint(addr) + traceExporter, err = otlptracehttp.New(ctx, + expOption, + tlsOption, + otlptracehttp.WithHeaders(cfg.Headers)) + if err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("must specify grpcaddress or httpaddress") + } + 
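+ // wrap the exporter in a batching span processor and register the provider as the global tracer provider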
traceProvider = sdktrace.NewTracerProvider( + sdktrace.WithResource(res), + sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(traceExporter)), + ) + + otel.SetTracerProvider(traceProvider) + return traceProvider, nil +} + +func NewOtlpMetricsProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdkmetric.MeterProvider, error) { + cfg := api.EncodeOtlpMetrics{} + if params.Encode != nil && params.Encode.OtlpMetrics != nil { + cfg = *params.Encode.OtlpMetrics + } + timeInterval := cfg.PushTimeInterval + if timeInterval.Duration == 0 { + timeInterval.Duration = defaultTimeInterval + } + if cfg.OtlpConnectionInfo == nil { + return nil, fmt.Errorf("otlpmetrics missing connection info") + } + addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port) + var err error + var meterProvider *sdkmetric.MeterProvider + if cfg.ConnectionType == grpcType { + var metricExporter *otlpmetricgrpc.Exporter + var expOption otlpmetricgrpc.Option + var tlsOption otlpmetricgrpc.Option + tlsOption = otlpmetricgrpc.WithInsecure() + if cfg.TLS != nil { + tlsConfig, err := cfg.TLS.Build() + if err != nil { + return nil, err + } + tlsOption = otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig)) + } + expOption = otlpmetricgrpc.WithEndpoint(addr) + metricExporter, err = otlpmetricgrpc.New(ctx, expOption, tlsOption) + if err != nil { + return nil, err + } + meterProvider = sdkmetric.NewMeterProvider( + sdkmetric.WithResource(res), + sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter, + sdkmetric.WithInterval(timeInterval.Duration))), + ) + } else if cfg.ConnectionType == httpType { + var metricExporter *otlpmetrichttp.Exporter + var expOption otlpmetrichttp.Option + var tlsOption otlpmetrichttp.Option + tlsOption = otlpmetrichttp.WithInsecure() + if cfg.TLS != nil { + tlsConfig, err := cfg.TLS.Build() + if err != nil { + return nil, err + } + tlsOption = otlpmetrichttp.WithTLSClientConfig(tlsConfig) + } + expOption = otlpmetrichttp.WithEndpoint(addr) + metricExporter, err = otlpmetrichttp.New(ctx, expOption, tlsOption) + if err != nil { + return nil, err + } + meterProvider = sdkmetric.NewMeterProvider( + sdkmetric.WithResource(res), + sdkmetric.WithReader(sdkmetric.NewPeriodicReader(metricExporter, + sdkmetric.WithInterval(timeInterval.Duration))), + ) + } else { + return nil, fmt.Errorf("must specify grpcaddress or httpaddress") + } + + otel.SetMeterProvider(meterProvider) + return meterProvider, nil +} + +func NewOtlpLoggerProvider(ctx context.Context, params config.StageParam, res *resource.Resource) (*sdklog2.LoggerProvider, error) { + cfg := api.EncodeOtlpLogs{} + if params.Encode != nil && params.Encode.OtlpLogs != nil { + cfg = *params.Encode.OtlpLogs + } + if cfg.OtlpConnectionInfo == nil { + return nil, fmt.Errorf("otlplogs missing connection info") + } + addr := fmt.Sprintf("%s:%v", cfg.OtlpConnectionInfo.Address, cfg.OtlpConnectionInfo.Port) + var expOption otlplogs.ExporterOption + if cfg.ConnectionType == grpcType { + var tlsOption otlplogsgrpc.Option + tlsOption = otlplogsgrpc.WithInsecure() + if params.Encode.OtlpLogs.TLS != nil { + tlsConfig, err := cfg.TLS.Build() + if err != nil { + return nil, err + } + tlsOption = otlplogsgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig)) + } + expOption = otlplogs.WithClient(otlplogsgrpc.NewClient( + otlplogsgrpc.WithEndpoint(addr), + tlsOption, + otlplogsgrpc.WithHeaders(cfg.Headers), + )) + } else if cfg.ConnectionType == httpType { + var tlsOption otlplogshttp.Option 
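+ // default to an insecure client unless TLS settings are provided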
+ tlsOption = otlplogshttp.WithInsecure() + if params.Encode.OtlpLogs.TLS != nil { + tlsConfig, err := cfg.TLS.Build() + if err != nil { + return nil, err + } + tlsOption = otlplogshttp.WithTLSClientConfig(tlsConfig) + } + expOption = otlplogs.WithClient(otlplogshttp.NewClient( + otlplogshttp.WithEndpoint(addr), + tlsOption, + otlplogshttp.WithHeaders(cfg.Headers), + )) + } else { + return nil, fmt.Errorf("must specify grpcaddress or httpaddress") + } + logExporter, err := otlplogs.NewExporter(ctx, expOption) + if err != nil { + return nil, err + } + + loggerProvider := sdklog2.NewLoggerProvider( + sdklog2.WithBatcher(logExporter), + sdklog2.WithResource(res), + ) + otel2.SetLoggerProvider(loggerProvider) + return loggerProvider, nil +} + +func (e *EncodeOtlpLogs) LogWrite(entry config.GenericMap) { + now := time.Now() + sn := logs.INFO + st := "INFO" + msgByteArray, _ := json.Marshal(entry) + msg := string(msgByteArray) + // TODO: Decide whether the content should be delivered as Body or as Attributes + lrc := logs.LogRecordConfig{ + // Timestamp: &now, // take timestamp from entry, if present? + ObservedTimestamp: now, + SeverityNumber: &sn, + SeverityText: &st, + Resource: e.res, + Body: &msg, + Attributes: obtainAttributesFromEntry(entry), + } + logRecord := logs.NewLogRecord(lrc) + + logger := otel2.GetLoggerProvider().Logger( + flpOtlpLoggerName, + logs.WithSchemaURL(semconv.SchemaURL), + ) + logger.Emit(logRecord) +} + +func obtainAttributesFromEntry(entry config.GenericMap) *[]attribute.KeyValue { + // convert the entry fields to Attributes of the message + var att = make([]attribute.KeyValue, len(entry)) + index := 0 + for k, v := range entry { + switch v := v.(type) { + case []string: + att[index] = attribute.StringSlice(k, v) + case string: + att[index] = attribute.String(k, v) + case []int: + att[index] = attribute.IntSlice(k, v) + case []int32: + valInt64Slice := []int64{} + for _, valInt32 := range v { + valInt64, _ := utils.ConvertToInt64(valInt32) + valInt64Slice = append(valInt64Slice, valInt64) + } + att[index] = attribute.Int64Slice(k, valInt64Slice) + case []int64: + att[index] = attribute.Int64Slice(k, v) + case int: + att[index] = attribute.Int(k, v) + case int32, int64, int16, uint, uint8, uint16, uint32, uint64: + valInt, _ := utils.ConvertToInt64(v) + att[index] = attribute.Int64(k, valInt) + case []float32: + valFloat64Slice := []float64{} + for _, valFloat32 := range v { + valFloat64, _ := utils.ConvertToFloat64(valFloat32) + valFloat64Slice = append(valFloat64Slice, valFloat64) + } + att[index] = attribute.Float64Slice(k, valFloat64Slice) + case []float64: + att[index] = attribute.Float64Slice(k, v) + case float32: + valFloat, _ := utils.ConvertToFloat64(v) + att[index] = attribute.Float64(k, valFloat) + case float64: + att[index] = attribute.Float64(k, v) + case []bool: + att[index] = attribute.BoolSlice(k, v) + case bool: + att[index] = attribute.Bool(k, v) + case nil: + // skip this field + continue + } + index++ + } + adjustedAtt := att[0:index] + return &adjustedAtt +} + +func obtainAttributesFromLabels(labels map[string]string) []attribute.KeyValue { + // convert the labels to Attributes of the message + var att = make([]attribute.KeyValue, len(labels)) + index := 0 + for k, v := range labels { + att[index] = attribute.String(k, v) + index++ + } + return att +} + +func (e *EncodeOtlpMetrics) MetricWrite(_ config.GenericMap) { + // nothing more to do at present +} + +// newResource returns a resource describing this application. 
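+// It merges the default SDK resource with the service name and version identifying this exporter.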
+func newResource() *resource.Resource { + r, _ := resource.Merge( + resource.Default(), + resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceName(flpOtlpResourceName), + semconv.ServiceVersion(flpOtlpResourceVersion), + ), + ) + return r +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go new file mode 100644 index 000000000..22aa4292e --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregate.go @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aggregate + +import ( + "fmt" + "math" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + util "github.com/netobserv/flowlogs-pipeline/pkg/utils" + log "github.com/sirupsen/logrus" +) + +const ( + OperationSum = "sum" + OperationAvg = "avg" + OperationMax = "max" + OperationMin = "min" + OperationCount = "count" + OperationRawValues = "raw_values" +) + +type Labels map[string]string +type NormalizedValues string + +type Aggregate struct { + definition *api.AggregateDefinition + cache *utils.TimedCache + mutex *sync.Mutex + expiryTime time.Duration +} + +type GroupState struct { + normalizedValues NormalizedValues + labels Labels + recentRawValues []float64 + recentOpValue float64 + recentCount int + totalValue float64 + totalCount int +} + +func (aggregate *Aggregate) LabelsFromEntry(entry config.GenericMap) (Labels, bool) { + allLabelsFound := true + labels := Labels{} + + for _, key := range aggregate.definition.GroupByKeys { + value, ok := entry[key] + if !ok { + allLabelsFound = false + } + labels[key] = util.ConvertToString(value) + } + + return labels, allLabelsFound +} + +func (labels Labels) getNormalizedValues() NormalizedValues { + var normalizedAsString string + + keys := make([]string, 0, len(labels)) + for k := range labels { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + normalizedAsString += labels[k] + "," + } + + if len(normalizedAsString) > 0 { + normalizedAsString = normalizedAsString[:len(normalizedAsString)-1] + } + + return NormalizedValues(normalizedAsString) +} + +func (aggregate *Aggregate) filterEntry(entry config.GenericMap) (NormalizedValues, Labels, error) { + labels, allLabelsFound := aggregate.LabelsFromEntry(entry) + if !allLabelsFound { + return "", nil, fmt.Errorf("missing keys in entry") + } + + normalizedValues := labels.getNormalizedValues() + return normalizedValues, labels, nil +} + +func getInitValue(operation string) float64 { + switch operation { + case OperationSum, OperationAvg, OperationMax, OperationCount: + return 0 + case OperationMin: + return math.MaxFloat64 + case OperationRawValues: + // Actually, in 
OperationRawValues the value is ignored. + return 0 + default: + log.Panicf("unknown operation %v", operation) + return 0 + } +} + +func (aggregate *Aggregate) UpdateByEntry(entry config.GenericMap, normalizedValues NormalizedValues, labels Labels) error { + + aggregate.mutex.Lock() + defer aggregate.mutex.Unlock() + + var groupState *GroupState + oldEntry, ok := aggregate.cache.GetCacheEntry(string(normalizedValues)) + if !ok { + groupState = &GroupState{normalizedValues: normalizedValues, labels: labels} + initVal := getInitValue(string(aggregate.definition.OperationType)) + groupState.totalValue = initVal + groupState.recentOpValue = initVal + if aggregate.definition.OperationType == OperationRawValues { + groupState.recentRawValues = make([]float64, 0) + } + } else { + groupState = oldEntry.(*GroupState) + } + aggregate.cache.UpdateCacheEntry(string(normalizedValues), groupState) + + // update value + operationKey := aggregate.definition.OperationKey + operation := aggregate.definition.OperationType + + if operation == OperationCount { + groupState.totalValue = float64(groupState.totalCount + 1) + groupState.recentOpValue = float64(groupState.recentCount + 1) + } else if operationKey != "" { + value, ok := entry[operationKey] + if ok { + valueString := util.ConvertToString(value) + if valueFloat64, err := strconv.ParseFloat(valueString, 64); err != nil { + // Log as debug to avoid performance impact + log.Debugf("UpdateByEntry error when parsing float '%s': %v", valueString, err) + } else { + switch operation { + case OperationSum: + groupState.totalValue += valueFloat64 + groupState.recentOpValue += valueFloat64 + case OperationMax: + groupState.totalValue = math.Max(groupState.totalValue, valueFloat64) + groupState.recentOpValue = math.Max(groupState.recentOpValue, valueFloat64) + case OperationMin: + groupState.totalValue = math.Min(groupState.totalValue, valueFloat64) + groupState.recentOpValue = math.Min(groupState.recentOpValue, valueFloat64) + case OperationAvg: + groupState.totalValue = (groupState.totalValue*float64(groupState.totalCount) + valueFloat64) / float64(groupState.totalCount+1) + groupState.recentOpValue = (groupState.recentOpValue*float64(groupState.recentCount) + valueFloat64) / float64(groupState.recentCount+1) + case OperationRawValues: + groupState.recentRawValues = append(groupState.recentRawValues, valueFloat64) + } + } + } + } + + // update count + groupState.totalCount++ + groupState.recentCount++ + + return nil +} + +func (aggregate *Aggregate) Evaluate(entries []config.GenericMap) error { + for _, entry := range entries { + // filter entries matching labels with aggregates + normalizedValues, labels, err := aggregate.filterEntry(entry) + if err != nil { + continue + } + + // update aggregate group by entry + err = aggregate.UpdateByEntry(entry, normalizedValues, labels) + if err != nil { + log.Debugf("UpdateByEntry error %v", err) + continue + } + } + + return nil +} + +func (aggregate *Aggregate) GetMetrics() []config.GenericMap { + aggregate.mutex.Lock() + defer aggregate.mutex.Unlock() + + var metrics []config.GenericMap + + // iterate over the items in the cache + aggregate.cache.Iterate(func(_ string, value interface{}) { + group := value.(*GroupState) + newEntry := config.GenericMap{ + "name": aggregate.definition.Name, + "operation_type": aggregate.definition.OperationType, + "operation_key": aggregate.definition.OperationKey, + "by": strings.Join(aggregate.definition.GroupByKeys, ","), + "aggregate": string(group.normalizedValues), + 
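+ // total_* values accumulate over the lifetime of the group, while recent_* values are reset after each report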
"total_value": group.totalValue, + "total_count": group.totalCount, + "recent_raw_values": group.recentRawValues, + "recent_op_value": group.recentOpValue, + "recent_count": group.recentCount, + strings.Join(aggregate.definition.GroupByKeys, "_"): string(group.normalizedValues), + } + // add the items in aggregate.definition.GroupByKeys individually to the entry + for _, key := range aggregate.definition.GroupByKeys { + newEntry[key] = group.labels[key] + } + metrics = append(metrics, newEntry) + // Once reported, we reset the recentXXX fields + if aggregate.definition.OperationType == OperationRawValues { + group.recentRawValues = make([]float64, 0) + } + group.recentCount = 0 + group.recentOpValue = getInitValue(string(aggregate.definition.OperationType)) + }) + + return metrics +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go new file mode 100644 index 000000000..848e9bf26 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate/aggregates.go @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aggregate + +import ( + "sync" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + log "github.com/sirupsen/logrus" +) + +var defaultExpiryTime = 2 * time.Minute +var cleanupLoopTime = 2 * time.Minute + +type Aggregates struct { + Aggregates []Aggregate + cleanupLoopTime time.Duration + defaultExpiryTime time.Duration +} + +func (aggregates *Aggregates) Evaluate(entries []config.GenericMap) error { + for _, aggregate := range aggregates.Aggregates { + err := aggregate.Evaluate(entries) + if err != nil { + log.Debugf("Evaluate error %v", err) + continue + } + } + + return nil +} + +func (aggregates *Aggregates) GetMetrics() []config.GenericMap { + var metrics []config.GenericMap + for _, aggregate := range aggregates.Aggregates { + aggregateMetrics := aggregate.GetMetrics() + metrics = append(metrics, aggregateMetrics...) 
+ } + + return metrics +} + +func (aggregates *Aggregates) addAggregate(aggregateDefinition *api.AggregateDefinition) []Aggregate { + expiryTime := aggregateDefinition.ExpiryTime + if expiryTime.Duration == 0 { + expiryTime.Duration = defaultExpiryTime + } + aggregate := Aggregate{ + definition: aggregateDefinition, + cache: utils.NewTimedCache(0, nil), + mutex: &sync.Mutex{}, + expiryTime: expiryTime.Duration, + } + + return append(aggregates.Aggregates, aggregate) +} + +func (aggregates *Aggregates) cleanupExpiredEntriesLoop() { + + ticker := time.NewTicker(aggregates.cleanupLoopTime) + go func() { + for { + select { + case <-utils.ExitChannel(): + return + case <-ticker.C: + aggregates.cleanupExpiredEntries() + } + } + }() +} + +func (aggregates *Aggregates) cleanupExpiredEntries() { + for _, aggregate := range aggregates.Aggregates { + aggregate.mutex.Lock() + aggregate.cache.CleanupExpiredEntries(aggregate.expiryTime, func(_ interface{}) {}) + aggregate.mutex.Unlock() + } +} + +func NewAggregatesFromConfig(aggConfig *api.Aggregates) (Aggregates, error) { + aggregates := Aggregates{ + cleanupLoopTime: cleanupLoopTime, + defaultExpiryTime: aggConfig.DefaultExpiryTime.Duration, + } + if aggregates.defaultExpiryTime == 0 { + aggregates.defaultExpiryTime = defaultExpiryTime + } + + for i := range aggConfig.Rules { + aggregates.Aggregates = aggregates.addAggregate(&aggConfig.Rules[i]) + } + + aggregates.cleanupExpiredEntriesLoop() + + return aggregates, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go new file mode 100644 index 000000000..cf5b1de01 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/aggregator.go @@ -0,0 +1,199 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conntrack + +import ( + "fmt" + "math" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + log "github.com/sirupsen/logrus" +) + +// aggregator represents a single aggregate field in a connection. The aggregated values are stored in the connection +// but managed by the aggregator. +type aggregator interface { + // addField adds an aggregate field to the connection + addField(conn connection) + // update updates the aggregate field in the connection based on the flow log. 
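+ // d selects the _AB/_BA variant of the output field when splitAB is set; isFirst is true when the flow log has just created the connection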
+ update(conn connection, flowLog config.GenericMap, d direction, isFirst bool) +} + +type aggregateBase struct { + inputField string + outputField string + splitAB bool + initVal interface{} + metrics *metricsType + reportMissing bool +} + +type aSum struct{ aggregateBase } +type aCount struct{ aggregateBase } +type aMin struct{ aggregateBase } +type aMax struct{ aggregateBase } +type aFirst struct{ aggregateBase } +type aLast struct{ aggregateBase } + +// TODO: think of adding a more complex operation such as Average Packet Size which involves 2 input fields: Bytes/Packets + +// newAggregator returns a new aggregator depending on the output field operation +func newAggregator(of api.OutputField, metrics *metricsType) (aggregator, error) { + if of.Name == "" { + return nil, fmt.Errorf("empty name %v", of) + } + var inputField string + if of.Input != "" { + inputField = of.Input + } else { + inputField = of.Name + } + aggBase := aggregateBase{inputField: inputField, outputField: of.Name, splitAB: of.SplitAB, metrics: metrics, reportMissing: of.ReportMissing} + var agg aggregator + switch of.Operation { + case api.ConnTrackSum: + aggBase.initVal = float64(0) + agg = &aSum{aggBase} + case api.ConnTrackCount: + aggBase.initVal = float64(0) + agg = &aCount{aggBase} + case api.ConnTrackMin: + aggBase.initVal = math.MaxFloat64 + agg = &aMin{aggBase} + case api.ConnTrackMax: + aggBase.initVal = -math.MaxFloat64 + agg = &aMax{aggBase} + case api.ConnTrackFirst: + aggBase.initVal = nil + agg = &aFirst{aggBase} + case api.ConnTrackLast: + aggBase.initVal = nil + agg = &aLast{aggBase} + default: + return nil, fmt.Errorf("unknown operation: %q", of.Operation) + } + return agg, nil +} + +func (agg *aggregateBase) getOutputField(d direction) string { + outputField := agg.outputField + if agg.splitAB { + switch d { + case dirAB: + outputField += "_AB" + case dirBA: + outputField += "_BA" + case dirNA: + fallthrough + default: + log.Panicf("splitAB aggregator %v cannot determine outputField because direction is missing. 
Check configuration.", outputField)
+		}
+	}
+	return outputField
+}
+
+func (agg *aggregateBase) getInputFieldValue(flowLog config.GenericMap) (float64, error) {
+	rawValue, ok := flowLog[agg.inputField]
+	if !ok {
+		// report an error only if explicitly requested; by default, FLP skips empty fields to reduce storage size
+		if agg.reportMissing {
+			if agg.metrics != nil {
+				agg.metrics.aggregatorErrors.WithLabelValues("MissingFieldError", agg.inputField).Inc()
+			}
+			return 0, fmt.Errorf("missing field %v", agg.inputField)
+		}
+		// fall back on 0 without error
+		return 0, nil
+	}
+	floatValue, err := utils.ConvertToFloat64(rawValue)
+	if err != nil {
+		if agg.metrics != nil {
+			agg.metrics.aggregatorErrors.WithLabelValues("Float64ConversionError", agg.inputField).Inc()
+		}
+		return 0, fmt.Errorf("cannot convert %q to float64: %w", rawValue, err)
+	}
+	return floatValue, nil
+}
+
+func (agg *aggregateBase) addField(conn connection) {
+	if agg.splitAB {
+		conn.addAgg(agg.getOutputField(dirAB), agg.initVal)
+		conn.addAgg(agg.getOutputField(dirBA), agg.initVal)
+	} else {
+		conn.addAgg(agg.getOutputField(dirNA), agg.initVal)
+	}
+}
+
+func (agg *aSum) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
+	outputField := agg.getOutputField(d)
+	v, err := agg.getInputFieldValue(flowLog)
+	if err != nil {
+		log.Errorf("error updating connection %x: %v", conn.getHash().hashTotal, err)
+		return
+	}
+	conn.updateAggFnValue(outputField, func(curr float64) float64 {
+		return curr + v
+	})
+}
+
+func (agg *aCount) update(conn connection, _ config.GenericMap, d direction, _ bool) {
+	outputField := agg.getOutputField(d)
+	conn.updateAggFnValue(outputField, func(curr float64) float64 {
+		return curr + 1
+	})
+}
+
+func (agg *aMin) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
+	outputField := agg.getOutputField(d)
+	v, err := agg.getInputFieldValue(flowLog)
+	if err != nil {
+		log.Errorf("error updating connection %x: %v", conn.getHash().hashTotal, err)
+		return
+	}
+
+	conn.updateAggFnValue(outputField, func(curr float64) float64 {
+		return math.Min(curr, v)
+	})
+}
+
+func (agg *aMax) update(conn connection, flowLog config.GenericMap, d direction, _ bool) {
+	outputField := agg.getOutputField(d)
+	v, err := agg.getInputFieldValue(flowLog)
+	if err != nil {
+		log.Errorf("error updating connection %x: %v", conn.getHash().hashTotal, err)
+		return
+	}
+
+	conn.updateAggFnValue(outputField, func(curr float64) float64 {
+		return math.Max(curr, v)
+	})
+}
+
+func (agg *aFirst) update(conn connection, flowLog config.GenericMap, _ direction, isNew bool) {
+	if isNew {
+		conn.updateAggValue(agg.outputField, flowLog[agg.inputField])
+	}
+}
+
+func (agg *aLast) update(conn connection, flowLog config.GenericMap, _ direction, _ bool) {
+	if flowLog[agg.inputField] != nil {
+		conn.updateAggValue(agg.outputField, flowLog[agg.inputField])
+	}
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
new file mode 100644
index 000000000..e67c5125b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conn.go
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/utils"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+)
+
+type connection interface {
+	addAgg(fieldName string, initValue interface{})
+	updateAggValue(fieldName string, newValue interface{})
+	updateAggFnValue(fieldName string, newValueFn func(curr float64) float64)
+	setExpiryTime(t time.Time)
+	getExpiryTime() time.Time
+	setNextHeartbeatTime(t time.Time)
+	getNextHeartbeatTime() time.Time
+	toGenericMap() config.GenericMap
+	getHash() totalHashType
+	// markReported marks the connection as reported. That is, at least one connection record has been emitted
+	// for this connection (i.e. newConnection, heartbeat, endConnection).
+	// It returns true on the first invocation to indicate the first report. Otherwise, it returns false.
+	markReported() bool
+	isMatchSelector(map[string]interface{}) bool
+}
+
+type connType struct {
+	hash              totalHashType
+	keys              config.GenericMap
+	aggFields         map[string]interface{}
+	expiryTime        time.Time
+	nextHeartbeatTime time.Time
+	isReported        bool
+}
+
+func (c *connType) addAgg(fieldName string, initValue interface{}) {
+	c.aggFields[fieldName] = initValue
+}
+
+func (c *connType) updateAggValue(fieldName string, newValue interface{}) {
+	_, ok := c.aggFields[fieldName]
+	if !ok {
+		log.Panicf("tried updating missing field %v", fieldName)
+	}
+	c.aggFields[fieldName] = newValue
+}
+
+func (c *connType) updateAggFnValue(fieldName string, newValueFn func(curr float64) float64) {
+	v, ok := c.aggFields[fieldName]
+	if !ok {
+		log.Panicf("tried updating missing field %v", fieldName)
+	}
+
+	// existing value must be float64 for function aggregation
+	switch value := v.(type) {
+	case float64:
+		c.aggFields[fieldName] = newValueFn(value)
+	default:
+		log.Panicf("tried to aggregate non float64 field %v value %v", fieldName, v)
+	}
+}
+
+func (c *connType) setExpiryTime(t time.Time) {
+	c.expiryTime = t
+}
+
+func (c *connType) getExpiryTime() time.Time {
+	return c.expiryTime
+}
+
+func (c *connType) setNextHeartbeatTime(t time.Time) {
+	c.nextHeartbeatTime = t
+}
+
+func (c *connType) getNextHeartbeatTime() time.Time {
+	return c.nextHeartbeatTime
+}
+
+func (c *connType) toGenericMap() config.GenericMap {
+	gm := config.GenericMap{}
+	for k, v := range c.aggFields {
+		if v != nil && (reflect.TypeOf(v).Kind() != reflect.Float64 || v.(float64) != 0) {
+			gm[k] = v
+		}
+	}
+
+	// In case of a conflict between the keys and the aggFields, the keys should prevail.
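+	// For example (illustrative): with keys {"SrcAddr": "10.0.0.1"} and
+	// aggFields {"SrcAddr": "stale", "Bytes": 42.0}, the resulting map is
+	// {"SrcAddr": "10.0.0.1", "Bytes": 42.0}.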
+ for k, v := range c.keys { + gm[k] = v + } + return gm +} + +func (c *connType) getHash() totalHashType { + return c.hash +} + +func (c *connType) markReported() bool { + isFirst := !c.isReported + c.isReported = true + return isFirst +} + +//nolint:cyclop +func (c *connType) isMatchSelector(selector map[string]interface{}) bool { + for k, v := range selector { + connValueRaw, found := c.keys[k] + if !found { + return false + } + switch connValue := connValueRaw.(type) { + case int: + selectorValue, err := utils.ConvertToInt(v) + if err != nil || connValue != selectorValue { + return false + } + case uint32: + selectorValue, err := utils.ConvertToUint32(v) + if err != nil || connValue != selectorValue { + return false + } + case uint64: + selectorValue, err := utils.ConvertToUint64(v) + if err != nil || connValue != selectorValue { + return false + } + case int64: + selectorValue, err := utils.ConvertToInt64(v) + if err != nil || connValue != selectorValue { + return false + } + case float64: + selectorValue, err := utils.ConvertToFloat64(v) + if err != nil || connValue != selectorValue { + return false + } + case bool: + selectorValue, err := utils.ConvertToBool(v) + if err != nil || connValue != selectorValue { + return false + } + case string: + selectorValue := utils.ConvertToString(v) + if connValue != selectorValue { + return false + } + default: + connValue = utils.ConvertToString(connValue) + selectorValue := fmt.Sprintf("%v", v) + if connValue != selectorValue { + return false + } + } + } + return true +} + +type connBuilder struct { + conn *connType + shouldSwapAB bool + metrics *metricsType +} + +func newConnBuilder(metrics *metricsType) *connBuilder { + return &connBuilder{ + conn: &connType{ + aggFields: make(map[string]interface{}), + keys: config.GenericMap{}, + isReported: false, + }, + metrics: metrics, + } +} + +func (cb *connBuilder) Hash(h totalHashType) *connBuilder { + if cb.shouldSwapAB { + h.hashA, h.hashB = h.hashB, h.hashA + } + cb.conn.hash = h + return cb +} + +func (cb *connBuilder) ShouldSwapAB(b bool) *connBuilder { + cb.shouldSwapAB = b + return cb +} + +func (cb *connBuilder) keysFrom(flowLog config.GenericMap, kd *api.KeyDefinition, endpointAFields, endpointBFields []string) *connBuilder { + for _, fg := range kd.FieldGroups { + for _, f := range fg.Fields { + cb.conn.keys[f] = flowLog[f] + } + } + if cb.shouldSwapAB { + for i := range endpointAFields { + fieldA := endpointAFields[i] + fieldB := endpointBFields[i] + cb.conn.keys[fieldA] = flowLog[fieldB] + cb.conn.keys[fieldB] = flowLog[fieldA] + } + cb.metrics.tcpFlags.WithLabelValues("swapAB").Inc() + } + return cb +} + +func (cb *connBuilder) Aggregators(aggs []aggregator) *connBuilder { + for _, agg := range aggs { + agg.addField(cb.conn) + } + return cb +} + +func (cb *connBuilder) Build() connection { + return cb.conn +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go new file mode 100644 index 000000000..47d67a493 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/conntrack.go @@ -0,0 +1,291 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+import (
+	"fmt"
+	"hash"
+	"hash/fnv"
+	"strconv"
+
+	"github.com/benbjohnson/clock"
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract"
+	"github.com/netobserv/flowlogs-pipeline/pkg/utils"
+	log "github.com/sirupsen/logrus"
+)
+
+// direction indicates the direction of a flow log in a connection. It's used by aggregators to determine which split
+// of the aggregator should be updated, xxx_AB or xxx_BA.
+type direction uint8
+
+const (
+	dirNA direction = iota
+	dirAB
+	dirBA
+)
+
+type conntrackImpl struct {
+	clock                            clock.Clock
+	config                           *api.ConnTrack
+	endpointAFields, endpointBFields []string
+	hashProvider                     func() hash.Hash64
+	connStore                        *connectionStore
+	aggregators                      []aggregator
+	shouldOutputFlowLogs             bool
+	shouldOutputNewConnection        bool
+	shouldOutputEndConnection        bool
+	shouldOutputHeartbeats           bool
+	metrics                          *metricsType
+}
+
+// filterFlowLog returns true when the flow log should be skipped, that is, when it does not carry a valid transport protocol.
+func (ct *conntrackImpl) filterFlowLog(fl config.GenericMap) bool {
+	if !fl.IsValidProtocol() || !fl.IsTransportProtocol() {
+		return true
+	}
+	return false
+}
+
+func (ct *conntrackImpl) Extract(flowLogs []config.GenericMap) []config.GenericMap {
+	log.Debugf("entering Extract conntrack, in = %v", flowLogs)
+
+	var outputRecords []config.GenericMap
+	for _, fl := range flowLogs {
+		if ct.filterFlowLog(fl) {
+			ct.metrics.inputRecords.WithLabelValues("discarded").Inc()
+			continue
+		}
+		computedHash, err := computeHash(fl, &ct.config.KeyDefinition, ct.hashProvider(), ct.metrics)
+		if err != nil {
+			log.Warningf("skipping flow log %v: %v", fl, err)
+			ct.metrics.inputRecords.WithLabelValues("rejected").Inc()
+			continue
+		}
+
+		if fl.IsDuplicate() {
+			log.Debugf("skipping duplicated flow log %v", fl)
+			ct.metrics.inputRecords.WithLabelValues("duplicate").Inc()
+		} else {
+			conn, exists, _ := ct.connStore.getConnection(computedHash.hashTotal)
+			if !exists {
+				if (ct.config.MaxConnectionsTracked > 0) && (ct.connStore.len() >= ct.config.MaxConnectionsTracked) {
+					log.Warningf("too many connections; skipping flow log %v", fl)
+					ct.metrics.inputRecords.WithLabelValues("discarded").Inc()
+				} else {
+					builder := newConnBuilder(ct.metrics)
+					conn = builder.
+						ShouldSwapAB(ct.config.TCPFlags.SwapAB && ct.containsTCPFlag(fl, SYNACKFlag)).
+						Hash(computedHash).
+						keysFrom(fl, &ct.config.KeyDefinition, ct.endpointAFields, ct.endpointBFields).
+						Aggregators(ct.aggregators).
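+						// Build (below) finalizes the connection with its hash, key fields, and initialized aggregates.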
+ Build() + ct.connStore.addConnection(computedHash.hashTotal, conn) + ct.connStore.updateNextHeartbeatTime(computedHash.hashTotal) + ct.updateConnection(conn, fl, computedHash, true) + ct.metrics.inputRecords.WithLabelValues("newConnection").Inc() + if ct.shouldOutputNewConnection { + record := conn.toGenericMap() + addHashField(record, computedHash.hashTotal) + addTypeField(record, api.ConnTrackNewConnection) + isFirst := conn.markReported() + addIsFirstField(record, isFirst) + outputRecords = append(outputRecords, record) + ct.metrics.outputRecords.WithLabelValues("newConnection").Inc() + } + } + } else { + ct.updateConnection(conn, fl, computedHash, false) + ct.metrics.inputRecords.WithLabelValues("update").Inc() + } + } + + if ct.shouldOutputFlowLogs { + record := fl.Copy() + addHashField(record, computedHash.hashTotal) + addTypeField(record, api.ConnTrackFlowLog) + outputRecords = append(outputRecords, record) + ct.metrics.outputRecords.WithLabelValues("flowLog").Inc() + } + } + + endConnectionRecords := ct.popEndConnections() + if ct.shouldOutputEndConnection { + outputRecords = append(outputRecords, endConnectionRecords...) + ct.metrics.outputRecords.WithLabelValues("endConnection").Add(float64(len(endConnectionRecords))) + } + + if ct.shouldOutputHeartbeats { + heartbeatRecords := ct.prepareHeartbeatRecords() + outputRecords = append(outputRecords, heartbeatRecords...) + ct.metrics.outputRecords.WithLabelValues("heartbeat").Add(float64(len(heartbeatRecords))) + } + + return outputRecords +} + +func (ct *conntrackImpl) popEndConnections() []config.GenericMap { + connections := ct.connStore.popEndConnections() + + var outputRecords []config.GenericMap + // Convert the connections to GenericMaps and add meta fields + for _, conn := range connections { + record := conn.toGenericMap() + addHashField(record, conn.getHash().hashTotal) + addTypeField(record, api.ConnTrackEndConnection) + var isFirst bool + if ct.shouldOutputEndConnection { + isFirst = conn.markReported() + } + addIsFirstField(record, isFirst) + outputRecords = append(outputRecords, record) + } + return outputRecords +} + +func (ct *conntrackImpl) prepareHeartbeatRecords() []config.GenericMap { + connections := ct.connStore.prepareHeartbeats() + + var outputRecords []config.GenericMap + // Convert the connections to GenericMaps and add meta fields + for _, conn := range connections { + record := conn.toGenericMap() + addHashField(record, conn.getHash().hashTotal) + addTypeField(record, api.ConnTrackHeartbeat) + var isFirst bool + if ct.shouldOutputHeartbeats { + isFirst = conn.markReported() + } + addIsFirstField(record, isFirst) + outputRecords = append(outputRecords, record) + } + return outputRecords +} + +func (ct *conntrackImpl) updateConnection(conn connection, flowLog config.GenericMap, flowLogHash totalHashType, isNew bool) { + d := ct.getFlowLogDirection(conn, flowLogHash) + for _, agg := range ct.aggregators { + agg.update(conn, flowLog, d, isNew) + } + + if ct.config.TCPFlags.DetectEndConnection && ct.containsTCPFlag(flowLog, FINFlag) { + ct.metrics.tcpFlags.WithLabelValues("detectEndConnection").Inc() + ct.connStore.setConnectionTerminating(flowLogHash.hashTotal) + } else { + ct.connStore.updateConnectionExpiryTime(flowLogHash.hashTotal) + } +} + +func (ct *conntrackImpl) containsTCPFlag(flowLog config.GenericMap, queryFlag uint32) bool { + tcpFlagsRaw, ok := flowLog[ct.config.TCPFlags.FieldName] + if ok { + tcpFlags, err := utils.ConvertToUint32(tcpFlagsRaw) + if err != nil { + log.Warningf("cannot convert TCP 
flag %q to uint32: %v", tcpFlagsRaw, err) + return false + } + containsFlag := (tcpFlags & queryFlag) == queryFlag + if containsFlag { + return true + } + } + + return false +} + +func (ct *conntrackImpl) getFlowLogDirection(conn connection, flowLogHash totalHashType) direction { + d := dirNA + if ct.config.KeyDefinition.Hash.FieldGroupARef != "" { + if conn.getHash().hashA == flowLogHash.hashA { + // A -> B + d = dirAB + } else { + // B -> A + d = dirBA + } + } + return d +} + +// NewConnectionTrack creates a new connection track instance +func NewConnectionTrack(opMetrics *operational.Metrics, params config.StageParam, clock clock.Clock) (extract.Extractor, error) { + cfg := params.Extract.ConnTrack + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("ConnectionTrack config is invalid: %w", err) + } + + metrics := newMetrics(opMetrics) + + var aggregators []aggregator + for _, of := range cfg.OutputFields { + agg, err := newAggregator(of, metrics) + if err != nil { + return nil, fmt.Errorf("error creating aggregator: %w", err) + } + aggregators = append(aggregators, agg) + } + shouldOutputFlowLogs := false + shouldOutputNewConnection := false + shouldOutputEndConnection := false + shouldOutputHeartbeats := false + for _, option := range cfg.OutputRecordTypes { + switch option { + case api.ConnTrackFlowLog: + shouldOutputFlowLogs = true + case api.ConnTrackNewConnection: + shouldOutputNewConnection = true + case api.ConnTrackEndConnection: + shouldOutputEndConnection = true + case api.ConnTrackHeartbeat: + shouldOutputHeartbeats = true + default: + return nil, fmt.Errorf("unknown OutputRecordTypes: %v", option) + } + } + + endpointAFields, endpointBFields := cfg.GetABFields() + conntrack := &conntrackImpl{ + clock: clock, + connStore: newConnectionStore(cfg.Scheduling, metrics, clock.Now), + config: cfg, + endpointAFields: endpointAFields, + endpointBFields: endpointBFields, + hashProvider: fnv.New64a, + aggregators: aggregators, + shouldOutputFlowLogs: shouldOutputFlowLogs, + shouldOutputNewConnection: shouldOutputNewConnection, + shouldOutputEndConnection: shouldOutputEndConnection, + shouldOutputHeartbeats: shouldOutputHeartbeats, + metrics: metrics, + } + return conntrack, nil +} + +func addHashField(record config.GenericMap, hashID uint64) { + record[api.HashIDFieldName] = strconv.FormatUint(hashID, 16) +} + +func addTypeField(record config.GenericMap, recordType api.ConnTrackOutputRecordTypeEnum) { + record[api.RecordTypeFieldName] = recordType +} + +func addIsFirstField(record config.GenericMap, isFirst bool) { + record[api.IsFirstFieldName] = isFirst +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go new file mode 100644 index 000000000..042331b73 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/hash.go @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conntrack + +import ( + "bytes" + "encoding/binary" + "encoding/gob" + "fmt" + "hash" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + log "github.com/sirupsen/logrus" +) + +// TODO: what's a better name for this struct? +type totalHashType struct { + hashA uint64 + hashB uint64 + hashTotal uint64 +} + +// computeHash computes the hash of a flow log according to keyDefinition. +// Two flow logs will have the same hash if they belong to the same connection. +func computeHash(flowLog config.GenericMap, keyDefinition *api.KeyDefinition, hasher hash.Hash64, metrics *metricsType) (totalHashType, error) { + fieldGroup2hash := make(map[string]uint64) + + // Compute the hash of each field group + for _, fg := range keyDefinition.FieldGroups { + h, err := computeHashFields(flowLog, fg.Fields, hasher, metrics) + if err != nil { + return totalHashType{}, fmt.Errorf("compute hash: %w", err) + } + fieldGroup2hash[fg.Name] = h + } + + // Compute the total hash + th := totalHashType{} + hasher.Reset() + for _, fgName := range keyDefinition.Hash.FieldGroupRefs { + hasher.Write(uint64ToBytes(fieldGroup2hash[fgName])) + } + if keyDefinition.Hash.FieldGroupARef != "" { + th.hashA = fieldGroup2hash[keyDefinition.Hash.FieldGroupARef] + th.hashB = fieldGroup2hash[keyDefinition.Hash.FieldGroupBRef] + // Determine order between A's and B's hash to get the same hash for both flow logs from A to B and from B to A. + if th.hashA < th.hashB { + hasher.Write(uint64ToBytes(th.hashA)) + hasher.Write(uint64ToBytes(th.hashB)) + } else { + hasher.Write(uint64ToBytes(th.hashB)) + hasher.Write(uint64ToBytes(th.hashA)) + } + } + th.hashTotal = hasher.Sum64() + return th, nil +} + +func computeHashFields(flowLog config.GenericMap, fieldNames []string, hasher hash.Hash64, metrics *metricsType) (uint64, error) { + hasher.Reset() + for _, fn := range fieldNames { + f, ok := flowLog[fn] + if !ok { + log.Warningf("Missing field %v", fn) + if metrics != nil { + metrics.hashErrors.WithLabelValues("MissingFieldError", fn).Inc() + } + continue + } + bytes, err := toBytes(f) + if err != nil { + return 0, err + } + hasher.Write(bytes) + } + return hasher.Sum64(), nil +} + +func uint64ToBytes(data uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, data) + return b +} + +func toBytes(data interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(data) + if err != nil { + return nil, err + } + bytes := buf.Bytes() + return bytes, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/metrics.go new file mode 100644 index 000000000..66baaa71f --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/metrics.go @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package conntrack + +import ( + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + connStoreLengthDef = operational.DefineMetric( + "conntrack_memory_connections", + "The total number of tracked connections in memory per group and phase", + operational.TypeGauge, + "group", "phase", + ) + + inputRecordsDef = operational.DefineMetric( + "conntrack_input_records", + "The total number of input records per classification", + operational.TypeCounter, + "classification", + ) + + outputRecordsDef = operational.DefineMetric( + "conntrack_output_records", + "The total number of output records", + operational.TypeCounter, + "type", + ) + + tcpFlagsDef = operational.DefineMetric( + "conntrack_tcp_flags", + "The total number of actions taken based on TCP flags", + operational.TypeCounter, + "action", + ) + + hashErrorsDef = operational.DefineMetric( + "conntrack_hash_errors", + "The total number of errors during hash computation", + operational.TypeCounter, + "error", "field", + ) + + aggregatorErrorsDef = operational.DefineMetric( + "conntrack_aggregator_errors", + "The total number of errors during aggregation", + operational.TypeCounter, + "error", "field", + ) + + endConnectionsDef = operational.DefineMetric( + "conntrack_end_connections", + "The total number of connections ended per group and reason", + operational.TypeCounter, + "group", "reason", + ) +) + +type metricsType struct { + connStoreLength *prometheus.GaugeVec + inputRecords *prometheus.CounterVec + outputRecords *prometheus.CounterVec + tcpFlags *prometheus.CounterVec + hashErrors *prometheus.CounterVec + aggregatorErrors *prometheus.CounterVec + endConnections *prometheus.CounterVec +} + +func newMetrics(opMetrics *operational.Metrics) *metricsType { + return &metricsType{ + connStoreLength: opMetrics.NewGaugeVec(&connStoreLengthDef), + inputRecords: opMetrics.NewCounterVec(&inputRecordsDef), + outputRecords: opMetrics.NewCounterVec(&outputRecordsDef), + tcpFlags: opMetrics.NewCounterVec(&tcpFlagsDef), + hashErrors: opMetrics.NewCounterVec(&hashErrorsDef), + aggregatorErrors: opMetrics.NewCounterVec(&aggregatorErrorsDef), + endConnections: opMetrics.NewCounterVec(&endConnectionsDef), + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go new file mode 100644 index 000000000..bc7040d64 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/store.go @@ -0,0 +1,294 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package conntrack + +import ( + "fmt" + "sort" + "strings" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + log "github.com/sirupsen/logrus" +) + +const ( + expiryOrder = utils.OrderID("expiryOrder") + nextHeartbeatTimeOrder = utils.OrderID("nextHeartbeatTimeOrder") + activeLabel = "active" + terminatingLabel = "terminating" +) + +// connectionStore provides means to manage the connections such as retrieving a connection by its hash and organizing +// them in groups sorted by expiry time and next report time. +// This allows efficient retrieval and removal of connections. +type connectionStore struct { + groups []*groupType + hashID2groupIdx map[uint64]int + metrics *metricsType + now func() time.Time +} + +type groupType struct { + scheduling api.ConnTrackSchedulingGroup + // active connections + activeMom *utils.MultiOrderedMap + // connections that detected EndConnection from TCP FIN flag. These will not trigger updates anymore until pop + // check expireConnection func + terminatingMom *utils.MultiOrderedMap + labelValue string +} + +func (cs *connectionStore) getGroupIdx(conn connection) (groupIdx int) { + for i, group := range cs.groups { + if conn.isMatchSelector(group.scheduling.Selector) { + // connection belongs to scheduling group i + return i + } + } + // Shouldn't get here since the last scheduling group should have a selector that matches any connection. + log.Errorf("BUG. connection with hash %x doesn't match any selector", conn.getHash().hashTotal) + lastGroupIdx := len(cs.groups) - 1 + return lastGroupIdx +} + +func (cs *connectionStore) addConnection(hashID uint64, conn connection) { + groupIdx := cs.getGroupIdx(conn) + mom := cs.groups[groupIdx].activeMom + + err := mom.AddRecord(utils.Key(hashID), conn) + if err != nil { + log.Errorf("BUG. connection with hash %x already exists in store. %v", hashID, conn) + } + cs.hashID2groupIdx[hashID] = groupIdx + + groupLabel := cs.groups[groupIdx].labelValue + activeLen := cs.groups[groupIdx].activeMom.Len() + cs.metrics.connStoreLength.WithLabelValues(groupLabel, activeLabel).Set(float64(activeLen)) +} + +func (cs *connectionStore) getConnection(hashID uint64) (connection, bool, bool) { + groupIdx, found := cs.hashID2groupIdx[hashID] + if !found { + return nil, false, false + } + mom := cs.groups[groupIdx].activeMom + + // get connection from active map + isRunning := true + record, ok := mom.GetRecord(utils.Key(hashID)) + if !ok { + // fallback on terminating map if not found + isRunning = false + mom := cs.groups[groupIdx].terminatingMom + record, ok = mom.GetRecord(utils.Key(hashID)) + if !ok { + return nil, false, false + } + } + conn := record.(connection) + return conn, true, isRunning +} + +func (cs *connectionStore) setConnectionTerminating(hashID uint64) { + conn, ok, active := cs.getConnection(hashID) + if !ok { + log.Panicf("BUG. 
connection hash %x doesn't exist", hashID)
+		return
+	} else if !active {
+		// connection is already terminating; nothing to do
+		return
+	}
+	groupIdx := cs.hashID2groupIdx[hashID]
+	groupLabel := cs.groups[groupIdx].labelValue
+	activeMom := cs.groups[groupIdx].activeMom
+	terminatingMom := cs.groups[groupIdx].terminatingMom
+	timeout := cs.groups[groupIdx].scheduling.TerminatingTimeout.Duration
+	newExpiryTime := cs.now().Add(timeout)
+	conn.setExpiryTime(newExpiryTime)
+	// Remove connection from active map
+	activeMom.RemoveRecord(utils.Key(hashID))
+	activeLen := cs.groups[groupIdx].activeMom.Len()
+	cs.metrics.connStoreLength.WithLabelValues(groupLabel, activeLabel).Set(float64(activeLen))
+	// Add connection to terminating map
+	err := terminatingMom.AddRecord(utils.Key(hashID), conn)
+	if err != nil {
+		log.Errorf("BUG. connection with hash %x already exists in store. %v", hashID, conn)
+	}
+	terminatingLen := cs.groups[groupIdx].terminatingMom.Len()
+	cs.metrics.connStoreLength.WithLabelValues(groupLabel, terminatingLabel).Set(float64(terminatingLen))
+}
+
+func (cs *connectionStore) updateConnectionExpiryTime(hashID uint64) {
+	conn, ok, active := cs.getConnection(hashID)
+	if !ok {
+		log.Panicf("BUG. connection hash %x doesn't exist", hashID)
+		return
+	} else if !active {
+		// connection is terminating. expiry time can't be updated anymore
+		return
+	}
+	groupIdx := cs.hashID2groupIdx[hashID]
+	mom := cs.groups[groupIdx].activeMom
+	timeout := cs.groups[groupIdx].scheduling.EndConnectionTimeout.Duration
+	newExpiryTime := cs.now().Add(timeout)
+	conn.setExpiryTime(newExpiryTime)
+	// Move to the back of the list
+	err := mom.MoveToBack(utils.Key(hashID), expiryOrder)
+	if err != nil {
+		log.Panicf("BUG. Can't update connection expiry time for hash %x: %v", hashID, err)
+		return
+	}
+}
+
+func (cs *connectionStore) updateNextHeartbeatTime(hashID uint64) {
+	conn, ok, active := cs.getConnection(hashID)
+	if !ok {
+		log.Panicf("BUG. connection hash %x doesn't exist", hashID)
+		return
+	} else if !active {
+		// connection is terminating. heartbeats are disabled
+		return
+	}
+	groupIdx := cs.hashID2groupIdx[hashID]
+	mom := cs.groups[groupIdx].activeMom
+	timeout := cs.groups[groupIdx].scheduling.HeartbeatInterval.Duration
+	newNextHeartbeatTime := cs.now().Add(timeout)
+	conn.setNextHeartbeatTime(newNextHeartbeatTime)
+	// Move to the back of the list
+	err := mom.MoveToBack(utils.Key(hashID), nextHeartbeatTimeOrder)
+	if err != nil {
+		log.Panicf("BUG. Can't update next heartbeat time for hash %x: %v", hashID, err)
+		return
+	}
+}
+
+func (cs *connectionStore) popEndConnectionOfMap(mom *utils.MultiOrderedMap, group *groupType) []connection {
+	var poppedConnections []connection
+
+	mom.IterateFrontToBack(expiryOrder, func(r utils.Record) (shouldDelete, shouldStop bool) {
+		conn := r.(connection)
+		expiryTime := conn.getExpiryTime()
+		if cs.now().After(expiryTime) {
+			// The connection has expired. We want to pop it.
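+			// Removing the hash from hashID2groupIdx below keeps the lookup map in sync with the ordered maps.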
+ poppedConnections = append(poppedConnections, conn) + shouldDelete, shouldStop = true, false + delete(cs.hashID2groupIdx, conn.getHash().hashTotal) + } else { + // No more expired connections + shouldDelete, shouldStop = false, true + } + return + }) + groupLabel := group.labelValue + momLen := mom.Len() + var phaseLabel string + switch mom { + case group.activeMom: + phaseLabel = activeLabel + case group.terminatingMom: + phaseLabel = terminatingLabel + } + cs.metrics.connStoreLength.WithLabelValues(groupLabel, phaseLabel).Set(float64(momLen)) + + return poppedConnections +} + +func (cs *connectionStore) popEndConnections() []connection { + // Iterate over the connections by scheduling groups. + // In each scheduling group iterate over them by their expiry time from old to new. + var poppedConnections []connection + for _, group := range cs.groups { + // Pop terminating connections first + terminatedConnections := cs.popEndConnectionOfMap(group.terminatingMom, group) + poppedConnections = append(poppedConnections, terminatedConnections...) + cs.metrics.endConnections.WithLabelValues(group.labelValue, "FIN_flag").Add(float64(len(terminatedConnections))) + + // Pop active connections that expired without TCP flag + timedoutConnections := cs.popEndConnectionOfMap(group.activeMom, group) + poppedConnections = append(poppedConnections, timedoutConnections...) + cs.metrics.endConnections.WithLabelValues(group.labelValue, "timeout").Add(float64(len(timedoutConnections))) + } + return poppedConnections +} + +func (cs *connectionStore) prepareHeartbeats() []connection { + var connections []connection + // Iterate over the connections by scheduling groups. + // In each scheduling group iterate over them by their next heartbeat time from old to new. + for _, group := range cs.groups { + group.activeMom.IterateFrontToBack(nextHeartbeatTimeOrder, func(r utils.Record) (shouldDelete, shouldStop bool) { + conn := r.(connection) + nextHeartbeat := conn.getNextHeartbeatTime() + needToReport := cs.now().After(nextHeartbeat) + if needToReport { + connections = append(connections, conn) + cs.updateNextHeartbeatTime(conn.getHash().hashTotal) + shouldDelete, shouldStop = false, false + } else { + shouldDelete, shouldStop = false, true + } + return + }) + } + return connections +} + +func (cs *connectionStore) len() int { + return len(cs.hashID2groupIdx) +} + +// schedulingGroupToLabelValue returns a string representation of a scheduling group to be used as a Prometheus label +// value. 
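+// For example (illustrative): groupIdx 1 with selector {"Protocol": 6} yields "1: Protocol=6, ",
+// while an empty selector yields "1: DEFAULT".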
+func schedulingGroupToLabelValue(groupIdx int, group api.ConnTrackSchedulingGroup) string {
+	sb := strings.Builder{}
+	sb.WriteString(fmt.Sprintf("%v: ", groupIdx))
+	var keys []string
+	for k := range group.Selector {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		sb.WriteString(fmt.Sprintf("%s=%v, ", k, group.Selector[k]))
+	}
+	if len(group.Selector) == 0 {
+		sb.WriteString("DEFAULT")
+	}
+	return sb.String()
+}
+
+func newConnectionStore(scheduling []api.ConnTrackSchedulingGroup, metrics *metricsType, nowFunc func() time.Time) *connectionStore {
+	groups := make([]*groupType, len(scheduling))
+	for groupIdx, sg := range scheduling {
+		groups[groupIdx] = &groupType{
+			scheduling:     sg,
+			activeMom:      utils.NewMultiOrderedMap(expiryOrder, nextHeartbeatTimeOrder),
+			terminatingMom: utils.NewMultiOrderedMap(expiryOrder, nextHeartbeatTimeOrder),
+			labelValue:     schedulingGroupToLabelValue(groupIdx, sg),
+		}
+	}
+
+	cs := &connectionStore{
+		groups:          groups,
+		hashID2groupIdx: map[uint64]int{},
+		metrics:         metrics,
+		now:             nowFunc,
+	}
+	return cs
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
new file mode 100644
index 000000000..2e695129b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack/tcpflags.go
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2023 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package conntrack
+
+// From: https://github.com/netobserv/netobserv-ebpf-agent/blob/c54e7eb9e37e8ef5bb948eff6141cdddf584a6f9/bpf/flows.c#L45-L56
+const (
+	FINFlag = uint32(0x01)
+	SYNFlag = uint32(0x02)
+	RSTFlag = uint32(0x04)
+	PSHFlag = uint32(0x08)
+	ACKFlag = uint32(0x10)
+	URGFlag = uint32(0x20)
+	ECEFlag = uint32(0x40)
+	CWRFlag = uint32(0x80)
+	// Custom flags
+	SYNACKFlag = uint32(0x100)
+	FINACKFlag = uint32(0x200)
+	RSTACKFlag = uint32(0x400)
+	// Note: The difference between SYNFlag | ACKFlag (0x12) and SYNACKFlag (0x100) is that the former indicates
+	// that a flowlog contains TCP packets with the SYN flag set and the ACK flag set, but not necessarily in the
+	// same packet, while the latter indicates that a flowlog contains a TCP packet with both flags set.
+)
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract.go
new file mode 100644
index 000000000..e33ba87f8
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract.go
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package extract
+
+import (
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	log "github.com/sirupsen/logrus"
+)
+
+type Extractor interface {
+	Extract(in []config.GenericMap) []config.GenericMap
+}
+
+type extractNone struct {
+}
+
+// Extract extracts a flow before being stored
+func (t *extractNone) Extract(f []config.GenericMap) []config.GenericMap {
+	return f
+}
+
+// NewExtractNone creates a new no-op extractor
+func NewExtractNone() (Extractor, error) {
+	log.Debugf("entering NewExtractNone")
+	return &extractNone{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
new file mode 100644
index 000000000..cc982ae93
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_aggregate.go
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package extract
+
+import (
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	agg "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate"
+	log "github.com/sirupsen/logrus"
+)
+
+type aggregates struct {
+	agg.Aggregates
+}
+
+// Extract extracts a flow before being stored
+func (ea *aggregates) Extract(entries []config.GenericMap) []config.GenericMap {
+	err := ea.Aggregates.Evaluate(entries)
+	if err != nil {
+		log.Debugf("Evaluate error %v", err)
+	}
+
+	// TODO: This needs to be an async function that is called for the metrics, and not
+	// TODO: invoked synchronously from the pipeline.
+	return ea.Aggregates.GetMetrics()
+}
+
+// NewExtractAggregate creates a new extractor
+func NewExtractAggregate(params config.StageParam) (Extractor, error) {
+	log.Debugf("entering NewExtractAggregate")
+	cfg, err := agg.NewAggregatesFromConfig(params.Extract.Aggregates)
+	if err != nil {
+		log.Errorf("error in NewAggregatesFromConfig: %v", err)
+		return nil, err
+	}
+
+	return &aggregates{
+		Aggregates: cfg,
+	}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
new file mode 100644
index 000000000..4d93b0485
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/extract_timebased.go
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package extract + +import ( + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + tb "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased" + log "github.com/sirupsen/logrus" +) + +type timebased struct { + Filters []tb.FilterStruct + IndexKeyStructs map[string]*tb.IndexKeyTable +} + +// Extract extracts a flow before being stored +func (et *timebased) Extract(entries []config.GenericMap) []config.GenericMap { + log.Debugf("entering timebased Extract") + nowInSecs := time.Now() + // Populate the Table with the current entries + for _, entry := range entries { + log.Debugf("timebased Extract, entry = %v", entry) + tb.AddEntryToTables(et.IndexKeyStructs, entry, nowInSecs) + } + + output := make([]config.GenericMap, 0) + // Calculate Filters based on time windows + for i := range et.Filters { + filter := &et.Filters[i] + filter.CalculateResults(nowInSecs) + filter.ComputeTopkBotk() + genMap := filter.CreateGenericMap() + output = append(output, genMap...) + } + log.Debugf("output of extract timebased: %v", output) + + // delete entries from tables that are outside time windows + tb.DeleteOldEntriesFromTables(et.IndexKeyStructs, nowInSecs) + + return output +} + +// NewExtractTimebased creates a new extractor +func NewExtractTimebased(params config.StageParam) (Extractor, error) { + var rules []api.TimebasedFilterRule + if params.Extract != nil && params.Extract.Timebased.Rules != nil { + rules = params.Extract.Timebased.Rules + } + log.Debugf("NewExtractTimebased; rules = %v", rules) + + tmpIndexKeyStructs, tmpFilters := tb.CreateIndexKeysAndFilters(rules) + + return &timebased{ + Filters: tmpFilters, + IndexKeyStructs: tmpIndexKeyStructs, + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go new file mode 100644 index 000000000..e753c3b31 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/filters.go @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package timebased + +import ( + "container/list" + "math" + "strings" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + log "github.com/sirupsen/logrus" +) + +func (fs *FilterStruct) CalculateResults(nowInSecs time.Time) { + log.Debugf("CalculateResults nowInSecs = %v", nowInSecs) + oldestValidTime := nowInSecs.Add(-fs.Rule.TimeInterval.Duration) + for tableKey, l := range fs.IndexKeyDataTable.dataTableMap { + var valueFloat64 = float64(0) + var err error + //nolint:exhaustive + switch fs.Rule.OperationType { + case api.FilterOperationLast: + // handle empty list + if l.Len() == 0 { + continue + } + valueFloat64, err = utils.ConvertToFloat64(l.Back().Value.(*TableEntry).entry[fs.Rule.OperationKey]) + if err != nil { + continue + } + case api.FilterOperationDiff: + for e := l.Front(); e != nil; e = e.Next() { + cEntry := e.Value.(*TableEntry) + if cEntry.timeStamp.Before(oldestValidTime) { + // entry is out of time range; ignore it + continue + } + first, err := utils.ConvertToFloat64(e.Value.(*TableEntry).entry[fs.Rule.OperationKey]) + if err != nil { + continue + } + last, err := utils.ConvertToFloat64(l.Back().Value.(*TableEntry).entry[fs.Rule.OperationKey]) + if err != nil { + continue + } + valueFloat64 = last - first + break + } + default: + valueFloat64 = fs.CalculateValue(l, oldestValidTime) + } + fs.Results[tableKey] = &filterOperationResult{ + values: tableKey, + operationResult: valueFloat64, + } + } + log.Debugf("CalculateResults Results = %v", fs.Results) +} + +func (fs *FilterStruct) CalculateValue(l *list.List, oldestValidTime time.Time) float64 { + log.Debugf("CalculateValue nowInSecs = %v", oldestValidTime) + currentValue := getInitValue(fs.Rule.OperationType) + nItems := 0 + for e := l.Front(); e != nil; e = e.Next() { + cEntry := e.Value.(*TableEntry) + if cEntry.timeStamp.Before(oldestValidTime) { + // entry is out of time range; ignore it + continue + } + if valueFloat64, err := utils.ConvertToFloat64(cEntry.entry[fs.Rule.OperationKey]); err != nil { + // Log as debug to avoid performance impact + log.Debugf("CalculateValue error with OperationKey %s: %v", fs.Rule.OperationKey, err) + } else { + nItems++ + switch fs.Rule.OperationType { + case api.FilterOperationSum, api.FilterOperationAvg: + currentValue += valueFloat64 + case api.FilterOperationMax: + currentValue = math.Max(currentValue, valueFloat64) + case api.FilterOperationMin: + currentValue = math.Min(currentValue, valueFloat64) + case api.FilterOperationCnt, api.FilterOperationLast, api.FilterOperationDiff: + } + } + } + if fs.Rule.OperationType == api.FilterOperationAvg && nItems > 0 { + currentValue /= float64(nItems) + } + if fs.Rule.OperationType == api.FilterOperationCnt { + currentValue = float64(nItems) + } + return currentValue +} + +func getInitValue(operation api.FilterOperationEnum) float64 { + switch operation { + case api.FilterOperationSum, + api.FilterOperationAvg, + api.FilterOperationCnt, + api.FilterOperationLast, + api.FilterOperationDiff: + return 0 + case api.FilterOperationMax: + return (-math.MaxFloat64) + case api.FilterOperationMin: + return math.MaxFloat64 + default: + log.Panicf("unknown operation %v", operation) + return 0 + } +} + +func (fs *FilterStruct) ComputeTopkBotk() { + var output []filterOperationResult + if fs.Rule.TopK > 0 { + if fs.Rule.Reversed { + output = fs.computeBotK(fs.Results) + } else { + output = fs.computeTopK(fs.Results) + } + } 
else { + // return all Results; convert map to array + output = make([]filterOperationResult, len(fs.Results)) + i := 0 + for _, item := range fs.Results { + output[i] = *item + i++ + } + } + fs.Output = output +} + +func (fs *FilterStruct) CreateGenericMap() []config.GenericMap { + output := make([]config.GenericMap, 0) + for _, result := range fs.Output { + t := config.GenericMap{ + "name": fs.Rule.Name, + "index_key": fs.Rule.IndexKey, + "operation": fs.Rule.OperationType, + } + + // append operation key and result as key / value + t[fs.Rule.OperationKey] = result.operationResult + + // append index key / value pairs + values := strings.Split(result.values, ",") + if len(fs.Rule.IndexKeys) == len(values) { + for i, k := range fs.Rule.IndexKeys { + t[k] = values[i] + } + } + + log.Debugf("FilterStruct CreateGenericMap: %v", t) + output = append(output, t) + } + log.Debugf("FilterStruct CreateGenericMap: output = %v \n", output) + return output +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go new file mode 100644 index 000000000..5c52dc656 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/heap.go @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package timebased + +import ( + "container/heap" + "math" + + log "github.com/sirupsen/logrus" +) + +// functions to manipulate a heap to generate TopK/BotK entries +// We need to implement the heap interface: Len(), Less(), Swap(), Push(), Pop() + +type heapItem struct { + value float64 + result *filterOperationResult +} + +type topkHeap []heapItem +type botkHeap []heapItem + +func (h topkHeap) Len() int { + return len(h) +} + +func (h topkHeap) Less(i, j int) bool { + return h[i].value < h[j].value +} + +func (h topkHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *topkHeap) Push(x interface{}) { + *h = append(*h, x.(heapItem)) +} + +func (h *topkHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +func (fs *FilterStruct) computeTopK(inputs filterOperationResults) []filterOperationResult { + // maintain a heap with k items, always dropping the lowest + // we will be left with the TopK items + var prevMin float64 + prevMin = -math.MaxFloat64 + topk := fs.Rule.TopK + h := &topkHeap{} + for key, metricMap := range inputs { + val := metricMap.operationResult + if val < prevMin { + continue + } + item := heapItem{ + result: inputs[key], + value: val, + } + heap.Push(h, item) + if h.Len() > topk { + x := heap.Pop(h) + prevMin = x.(heapItem).value + } + } + log.Debugf("heap: %v", h) + + // convert the remaining heap to a sorted array + result := make([]filterOperationResult, h.Len()) + heapLen := h.Len() + for i := heapLen; i > 0; i-- { + poppedItem := heap.Pop(h).(heapItem) + log.Debugf("poppedItem: %v", poppedItem) + result[i-1] = *poppedItem.result + } + log.Debugf("topk items: %v", result) + return result +} + +func (h botkHeap) Len() int { + return len(h) +} + +// For a botk heap, we reverse the order of the Less() operation +func (h botkHeap) Less(i, j int) bool { + return h[i].value > h[j].value +} + +func (h botkHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *botkHeap) Push(x interface{}) { + *h = append(*h, x.(heapItem)) +} + +func (h *botkHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +func (fs *FilterStruct) computeBotK(inputs filterOperationResults) []filterOperationResult { + // maintain a heap with k items, always dropping the highest + // we will be left with the BotK items + var prevMax float64 + prevMax = math.MaxFloat64 + botk := fs.Rule.TopK + h := &botkHeap{} + for key, metricMap := range inputs { + val := metricMap.operationResult + if val > prevMax { + continue + } + item := heapItem{ + result: inputs[key], + value: val, + } + heap.Push(h, item) + if h.Len() > botk { + x := heap.Pop(h) + prevMax = x.(heapItem).value + } + } + log.Debugf("heap: %v", h) + + // convert the remaining heap to a sorted array + result := make([]filterOperationResult, h.Len()) + heapLen := h.Len() + for i := heapLen; i > 0; i-- { + poppedItem := heap.Pop(h).(heapItem) + log.Debugf("poppedItem: %v", poppedItem) + result[i-1] = *poppedItem.result + } + log.Debugf("botk items: %v", result) + return result +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/tables.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/tables.go new file mode 100644 index 000000000..3bdb41dfe --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/tables.go @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2022 IBM, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package timebased + +import ( + "bytes" + "container/list" + "fmt" + "strings" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + log "github.com/sirupsen/logrus" +) + +func AddEntryToTables(indexKeyStructs map[string]*IndexKeyTable, entry config.GenericMap, nowInSecs time.Time) { + for tableKey, recordTable := range indexKeyStructs { + keys := strings.Split(tableKey, ",") + + validValuesCount := 0 + var b bytes.Buffer + for _, key := range keys { + if b.Len() > 0 { + b.WriteRune(',') + } + if val, ok := entry[key]; ok { + valStr := utils.ConvertToString(val) + if len(valStr) > 0 { + b.WriteString(valStr) + validValuesCount++ + } + } + } + + // add entry to the table only if all values are non empty + if len(keys) == validValuesCount { + val := b.String() + log.Debugf("ExtractTimebased addEntryToTables: key = %s, recordTable = %v", tableKey, recordTable) + cEntry := &TableEntry{ + timeStamp: nowInSecs, + entry: entry, + } + // allocate list if it does not yet exist + if recordTable.dataTableMap[val] == nil { + recordTable.dataTableMap[val] = list.New() + } + log.Debugf("ExtractTimebased addEntryToTables: adding to table %s", val) + AddEntryToTable(cEntry, recordTable.dataTableMap[val]) + } + } +} + +func AddEntryToTable(cEntry *TableEntry, tableList *list.List) { + log.Debugf("AddEntryToTable: adding table entry %v", cEntry) + tableList.PushBack(cEntry) +} + +func DeleteOldEntriesFromTables(indexKeyStructs map[string]*IndexKeyTable, nowInSecs time.Time) { + for _, recordTable := range indexKeyStructs { + oldestTime := nowInSecs.Add(-recordTable.maxTimeInterval) + for _, tableMap := range recordTable.dataTableMap { + for { + head := tableMap.Front() + if head == nil { + break + } + tableEntry := head.Value.(*TableEntry) + if tableEntry.timeStamp.Before(oldestTime) { + tableMap.Remove(head) + continue + } + break + } + // TODO: if tableMap is empty, we should clean it up and remove it from recordTable.dataTableMap + } + } +} + +func PrintTable(l *list.List) { + fmt.Printf("start PrintTable: \n") + for e := l.Front(); e != nil; e = e.Next() { + fmt.Printf("PrintTable: e = %v, Value = %v \n", e, e.Value) + } + fmt.Printf("end PrintTable: \n") +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go new file mode 100644 index 000000000..d6fded1d6 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased/timebased.go @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package timebased + +import ( + "container/list" + "strings" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + log "github.com/sirupsen/logrus" +) + +type FilterStruct struct { + Rule api.TimebasedFilterRule + IndexKeyDataTable *IndexKeyTable + Results filterOperationResults + Output []filterOperationResult +} + +type filterOperationResults map[string]*filterOperationResult + +type filterOperationResult struct { + values string + operationResult float64 +} + +type DataTableMap map[string]*list.List + +type IndexKeyTable struct { + maxTimeInterval time.Duration + dataTableMap DataTableMap +} + +type TableEntry struct { + timeStamp time.Time + entry config.GenericMap +} + +// CreateIndexKeysAndFilters creates structures for each IndexKey that appears in the rules. +// Note that the same IndexKey might appear in more than one Rule. +// Connect IndexKey structure to its filters. +// For each IndexKey, we need a table of history to handle the largest TimeInterval. +func CreateIndexKeysAndFilters(rules []api.TimebasedFilterRule) (map[string]*IndexKeyTable, []FilterStruct) { + tmpIndexKeyStructs := make(map[string]*IndexKeyTable) + tmpFilters := make([]FilterStruct, 0) + for _, filterRule := range rules { + log.Debugf("CreateIndexKeysAndFilters: filterRule = %v", filterRule) + if len(filterRule.IndexKeys) > 0 { + // reuse indexKey as table index + filterRule.IndexKey = strings.Join(filterRule.IndexKeys, ",") + } else if len(filterRule.IndexKey) > 0 { + // append indexKey to indexKeys + filterRule.IndexKeys = append(filterRule.IndexKeys, filterRule.IndexKey) + } else { + log.Errorf("missing IndexKey(s) for filter %s", filterRule.Name) + continue + } + rStruct, ok := tmpIndexKeyStructs[filterRule.IndexKey] + if !ok { + rStruct = &IndexKeyTable{ + maxTimeInterval: filterRule.TimeInterval.Duration, + dataTableMap: make(DataTableMap), + } + tmpIndexKeyStructs[filterRule.IndexKey] = rStruct + log.Debugf("new IndexKeyTable: name = %s = %v", filterRule.IndexKey, *rStruct) + } else if filterRule.TimeInterval.Duration > rStruct.maxTimeInterval { + rStruct.maxTimeInterval = filterRule.TimeInterval.Duration + } + // verify the validity of the OperationType field in the filterRule + switch filterRule.OperationType { + case api.FilterOperationLast, + api.FilterOperationDiff, + api.FilterOperationCnt, + api.FilterOperationAvg, + api.FilterOperationMax, + api.FilterOperationMin, + api.FilterOperationSum: + // OK; nothing to do + default: + log.Errorf("illegal operation type %s", filterRule.OperationType) + continue + } + tmpFilter := FilterStruct{ + Rule: filterRule, + IndexKeyDataTable: rStruct, + Results: make(filterOperationResults), + } + log.Debugf("new Rule = %v", tmpFilter) + tmpFilters = append(tmpFilters, tmpFilter) + } + return tmpIndexKeyStructs, tmpFilters +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest.go new file mode 100644 index 000000000..de804f1ac --- /dev/null +++ 
b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest.go @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ingest + +import "github.com/netobserv/flowlogs-pipeline/pkg/config" + +type Ingester interface { + Ingest(out chan<- config.GenericMap) +} +type IngesterNone struct { +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go new file mode 100644 index 000000000..bbe1a356d --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_collector.go @@ -0,0 +1,195 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ingest + +import ( + "context" + "encoding/binary" + "fmt" + "net" + + ms "github.com/mitchellh/mapstructure" + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/netsampler/goflow2/decoders/netflow/templates" + _ "github.com/netsampler/goflow2/decoders/netflow/templates/memory" // required for goflow in-memory templates + goflowFormat "github.com/netsampler/goflow2/format" + goflowCommonFormat "github.com/netsampler/goflow2/format/common" + _ "github.com/netsampler/goflow2/format/protobuf" // required for goflow protobuf + goflowpb "github.com/netsampler/goflow2/pb" + "github.com/netsampler/goflow2/utils" + log "github.com/sirupsen/logrus" + "google.golang.org/protobuf/proto" +) + +const ( + channelSize = 1000 +) + +type ingestCollector struct { + hostname string + port int + portLegacy int + in chan map[string]interface{} + exitChan <-chan struct{} + metrics *metrics +} + +// TransportWrapper is an implementation of the goflow2 transport interface +type TransportWrapper struct { + c chan map[string]interface{} +} + +func NewWrapper(c chan map[string]interface{}) *TransportWrapper { + tw := TransportWrapper{c: c} + return &tw +} + +func RenderMessage(message *goflowpb.FlowMessage) (map[string]interface{}, error) { + outputMap := make(map[string]interface{}) + err := ms.Decode(message, &outputMap) + if err != nil { + return nil, err + } + outputMap["DstAddr"] = goflowCommonFormat.RenderIP(message.DstAddr) + outputMap["SrcAddr"] = goflowCommonFormat.RenderIP(message.SrcAddr) + outputMap["DstMac"] = renderMac(message.DstMac) + outputMap["SrcMac"] = renderMac(message.SrcMac) + return outputMap, nil +} + +func renderMac(macValue uint64) string { + mac := make([]byte, 8) + binary.BigEndian.PutUint64(mac, macValue) + return net.HardwareAddr(mac[2:]).String() +} + +func (w *TransportWrapper) Send(_, data []byte) error { + message := goflowpb.FlowMessage{} + err := proto.Unmarshal(data, &message) + if err != nil { + // temporary fix + // A PR was submitted to log this error from goflow2: + // https://github.com/netsampler/goflow2/pull/86 + log.Error(err) + return err + } + renderedMsg, err := RenderMessage(&message) + if err == nil { + w.c <- renderedMsg + } + return err +} + +// Ingest ingests entries from a network collector using goflow2 library (https://github.com/netsampler/goflow2) +func (c *ingestCollector) Ingest(out chan<- config.GenericMap) { + ctx := context.Background() + c.metrics.createOutQueueLen(out) + + // initialize background listeners (a.k.a.netflow+legacy collector) + c.initCollectorListener(ctx) + + // forever process log lines received by collector + c.processLogLines(out) +} + +func (c *ingestCollector) initCollectorListener(ctx context.Context) { + transporter := NewWrapper(c.in) + formatter, err := goflowFormat.FindFormat(ctx, "pb") + if err != nil { + log.Fatal(err) + } + + if c.port > 0 { + // cf https://github.com/netsampler/goflow2/pull/49 + tpl, err := templates.FindTemplateSystem(ctx, "memory") + if err != nil { + log.Fatalf("goflow2 error: could not find memory template system: %v", err) + } + defer tpl.Close(ctx) + + go func() { + sNF := utils.NewStateNetFlow() + sNF.Format = formatter + sNF.Transport = transporter + sNF.Logger = log.StandardLogger() + sNF.TemplateSystem = tpl + + log.Infof("listening for netflow on host %s, port = %d", c.hostname, 
c.port) + err = sNF.FlowRoutine(1, c.hostname, c.port, false) + log.Fatal(err) + }() + } + + if c.portLegacy > 0 { + go func() { + sLegacyNF := utils.NewStateNFLegacy() + sLegacyNF.Format = formatter + sLegacyNF.Transport = transporter + sLegacyNF.Logger = log.StandardLogger() + + log.Infof("listening for legacy netflow on host %s, port = %d", c.hostname, c.portLegacy) + err = sLegacyNF.FlowRoutine(1, c.hostname, c.portLegacy, false) + log.Fatal(err) + }() + } +} + +func (c *ingestCollector) processLogLines(out chan<- config.GenericMap) { + for { + select { + case <-c.exitChan: + log.Debugf("exiting ingestCollector because of signal") + return + case record := <-c.in: + out <- record + } + } +} + +// NewIngestCollector create a new ingester +func NewIngestCollector(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) { + jsonIngestCollector := api.IngestCollector{} + if params.Ingest != nil && params.Ingest.Collector != nil { + jsonIngestCollector = *params.Ingest.Collector + } + if jsonIngestCollector.HostName == "" { + return nil, fmt.Errorf("ingest hostname not specified") + } + if jsonIngestCollector.Port == 0 && jsonIngestCollector.PortLegacy == 0 { + return nil, fmt.Errorf("no ingest port specified") + } + + log.Infof("hostname = %s", jsonIngestCollector.HostName) + log.Infof("port = %d", jsonIngestCollector.Port) + log.Infof("portLegacy = %d", jsonIngestCollector.PortLegacy) + + in := make(chan map[string]interface{}, channelSize) + metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(in) }) + + return &ingestCollector{ + hostname: jsonIngestCollector.HostName, + port: jsonIngestCollector.Port, + portLegacy: jsonIngestCollector.PortLegacy, + exitChan: pUtils.ExitChannel(), + in: in, + metrics: metrics, + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go new file mode 100644 index 000000000..f4d7ef3b5 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_fake.go @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ingest + +import ( + "fmt" + "sync/atomic" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + log "github.com/sirupsen/logrus" +) + +type Fake struct { + Count int64 + params config.Ingest + In chan config.GenericMap + exitChan <-chan struct{} +} + +// Ingest reads records from an input channel and writes them as-is to the output channel +func (inf *Fake) Ingest(out chan<- config.GenericMap) { + for { + select { + case <-inf.exitChan: + log.Debugf("exiting IngestFake because of signal") + return + case records := <-inf.In: + out <- records + atomic.AddInt64(&inf.Count, 1) + } + } +} + +// NewIngestFake creates a new ingester +func NewIngestFake(params config.StageParam) (Ingester, error) { + log.Debugf("entering NewIngestFake") + if params.Ingest == nil { + return nil, fmt.Errorf("ingest not specified") + } + + return &Fake{ + params: *params.Ingest, + In: make(chan config.GenericMap), + exitChan: utils.ExitChannel(), + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go new file mode 100644 index 000000000..4d1127b6c --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_file.go @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ingest + +import ( + "bufio" + "fmt" + "os" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + log "github.com/sirupsen/logrus" +) + +type ingestFile struct { + params config.Ingest + decoder decode.Decoder + exitChan <-chan struct{} + PrevRecords []config.GenericMap + TotalRecords int +} + +const ( + delaySeconds = 10 + chunkLines = 100 +) + +// Ingest ingests entries from a file and resends the same data every delaySeconds seconds +func (ingestF *ingestFile) Ingest(out chan<- config.GenericMap) { + var filename string + if ingestF.params.File != nil { + filename = ingestF.params.File.Filename + } + var lines [][]byte + file, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + defer func() { + _ = file.Close() + }() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + text := scanner.Text() + log.Debugf("%s", text) + lines = append(lines, []byte(text)) + } + + log.Debugf("Ingesting %d log lines from %s", len(lines), filename) + switch ingestF.params.Type { + case "file": + ingestF.sendAllLines(lines, out) + case "file_loop": + // loop forever + ticker := time.NewTicker(time.Duration(delaySeconds) * time.Second) + for { + select { + case <-ingestF.exitChan: + log.Debugf("exiting ingestFile because of signal") + return + case <-ticker.C: + ingestF.sendAllLines(lines, out) + } + } + case "file_chunks": + // sends the lines in chunks. 
Useful for testing parallelization + ingestF.TotalRecords = len(lines) + for len(lines) > 0 { + if len(lines) > chunkLines { + ingestF.sendAllLines(lines[:chunkLines], out) + lines = lines[chunkLines:] + } else { + ingestF.sendAllLines(lines, out) + lines = nil + } + } + } +} + +func (ingestF *ingestFile) sendAllLines(lines [][]byte, out chan<- config.GenericMap) { + log.Debugf("ingestFile sending %d lines", len(lines)) + ingestF.TotalRecords = len(lines) + for _, line := range lines { + decoded, err := ingestF.decoder.Decode(line) + if err != nil { + log.WithError(err).Warnf("ignoring line") + continue + } + out <- decoded + } +} + +// NewIngestFile create a new ingester +func NewIngestFile(params config.StageParam) (Ingester, error) { + log.Debugf("entering NewIngestFile") + if params.Ingest == nil || params.Ingest.File == nil || params.Ingest.File.Filename == "" { + return nil, fmt.Errorf("ingest filename not specified") + } + + log.Debugf("input file name = %s", params.Ingest.File.Filename) + decoder, err := decode.GetDecoder(params.Ingest.File.Decoder) + if err != nil { + return nil, err + } + + return &ingestFile{ + params: *params.Ingest, + exitChan: utils.ExitChannel(), + decoder: decoder, + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_grpc.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_grpc.go new file mode 100644 index 000000000..16afb8d76 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_grpc.go @@ -0,0 +1,120 @@ +package ingest + +import ( + "context" + "fmt" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "github.com/netobserv/netobserv-ebpf-agent/pkg/decode" + grpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow" + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow" + + "github.com/sirupsen/logrus" + grpc2 "google.golang.org/grpc" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +var glog = logrus.WithField("component", "ingest.GRPCProtobuf") + +const ( + defaultBufferLen = 100 +) + +// GRPCProtobuf ingests data from the NetObserv eBPF Agent, using Protocol Buffers over gRPC +type GRPCProtobuf struct { + collector *grpc.CollectorServer + flowPackets chan *pbflow.Records + metrics *metrics +} + +func NewGRPCProtobuf(opMetrics *operational.Metrics, params config.StageParam) (*GRPCProtobuf, error) { + netObserv := api.IngestGRPCProto{} + if params.Ingest != nil && params.Ingest.GRPC != nil { + netObserv = *params.Ingest.GRPC + } + if netObserv.Port == 0 { + return nil, fmt.Errorf("ingest port not specified") + } + bufLen := netObserv.BufferLen + if bufLen == 0 { + bufLen = defaultBufferLen + } + flowPackets := make(chan *pbflow.Records, bufLen) + metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(flowPackets) }) + collector, err := grpc.StartCollector(netObserv.Port, flowPackets, + grpc.WithGRPCServerOptions(grpc2.UnaryInterceptor(instrumentGRPC(metrics)))) + if err != nil { + return nil, err + } + return &GRPCProtobuf{ + collector: collector, + flowPackets: flowPackets, + metrics: metrics, + }, nil +} + +func (no *GRPCProtobuf) Ingest(out chan<- config.GenericMap) { + no.metrics.createOutQueueLen(out) + go func() { + <-pUtils.ExitChannel() + 
close(no.flowPackets) + no.collector.Close() + }() + for fp := range no.flowPackets { + glog.Debugf("Ingested %v records", len(fp.Entries)) + for _, entry := range fp.Entries { + out <- decode.PBFlowToMap(entry) + } + } +} + +func (no *GRPCProtobuf) Close() error { + err := no.collector.Close() + close(no.flowPackets) + return err +} + +func instrumentGRPC(m *metrics) grpc2.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc2.UnaryServerInfo, + handler grpc2.UnaryHandler, + ) (resp interface{}, err error) { + timer := m.stageDurationTimer() + timeReceived := timer.Start() + if info.FullMethod != "/pbflow.Collector/Send" { + return handler(ctx, req) + } + flowRecords := req.(*pbflow.Records) + + // instrument difference between flow time and ingest time + for _, entry := range flowRecords.Entries { + delay := timeReceived.Sub(entry.TimeFlowEnd.AsTime()).Seconds() + m.latency.Observe(delay) + } + + // instrument flows processed counter + m.flowsProcessed.Add(float64(len(flowRecords.Entries))) + + // instrument message bytes + m.batchSizeBytes.Observe(float64(proto.Size(flowRecords))) + + resp, err = handler(ctx, req) + if err != nil { + // "trace" level used to minimize performance impact + glog.Tracef("Reporting metric error: %v", err) + m.error(utils.ConvertToString(status.Code(err))) + } + + // Stage duration + timer.ObserveMilliseconds() + + return resp, err + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_inprocess.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_inprocess.go new file mode 100644 index 000000000..266b26247 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_inprocess.go @@ -0,0 +1,29 @@ +package ingest + +import ( + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" +) + +// InProcess ingester is meant to be imported and used from another program +// via pipeline.StartFLPInProcess +type InProcess struct { + in chan config.GenericMap +} + +func NewInProcess(in chan config.GenericMap) *InProcess { + return &InProcess{in: in} +} + +func (d *InProcess) Ingest(out chan<- config.GenericMap) { + go func() { + <-utils.ExitChannel() + d.Close() + }() + for rec := range d.in { + out <- rec + } +} + +func (d *InProcess) Close() { +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go new file mode 100644 index 000000000..c5dac331a --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_kafka.go @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ingest + +import ( + "errors" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + kafkago "github.com/segmentio/kafka-go" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +var klog = logrus.WithField("component", "ingest.Kafka") + +type kafkaReadMessage interface { + ReadMessage(ctx context.Context) (kafkago.Message, error) + Config() kafkago.ReaderConfig + Stats() kafkago.ReaderStats +} + +type ingestKafka struct { + kafkaReader kafkaReadMessage + decoder decode.Decoder + in chan []byte + exitChan <-chan struct{} + batchReadTimeout int64 + batchMaxLength int + metrics *metrics + canLogMessages bool +} + +const defaultBatchReadTimeout = int64(1000) +const defaultKafkaBatchMaxLength = 500 +const defaultKafkaCommitInterval = 500 + +const kafkaStatsPeriod = 15 * time.Second + +// Ingest ingests entries from kafka topic +func (k *ingestKafka) Ingest(out chan<- config.GenericMap) { + klog.Debugf("entering ingestKafka.Ingest") + k.metrics.createOutQueueLen(out) + + // initialize background listener + k.kafkaListener() + + // forever process log lines received by collector + k.processLogLines(out) +} + +// background thread to read kafka messages; place received items into ingestKafka input channel +func (k *ingestKafka) kafkaListener() { + klog.Debugf("entering kafkaListener") + + if logrus.IsLevelEnabled(logrus.DebugLevel) { + go k.reportStats() + } + + go func() { + for { + if k.isStopped() { + klog.Info("gracefully exiting") + return + } + klog.Trace("fetching messages from Kafka") + // block until a message arrives + kafkaMessage, err := k.kafkaReader.ReadMessage(context.Background()) + if err != nil { + klog.Errorln(err) + k.metrics.error("Cannot read message") + continue + } + if k.canLogMessages && logrus.IsLevelEnabled(logrus.TraceLevel) { + klog.Tracef("string(kafkaMessage) = %s\n", string(kafkaMessage.Value)) + } + k.metrics.flowsProcessed.Inc() + messageLen := len(kafkaMessage.Value) + k.metrics.batchSizeBytes.Observe(float64(messageLen) + float64(len(kafkaMessage.Key))) + if messageLen > 0 { + // process message + k.in <- kafkaMessage.Value + } + } + }() +} + +func (k *ingestKafka) isStopped() bool { + select { + case <-k.exitChan: + return true + default: + return false + } +} + +func (k *ingestKafka) processRecordDelay(record config.GenericMap) { + timeFlowEndInterface, ok := record["TimeFlowEndMs"] + if !ok { + // "trace" level used to minimize performance impact + klog.Tracef("TimeFlowEndMs missing in record %v", record) + k.metrics.error("TimeFlowEndMs missing") + return + } + timeFlowEnd, ok := timeFlowEndInterface.(int64) + if !ok { + // "trace" level used to minimize performance impact + klog.Tracef("Cannot parse TimeFlowEndMs of record %v", record) + k.metrics.error("Cannot parse TimeFlowEndMs") + return + } + delay := time.Since(time.UnixMilli(timeFlowEnd)).Seconds() + k.metrics.latency.Observe(delay) +} + +func (k *ingestKafka) processRecord(record []byte, out chan<- config.GenericMap) { + // Decode batch + decoded, err := k.decoder.Decode(record) + if err != nil { + klog.WithError(err).Warnf("ignoring flow") + return + } + k.processRecordDelay(decoded) + + // Send batch + out <- decoded +} + +// read items from ingestKafka input channel, pool them, and send down the pipeline +func (k 
*ingestKafka) processLogLines(out chan<- config.GenericMap) {
+	for {
+		select {
+		case <-k.exitChan:
+			klog.Debugf("exiting ingestKafka because of signal")
+			return
+		case record := <-k.in:
+			k.processRecord(record, out)
+		}
+	}
+}
+
+// reportStats periodically reports kafka stats
+func (k *ingestKafka) reportStats() {
+	ticker := time.NewTicker(kafkaStatsPeriod)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-k.exitChan:
+			klog.Debug("gracefully exiting stats reporter")
+			return
+		case <-ticker.C:
+			klog.Debugf("reader stats: %#v", k.kafkaReader.Stats())
+		}
+	}
+}
+
+// NewIngestKafka creates a new ingester
+// nolint:cyclop
+func NewIngestKafka(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) {
+	klog.Debugf("entering NewIngestKafka")
+	jsonIngestKafka := api.IngestKafka{}
+	var ingestType string
+	if params.Ingest != nil {
+		ingestType = params.Ingest.Type
+		if params.Ingest.Kafka != nil {
+			jsonIngestKafka = *params.Ingest.Kafka
+		}
+	}
+
+	// connect to the kafka server
+	startOffsetString := jsonIngestKafka.StartOffset
+	var startOffset int64
+	switch startOffsetString {
+	case "FirstOffset", "":
+		startOffset = kafkago.FirstOffset
+	case "LastOffset":
+		startOffset = kafkago.LastOffset
+	default:
+		startOffset = kafkago.FirstOffset
+		klog.Errorf("illegal value for StartOffset: %s\n", startOffsetString)
+	}
+	klog.Debugf("startOffset = %v", startOffset)
+	groupBalancers := make([]kafkago.GroupBalancer, 0)
+	for _, gb := range jsonIngestKafka.GroupBalancers {
+		switch gb {
+		case "range":
+			groupBalancers = append(groupBalancers, &kafkago.RangeGroupBalancer{})
+		case "roundRobin":
+			groupBalancers = append(groupBalancers, &kafkago.RoundRobinGroupBalancer{})
+		case "rackAffinity":
+			groupBalancers = append(groupBalancers, &kafkago.RackAffinityGroupBalancer{})
+		default:
+			klog.Warningf("unknown group balancer %q; defaulting to roundRobin", gb)
+			groupBalancers = append(groupBalancers, &kafkago.RoundRobinGroupBalancer{})
+		}
+	}
+
+	batchReadTimeout := defaultBatchReadTimeout
+	if jsonIngestKafka.BatchReadTimeout != 0 {
+		batchReadTimeout = jsonIngestKafka.BatchReadTimeout
+	}
+	klog.Infof("batchReadTimeout = %d", batchReadTimeout)
+
+	commitInterval := int64(defaultKafkaCommitInterval)
+	if jsonIngestKafka.CommitInterval != 0 {
+		commitInterval = jsonIngestKafka.CommitInterval
+	}
+	klog.Infof("commitInterval = %d", commitInterval)
+
+	dialer := &kafkago.Dialer{
+		Timeout:   kafkago.DefaultDialer.Timeout,
+		DualStack: kafkago.DefaultDialer.DualStack,
+	}
+	if jsonIngestKafka.TLS != nil {
+		klog.Infof("Using TLS configuration: %v", jsonIngestKafka.TLS)
+		tlsConfig, err := jsonIngestKafka.TLS.Build()
+		if err != nil {
+			return nil, err
+		}
+		dialer.TLS = tlsConfig
+	}
+
+	if jsonIngestKafka.SASL != nil {
+		m, err := utils.SetupSASLMechanism(jsonIngestKafka.SASL)
+		if err != nil {
+			return nil, err
+		}
+		dialer.SASLMechanism = m
+	}
+
+	readerConfig := kafkago.ReaderConfig{
+		Brokers:        jsonIngestKafka.Brokers,
+		Topic:          jsonIngestKafka.Topic,
+		GroupID:        jsonIngestKafka.GroupID,
+		GroupBalancers: groupBalancers,
+		StartOffset:    startOffset,
+		CommitInterval: time.Duration(commitInterval) * time.Millisecond,
+		Dialer:         dialer,
+	}
+
+	if jsonIngestKafka.PullQueueCapacity > 0 {
+		readerConfig.QueueCapacity = jsonIngestKafka.PullQueueCapacity
+	}
+
+	if jsonIngestKafka.PullMaxBytes > 0 {
+		readerConfig.MaxBytes = jsonIngestKafka.PullMaxBytes
+	}
+
+	klog.Debugf("reader config: %#v", readerConfig)
+
+	kafkaReader := kafkago.NewReader(readerConfig)
+	if kafkaReader == nil {
+		errMsg :=
"NewIngestKafka: failed to create kafka-go reader" + klog.Errorf("%s", errMsg) + return nil, errors.New(errMsg) + } + + decoder, err := decode.GetDecoder(jsonIngestKafka.Decoder) + if err != nil { + return nil, err + } + + bml := defaultKafkaBatchMaxLength + if jsonIngestKafka.BatchMaxLen != 0 { + bml = jsonIngestKafka.BatchMaxLen + } + + in := make(chan []byte, 2*bml) + metrics := newMetrics(opMetrics, params.Name, ingestType, func() int { return len(in) }) + + return &ingestKafka{ + kafkaReader: kafkaReader, + decoder: decoder, + exitChan: utils.ExitChannel(), + in: in, + batchMaxLength: bml, + batchReadTimeout: batchReadTimeout, + metrics: metrics, + canLogMessages: jsonIngestKafka.Decoder.Type == api.DecoderJSON, + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go new file mode 100644 index 000000000..1b425cf4d --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_stdin.go @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ingest + +import ( + "bufio" + "os" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode" + pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/sirupsen/logrus" +) + +const ( + stdinChannelSize = 1000 +) + +var slog = logrus.WithField("component", "ingest.Stdin") + +type ingestStdin struct { + in chan string + eof chan struct{} + exitChan <-chan struct{} + metrics *metrics + decoder decode.Decoder +} + +// Ingest ingests entries from stdin +func (s *ingestStdin) Ingest(out chan<- config.GenericMap) { + slog.Debugf("entering ingestStdin.Ingest") + s.metrics.createOutQueueLen(out) + + go s.getStdinInput() + + // process log lines received by stdin + s.processLogLines(out) +} + +func (s *ingestStdin) getStdinInput() { + scanner := bufio.NewScanner(os.Stdin) + // Loop to read lines from stdin until an error or EOF is encountered + for scanner.Scan() { + s.in <- scanner.Text() + } + + // Check for errors + if err := scanner.Err(); err != nil { + slog.WithError(err).Errorf("Error reading standard input") + } + close(s.eof) +} + +func (s *ingestStdin) processLogLines(out chan<- config.GenericMap) { + for { + select { + case <-s.exitChan: + slog.Debugf("exiting ingestStdin because of signal") + return + case <-s.eof: + slog.Debugf("exiting ingestStdin because of EOF") + return + case line := <-s.in: + s.processRecord(out, line) + } + } +} + +func (s *ingestStdin) processRecord(out chan<- config.GenericMap, line string) { + slog.Debugf("Decoding %s", line) + decoded, err := s.decoder.Decode([]byte(line)) + if err != nil { + slog.WithError(err).Warnf("ignoring line %v", line) + s.metrics.error("Ignoring line") + return + } 
+ s.metrics.flowsProcessed.Inc() + out <- decoded +} + +// NewIngestStdin create a new ingester +func NewIngestStdin(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) { + slog.Debugf("Entering NewIngestStdin") + + in := make(chan string, stdinChannelSize) + eof := make(chan struct{}) + metrics := newMetrics(opMetrics, params.Name, params.Ingest.Type, func() int { return len(in) }) + decoderParams := api.Decoder{Type: api.DecoderJSON} + decoder, err := decode.GetDecoder(decoderParams) + if err != nil { + return nil, err + } + + return &ingestStdin{ + exitChan: pUtils.ExitChannel(), + in: in, + eof: eof, + metrics: metrics, + decoder: decoder, + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go new file mode 100644 index 000000000..66ee85724 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/ingest_synthetic.go @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ingest + +import ( + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" +) + +type ingestSynthetic struct { + params api.IngestSynthetic + exitChan <-chan struct{} + flowLogsProcessed prometheus.Counter +} + +const ( + defaultConnections = 100 + defaultBatchLen = 10 + defaultFlowLogsPerMin = 2000 +) + +var ( + flowLogsProcessed = operational.DefineMetric( + "ingest_synthetic_flows_processed", + "Number of flow logs processed", + operational.TypeCounter, + "stage", + ) +) + +// Ingest generates flow logs according to provided parameters +func (ingestS *ingestSynthetic) Ingest(out chan<- config.GenericMap) { + log.Debugf("entering IngestSynthetic Ingest, params = %v", ingestS.params) + // get a list of flow log entries, one per desired connection + // these flow logs will be sent again and again to simulate ongoing traffic on those connections + flowLogs := utils.GenerateConnectionFlowEntries(ingestS.params.Connections) + nLogs := len(flowLogs) + next := 0 + + // compute time interval between batches; divide BatchMaxLen by FlowLogsPerMin and adjust the types + ticker := time.NewTicker(time.Duration(int(time.Minute*time.Duration(ingestS.params.BatchMaxLen)) / ingestS.params.FlowLogsPerMin)) + + // loop forever + for { + select { + case <-ingestS.exitChan: + log.Debugf("exiting IngestSynthetic because of signal") + return + case <-ticker.C: + log.Debugf("sending a batch of %d flow logs from index %d", ingestS.params.BatchMaxLen, next) + for i := 0; i < ingestS.params.BatchMaxLen; i++ { + out <- flowLogs[next] + ingestS.flowLogsProcessed.Inc() + next++ + if next >= nLogs { + next = 0 + } + } 
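+				// wrap around so the same set of simulated connections is replayed indefinitely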
+ } + } +} + +// NewIngestSynthetic create a new ingester +func NewIngestSynthetic(opMetrics *operational.Metrics, params config.StageParam) (Ingester, error) { + log.Debugf("entering NewIngestSynthetic") + confIngestSynthetic := api.IngestSynthetic{} + if params.Ingest != nil && params.Ingest.Synthetic != nil { + confIngestSynthetic = *params.Ingest.Synthetic + } + if confIngestSynthetic.Connections == 0 { + confIngestSynthetic.Connections = defaultConnections + } + if confIngestSynthetic.FlowLogsPerMin == 0 { + confIngestSynthetic.FlowLogsPerMin = defaultFlowLogsPerMin + } + if confIngestSynthetic.BatchMaxLen == 0 { + confIngestSynthetic.BatchMaxLen = defaultBatchLen + } + log.Debugf("params = %v", confIngestSynthetic) + + return &ingestSynthetic{ + params: confIngestSynthetic, + exitChan: utils.ExitChannel(), + flowLogsProcessed: opMetrics.NewCounter(&flowLogsProcessed, params.Name), + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/metrics.go new file mode 100644 index 000000000..d59f761f2 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest/metrics.go @@ -0,0 +1,74 @@ +package ingest + +import ( + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + latencyHistogram = operational.DefineMetric( + "ingest_latency_ms", + "Latency between flow end time and ingest time, in milliseconds", + operational.TypeHistogram, + "stage", + ) + flowsProcessedCounter = operational.DefineMetric( + "ingest_flows_processed", + "Number of flows received by the ingester", + operational.TypeCounter, + "stage", + ) + batchSizeBytesSummary = operational.DefineMetric( + "ingest_batch_size_bytes", + "Ingested batch size distribution, in bytes", + operational.TypeSummary, + "stage", + ) + errorsCounter = operational.DefineMetric( + "ingest_errors", + "Counter of errors during ingestion", + operational.TypeCounter, + "stage", "type", "code", + ) +) + +type metrics struct { + *operational.Metrics + stage string + stageType string + stageDuration prometheus.Observer + latency prometheus.Histogram + flowsProcessed prometheus.Counter + batchSizeBytes prometheus.Summary + errors *prometheus.CounterVec +} + +func newMetrics(opMetrics *operational.Metrics, stage, stageType string, inGaugeFunc func() int) *metrics { + opMetrics.CreateInQueueSizeGauge(stage, inGaugeFunc) + return &metrics{ + Metrics: opMetrics, + stage: stage, + stageType: stageType, + latency: opMetrics.NewHistogram(&latencyHistogram, []float64{.001, .01, .1, 1, 10, 100, 1000, 10000}, stage), + stageDuration: opMetrics.GetOrCreateStageDurationHisto().WithLabelValues(stage), + flowsProcessed: opMetrics.NewCounter(&flowsProcessedCounter, stage), + batchSizeBytes: opMetrics.NewSummary(&batchSizeBytesSummary, stage), + errors: opMetrics.NewCounterVec(&errorsCounter), + } +} + +func (m *metrics) createOutQueueLen(out chan<- config.GenericMap) { + m.CreateOutQueueSizeGauge(m.stage, func() int { return len(out) }) +} + +// Increment error counter +// `code` should reflect any error code relative to this type. 
It can be a short string message,
+// but make sure not to include any dynamic value with high cardinality
+func (m *metrics) error(code string) {
+	m.errors.WithLabelValues(m.stage, m.stageType, code).Inc()
+}
+
+func (m *metrics) stageDurationTimer() *operational.Timer {
+	return operational.NewTimer(m.stageDuration)
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/inprocess.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/inprocess.go
new file mode 100644
index 000000000..9b578610b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/inprocess.go
@@ -0,0 +1,32 @@
+package pipeline
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest"
+	"github.com/netobserv/flowlogs-pipeline/pkg/prometheus"
+)
+
+// StartFLPInProcess is an entry point to start the whole FLP / pipeline processing from imported code
+func StartFLPInProcess(cfg *config.ConfigFileStruct, in chan config.GenericMap) error {
+	promServer := prometheus.InitializePrometheus(&cfg.MetricsSettings)
+
+	// Create new flows pipeline
+	ingester := ingest.NewInProcess(in)
+	flp, err := newPipelineFromIngester(cfg, ingester)
+	if err != nil {
+		return fmt.Errorf("failed to initialize pipeline: %w", err)
+	}
+
+	// Start the flows pipeline in the background; Run blocks until the pipeline terminates
+	go func() {
+		flp.Run()
+		if promServer != nil {
+			_ = promServer.Shutdown(context.Background())
+		}
+	}()
+
+	return nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline.go
new file mode 100644
index 000000000..35cde905a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline.go
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2019 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package pipeline
+
+import (
+	"fmt"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest"
+	"github.com/netobserv/gopipes/pkg/node"
+	log "github.com/sirupsen/logrus"
+)
+
+// interface definitions of pipeline components
+const (
+	StageIngest    = "ingest"
+	StageTransform = "transform"
+	StageExtract   = "extract"
+	StageEncode    = "encode"
+	StageWrite     = "write"
+)
+
+// Pipeline manager
+type Pipeline struct {
+	startNodes       []*node.Start[config.GenericMap]
+	terminalNodes    []*node.Terminal[config.GenericMap]
+	pipelineEntryMap map[string]*pipelineEntry
+	IsRunning        bool
+	// TODO: this field is only used for test verification.
We should rewrite the build process + // to be able to remove it from here + pipelineStages []*pipelineEntry + Metrics *operational.Metrics + configWatcher *pipelineConfigWatcher +} + +// NewPipeline defines the pipeline elements +func NewPipeline(cfg *config.ConfigFileStruct) (*Pipeline, error) { + return newPipelineFromIngester(cfg, nil) +} + +// newPipelineFromIngester defines the pipeline elements from a preset ingester (e.g. for in-process receiver) +func newPipelineFromIngester(cfg *config.ConfigFileStruct, ing ingest.Ingester) (*Pipeline, error) { + log.Debugf("entering newPipelineFromIngester") + + log.Debugf("stages = %v ", cfg.Pipeline) + log.Debugf("configParams = %v ", cfg.Parameters) + + builder := newBuilder(cfg) + if ing != nil { + builder.presetIngester(ing) + } + if err := builder.readStages(); err != nil { + return nil, err + } + pipeline, err := builder.build() + if err != nil { + return nil, err + } + pipeline.configWatcher, err = newPipelineConfigWatcher(cfg, pipeline.pipelineEntryMap) + return pipeline, err +} + +func (p *Pipeline) Run() { + // starting the graph + for _, s := range p.startNodes { + s.Start() + } + p.IsRunning = true + + if p.configWatcher != nil { + go p.configWatcher.Run() + } + + // blocking the execution until the graph terminal stages end + for _, t := range p.terminalNodes { + <-t.Done() + } + p.IsRunning = false +} + +func (p *Pipeline) IsReady() error { + if !p.IsRunning { + return fmt.Errorf("pipeline is not running") + } + return nil +} + +func (p *Pipeline) IsAlive() error { + if !p.IsRunning { + return fmt.Errorf("pipeline is not running") + } + return nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go new file mode 100644 index 000000000..cfffc894f --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_builder.go @@ -0,0 +1,516 @@ +package pipeline + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/benbjohnson/clock" + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write" + k8sutils "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "github.com/netobserv/gopipes/pkg/node" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +const ( + defaultNodeBufferLen = 1000 + defaultExtractBatching = 1000 + defaultExtractBatchingTimeout = 5 * time.Second +) + +// Error wraps any error caused by a wrong formation of the pipeline +type Error struct { + StageName string + wrapped error +} + +func (e *Error) Error() string { + return fmt.Sprintf("pipeline stage %q: %s", e.StageName, e.wrapped.Error()) +} + +func (e *Error) Unwrap() error { + return e.wrapped +} + +// builder stores the information that is only 
required during the build of the pipeline
+type builder struct {
+	pipelineStages   []*pipelineEntry
+	configStages     []config.Stage
+	configParams     []config.StageParam
+	pipelineEntryMap map[string]*pipelineEntry
+	createdStages    map[string]interface{}
+	startNodes       []*node.Start[config.GenericMap]
+	terminalNodes    []*node.Terminal[config.GenericMap]
+	opMetrics        *operational.Metrics
+	stageDuration    *prometheus.HistogramVec
+	batchMaxLen      int
+	batchTimeout     time.Duration
+	nodeBufferLen    int
+	updtChans        map[string]chan config.StageParam
+}
+
+type pipelineEntry struct {
+	stageName   string
+	stageType   string
+	Ingester    ingest.Ingester
+	Transformer transform.Transformer
+	Extractor   extract.Extractor
+	Encoder     encode.Encoder
+	Writer      write.Writer
+}
+
+func getDynConfig(cfg *config.ConfigFileStruct) ([]config.StageParam, error) {
+	k8sconfig, err := k8sutils.LoadK8sConfig(cfg.DynamicParameters.KubeConfigPath)
+	if err != nil {
+		log.Errorf("Cannot get k8s config: %v", err)
+		return nil, err
+	}
+
+	clientset, err := kubernetes.NewForConfig(k8sconfig)
+	if err != nil {
+		log.Errorf("Cannot init k8s config: %v", err)
+		return nil, err
+	}
+	cm, err := clientset.CoreV1().ConfigMaps(cfg.DynamicParameters.Namespace).Get(context.TODO(), cfg.DynamicParameters.Name, metav1.GetOptions{})
+	if err != nil {
+		log.Errorf("Cannot get dynamic config: %v", err)
+		return nil, err
+	}
+	rawConfig, ok := cm.Data[cfg.DynamicParameters.FileName]
+	if !ok {
+		err = fmt.Errorf("cannot find file %s in configMap %s", cfg.DynamicParameters.FileName, cfg.DynamicParameters.Name)
+		log.Errorf("Cannot get file in configMap: %v", err)
+		return nil, err
+	}
+	dynConfig := config.HotReloadStruct{}
+	err = json.Unmarshal([]byte(rawConfig), &dynConfig)
+	if err != nil {
+		log.Errorf("Cannot parse config: %v", err)
+		return nil, err
+	}
+	return dynConfig.Parameters, nil
+}
+
+func newBuilder(cfg *config.ConfigFileStruct) *builder {
+	// Get global metrics settings
+	opMetrics := operational.NewMetrics(&cfg.MetricsSettings)
+	stageDuration := opMetrics.GetOrCreateStageDurationHisto()
+
+	bl := cfg.PerfSettings.BatcherMaxLen
+	if bl == 0 {
+		bl = defaultExtractBatching
+	}
+	bt := cfg.PerfSettings.BatcherTimeout
+	if bt == 0 {
+		bt = defaultExtractBatchingTimeout
+	}
+	nb := cfg.PerfSettings.NodeBufferLen
+	if nb == 0 {
+		nb = defaultNodeBufferLen
+	}
+
+	if cfg.DynamicParameters.Name != "" &&
+		cfg.DynamicParameters.Namespace != "" &&
+		cfg.DynamicParameters.FileName != "" {
+		dynParameters, err := getDynConfig(cfg)
+		if err == nil {
+			cfg.Parameters = append(cfg.Parameters, dynParameters...)
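+			// On failure, getDynConfig has already logged the cause and the builder
+			// proceeds with the static parameters only.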
+ } + } + + return &builder{ + pipelineEntryMap: map[string]*pipelineEntry{}, + createdStages: map[string]interface{}{}, + configStages: cfg.Pipeline, + configParams: cfg.Parameters, + opMetrics: opMetrics, + stageDuration: stageDuration, + batchMaxLen: bl, + batchTimeout: bt, + nodeBufferLen: nb, + updtChans: map[string]chan config.StageParam{}, + } +} + +// use a preset ingester +func (b *builder) presetIngester(ing ingest.Ingester) { + name := config.PresetIngesterStage + log.Debugf("stage = %v", name) + b.appendEntry(&pipelineEntry{ + stageName: name, + stageType: StageIngest, + Ingester: ing, + }) +} + +// read the configuration stages definition and instantiate the corresponding native Go objects +func (b *builder) readStages() error { + for _, param := range b.configParams { + log.Debugf("stage = %v", param.Name) + pEntry := pipelineEntry{ + stageName: param.Name, + stageType: findStageType(¶m), + } + var err error + switch pEntry.stageType { + case StageIngest: + pEntry.Ingester, err = getIngester(b.opMetrics, param) + case StageTransform: + pEntry.Transformer, err = getTransformer(b.opMetrics, param) + case StageExtract: + pEntry.Extractor, err = getExtractor(b.opMetrics, param) + case StageEncode: + pEntry.Encoder, err = getEncoder(b.opMetrics, param) + case StageWrite: + pEntry.Writer, err = getWriter(b.opMetrics, param) + default: + err = fmt.Errorf("invalid stage type: %v, stage name: %v", pEntry.stageType, pEntry.stageName) + } + if err != nil { + return err + } + b.appendEntry(&pEntry) + } + log.Debugf("pipeline = %v", b.pipelineStages) + return nil +} + +func (b *builder) appendEntry(pEntry *pipelineEntry) { + b.pipelineEntryMap[pEntry.stageName] = pEntry + b.pipelineStages = append(b.pipelineStages, pEntry) + log.Debugf("pipeline = %v", b.pipelineStages) +} + +// reads the configured Go stages and connects between them +// readStages must be invoked before this +func (b *builder) build() (*Pipeline, error) { + // accounts start and middle nodes that are connected to another node + sendingNodes := map[string]struct{}{} + // accounts middle or terminal nodes that receive data from another node + receivingNodes := map[string]struct{}{} + for _, connection := range b.configStages { + if connection.Name == "" || connection.Follows == "" { + // ignore entries that do not represent a connection + continue + } + // instantiates (or loads from cache) the destination node of a connection + dstEntry, ok := b.pipelineEntryMap[connection.Name] + if !ok { + return nil, fmt.Errorf("unknown pipeline stage: %s", connection.Name) + } + dstNode, err := b.getStageNode(dstEntry, connection.Name) + if err != nil { + return nil, err + } + dst, ok := dstNode.(node.Receiver[config.GenericMap]) + if !ok { + return nil, fmt.Errorf("stage %q of type %q can't receive data", + connection.Name, dstEntry.stageType) + } + // instantiates (or loads from cache) the source node of a connection + srcEntry, ok := b.pipelineEntryMap[connection.Follows] + if !ok { + return nil, fmt.Errorf("unknown pipeline stage: %s", connection.Follows) + } + srcNode, err := b.getStageNode(srcEntry, connection.Follows) + if err != nil { + return nil, err + } + src, ok := srcNode.(node.Sender[config.GenericMap]) + if !ok { + return nil, fmt.Errorf("stage %q of type %q can't send data", + connection.Follows, srcEntry.stageType) + } + log.Infof("connecting stages: %s --> %s", connection.Follows, connection.Name) + + sendingNodes[connection.Follows] = struct{}{} + receivingNodes[connection.Name] = struct{}{} + // connects source 
and destination node, and catches any panic from the Go-Pipes library.
+		var catchErr *Error
+		func() {
+			defer func() {
+				if msg := recover(); msg != nil {
+					catchErr = &Error{
+						StageName: connection.Name,
+						wrapped: fmt.Errorf("%q and %q stages have incompatible inputs/outputs: %v",
+							connection.Follows, connection.Name, msg),
+					}
+				}
+			}()
+			src.SendsTo(dst)
+		}()
+		if catchErr != nil {
+			return nil, catchErr
+		}
+	}
+
+	if err := b.verifyConnections(sendingNodes, receivingNodes); err != nil {
+		return nil, err
+	}
+	if len(b.startNodes) == 0 {
+		return nil, errors.New("no ingesters have been defined")
+	}
+	if len(b.terminalNodes) == 0 {
+		return nil, errors.New("no writers have been defined")
+	}
+	return &Pipeline{
+		startNodes:       b.startNodes,
+		terminalNodes:    b.terminalNodes,
+		pipelineStages:   b.pipelineStages,
+		pipelineEntryMap: b.pipelineEntryMap,
+		Metrics:          b.opMetrics,
+	}, nil
+}
+
+// verifies that all the start and middle nodes send data to another node
+// verifies that all the middle and terminal nodes receive data from another node
+func (b *builder) verifyConnections(sendingNodes, receivingNodes map[string]struct{}) error {
+	for _, stg := range b.pipelineStages {
+		if isReceptor(stg) {
+			if _, ok := receivingNodes[stg.stageName]; !ok {
+				return &Error{
+					StageName: stg.stageName,
+					wrapped: fmt.Errorf("pipeline stage of type %q"+
+						" should receive data from at least one other stage", stg.stageType),
+				}
+			}
+		}
+		if isSender(stg) {
+			if _, ok := sendingNodes[stg.stageName]; !ok {
+				return &Error{
+					StageName: stg.stageName,
+					wrapped: fmt.Errorf("pipeline stage of type %q"+
+						" should send data to at least one other stage", stg.stageType),
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func isReceptor(p *pipelineEntry) bool {
+	return p.stageType != StageIngest
+}
+
+func isSender(p *pipelineEntry) bool {
+	return p.stageType != StageWrite && p.stageType != StageEncode
+}
+
+func (b *builder) runMeasured(name string, f func()) {
+	start := time.Now()
+	f()
+	duration := time.Since(start)
+	b.stageDuration.WithLabelValues(name).Observe(float64(duration.Milliseconds()))
+}
+
+func (b *builder) getStageNode(pe *pipelineEntry, stageID string) (interface{}, error) {
+	if stg, ok := b.createdStages[stageID]; ok {
+		return stg, nil
+	}
+	var stage interface{}
+	// TODO: modify all the types' interfaces to not need to write loops here, the same
+	// as we do with Ingest
+	switch pe.stageType {
+	case StageIngest:
+		init := node.AsStart(pe.Ingester.Ingest)
+		b.startNodes = append(b.startNodes, init)
+		stage = init
+	case StageWrite:
+		term := node.AsTerminal(func(in <-chan config.GenericMap) {
+			b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+			for i := range in {
+				b.runMeasured(stageID, func() {
+					pe.Writer.Write(i)
+				})
+			}
+		}, node.ChannelBufferLen(b.nodeBufferLen))
+		b.terminalNodes = append(b.terminalNodes, term)
+		stage = term
+	case StageEncode:
+		encode := node.AsTerminal(func(in <-chan config.GenericMap) {
+			b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+			for i := range in {
+				b.runMeasured(stageID, func() {
+					pe.Encoder.Encode(i)
+				})
+			}
+		}, node.ChannelBufferLen(b.nodeBufferLen))
+		b.terminalNodes = append(b.terminalNodes, encode)
+		stage = encode
+	case StageTransform:
+		stage = node.AsMiddle(func(in <-chan config.GenericMap, out chan<- config.GenericMap) {
+			b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) })
+			b.opMetrics.CreateOutQueueSizeGauge(stageID, func() int { return len(out) })
+			for i
:= range in { + b.runMeasured(stageID, func() { + if transformed, ok := pe.Transformer.Transform(i); ok { + out <- transformed + } + }) + } + }, node.ChannelBufferLen(b.nodeBufferLen)) + case StageExtract: + stage = node.AsMiddle(func(in <-chan config.GenericMap, out chan<- config.GenericMap) { + b.opMetrics.CreateInQueueSizeGauge(stageID, func() int { return len(in) }) + b.opMetrics.CreateOutQueueSizeGauge(stageID, func() int { return len(out) }) + // TODO: replace batcher by rewriting the different extractor implementations + // to keep the status while processing flows one by one + utils.Batcher(utils.ExitChannel(), b.batchMaxLen, b.batchTimeout, in, + func(maps []config.GenericMap) { + outs := pe.Extractor.Extract(maps) + for _, o := range outs { + out <- o + } + }, + ) + }, node.ChannelBufferLen(b.nodeBufferLen)) + default: + return nil, &Error{ + StageName: stageID, + wrapped: fmt.Errorf("invalid stage type: %s", pe.stageType), + } + } + b.createdStages[stageID] = stage + return stage, nil +} + +func getIngester(opMetrics *operational.Metrics, params config.StageParam) (ingest.Ingester, error) { + var ingester ingest.Ingester + var err error + switch params.Ingest.Type { + case api.FileType, api.FileLoopType, api.FileChunksType: + ingester, err = ingest.NewIngestFile(params) + case api.SyntheticType: + ingester, err = ingest.NewIngestSynthetic(opMetrics, params) + case api.CollectorType: + ingester, err = ingest.NewIngestCollector(opMetrics, params) + case api.StdinType: + ingester, err = ingest.NewIngestStdin(opMetrics, params) + case api.KafkaType: + ingester, err = ingest.NewIngestKafka(opMetrics, params) + case api.GRPCType: + ingester, err = ingest.NewGRPCProtobuf(opMetrics, params) + case api.FakeType: + ingester, err = ingest.NewIngestFake(params) + default: + panic(fmt.Sprintf("`ingest` type %s not defined", params.Ingest.Type)) + } + return ingester, err +} + +func getWriter(opMetrics *operational.Metrics, params config.StageParam) (write.Writer, error) { + var writer write.Writer + var err error + switch params.Write.Type { + case api.GRPCType: + writer, err = write.NewWriteGRPC(params) + case api.StdoutType: + writer, err = write.NewWriteStdout(params) + case api.NoneType: + writer, err = write.NewWriteNone() + case api.LokiType: + writer, err = write.NewWriteLoki(opMetrics, params) + case api.IpfixType: + writer, err = write.NewWriteIpfix(params) + case api.FakeType: + writer, err = write.NewWriteFake(params) + default: + panic(fmt.Sprintf("`write` type %s not defined; if no writer needed, specify `none`", params.Write.Type)) + } + return writer, err +} + +func getTransformer(opMetrics *operational.Metrics, params config.StageParam) (transform.Transformer, error) { + var transformer transform.Transformer + var err error + switch params.Transform.Type { + case api.GenericType: + transformer, err = transform.NewTransformGeneric(params) + case api.FilterType: + transformer, err = transform.NewTransformFilter(params) + case api.NetworkType: + transformer, err = transform.NewTransformNetwork(params, opMetrics) + case api.NoneType: + transformer, err = transform.NewTransformNone() + default: + panic(fmt.Sprintf("`transform` type %s not defined; if no transformer needed, specify `none`", params.Transform.Type)) + } + return transformer, err +} + +func getExtractor(opMetrics *operational.Metrics, params config.StageParam) (extract.Extractor, error) { + var extractor extract.Extractor + var err error + switch params.Extract.Type { + case api.NoneType: + extractor, _ = 
extract.NewExtractNone() + case api.AggregateType: + extractor, err = extract.NewExtractAggregate(params) + case api.ConnTrackType: + extractor, err = conntrack.NewConnectionTrack(opMetrics, params, clock.New()) + case api.TimebasedType: + extractor, err = extract.NewExtractTimebased(params) + default: + panic(fmt.Sprintf("`extract` type %s not defined; if no extractor needed, specify `none`", params.Extract.Type)) + } + return extractor, err +} + +func getEncoder(opMetrics *operational.Metrics, params config.StageParam) (encode.Encoder, error) { + var encoder encode.Encoder + var err error + switch params.Encode.Type { + case api.PromType: + encoder, err = encode.NewEncodeProm(opMetrics, params) + case api.KafkaType: + encoder, err = encode.NewEncodeKafka(opMetrics, params) + case api.S3Type: + encoder, err = encode.NewEncodeS3(opMetrics, params) + case api.OtlpLogsType: + encoder, err = opentelemetry.NewEncodeOtlpLogs(opMetrics, params) + case api.OtlpMetricsType: + encoder, err = opentelemetry.NewEncodeOtlpMetrics(opMetrics, params) + case api.OtlpTracesType: + encoder, err = opentelemetry.NewEncodeOtlpTraces(opMetrics, params) + case api.NoneType: + encoder, _ = encode.NewEncodeNone() + default: + panic(fmt.Sprintf("`encode` type %s not defined; if no encoder needed, specify `none`", params.Encode.Type)) + } + return encoder, err +} + +// findStageType finds the matching config.param structure and identifies the stage type +func findStageType(param *config.StageParam) string { + log.Debugf("findStageType: stage = %v", param.Name) + if param.Ingest != nil && param.Ingest.Type != "" { + return StageIngest + } + if param.Transform != nil && param.Transform.Type != "" { + return StageTransform + } + if param.Extract != nil && param.Extract.Type != "" { + return StageExtract + } + if param.Encode != nil && param.Encode.Type != "" { + return StageEncode + } + if param.Write != nil && param.Write.Type != "" { + return StageWrite + } + return "unknown" +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_watcher.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_watcher.go new file mode 100644 index 000000000..35eaf1a41 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/pipeline_watcher.go @@ -0,0 +1,114 @@ +package pipeline + +import ( + "context" + "encoding/json" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + log "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" +) + +type pipelineConfigWatcher struct { + clientSet kubernetes.Clientset + cmName string + cmNamespace string + configFile string + pipelineEntryMap map[string]*pipelineEntry +} + +func newPipelineConfigWatcher(cfg *config.ConfigFileStruct, pipelineEntryMap map[string]*pipelineEntry) (*pipelineConfigWatcher, error) { + if cfg.DynamicParameters.Name == "" || + cfg.DynamicParameters.Namespace == "" || + cfg.DynamicParameters.FileName == "" { + return nil, nil + } + + config, err := utils.LoadK8sConfig(cfg.DynamicParameters.KubeConfigPath) + if err != nil { + return nil, err + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + pipelineCW := pipelineConfigWatcher{ + clientSet: *clientset, + pipelineEntryMap: pipelineEntryMap, + cmName: cfg.DynamicParameters.Name, + cmNamespace: cfg.DynamicParameters.Namespace, +
configFile: cfg.DynamicParameters.FileName, + } + + return &pipelineCW, nil +} + +func (pcw *pipelineConfigWatcher) Run() { + for { + watcher, err := pcw.clientSet.CoreV1().ConfigMaps(pcw.cmNamespace).Watch(context.TODO(), + metav1.SingleObject(metav1.ObjectMeta{Name: pcw.cmName, Namespace: pcw.cmNamespace})) + if err != nil { + log.Errorf("Unable to create watcher: %s", err) + return + } + pcw.handleEvent(watcher.ResultChan()) + } +} + +func (pcw *pipelineConfigWatcher) handleEvent(eventChannel <-chan watch.Event) { + for { + event, open := <-eventChannel + if open { + switch event.Type { + case watch.Added: + fallthrough + case watch.Modified: + // Reload the affected pipeline stages from the updated ConfigMap + if updatedMap, ok := event.Object.(*corev1.ConfigMap); ok { + pcw.updateFromConfigmap(updatedMap) + } + case watch.Deleted: + fallthrough + case watch.Bookmark: + case watch.Error: + default: + // Do nothing + } + } else { + // If eventChannel is closed, it means the server has closed the connection + return + } + } +} + +func (pcw *pipelineConfigWatcher) updateFromConfigmap(cm *corev1.ConfigMap) { + if rawConfig, ok := cm.Data[pcw.configFile]; ok { + config := config.HotReloadStruct{} + err := json.Unmarshal([]byte(rawConfig), &config) + if err != nil { + log.Errorf("Cannot parse config: %v", err) + return + } + for _, param := range config.Parameters { + if pentry, ok := pcw.pipelineEntryMap[param.Name]; ok { + pcw.updateEntry(pentry, param) + } + } + } +} + +func (pcw *pipelineConfigWatcher) updateEntry(pEntry *pipelineEntry, param config.StageParam) { + switch pEntry.stageType { + case StageEncode: + pEntry.Encoder.Update(param) + default: + log.Warningf("Hot reloading not supported for: %s", pEntry.stageType) + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/cni.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/cni.go new file mode 100644 index 000000000..05533886d --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/cni.go @@ -0,0 +1,9 @@ +package cni + +import ( + v1 "k8s.io/api/core/v1" +) + +type Plugin interface { + GetNodeIPs(node *v1.Node) []string +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/multus.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/multus.go new file mode 100644 index 000000000..b37d45ea9 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/multus.go @@ -0,0 +1,134 @@ +package cni + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + v1 "k8s.io/api/core/v1" +) + +const ( + statusAnnotation = "k8s.v1.cni.cncf.io/network-status" + // Index names + indexIP = "ip" + indexMAC = "mac" + indexInterface = "interface" +) + +type MultusHandler struct { +} + +type SecondaryNetKey struct { + NetworkName string + Key string +} + +func (m *MultusHandler) BuildKeys(flow config.GenericMap, rule *api.K8sRule, secNets []api.SecondaryNetwork) []SecondaryNetKey { + if len(secNets) == 0 { + return nil + } + var keys []SecondaryNetKey + for _, sn := range secNets { + snKeys := m.buildSNKeys(flow, rule, &sn) + if snKeys != nil { + keys = append(keys, snKeys...)
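+ // Keys follow the "<interface>~<ip>~<MAC>" format, matching the custom index built from the pods' network-status annotation (see NetStatItem.Keys)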
+ } + } + return keys +} + +func (m *MultusHandler) buildSNKeys(flow config.GenericMap, rule *api.K8sRule, sn *api.SecondaryNetwork) []SecondaryNetKey { + var keys []SecondaryNetKey + + var ip, mac string + var interfaces []string + if _, ok := sn.Index[indexIP]; ok && len(rule.IPField) > 0 { + ip, ok = flow.LookupString(rule.IPField) + if !ok { + return nil + } + } + if _, ok := sn.Index[indexMAC]; ok && len(rule.MACField) > 0 { + mac, ok = flow.LookupString(rule.MACField) + if !ok { + return nil + } + } + if _, ok := sn.Index[indexInterface]; ok && len(rule.InterfacesField) > 0 { + v, ok := flow[rule.InterfacesField] + if !ok { + return nil + } + interfaces, ok = v.([]string) + if !ok { + return nil + } + } + + macIP := "~" + ip + "~" + mac + if interfaces == nil { + return []SecondaryNetKey{{NetworkName: sn.Name, Key: macIP}} + } + for _, intf := range interfaces { + keys = append(keys, SecondaryNetKey{NetworkName: sn.Name, Key: intf + macIP}) + } + + return keys +} + +func (m *MultusHandler) GetPodUniqueKeys(pod *v1.Pod, secNets []api.SecondaryNetwork) ([]string, error) { + if len(secNets) == 0 { + return nil, nil + } + // Cf https://k8snetworkplumbingwg.github.io/multus-cni/docs/quickstart.html#network-status-annotations + if statusAnnotationJSON, ok := pod.Annotations[statusAnnotation]; ok { + var networks []NetStatItem + if err := json.Unmarshal([]byte(statusAnnotationJSON), &networks); err != nil { + return nil, fmt.Errorf("failed to index from network-status annotation, cannot read annotation %s: %w", statusAnnotation, err) + } + var keys []string + for _, network := range networks { + for _, snConfig := range secNets { + if snConfig.Name == network.Name { + keys = append(keys, network.Keys(snConfig)...) + } + } + } + return keys, nil + } + // Annotation not present => just ignore, no error + return nil, nil +} + +type NetStatItem struct { + Name string `json:"name"` + Interface string `json:"interface"` + IPs []string `json:"ips"` + MAC string `json:"mac"` +} + +func (n *NetStatItem) Keys(snConfig api.SecondaryNetwork) []string { + var mac, intf string + if _, ok := snConfig.Index[indexMAC]; ok { + mac = n.MAC + } + if _, ok := snConfig.Index[indexInterface]; ok { + intf = n.Interface + } + if _, ok := snConfig.Index[indexIP]; ok { + var keys []string + for _, ip := range n.IPs { + keys = append(keys, key(intf, ip, mac)) + } + return keys + } + // Ignore IP + return []string{key(intf, "", mac)} +} + +func key(intf, ip, mac string) string { + return intf + "~" + ip + "~" + strings.ToUpper(mac) +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/ovn_kubernetes.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/ovn_kubernetes.go new file mode 100644 index 000000000..ae5701de1 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni/ovn_kubernetes.go @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package cni + +import ( + "encoding/json" + "fmt" + "net" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" +) + +const ( + ovnSubnetAnnotation = "k8s.ovn.org/node-subnets" +) + +type OVNPlugin struct { + Plugin +} + +func (o *OVNPlugin) GetNodeIPs(node *v1.Node) []string { + // Add IP that is used in OVN for some traffic on mp0 interface + // (no IP / error returned when not using ovn-k) + ip, err := findOvnMp0IP(node.Annotations) + if err != nil { + // Log the error as Info, do not block other ips indexing + log.Infof("failed to index OVN mp0 IP: %v", err) + } else if ip != "" { + return []string{ip} + } + return nil +} + +func unmarshalOVNAnnotation(annot []byte) (string, error) { + // Depending on OVN (OCP) version, the annotation might be JSON-encoded as a string (legacy), or an array of strings + var subnetsAsArray map[string][]string + err := json.Unmarshal(annot, &subnetsAsArray) + if err == nil { + if subnets, ok := subnetsAsArray["default"]; ok { + if len(subnets) > 0 { + return subnets[0], nil + } + } + return "", fmt.Errorf("unexpected content for annotation %s: %s", ovnSubnetAnnotation, annot) + } + + var subnetsAsString map[string]string + err = json.Unmarshal(annot, &subnetsAsString) + if err == nil { + if subnet, ok := subnetsAsString["default"]; ok { + return subnet, nil + } + return "", fmt.Errorf("unexpected content for annotation %s: %s", ovnSubnetAnnotation, annot) + } + + return "", fmt.Errorf("cannot read annotation %s: %w", ovnSubnetAnnotation, err) +} + +func findOvnMp0IP(annotations map[string]string) (string, error) { + if subnetsJSON, ok := annotations[ovnSubnetAnnotation]; ok { + subnet, err := unmarshalOVNAnnotation([]byte(subnetsJSON)) + if err != nil { + return "", err + } + // From subnet like 10.128.0.0/23, we want to index IP 10.128.0.2 + ip0, _, err := net.ParseCIDR(subnet) + if err != nil { + return "", err + } + ip4 := ip0.To4() + if ip4 == nil { + // TODO: what's the rule with ipv6? 
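+ // For now, IPv6-only node subnets are skipped: no mp0 IP is derived for them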
+ return "", nil + } + return fmt.Sprintf("%d.%d.%d.%d", ip4[0], ip4[1], ip4[2], ip4[3]+2), nil + } + // Annotation not present (expected if not using ovn-kubernetes) => just ignore, no error + return "", nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go new file mode 100644 index 000000000..b6a79df7a --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/enrich.go @@ -0,0 +1,154 @@ +package kubernetes + +import ( + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + inf "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers" + "github.com/sirupsen/logrus" +) + +var informers inf.InformersInterface = &inf.Informers{} + +// For testing +func MockInformers() { + informers = inf.NewInformersMock() +} + +func InitFromConfig(config api.NetworkTransformKubeConfig, opMetrics *operational.Metrics) error { + return informers.InitFromConfig(config, opMetrics) +} + +func Enrich(outputEntry config.GenericMap, rule *api.K8sRule) { + ip, ok := outputEntry.LookupString(rule.IPField) + if !ok { + return + } + potentialKeys := informers.BuildSecondaryNetworkKeys(outputEntry, rule) + kubeInfo, err := informers.GetInfo(potentialKeys, ip) + if err != nil { + logrus.WithError(err).Tracef("can't find kubernetes info for keys %v and IP %s", potentialKeys, ip) + return + } + if rule.Assignee != "otel" { + // NETOBSERV-666: avoid putting empty namespaces or Loki aggregation queries will + // differentiate between empty and nil namespaces. + if kubeInfo.Namespace != "" { + outputEntry[rule.Output+"_Namespace"] = kubeInfo.Namespace + } + outputEntry[rule.Output+"_Name"] = kubeInfo.Name + outputEntry[rule.Output+"_Type"] = kubeInfo.Type + outputEntry[rule.Output+"_OwnerName"] = kubeInfo.Owner.Name + outputEntry[rule.Output+"_OwnerType"] = kubeInfo.Owner.Type + outputEntry[rule.Output+"_NetworkName"] = kubeInfo.NetworkName + if rule.LabelsPrefix != "" { + for labelKey, labelValue := range kubeInfo.Labels { + outputEntry[rule.LabelsPrefix+"_"+labelKey] = labelValue + } + } + if kubeInfo.HostIP != "" { + outputEntry[rule.Output+"_HostIP"] = kubeInfo.HostIP + if kubeInfo.HostName != "" { + outputEntry[rule.Output+"_HostName"] = kubeInfo.HostName + } + } + fillInK8sZone(outputEntry, rule, kubeInfo, "_Zone") + } else { + // NOTE: Some of these fields are taken from opentelemetry specs. 
+ // See https://opentelemetry.io/docs/specs/semconv/resource/k8s/ + // Other fields (not specified in the specs) are named similarly + if kubeInfo.Namespace != "" { + outputEntry[rule.Output+"k8s.namespace.name"] = kubeInfo.Namespace + } + switch kubeInfo.Type { + case inf.TypeNode: + outputEntry[rule.Output+"k8s.node.name"] = kubeInfo.Name + outputEntry[rule.Output+"k8s.node.uid"] = kubeInfo.UID + case inf.TypePod: + outputEntry[rule.Output+"k8s.pod.name"] = kubeInfo.Name + outputEntry[rule.Output+"k8s.pod.uid"] = kubeInfo.UID + case inf.TypeService: + outputEntry[rule.Output+"k8s.service.name"] = kubeInfo.Name + outputEntry[rule.Output+"k8s.service.uid"] = kubeInfo.UID + } + outputEntry[rule.Output+"k8s.name"] = kubeInfo.Name + outputEntry[rule.Output+"k8s.type"] = kubeInfo.Type + outputEntry[rule.Output+"k8s.owner.name"] = kubeInfo.Owner.Name + outputEntry[rule.Output+"k8s.owner.type"] = kubeInfo.Owner.Type + if rule.LabelsPrefix != "" { + for labelKey, labelValue := range kubeInfo.Labels { + outputEntry[rule.LabelsPrefix+"."+labelKey] = labelValue + } + } + if kubeInfo.HostIP != "" { + outputEntry[rule.Output+"k8s.host.ip"] = kubeInfo.HostIP + if kubeInfo.HostName != "" { + outputEntry[rule.Output+"k8s.host.name"] = kubeInfo.HostName + } + } + fillInK8sZone(outputEntry, rule, kubeInfo, "k8s.zone") + } +} + +const nodeZoneLabelName = "topology.kubernetes.io/zone" + +func fillInK8sZone(outputEntry config.GenericMap, rule *api.K8sRule, kubeInfo *inf.Info, zonePrefix string) { + if !rule.AddZone { + // Nothing to do + return + } + switch kubeInfo.Type { + case inf.TypeNode: + zone, ok := kubeInfo.Labels[nodeZoneLabelName] + if ok { + outputEntry[rule.Output+zonePrefix] = zone + } + return + case inf.TypePod: + nodeInfo, err := informers.GetNodeInfo(kubeInfo.HostName) + if err != nil { + logrus.WithError(err).Tracef("can't find nodes info for node %v", kubeInfo.HostName) + return + } + if nodeInfo != nil { + zone, ok := nodeInfo.Labels[nodeZoneLabelName] + if ok { + outputEntry[rule.Output+zonePrefix] = zone + } + } + return + + case inf.TypeService: + // A service is not assigned to a dedicated zone, skipping + return + } +} + +func EnrichLayer(outputEntry config.GenericMap, rule *api.K8sInfraRule) { + outputEntry[rule.Output] = "infra" + for _, nsnameFields := range rule.NamespaceNameFields { + if namespace, _ := outputEntry.LookupString(nsnameFields.Namespace); namespace != "" { + name, _ := outputEntry.LookupString(nsnameFields.Name) + if objectIsApp(namespace, name, rule) { + outputEntry[rule.Output] = "app" + return + } + } + } +} + +func objectIsApp(namespace, name string, rule *api.K8sInfraRule) bool { + for _, prefix := range rule.InfraPrefixes { + if strings.HasPrefix(namespace, prefix) { + return false + } + } + for _, ref := range rule.InfraRefs { + if namespace == ref.Namespace && name == ref.Name { + return false + } + } + return true +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go new file mode 100644 index 000000000..513d32ecc --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers-mock.go @@ -0,0 +1,204 @@ +package informers + +import ( + "errors" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + 
"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni" + "github.com/stretchr/testify/mock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" +) + +var ( + secondaryNetConfig = []api.SecondaryNetwork{ + { + Name: "my-network", + Index: map[string]any{"mac": nil}, + }, + } +) + +type Mock struct { + mock.Mock + InformersInterface +} + +func NewInformersMock() *Mock { + inf := new(Mock) + inf.On("InitFromConfig", mock.Anything, mock.Anything).Return(nil) + return inf +} + +func (o *Mock) InitFromConfig(cfg api.NetworkTransformKubeConfig, opMetrics *operational.Metrics) error { + args := o.Called(cfg, opMetrics) + return args.Error(0) +} + +type IndexerMock struct { + mock.Mock + cache.Indexer +} + +type InformerMock struct { + mock.Mock + InformerInterface +} + +type InformerInterface interface { + cache.SharedInformer + AddIndexers(indexers cache.Indexers) error + GetIndexer() cache.Indexer +} + +func (m *IndexerMock) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + args := m.Called(indexName, indexedValue) + return args.Get(0).([]interface{}), args.Error(1) +} + +func (m *IndexerMock) GetByKey(key string) (interface{}, bool, error) { + args := m.Called(key) + return args.Get(0), args.Bool(1), args.Error(2) +} + +func (m *InformerMock) GetIndexer() cache.Indexer { + args := m.Called() + return args.Get(0).(cache.Indexer) +} + +func (m *IndexerMock) MockPod(ip, mac, intf, name, namespace, nodeIP string, owner *Owner) { + var ownerRef []metav1.OwnerReference + if owner != nil { + ownerRef = []metav1.OwnerReference{{ + Kind: owner.Type, + Name: owner.Name, + }} + } + info := Info{ + Type: "Pod", + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + OwnerReferences: ownerRef, + }, + HostIP: nodeIP, + ips: []string{}, + secondaryNetKeys: []string{}, + } + if len(mac) > 0 { + nsi := cni.NetStatItem{ + Interface: intf, + MAC: mac, + IPs: []string{ip}, + } + info.secondaryNetKeys = nsi.Keys(secondaryNetConfig[0]) + m.On("ByIndex", IndexCustom, info.secondaryNetKeys[0]).Return([]interface{}{&info}, nil) + } + if len(ip) > 0 { + info.ips = []string{ip} + m.On("ByIndex", IndexIP, ip).Return([]interface{}{&info}, nil) + } +} + +func (m *IndexerMock) MockNode(ip, name string) { + m.On("ByIndex", IndexIP, ip).Return([]interface{}{&Info{ + Type: "Node", + ObjectMeta: metav1.ObjectMeta{Name: name}, + ips: []string{ip}, + }}, nil) +} + +func (m *IndexerMock) MockService(ip, name, namespace string) { + m.On("ByIndex", IndexIP, ip).Return([]interface{}{&Info{ + Type: "Service", + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + ips: []string{ip}, + }}, nil) +} + +func (m *IndexerMock) MockReplicaSet(name, namespace string, owner Owner) { + m.On("GetByKey", namespace+"/"+name).Return(&metav1.ObjectMeta{ + Name: name, + OwnerReferences: []metav1.OwnerReference{{ + Kind: owner.Type, + Name: owner.Name, + }}, + }, true, nil) +} + +func (m *IndexerMock) FallbackNotFound() { + m.On("ByIndex", IndexIP, mock.Anything).Return([]interface{}{}, nil) +} + +func SetupIndexerMocks(kd *Informers) (pods, nodes, svc, rs *IndexerMock) { + // pods informer + pods = &IndexerMock{} + pim := InformerMock{} + pim.On("GetIndexer").Return(pods) + kd.pods = &pim + // nodes informer + nodes = &IndexerMock{} + him := InformerMock{} + him.On("GetIndexer").Return(nodes) + kd.nodes = &him + // svc informer + svc = &IndexerMock{} + sim := InformerMock{} + sim.On("GetIndexer").Return(svc) + kd.services = &sim + // rs informer + rs = 
&IndexerMock{} + rim := InformerMock{} + rim.On("GetIndexer").Return(rs) + kd.replicaSets = &rim + return +} + +type FakeInformers struct { + InformersInterface + ipInfo map[string]*Info + customKeysInfo map[string]*Info + nodes map[string]*Info +} + +func SetupStubs(ipInfo map[string]*Info, customKeysInfo map[string]*Info, nodes map[string]*Info) *FakeInformers { + return &FakeInformers{ + ipInfo: ipInfo, + customKeysInfo: customKeysInfo, + nodes: nodes, + } +} + +func (f *FakeInformers) InitFromConfig(_ api.NetworkTransformKubeConfig, _ *operational.Metrics) error { + return nil +} + +func (f *FakeInformers) GetInfo(keys []cni.SecondaryNetKey, ip string) (*Info, error) { + if len(keys) > 0 { + i := f.customKeysInfo[keys[0].Key] + if i != nil { + return i, nil + } + } + + i := f.ipInfo[ip] + if i != nil { + return i, nil + } + return nil, errors.New("notFound") +} + +func (f *FakeInformers) BuildSecondaryNetworkKeys(flow config.GenericMap, rule *api.K8sRule) []cni.SecondaryNetKey { + m := cni.MultusHandler{} + return m.BuildKeys(flow, rule, secondaryNetConfig) +} + +func (f *FakeInformers) GetNodeInfo(n string) (*Info, error) { + i := f.nodes[n] + if i != nil { + return i, nil + } + return nil, errors.New("notFound") +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go new file mode 100644 index 000000000..2c7a4a4ce --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers/informers.go @@ -0,0 +1,495 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package informers + +import ( + "fmt" + "net" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "github.com/sirupsen/logrus" + + "github.com/prometheus/client_golang/prometheus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + inf "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/metadata" + "k8s.io/client-go/metadata/metadatainformer" + "k8s.io/client-go/tools/cache" +) + +const ( + kubeConfigEnvVariable = "KUBECONFIG" + syncTime = 10 * time.Minute + IndexCustom = "byCustomKey" + IndexIP = "byIP" + TypeNode = "Node" + TypePod = "Pod" + TypeService = "Service" +) + +var ( + log = logrus.WithField("component", "transform.Network.Kubernetes") + cniPlugins = map[string]cni.Plugin{ + api.OVN: &cni.OVNPlugin{}, + } + multus = cni.MultusHandler{} +) + +//nolint:revive +type InformersInterface interface { + BuildSecondaryNetworkKeys(flow config.GenericMap, rule *api.K8sRule) []cni.SecondaryNetKey + GetInfo([]cni.SecondaryNetKey, string) (*Info, error) + GetNodeInfo(string) (*Info, error) + InitFromConfig(api.NetworkTransformKubeConfig, *operational.Metrics) error +} + +type Informers struct { + InformersInterface + // pods, nodes and services cache the different object types as *Info pointers + pods cache.SharedIndexInformer + nodes cache.SharedIndexInformer + services cache.SharedIndexInformer + // replicaSets caches the ReplicaSets as partially-filled *ObjectMeta pointers + replicaSets cache.SharedIndexInformer + stopChan chan struct{} + mdStopChan chan struct{} + managedCNI []string + secondaryNetworks []api.SecondaryNetwork + indexerHitMetric *prometheus.CounterVec +} + +type Owner struct { + Type string + Name string +} + +// Info contains precollected metadata for Pods, Nodes and Services. +// Not all the fields are populated for all the above types. To save +// memory, we just keep in memory the necessary data for each Type. +// For more information about which fields are set for each type, please +// refer to the instantiation function of the respective informers. 
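+// For example, Pod entries carry owner references, host IP/name and secondary-network keys,
+// while Service entries only carry their name, namespace, labels and cluster IPs.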
+type Info struct { + // Informers require the cached object to embed an ObjectMeta instance + metav1.ObjectMeta + Type string + Owner Owner + HostName string + HostIP string + NetworkName string + ips []string + secondaryNetKeys []string +} + +var ( + ipIndexer = func(obj interface{}) ([]string, error) { + return obj.(*Info).ips, nil + } + customKeyIndexer = func(obj interface{}) ([]string, error) { + return obj.(*Info).secondaryNetKeys, nil + } +) + +func (k *Informers) BuildSecondaryNetworkKeys(flow config.GenericMap, rule *api.K8sRule) []cni.SecondaryNetKey { + return multus.BuildKeys(flow, rule, k.secondaryNetworks) +} + +func (k *Informers) GetInfo(potentialKeys []cni.SecondaryNetKey, ip string) (*Info, error) { + if info, ok := k.fetchInformers(potentialKeys, ip); ok { + // Owner data might be discovered after the owned object, so we fetch it + // at the last moment + if info.Owner.Name == "" { + info.Owner = k.getOwner(info) + } + return info, nil + } + + return nil, fmt.Errorf("informers can't find IP %s", ip) +} + +func (k *Informers) fetchInformers(potentialKeys []cni.SecondaryNetKey, ip string) (*Info, bool) { + if info, ok := k.fetchPodInformer(potentialKeys, ip); ok { + // it might happen that the Host is discovered after the Pod + if info.HostName == "" { + info.HostName = k.getHostName(info.HostIP) + } + return info, true + } + // Nodes are only indexed by IP + if info, ok := k.infoForIP(k.nodes.GetIndexer(), "Node", ip); ok { + return info, true + } + // Services are only indexed by IP + if info, ok := k.infoForIP(k.services.GetIndexer(), "Service", ip); ok { + return info, true + } + return nil, false +} + +func (k *Informers) fetchPodInformer(potentialKeys []cni.SecondaryNetKey, ip string) (*Info, bool) { + // 1. Check if the unique key matches any Pod (secondary networks / multus case) + if info, ok := k.infoForCustomKeys(k.pods.GetIndexer(), "Pod", potentialKeys); ok { + return info, ok + } + // 2.
Check if the IP matches any Pod (primary network) + return k.infoForIP(k.pods.GetIndexer(), "Pod", ip) +} + +func (k *Informers) increaseIndexerHits(kind, namespace, network, warn string) { + k.indexerHitMetric.WithLabelValues(kind, namespace, network, warn).Inc() +} + +func (k *Informers) infoForCustomKeys(idx cache.Indexer, kind string, potentialKeys []cni.SecondaryNetKey) (*Info, bool) { + for _, key := range potentialKeys { + objs, err := idx.ByIndex(IndexCustom, key.Key) + if err != nil { + k.increaseIndexerHits(kind, "", key.NetworkName, "informer error") + log.WithError(err).WithField("key", key).Debug("error accessing unique key index, ignoring") + return nil, false + } + if len(objs) > 0 { + info := objs[0].(*Info) + info.NetworkName = key.NetworkName + if len(objs) > 1 { + k.increaseIndexerHits(kind, info.Namespace, key.NetworkName, "multiple matches") + log.WithField("key", key).Debugf("found %d objects matching this key, returning first", len(objs)) + } else { + k.increaseIndexerHits(kind, info.Namespace, key.NetworkName, "") + } + log.Tracef("infoForUniqueKey found key %v", info) + return info, true + } + } + return nil, false +} + +func (k *Informers) infoForIP(idx cache.Indexer, kind string, ip string) (*Info, bool) { + objs, err := idx.ByIndex(IndexIP, ip) + if err != nil { + k.increaseIndexerHits(kind, "", "primary", "informer error") + log.WithError(err).WithField("ip", ip).Debug("error accessing IP index, ignoring") + return nil, false + } + if len(objs) > 0 { + info := objs[0].(*Info) + info.NetworkName = "primary" + if len(objs) > 1 { + k.increaseIndexerHits(kind, info.Namespace, "primary", "multiple matches") + log.WithField("ip", ip).Debugf("found %d objects matching this IP, returning first", len(objs)) + } else { + k.increaseIndexerHits(kind, info.Namespace, "primary", "") + } + log.Tracef("infoForIP found ip %v", info) + return info, true + } + return nil, false +} + +func (k *Informers) GetNodeInfo(name string) (*Info, error) { + item, ok, err := k.nodes.GetIndexer().GetByKey(name) + if err != nil { + return nil, err + } else if ok { + return item.(*Info), nil + } + return nil, nil +} + +func (k *Informers) getOwner(info *Info) Owner { + if len(info.OwnerReferences) != 0 { + ownerReference := info.OwnerReferences[0] + if ownerReference.Kind != "ReplicaSet" { + return Owner{ + Name: ownerReference.Name, + Type: ownerReference.Kind, + } + } + + item, ok, err := k.replicaSets.GetIndexer().GetByKey(info.Namespace + "/" + ownerReference.Name) + if err != nil { + log.WithError(err).WithField("key", info.Namespace+"/"+ownerReference.Name). + Debug("can't get ReplicaSet info from informer. 
Ignoring") + } else if ok { + rsInfo := item.(*metav1.ObjectMeta) + if len(rsInfo.OwnerReferences) > 0 { + return Owner{ + Name: rsInfo.OwnerReferences[0].Name, + Type: rsInfo.OwnerReferences[0].Kind, + } + } + } + } + // If no owner references found, return itself as owner + return Owner{ + Name: info.Name, + Type: info.Type, + } +} + +func (k *Informers) getHostName(hostIP string) string { + if hostIP != "" { + if info, ok := k.infoForIP(k.nodes.GetIndexer(), "Node (indirect)", hostIP); ok { + return info.Name + } + } + return "" +} + +func (k *Informers) initNodeInformer(informerFactory inf.SharedInformerFactory) error { + nodes := informerFactory.Core().V1().Nodes().Informer() + // Transform any *v1.Node instance into a *Info instance to save space + // in the informer's cache + if err := nodes.SetTransform(func(i interface{}) (interface{}, error) { + node, ok := i.(*v1.Node) + if !ok { + return nil, fmt.Errorf("was expecting a Node. Got: %T", i) + } + ips := make([]string, 0, len(node.Status.Addresses)) + hostIP := "" + for _, address := range node.Status.Addresses { + ip := net.ParseIP(address.Address) + if ip != nil { + ips = append(ips, ip.String()) + if hostIP == "" { + hostIP = ip.String() + } + } + } + // CNI-dependent logic (must not fail when the CNI is not installed) + for _, name := range k.managedCNI { + if plugin := cniPlugins[name]; plugin != nil { + moreIPs := plugin.GetNodeIPs(node) + if moreIPs != nil { + ips = append(ips, moreIPs...) + } + } + } + + return &Info{ + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + Labels: node.Labels, + }, + ips: ips, + Type: TypeNode, + // We duplicate HostIP and HostName information to simplify later filtering e.g. by + // Host IP, where we want to get all the Pod flows by src/dst host, but also the actual + // host-to-host flows by the same field. + HostIP: hostIP, + HostName: node.Name, + }, nil + }); err != nil { + return fmt.Errorf("can't set nodes transform: %w", err) + } + indexers := cache.Indexers{IndexIP: ipIndexer} + if err := nodes.AddIndexers(indexers); err != nil { + return fmt.Errorf("can't add %s indexer to Nodes informer: %w", IndexIP, err) + } + k.nodes = nodes + return nil +} + +func (k *Informers) initPodInformer(informerFactory inf.SharedInformerFactory) error { + pods := informerFactory.Core().V1().Pods().Informer() + // Transform any *v1.Pod instance into a *Info instance to save space + // in the informer's cache + if err := pods.SetTransform(func(i interface{}) (interface{}, error) { + pod, ok := i.(*v1.Pod) + if !ok { + return nil, fmt.Errorf("was expecting a Pod. 
Got: %T", i) + } + ips := make([]string, 0, len(pod.Status.PodIPs)) + for _, ip := range pod.Status.PodIPs { + // ignoring host-networked Pod IPs + if ip.IP != pod.Status.HostIP { + ips = append(ips, ip.IP) + } + } + // Index from secondary network info + keys, err := multus.GetPodUniqueKeys(pod, k.secondaryNetworks) + if err != nil { + // Log the error as Info, do not block other ips indexing + log.WithError(err).Infof("Secondary network cannot be identified") + } + + return &Info{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + Labels: pod.Labels, + OwnerReferences: pod.OwnerReferences, + }, + Type: TypePod, + HostIP: pod.Status.HostIP, + HostName: pod.Spec.NodeName, + secondaryNetKeys: keys, + ips: ips, + }, nil + }); err != nil { + return fmt.Errorf("can't set pods transform: %w", err) + } + indexers := cache.Indexers{ + IndexIP: ipIndexer, + IndexCustom: customKeyIndexer, + } + if err := pods.AddIndexers(indexers); err != nil { + return fmt.Errorf("can't add indexers to Pods informer: %w", err) + } + + k.pods = pods + return nil +} + +func (k *Informers) initServiceInformer(informerFactory inf.SharedInformerFactory) error { + services := informerFactory.Core().V1().Services().Informer() + // Transform any *v1.Service instance into a *Info instance to save space + // in the informer's cache + if err := services.SetTransform(func(i interface{}) (interface{}, error) { + svc, ok := i.(*v1.Service) + if !ok { + return nil, fmt.Errorf("was expecting a Service. Got: %T", i) + } + ips := make([]string, 0, len(svc.Spec.ClusterIPs)) + for _, ip := range svc.Spec.ClusterIPs { + // ignoring None IPs + if isServiceIPSet(ip) { + ips = append(ips, ip) + } + } + return &Info{ + ObjectMeta: metav1.ObjectMeta{ + Name: svc.Name, + Namespace: svc.Namespace, + Labels: svc.Labels, + }, + Type: TypeService, + ips: ips, + }, nil + }); err != nil { + return fmt.Errorf("can't set services transform: %w", err) + } + indexers := cache.Indexers{IndexIP: ipIndexer} + if err := services.AddIndexers(indexers); err != nil { + return fmt.Errorf("can't add %s indexer to Services informer: %w", IndexIP, err) + } + + k.services = services + return nil +} + +func (k *Informers) initReplicaSetInformer(informerFactory metadatainformer.SharedInformerFactory) error { + k.replicaSets = informerFactory.ForResource( + schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "replicasets", + }).Informer() + // To save space, instead of storing a complete *metav1.ObjectMeta instance, the + // informer's cache will store only the minimal required fields + if err := k.replicaSets.SetTransform(func(i interface{}) (interface{}, error) { + rs, ok := i.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("was expecting a ReplicaSet.
Got: %T", i) + } + return &metav1.ObjectMeta{ + Name: rs.Name, + Namespace: rs.Namespace, + OwnerReferences: rs.OwnerReferences, + }, nil + }); err != nil { + return fmt.Errorf("can't set ReplicaSets transform: %w", err) + } + return nil +} + +func (k *Informers) InitFromConfig(cfg api.NetworkTransformKubeConfig, opMetrics *operational.Metrics) error { + // Initialization variables + k.stopChan = make(chan struct{}) + k.mdStopChan = make(chan struct{}) + + kconf, err := utils.LoadK8sConfig(cfg.ConfigPath) + if err != nil { + return err + } + + kubeClient, err := kubernetes.NewForConfig(kconf) + if err != nil { + return err + } + + metaKubeClient, err := metadata.NewForConfig(kconf) + if err != nil { + return err + } + + k.managedCNI = cfg.ManagedCNI + if k.managedCNI == nil { + k.managedCNI = []string{api.OVN} + } + k.secondaryNetworks = cfg.SecondaryNetworks + k.indexerHitMetric = opMetrics.CreateIndexerHitCounter() + err = k.initInformers(kubeClient, metaKubeClient) + if err != nil { + return err + } + + return nil +} + +func (k *Informers) initInformers(client kubernetes.Interface, metaClient metadata.Interface) error { + informerFactory := inf.NewSharedInformerFactory(client, syncTime) + metadataInformerFactory := metadatainformer.NewSharedInformerFactory(metaClient, syncTime) + err := k.initNodeInformer(informerFactory) + if err != nil { + return err + } + err = k.initPodInformer(informerFactory) + if err != nil { + return err + } + err = k.initServiceInformer(informerFactory) + if err != nil { + return err + } + err = k.initReplicaSetInformer(metadataInformerFactory) + if err != nil { + return err + } + + log.Debugf("starting kubernetes informers, waiting for synchronization") + informerFactory.Start(k.stopChan) + informerFactory.WaitForCacheSync(k.stopChan) + log.Debugf("kubernetes informers started") + + log.Debugf("starting kubernetes metadata informers, waiting for synchronization") + metadataInformerFactory.Start(k.mdStopChan) + metadataInformerFactory.WaitForCacheSync(k.mdStopChan) + log.Debugf("kubernetes metadata informers started") + return nil +} + +func isServiceIPSet(ip string) bool { + return ip != v1.ClusterIPNone && ip != "" +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go new file mode 100644 index 000000000..42f9ab4a7 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location/location.go @@ -0,0 +1,221 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package location + +import ( + "archive/zip" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/ip2location/ip2location-go/v9" + log "github.com/sirupsen/logrus" +) + +type Info struct { + CountryName string `json:"country_name"` + CountryLongName string `json:"country_long"` + RegionName string `json:"region_name"` + CityName string `json:"city_name"` + Latitude string `json:"latitude"` + Longitude string `json:"longitude"` +} + +const ( + dbFilename = "IP2LOCATION-LITE-DB9.BIN" + dbFileLocation = "/tmp/location_db.bin" + dbZIPFileLocation = "/tmp/location_db.bin" + ".zip" + // REF: Original location from ip2location DB is: "https://www.ip2location.com/download/?token=OpOljbgT6K2WJnFrFBBmBzRVNpHlcYqNN4CMeGavvh0pPOpyu16gKQyqvDMxTDF4&file=DB9LITEBIN" + dbURL = "https://raw.githubusercontent.com/netobserv/flowlogs-pipeline/main/contrib/location/location.db" +) + +var locationDB *ip2location.DB + +type OSIO struct { + Stat func(string) (os.FileInfo, error) + Create func(string) (*os.File, error) + MkdirAll func(string, os.FileMode) error + OpenFile func(string, int, os.FileMode) (*os.File, error) + Copy func(io.Writer, io.Reader) (int64, error) +} + +var _osio = OSIO{} +var _dbURL string +var locationDBMutex *sync.Mutex + +func init() { + _osio.Stat = os.Stat + _osio.Create = os.Create + _osio.MkdirAll = os.MkdirAll + _osio.OpenFile = os.OpenFile + _osio.Copy = io.Copy + _dbURL = dbURL + locationDBMutex = &sync.Mutex{} +} + +func InitLocationDB() error { + locationDBMutex.Lock() + defer locationDBMutex.Unlock() + + if _, statErr := _osio.Stat(dbFileLocation); errors.Is(statErr, os.ErrNotExist) { + log.Infof("Downloading location DB into local file %s ", dbFileLocation) + out, createErr := _osio.Create(dbZIPFileLocation) + if createErr != nil { + return fmt.Errorf("failed os.Create %w ", createErr) + } + + timeout := time.Minute + tr := &http.Transport{IdleConnTimeout: timeout} + client := &http.Client{Transport: tr, Timeout: timeout} + resp, getErr := client.Get(_dbURL) + if getErr != nil { + return fmt.Errorf("failed http.Get %w ", getErr) + } + + log.Infof("Got response %s", resp.Status) + + written, copyErr := io.Copy(out, resp.Body) + if copyErr != nil { + return fmt.Errorf("failed io.Copy %w ", copyErr) + } + + log.Infof("Wrote %d bytes to %s", written, dbZIPFileLocation) + + bodyCloseErr := resp.Body.Close() + if bodyCloseErr != nil { + return fmt.Errorf("failed resp.Body.Close %w ", bodyCloseErr) + } + + outCloseErr := out.Close() + if outCloseErr != nil { + return fmt.Errorf("failed out.Close %w ", outCloseErr) + } + + unzipErr := unzip(dbZIPFileLocation, dbFileLocation) + if unzipErr != nil { + file, openErr := os.Open(dbFileLocation + "/" + dbFilename) + if openErr == nil { + fi, fileStatErr := file.Stat() + if fileStatErr == nil { + log.Infof("length of %s is: %d", dbFileLocation+"/"+dbFilename, fi.Size()) + _ = file.Close() + } else { + log.Infof("file.Stat err %v", fileStatErr) + } + } else { + log.Infof("os.Open err %v", openErr) + } + + fileContent, readFileErr := os.ReadFile(dbFileLocation + "/" + dbFilename) + if readFileErr == nil { + log.Infof("content of first 100 bytes of %s is: %s", dbFileLocation+"/"+dbFilename, fileContent[:100]) + } else { + log.Infof("os.ReadFile err %v", readFileErr) + } + + return fmt.Errorf("failed unzip %w ", unzipErr) + } + + log.Infof("Download completed successfully") + } + + log.Debugf("Loading location DB") + db, openDBErr := ip2location.OpenDB(dbFileLocation + "/" + 
dbFilename) + if openDBErr != nil { + return fmt.Errorf("OpenDB err - %w ", openDBErr) + } + + locationDB = db + return nil +} + +func GetLocation(ip string) (*Info, error) { + + if locationDB == nil { + return nil, fmt.Errorf("no location DB available") + } + + res, err := locationDB.Get_all(ip) + if err != nil { + return nil, err + } + + return &Info{ + CountryName: res.Country_short, + CountryLongName: res.Country_long, + RegionName: res.Region, + CityName: res.City, + Latitude: fmt.Sprintf("%f", res.Latitude), + Longitude: fmt.Sprintf("%f", res.Longitude), + }, nil +} + +//goland:noinspection ALL +func unzip(src, dest string) error { + r, err := zip.OpenReader(src) + if err != nil { + return err + } + defer r.Close() + + for _, f := range r.File { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() + + filePath := filepath.Join(dest, f.Name) + if f.FileInfo().IsDir() { + err = _osio.MkdirAll(filePath, f.Mode()) + if err != nil { + log.Error(err) + return err + } + } else { + var fileDir string + if lastIndex := strings.LastIndex(filePath, string(os.PathSeparator)); lastIndex > -1 { + fileDir = filePath[:lastIndex] + } + + err = _osio.MkdirAll(fileDir, f.Mode()) + if err != nil { + log.Error(err) + return err + } + df, err := _osio.OpenFile( + filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return err + } + defer df.Close() + + _, err = _osio.Copy(df, rc) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb/netdb.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb/netdb.go new file mode 100644 index 000000000..f4b4f21d8 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb/netdb.go @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * > Note: this code is a revised and enhanced version of the netdb.go file + * > from https://github.com/dominikh/go-netdb/ (MIT License) + */ + +package netdb + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/sirupsen/logrus" +) + +var slog = logrus.WithField("component", "netdb.ServiceNames") + +type numKey struct { + port int + protocolNumber int +} + +type nameKey struct { + port int + protocolName string +} + +type ServiceNames struct { + protoNums map[int]struct{} + // key: protocol name, value: protocol number + protoNames map[string]int + byPort map[int]string + byProtoNum map[numKey]string + byProtoName map[nameKey]string +} + +// LoadServicesDB receives readers to the /etc/protocols and /etc/services formatted content +// and returns a database that allows querying service names from ports and protocol information +func LoadServicesDB(protocols, services io.Reader) (*ServiceNames, error) { + log := slog.WithField("method", "LoadServicesDB") + db := ServiceNames{ + protoNums: map[int]struct{}{}, + protoNames: map[string]int{}, + byPort: map[int]string{}, + byProtoNum: map[numKey]string{}, + byProtoName: map[nameKey]string{}, + } + // Load protocols + protoData, err := io.ReadAll(protocols) + if err != nil { + return nil, fmt.Errorf("reading protocols data: %w", err) + } + + // key: proto name, value: aliases + protoAliases := map[string][]string{} + + for i, line := range strings.Split(string(protoData), "\n") { + line = strings.TrimSpace(line) + split := strings.SplitN(line, "#", 2) + fields := strings.Fields(split[0]) + if len(fields) < 2 { + continue + } + + num, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + log.WithFields(logrus.Fields{ + logrus.ErrorKey: err, + "lineNum": i, + "line": line, + }).Debug("wrong protocol number. Ignoring entry") + continue + } + + db.protoNums[int(num)] = struct{}{} + db.protoNames[fields[0]] = int(num) + for _, alias := range fields[2:] { + db.protoNames[alias] = int(num) + } + protoAliases[fields[0]] = fields[2:] + } + + // Load services + svcData, err := io.ReadAll(services) + if err != nil { + return nil, fmt.Errorf("reading services data: %w", err) + } + + for i, line := range strings.Split(string(svcData), "\n") { + line = strings.TrimSpace(line) + split := strings.SplitN(line, "#", 2) + fields := strings.Fields(split[0]) + if len(fields) < 2 { + continue + } + + svcName := fields[0] + portproto := strings.SplitN(fields[1], "/", 2) + if len(portproto) < 2 { + log.WithFields(logrus.Fields{ + "lineNum": i, + "line": line, + }).Debug("missing port/protocol separator. Ignoring entry") + continue + } + protoName := portproto[1] + port, err := strconv.ParseInt(portproto[0], 10, 32) + if err != nil { + log.WithFields(logrus.Fields{ + logrus.ErrorKey: err, + "lineNum": i, + "line": line, + }).Debug("wrong service port number. Ignoring entry") + continue + } + db.byPort[int(port)] = svcName + if protoNum, ok := db.protoNames[protoName]; ok { + db.byProtoNum[numKey{port: int(port), protocolNumber: protoNum}] = svcName + } + db.byProtoName[nameKey{port: int(port), protocolName: protoName}] = svcName + for _, alias := range protoAliases[protoName] { + db.byProtoName[nameKey{port: int(port), protocolName: alias}] = svcName + } + } + return &db, nil +} + +// ByPortAndProtocolName returns the service name given a port and a protocol name (or +// its alias). If the protocol does not exist, returns the name of any service matching +// the port number.
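+// For example, with a standard /etc/services content, ByPortAndProtocolName(80, "tcp")
+// would return "http"; an unrecognized protocol name falls back to the port-only lookup.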
+func (db *ServiceNames) ByPortAndProtocolName(port int, nameOrAlias string) string { + if _, ok := db.protoNames[nameOrAlias]; ok { + return db.byProtoName[nameKey{port: port, protocolName: nameOrAlias}] + } + return db.byPort[port] +} + +// ByPortAndProtocolNumber returns the service name given a port and a protocol number. +// If the protocol does not exist, returns the name of any service matching +// the port number. +func (db *ServiceNames) ByPortAndProtocolNumber(port, protoNum int) string { + if _, ok := db.protoNums[protoNum]; ok { + return db.byProtoNum[numKey{port: port, protocolNumber: protoNum}] + } + return db.byPort[port] +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform.go new file mode 100644 index 000000000..5b51a41ae --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform.go @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transform + +import ( + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/sirupsen/logrus" +) + +type Transformer interface { + Transform(in config.GenericMap) (config.GenericMap, bool) +} + +type transformNone struct { +} + +// Transform transforms a flow before being stored +func (t *transformNone) Transform(f config.GenericMap) (config.GenericMap, bool) { + return f, true +} + +// NewTransformNone create a new transform +func NewTransformNone() (Transformer, error) { + logrus.Debugf("entering NewTransformNone") + return &transformNone{}, nil +} + +type Definition struct { + Type string + Generic api.TransformGeneric + Network api.TransformNetwork +} + +type Definitions []Definition diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go new file mode 100644 index 000000000..0d8dd7189 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_filter.go @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transform + +import ( + "fmt" + "math/rand" + "regexp" + "strings" + "time" + + "github.com/Knetic/govaluate" + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "github.com/netobserv/flowlogs-pipeline/pkg/utils/filters" + "github.com/sirupsen/logrus" +) + +var ( + tlog = logrus.WithField("component", "transform.Filter") + rndgen = rand.New(rand.NewSource(time.Now().UnixNano())) +) + +type Filter struct { + Rules []api.TransformFilterRule + KeepRules []predicatesRule +} + +type predicatesRule struct { + predicates []filters.Predicate + sampling uint16 +} + +// Transform transforms a flow; if false is returned as a second argument, the entry is dropped +func (f *Filter) Transform(entry config.GenericMap) (config.GenericMap, bool) { + tlog.Tracef("f = %v", f) + outputEntry := entry.Copy() + labels := make(map[string]string) + if len(f.KeepRules) > 0 { + keep := false + for _, r := range f.KeepRules { + if applyPredicates(outputEntry, r) { + keep = true + break + } + } + if !keep { + return nil, false + } + } + for i := range f.Rules { + tlog.Tracef("rule = %v", f.Rules[i]) + if cont := applyRule(outputEntry, labels, &f.Rules[i]); !cont { + return nil, false + } + } + // process accumulated labels into comma separated string + if len(labels) > 0 { + var sb strings.Builder + for key, value := range labels { + sb.WriteString(key) + sb.WriteString("=") + sb.WriteString(value) + sb.WriteString(",") + } + // remove trailing comma + labelsString := sb.String() + labelsString = strings.TrimRight(labelsString, ",") + outputEntry["labels"] = labelsString + } + return outputEntry, true +} + +// Apply a rule. Returns false if it must stop processing rules (e.g. if entry must be removed) +// nolint:cyclop +func applyRule(entry config.GenericMap, labels map[string]string, rule *api.TransformFilterRule) bool { + switch rule.Type { + case api.RemoveField: + delete(entry, rule.RemoveField.Input) + case api.RemoveEntryIfExists: + if _, ok := entry[rule.RemoveEntry.Input]; ok { + return false + } + case api.RemoveEntryIfDoesntExist: + if _, ok := entry[rule.RemoveEntry.Input]; !ok { + return false + } + case api.RemoveEntryIfEqual: + if val, ok := entry[rule.RemoveEntry.Input]; ok { + if val == rule.RemoveEntry.Value { + return false + } + } + case api.RemoveEntryIfNotEqual: + if val, ok := entry[rule.RemoveEntry.Input]; ok { + if val != rule.RemoveEntry.Value { + return false + } + } + case api.AddField: + entry[rule.AddField.Input] = rule.AddField.Value + case api.AddFieldIfDoesntExist: + if _, ok := entry[rule.AddFieldIfDoesntExist.Input]; !ok { + entry[rule.AddFieldIfDoesntExist.Input] = rule.AddFieldIfDoesntExist.Value + } + case api.AddRegExIf: + matched, err := regexp.MatchString(rule.AddRegExIf.Parameters, utils.ConvertToString(entry[rule.AddRegExIf.Input])) + if err != nil { + return true + } + if matched { + entry[rule.AddRegExIf.Output] = entry[rule.AddRegExIf.Input] + entry[rule.AddRegExIf.Output+"_Matched"] = true + } + case api.AddFieldIf: + expressionString := fmt.Sprintf("val %s", rule.AddFieldIf.Parameters) + expression, err := govaluate.NewEvaluableExpression(expressionString) + if err != nil { + log.Warningf("Can't evaluate AddIf rule: %+v expression: %v. 
err %v", rule, expressionString, err) + return true + } + result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": entry[rule.AddFieldIf.Input]}) + if evaluateErr == nil && result.(bool) { + if rule.AddFieldIf.Assignee != "" { + entry[rule.AddFieldIf.Output] = rule.AddFieldIf.Assignee + } else { + entry[rule.AddFieldIf.Output] = entry[rule.AddFieldIf.Input] + } + entry[rule.AddFieldIf.Output+"_Evaluate"] = true + } + case api.AddLabel: + labels[rule.AddLabel.Input] = utils.ConvertToString(rule.AddLabel.Value) + case api.AddLabelIf: + // TODO perhaps add a cache of previously evaluated expressions + expressionString := fmt.Sprintf("val %s", rule.AddLabelIf.Parameters) + expression, err := govaluate.NewEvaluableExpression(expressionString) + if err != nil { + log.Warningf("Can't evaluate AddLabelIf rule: %+v expression: %v. err %v", rule, expressionString, err) + return true + } + result, evaluateErr := expression.Evaluate(map[string]interface{}{"val": entry[rule.AddLabelIf.Input]}) + if evaluateErr == nil && result.(bool) { + labels[rule.AddLabelIf.Output] = rule.AddLabelIf.Assignee + } + case api.RemoveEntryAllSatisfied: + return !isRemoveEntrySatisfied(entry, rule.RemoveEntryAllSatisfied) + case api.ConditionalSampling: + return sample(entry, rule.ConditionalSampling) + case api.KeepEntryAllSatisfied: + // This should be processed only in "applyPredicates". Failure to do so is a bug. + tlog.Panicf("unexpected KeepEntryAllSatisfied: %v", rule) + default: + tlog.Panicf("unknown type %s for transform.Filter rule: %v", rule.Type, rule) + } + return true +} + +func isRemoveEntrySatisfied(entry config.GenericMap, rules []*api.RemoveEntryRule) bool { + for _, r := range rules { + // applyRule returns false if the entry must be removed + if dontRemove := applyRule(entry, nil, &api.TransformFilterRule{Type: api.TransformFilterEnum(r.Type), RemoveEntry: r.RemoveEntry}); dontRemove { + return false + } + } + return true +} + +func applyPredicates(entry config.GenericMap, rule predicatesRule) bool { + if !rollSampling(rule.sampling) { + return false + } + for _, p := range rule.predicates { + if !p(entry) { + return false + } + } + return true +} + +func sample(entry config.GenericMap, rules []*api.SamplingCondition) bool { + for _, r := range rules { + if isRemoveEntrySatisfied(entry, r.Rules) { + return rollSampling(r.Value) + } + } + return true +} + +func rollSampling(value uint16) bool { + return value == 0 || (rndgen.Intn(int(value)) == 0) +} + +// NewTransformFilter create a new filter transform +func NewTransformFilter(params config.StageParam) (Transformer, error) { + tlog.Debugf("entering NewTransformFilter") + keepRules := []predicatesRule{} + rules := []api.TransformFilterRule{} + if params.Transform != nil && params.Transform.Filter != nil { + params.Transform.Filter.Preprocess() + for i := range params.Transform.Filter.Rules { + baseRules := ¶ms.Transform.Filter.Rules[i] + if baseRules.Type == api.KeepEntryAllSatisfied { + pr := predicatesRule{sampling: baseRules.KeepEntrySampling} + for _, keepRule := range baseRules.KeepEntryAllSatisfied { + pred, err := filters.FromKeepEntry(keepRule) + if err != nil { + return nil, err + } + pr.predicates = append(pr.predicates, pred) + } + keepRules = append(keepRules, pr) + } else { + rules = append(rules, *baseRules) + } + } + } + transformFilter := &Filter{ + Rules: rules, + KeepRules: keepRules, + } + return transformFilter, nil +} diff --git 
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
new file mode 100644
index 000000000..f30ca840d
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_generic.go
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transform
+
+import (
+	"github.com/netobserv/flowlogs-pipeline/pkg/api"
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/sirupsen/logrus"
+)
+
+var glog = logrus.WithField("component", "transform.Generic")
+
+type Generic struct {
+	policy api.TransformGenericOperationEnum
+	rules  []api.GenericTransformRule
+}
+
+// Transform transforms a flow to a new set of keys
+func (g *Generic) Transform(entry config.GenericMap) (config.GenericMap, bool) {
+	var outputEntry config.GenericMap
+	ok := true
+	glog.Tracef("Transform input = %v", entry)
+	if g.policy != "replace_keys" {
+		outputEntry = entry.Copy()
+	} else {
+		outputEntry = config.GenericMap{}
+	}
+	for _, transformRule := range g.rules {
+		if transformRule.Multiplier != 0 {
+			ok = g.performMultiplier(entry, transformRule, outputEntry)
+		} else {
+			outputEntry[transformRule.Output] = entry[transformRule.Input]
+		}
+	}
+	glog.Tracef("Transform output = %v", outputEntry)
+	return outputEntry, ok
+}
+
+func (g *Generic) performMultiplier(entry config.GenericMap, transformRule api.GenericTransformRule, outputEntry config.GenericMap) bool {
+	ok := true
+	switch val := entry[transformRule.Input].(type) {
+	case int:
+		outputEntry[transformRule.Output] = transformRule.Multiplier * val
+	case uint:
+		outputEntry[transformRule.Output] = uint(transformRule.Multiplier) * val
+	case int8:
+		outputEntry[transformRule.Output] = int8(transformRule.Multiplier) * val
+	case uint8:
+		outputEntry[transformRule.Output] = uint8(transformRule.Multiplier) * val
+	case int16:
+		outputEntry[transformRule.Output] = int16(transformRule.Multiplier) * val
+	case uint16:
+		outputEntry[transformRule.Output] = uint16(transformRule.Multiplier) * val
+	case int32:
+		outputEntry[transformRule.Output] = int32(transformRule.Multiplier) * val
+	case uint32:
+		outputEntry[transformRule.Output] = uint32(transformRule.Multiplier) * val
+	case int64:
+		outputEntry[transformRule.Output] = int64(transformRule.Multiplier) * val
+	case uint64:
+		outputEntry[transformRule.Output] = uint64(transformRule.Multiplier) * val
+	case float32:
+		outputEntry[transformRule.Output] = float32(transformRule.Multiplier) * val
+	case float64:
+		outputEntry[transformRule.Output] = float64(transformRule.Multiplier) * val
+	default:
+		ok = false
+		glog.Errorf("%s not of numerical type; cannot perform multiplication", transformRule.Output)
+	}
+	return ok
+}
+
+// NewTransformGeneric creates a new transform
+func NewTransformGeneric(params config.StageParam) (Transformer, error) {
+	glog.Debugf("entering 
NewTransformGeneric") + genConfig := api.TransformGeneric{} + if params.Transform != nil && params.Transform.Generic != nil { + genConfig = *params.Transform.Generic + } + glog.Debugf("params.Transform.Generic = %v", genConfig) + rules := genConfig.Rules + policy := genConfig.Policy + switch policy { + case api.ReplaceKeys, api.PreserveOriginalKeys, "": + // valid; nothing to do + glog.Infof("NewTransformGeneric, policy = %s", policy) + default: + glog.Panicf("unknown policy %s for transform.generic", policy) + } + transformGeneric := &Generic{ + policy: policy, + rules: rules, + } + glog.Debugf("transformGeneric = %v", transformGeneric) + return transformGeneric, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go new file mode 100644 index 000000000..09934ae7e --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network.go @@ -0,0 +1,267 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transform + +import ( + "fmt" + "net" + "os" + "strconv" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb" + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + util "github.com/netobserv/flowlogs-pipeline/pkg/utils" + "github.com/sirupsen/logrus" +) + +var log = logrus.WithField("component", "transform.Network") + +type Network struct { + api.TransformNetwork + svcNames *netdb.ServiceNames + snLabels []subnetLabel + ipLabelCache *utils.TimedCache +} + +type subnetLabel struct { + cidrs []*net.IPNet + name string +} + +//nolint:cyclop +func (n *Network) Transform(inputEntry config.GenericMap) (config.GenericMap, bool) { + // copy input entry before transform to avoid alteration on parallel stages + outputEntry := inputEntry.Copy() + + for _, rule := range n.Rules { + switch rule.Type { + case api.NetworkAddSubnet: + if rule.AddSubnet == nil { + log.Errorf("Missing add subnet configuration") + continue + } + if v, ok := outputEntry.LookupString(rule.AddSubnet.Input); ok { + _, ipv4Net, err := net.ParseCIDR(v + rule.AddSubnet.SubnetMask) + if err != nil { + log.Warningf("Can't find subnet for IP %v and prefix length %s - err %v", v, rule.AddSubnet.SubnetMask, err) + continue + } + outputEntry[rule.AddSubnet.Output] = ipv4Net.String() + } + case api.NetworkAddLocation: + if rule.AddLocation == nil { + log.Errorf("Missing add location configuration") + continue + } + var locationInfo *location.Info + locationInfo, err := location.GetLocation(util.ConvertToString(outputEntry[rule.AddLocation.Input])) 
+			if err != nil {
+				log.Warningf("Can't find location for IP %v err %v", outputEntry[rule.AddLocation.Input], err)
+				continue
+			}
+			outputEntry[rule.AddLocation.Output+"_CountryName"] = locationInfo.CountryName
+			outputEntry[rule.AddLocation.Output+"_CountryLongName"] = locationInfo.CountryLongName
+			outputEntry[rule.AddLocation.Output+"_RegionName"] = locationInfo.RegionName
+			outputEntry[rule.AddLocation.Output+"_CityName"] = locationInfo.CityName
+			outputEntry[rule.AddLocation.Output+"_Latitude"] = locationInfo.Latitude
+			outputEntry[rule.AddLocation.Output+"_Longitude"] = locationInfo.Longitude
+		case api.NetworkAddService:
+			if rule.AddService == nil {
+				log.Errorf("Missing add service configuration")
+				continue
+			}
+			// Should be optimized (unused in netobserv)
+			protocol := fmt.Sprintf("%v", outputEntry[rule.AddService.Protocol])
+			portNumber, err := strconv.Atoi(fmt.Sprintf("%v", outputEntry[rule.AddService.Input]))
+			if err != nil {
+				log.Errorf("Can't convert port to int: Port %v - err %v", outputEntry[rule.AddService.Input], err)
+				continue
+			}
+			var serviceName string
+			protocolAsNumber, err := strconv.Atoi(protocol)
+			if err == nil {
+				// protocol has been submitted as a number
+				serviceName = n.svcNames.ByPortAndProtocolNumber(portNumber, protocolAsNumber)
+			} else {
+				// protocol has been submitted as a string
+				serviceName = n.svcNames.ByPortAndProtocolName(portNumber, protocol)
+			}
+			if serviceName == "" {
+				if err != nil {
+					log.Debugf("Can't find service name for Port %v and protocol %v - err %v", outputEntry[rule.AddService.Input], protocol, err)
+					continue
+				}
+			}
+			outputEntry[rule.AddService.Output] = serviceName
+		case api.NetworkAddKubernetes:
+			kubernetes.Enrich(outputEntry, rule.Kubernetes)
+		case api.NetworkAddKubernetesInfra:
+			if rule.KubernetesInfra == nil {
+				logrus.Error("transformation rule: Missing configuration")
+				continue
+			}
+			kubernetes.EnrichLayer(outputEntry, rule.KubernetesInfra)
+		case api.NetworkReinterpretDirection:
+			reinterpretDirection(outputEntry, &n.DirectionInfo)
+		case api.NetworkAddSubnetLabel:
+			if rule.AddSubnetLabel == nil {
+				logrus.Error("AddSubnetLabel rule: Missing configuration")
+				continue
+			}
+			if anyIP, ok := outputEntry[rule.AddSubnetLabel.Input]; ok {
+				if strIP, ok := anyIP.(string); ok {
+					lbl, ok := n.ipLabelCache.GetCacheEntry(strIP)
+					if !ok {
+						lbl = n.applySubnetLabel(strIP)
+						n.ipLabelCache.UpdateCacheEntry(strIP, lbl)
+					}
+					if lbl != "" {
+						outputEntry[rule.AddSubnetLabel.Output] = lbl
+					}
+				}
+			}
+		case api.NetworkDecodeTCPFlags:
+			if anyFlags, ok := outputEntry[rule.DecodeTCPFlags.Input]; ok && anyFlags != nil {
+				if flags, ok := anyFlags.(uint16); ok {
+					flags := util.DecodeTCPFlags(flags)
+					outputEntry[rule.DecodeTCPFlags.Output] = flags
+				}
+			}
+
+		default:
+			log.Panicf("unknown type %s for transform.Network rule: %v", rule.Type, rule)
+		}
+	}
+
+	return outputEntry, true
+}
+
+func (n *Network) applySubnetLabel(strIP string) string {
+	ip := net.ParseIP(strIP)
+	if ip != nil {
+		for _, subnetCat := range n.snLabels {
+			for _, cidr := range subnetCat.cidrs {
+				if cidr.Contains(ip) {
+					return subnetCat.name
+				}
+			}
+		}
+	}
+	return ""
+}
+
+// NewTransformNetwork creates a new transform
+//
+//nolint:cyclop
+func NewTransformNetwork(params config.StageParam, opMetrics *operational.Metrics) (Transformer, error) {
+	var needToInitLocationDB = false
+	var needToInitKubeData = false
+	var needToInitNetworkServices = false
+
+	jsonNetworkTransform := api.TransformNetwork{}
+	if params.Transform != nil && 
params.Transform.Network != nil { + jsonNetworkTransform = *params.Transform.Network + } + for _, rule := range jsonNetworkTransform.Rules { + switch rule.Type { + case api.NetworkAddLocation: + needToInitLocationDB = true + case api.NetworkAddKubernetes: + needToInitKubeData = true + case api.NetworkAddKubernetesInfra: + needToInitKubeData = true + case api.NetworkAddService: + needToInitNetworkServices = true + case api.NetworkReinterpretDirection: + if err := validateReinterpretDirectionConfig(&jsonNetworkTransform.DirectionInfo); err != nil { + return nil, err + } + case api.NetworkAddSubnetLabel: + if len(jsonNetworkTransform.SubnetLabels) == 0 { + return nil, fmt.Errorf("a rule '%s' was found, but there are no subnet labels configured", api.NetworkAddSubnetLabel) + } + case api.NetworkAddSubnet, api.NetworkDecodeTCPFlags: + // nothing + } + } + + if needToInitLocationDB { + err := location.InitLocationDB() + if err != nil { + log.Debugf("location.InitLocationDB error: %v", err) + } + } + + if needToInitKubeData { + err := kubernetes.InitFromConfig(jsonNetworkTransform.KubeConfig, opMetrics) + if err != nil { + return nil, err + } + } + + var servicesDB *netdb.ServiceNames + if needToInitNetworkServices { + pFilename, sFilename := jsonNetworkTransform.GetServiceFiles() + var err error + protos, err := os.Open(pFilename) + if err != nil { + return nil, fmt.Errorf("opening protocols file %q: %w", pFilename, err) + } + defer protos.Close() + services, err := os.Open(sFilename) + if err != nil { + return nil, fmt.Errorf("opening services file %q: %w", sFilename, err) + } + defer services.Close() + servicesDB, err = netdb.LoadServicesDB(protos, services) + if err != nil { + return nil, err + } + } + + var subnetCats []subnetLabel + for _, category := range jsonNetworkTransform.SubnetLabels { + var cidrs []*net.IPNet + for _, cidr := range category.CIDRs { + _, parsed, err := net.ParseCIDR(cidr) + if err != nil { + return nil, fmt.Errorf("category %s: fail to parse CIDR, %w", category.Name, err) + } + cidrs = append(cidrs, parsed) + } + if len(cidrs) > 0 { + subnetCats = append(subnetCats, subnetLabel{name: category.Name, cidrs: cidrs}) + } + } + + return &Network{ + TransformNetwork: api.TransformNetwork{ + Rules: jsonNetworkTransform.Rules, + DirectionInfo: jsonNetworkTransform.DirectionInfo, + }, + svcNames: servicesDB, + snLabels: subnetCats, + ipLabelCache: utils.NewQuietExpiringTimedCache(2 * time.Minute), + }, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go new file mode 100644 index 000000000..5f466088f --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/transform_network_direction.go @@ -0,0 +1,64 @@ +package transform + +import ( + "fmt" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" +) + +const ( + ingress = 0 + egress = 1 + inner = 2 +) + +func validateReinterpretDirectionConfig(info *api.NetworkTransformDirectionInfo) error { + if info.FlowDirectionField == "" { + return fmt.Errorf("invalid config for transform.Network rule %s: missing FlowDirectionField", api.NetworkReinterpretDirection) + } + if info.ReporterIPField == "" { + return fmt.Errorf("invalid config for transform.Network rule %s: missing ReporterIPField", api.NetworkReinterpretDirection) + } + if info.SrcHostField == "" { + return fmt.Errorf("invalid 
config for transform.Network rule %s: missing SrcHostField", api.NetworkReinterpretDirection) + } + if info.DstHostField == "" { + return fmt.Errorf("invalid config for transform.Network rule %s: missing DstHostField", api.NetworkReinterpretDirection) + } + return nil +} + +func reinterpretDirection(output config.GenericMap, info *api.NetworkTransformDirectionInfo) { + if fd, ok := output[info.FlowDirectionField]; ok && len(info.IfDirectionField) > 0 { + output[info.IfDirectionField] = fd + } + var srcNode, dstNode, reporter string + if gen, ok := output[info.ReporterIPField]; ok { + if str, ok := gen.(string); ok { + reporter = str + } + } + if len(reporter) == 0 { + return + } + if gen, ok := output[info.SrcHostField]; ok { + if str, ok := gen.(string); ok { + srcNode = str + } + } + if gen, ok := output[info.DstHostField]; ok { + if str, ok := gen.(string); ok { + dstNode = str + } + } + if srcNode != dstNode { + if srcNode == reporter { + output[info.FlowDirectionField] = egress + } else if dstNode == reporter { + output[info.FlowDirectionField] = ingress + } + } else if srcNode != "" { + output[info.FlowDirectionField] = inner + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/batcher.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/batcher.go new file mode 100644 index 000000000..1b01816e0 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/batcher.go @@ -0,0 +1,44 @@ +package utils + +import ( + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/sirupsen/logrus" +) + +func Batcher( + closeCh <-chan struct{}, + maxBatchLength int, + batchTimeout time.Duration, + inCh <-chan config.GenericMap, + action func([]config.GenericMap), +) { + log := logrus.WithField("component", "utils.Batcher") + invokeTicker := time.NewTicker(batchTimeout) + var entries []config.GenericMap + log.Debug("starting") + for { + select { + case <-closeCh: + log.Debug("exiting due to closeCh") + return + case <-invokeTicker.C: + if len(entries) == 0 { + continue + } + log.Debugf("ticker signal: invoking action with %d entries", len(entries)) + es := entries + entries = nil + action(es) + case gm := <-inCh: + entries = append(entries, gm) + if len(entries) >= maxBatchLength { + log.Debugf("batch complete: invoking action with %d entries", len(entries)) + es := entries + entries = nil + action(es) + } + } + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/connections.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/connections.go new file mode 100644 index 000000000..d2da7fa6e --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/connections.go @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package utils + +import ( + "fmt" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" +) + +const subnetBatchSize = 254 + +// GenerateConnectionFlowEntries generates data with one entry for each of nConnections +// Create the entries in a predictable manner so that the first K entries in each call +// to the function reproduce the same connection. +// avoid using addresses 0 and 255 since these sometimes have special meanings. +func GenerateConnectionFlowEntries(nConnections int) []config.GenericMap { + entries := make([]config.GenericMap, 0) + n1 := subnetBatchSize + n2 := subnetBatchSize + n3 := subnetBatchSize + n4 := subnetBatchSize + count := 0 + for l := 1; l <= n4; l++ { + for k := 1; k <= n3; k++ { + for j := 1; j <= n2; j++ { + for i := 1; i <= n1; i++ { + srcAddr := fmt.Sprintf("%d.%d.%d.%d", l, k, j, i) + count++ + entry := config.GenericMap{ + "SrcAddr": srcAddr, + "SrcPort": 1234, + "DstAddr": "11.1.1.1", + "DstPort": 8000, + "Bytes": 100, + "Packets": 1, + "Proto": 6, + "SrcAS": 0, + "DstAS": 0, + "TimeReceived": 0, + } + entries = append(entries, entry) + if count >= nConnections { + return entries + } + } + } + } + } + return entries +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/exit.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/exit.go new file mode 100644 index 000000000..7bbfb7cc2 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/exit.go @@ -0,0 +1,60 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package utils + +import ( + "os" + "os/signal" + "syscall" + + "github.com/sirupsen/logrus" +) + +var ( + exitChannel chan struct{} +) + +func ExitChannel() <-chan struct{} { + return exitChannel +} + +// InitExitChannel and CloseExitChannel are needed for some tests +func InitExitChannel() { + exitChannel = make(chan struct{}) +} + +func CloseExitChannel() { + close(exitChannel) +} + +func SetupElegantExit() { + logrus.Debugf("entering SetupElegantExit") + // handle elegant exit; create support for channels of go routines that want to exit cleanly + exitChannel = make(chan struct{}) + exitSigChan := make(chan os.Signal, 1) + logrus.Debugf("registered exit signal channel") + signal.Notify(exitSigChan, syscall.SIGINT, syscall.SIGTERM) + go func() { + // wait for exit signal; then stop all the other go functions + sig := <-exitSigChan + logrus.Debugf("received exit signal = %v", sig) + close(exitChannel) + logrus.Debugf("exiting SetupElegantExit go function") + }() + logrus.Debugf("exiting SetupElegantExit") +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/multiorderedmap.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/multiorderedmap.go new file mode 100644 index 000000000..17bebe36e --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/multiorderedmap.go @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2022 IBM, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file defines a multi-ordered map data structure. It supports insertion, deletion and retrieval in O(1) on +// average like a regular map. In addition, it allows iterating over the records by multiple orders. +// New records are pushed back to each of the defined orders. Existing records can be moved to the back of a specific +// order by MoveToBack() +// Note: MultiOrderedMap isn't responsible for keeping the records sorted. The user should take care of that. + +package utils + +import ( + "container/list" + "fmt" +) + +type Key uint64 +type Record interface{} +type OrderID string +type processRecordFunc func(Record) (delete, stop bool) + +type recordWrapper struct { + record Record + key Key + orderID2element map[OrderID]*list.Element +} + +type MultiOrderedMap struct { + m map[Key]*recordWrapper + orders map[OrderID]*list.List +} + +// NewMultiOrderedMap returns an initialized MultiOrderedMap. +func NewMultiOrderedMap(orderIDs ...OrderID) *MultiOrderedMap { + mom := &MultiOrderedMap{ + m: map[Key]*recordWrapper{}, + orders: map[OrderID]*list.List{}, + } + for _, id := range orderIDs { + mom.orders[id] = list.New() + } + return mom +} + +// Len returns the number of records of the multi-ordered map mom. +func (mom MultiOrderedMap) Len() int { + return len(mom.m) +} + +// AddRecord adds a record to the multi-ordered map. +func (mom MultiOrderedMap) AddRecord(key Key, record Record) error { + if _, found := mom.GetRecord(key); found { + return fmt.Errorf("record with key %x already exists", key) + } + rw := &recordWrapper{key: key, record: record, orderID2element: map[OrderID]*list.Element{}} + mom.m[key] = rw + for orderID, orderList := range mom.orders { + elem := orderList.PushBack(rw) + rw.orderID2element[orderID] = elem + } + return nil +} + +// GetRecord returns the record of key `key` and true if the key exists. Otherwise, nil and false is returned. +func (mom MultiOrderedMap) GetRecord(key Key) (Record, bool) { + rw, found := mom.m[key] + if !found { + return nil, false + } + return rw.record, true +} + +// RemoveRecord removes the record of key `key`. If the key doesn't exist, RemoveRecord is a no-op. +func (mom MultiOrderedMap) RemoveRecord(key Key) { + rw, found := mom.m[key] + if !found { + return + } + for orderID, elem := range rw.orderID2element { + mom.orders[orderID].Remove(elem) + } + delete(mom.m, key) +} + +// MoveToBack moves the record of key `key` to the back of orderID. If the key or the orderID doesn't exist, an error +// is returned. 
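+//
+// A minimal usage sketch (illustrative only; "recent" is a hypothetical
+// order ID, and error handling is elided):
+//
+//	mom := NewMultiOrderedMap("recent")
+//	_ = mom.AddRecord(Key(1), "some record")
+//	_ = mom.MoveToBack(Key(1), "recent")
+//	mom.IterateFrontToBack("recent", func(r Record) (delete, stop bool) {
+//		return false, false // keep the record and continue iterating
+//	})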
+func (mom MultiOrderedMap) MoveToBack(key Key, orderID OrderID) error { + rw, found := mom.m[key] + if !found { + return fmt.Errorf("can't MoveToBack non-existing key %x (order id %q)", key, orderID) + } + elem, found := rw.orderID2element[orderID] + if !found { + return fmt.Errorf("can't MoveToBack non-existing order id %q (key %x)", orderID, key) + } + mom.orders[orderID].MoveToBack(elem) + return nil +} + +// MoveToFront moves the record of key `key` to the front of orderID. If the key or the orderID doesn't exist, an error +// is returned. +func (mom MultiOrderedMap) MoveToFront(key Key, orderID OrderID) error { + rw, found := mom.m[key] + if !found { + return fmt.Errorf("can't MoveToFront non-existing key %x (order id %q)", key, orderID) + } + elem, found := rw.orderID2element[orderID] + if !found { + return fmt.Errorf("can't MoveToFront non-existing order id %q (key %x)", orderID, key) + } + mom.orders[orderID].MoveToFront(elem) + return nil +} + +// IterateFrontToBack iterates over the records by orderID. It applies function f() on each record. +// f() returns two booleans `delete` and `stop` that control whether to remove the record from the multi-ordered map +// and whether to stop the iteration respectively. +func (mom MultiOrderedMap) IterateFrontToBack(orderID OrderID, f processRecordFunc) { + if _, found := mom.orders[orderID]; !found { + panic(fmt.Sprintf("Unknown order id %q", orderID)) + } + // How to remove element from list while iterating the same list in golang + // https://stackoverflow.com/a/27662823/2749989 + var next *list.Element + for e := mom.orders[orderID].Front(); e != nil; e = next { + rw := e.Value.(*recordWrapper) + next = e.Next() + shouldDelete, shouldStop := f(rw.record) + if shouldDelete { + mom.RemoveRecord(rw.key) + } + if shouldStop { + break + } + } +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/params_parse.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/params_parse.go new file mode 100644 index 000000000..0406db8ce --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/params_parse.go @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package utils diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go new file mode 100644 index 000000000..06d0c75d2 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/sasl.go @@ -0,0 +1,40 @@ +package utils + +import ( + "fmt" + "os" + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/segmentio/kafka-go/sasl" + "github.com/segmentio/kafka-go/sasl/plain" + "github.com/segmentio/kafka-go/sasl/scram" +) + +func SetupSASLMechanism(cfg *api.SASLConfig) (sasl.Mechanism, error) { + // Read client ID + id, err := os.ReadFile(cfg.ClientIDPath) + if err != nil { + return nil, err + } + strID := strings.TrimSpace(string(id)) + // Read password + pwd, err := os.ReadFile(cfg.ClientSecretPath) + if err != nil { + return nil, err + } + strPwd := strings.TrimSpace(string(pwd)) + var mechanism sasl.Mechanism + switch cfg.Type { + case api.SASLPlain: + mechanism = plain.Mechanism{Username: strID, Password: strPwd} + case api.SASLScramSHA512: + mechanism, err = scram.Mechanism(scram.SHA512, strID, strPwd) + default: + return nil, fmt.Errorf("unknown SASL type: %s", cfg.Type) + } + if err != nil { + return nil, err + } + return mechanism, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go new file mode 100644 index 000000000..bedc38c4f --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils/timed_cache.go @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package utils + +import ( + "container/list" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +var log = logrus.WithField("component", "utils.TimedCache") + +// Functions to manage an LRU cache with an expiry +// When an item expires, allow a callback to allow the specific implementation to perform its particular cleanup +// Size of cache may be limited by setting maxEntries; if cache is full, do not enter new items. + +type CacheCallback func(entry interface{}) + +type cacheEntry struct { + key string + lastUpdatedTime time.Time + e *list.Element + SourceEntry interface{} +} + +type TimedCacheMap map[string]*cacheEntry + +// If maxEntries is non-zero, this limits the number of entries in the cache to the number specified. +// If maxEntries is zero, the cache has no size limit. 
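+//
+// A short usage sketch (illustrative; passing a nil gauge simply disables the
+// cache-length metric):
+//
+//	tc := NewTimedCache(1000, nil)
+//	tc.UpdateCacheEntry("10.0.0.1", "label-a")
+//	if v, ok := tc.GetCacheEntry("10.0.0.1"); ok {
+//		_ = v.(string) // "label-a"
+//	}
+//	tc.CleanupExpiredEntries(2*time.Minute, nil)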
+type TimedCache struct {
+	mu             sync.RWMutex
+	cacheList      *list.List
+	cacheMap       TimedCacheMap
+	maxEntries     int
+	cacheLenMetric prometheus.Gauge
+}
+
+func (tc *TimedCache) GetCacheEntry(key string) (interface{}, bool) {
+	tc.mu.RLock()
+	defer tc.mu.RUnlock()
+	cEntry, ok := tc.cacheMap[key]
+	if ok {
+		return cEntry.SourceEntry, ok
+	}
+	return nil, ok
+}
+
+var uclog = log.WithField("method", "UpdateCacheEntry")
+
+// If the cache entry exists, update it and return it; if it does not exist, create it if there is room.
+// If the cache has reached its size limit, do not allocate a new entry.
+func (tc *TimedCache) UpdateCacheEntry(key string, entry interface{}) bool {
+	nowInSecs := time.Now()
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+	cEntry, ok := tc.cacheMap[key]
+	if ok {
+		// item already exists in cache; update the element and move to end of list
+		cEntry.lastUpdatedTime = nowInSecs
+		// move to end of list
+		tc.cacheList.MoveToBack(cEntry.e)
+	} else {
+		// create new entry for cache
+		if (tc.maxEntries > 0) && (tc.cacheList.Len() >= tc.maxEntries) {
+			return false
+		}
+		cEntry = &cacheEntry{
+			lastUpdatedTime: nowInSecs,
+			key:             key,
+			SourceEntry:     entry,
+		}
+		uclog.Tracef("adding entry: %#v", cEntry)
+		// place at end of list
+		cEntry.e = tc.cacheList.PushBack(cEntry)
+		tc.cacheMap[key] = cEntry
+		if tc.cacheLenMetric != nil {
+			tc.cacheLenMetric.Inc()
+		}
+	}
+	return true
+}
+
+func (tc *TimedCache) GetCacheLen() int {
+	tc.mu.RLock()
+	defer tc.mu.RUnlock()
+	return tc.cacheList.Len()
+}
+
+// Iterate applies f to every entry while holding the read lock. Callers that
+// want to update entries must not call UpdateCacheEntry() from inside f: that
+// method takes the write lock, and acquiring it while the read lock is held
+// would deadlock.
+// TODO: If the callback needs to update the cache, then we need a method to perform it without taking the lock again.
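+//
+// For example (a sketch; "refresh" is a hypothetical helper), collect the
+// keys during iteration and update them only after the read lock is released:
+//
+//	var keys []string
+//	tc.Iterate(func(k string, _ interface{}) { keys = append(keys, k) })
+//	for _, k := range keys {
+//		tc.UpdateCacheEntry(k, refresh(k))
+//	}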
+func (tc *TimedCache) Iterate(f func(key string, value interface{})) {
+	tc.mu.RLock()
+	defer tc.mu.RUnlock()
+	for k, v := range tc.cacheMap {
+		f(k, v.SourceEntry)
+	}
+}
+
+// CleanupExpiredEntries removes items from the cache that were last touched more than expiry ago
+func (tc *TimedCache) CleanupExpiredEntries(expiry time.Duration, callback CacheCallback) {
+	tc.mu.Lock()
+	defer tc.mu.Unlock()
+
+	clog := log.WithFields(logrus.Fields{
+		"mapLen":  len(tc.cacheMap),
+		"listLen": tc.cacheList.Len(),
+	})
+	clog.Debugf("cleaning up expired entries")
+
+	expireTime := time.Now().Add(-expiry)
+	deleted := 0
+	// go through the list until we reach recently used entries
+	for {
+		listEntry := tc.cacheList.Front()
+		if listEntry == nil {
+			return
+		}
+		pCacheInfo := listEntry.Value.(*cacheEntry)
+		if pCacheInfo.lastUpdatedTime.After(expireTime) {
+			// no more expired items
+			clog.Debugf("deleted %d expired entries", deleted)
+			return
+		}
+		deleted++
+		if callback != nil {
+			callback(pCacheInfo.SourceEntry)
+		}
+		delete(tc.cacheMap, pCacheInfo.key)
+		tc.cacheList.Remove(listEntry)
+		if tc.cacheLenMetric != nil {
+			tc.cacheLenMetric.Dec()
+		}
+	}
+}
+
+func NewTimedCache(maxEntries int, cacheLenMetric prometheus.Gauge) *TimedCache {
+	l := &TimedCache{
+		cacheList:      list.New(),
+		cacheMap:       make(TimedCacheMap),
+		maxEntries:     maxEntries,
+		cacheLenMetric: cacheLenMetric,
+	}
+	return l
+}
+
+func NewQuietExpiringTimedCache(expiry time.Duration) *TimedCache {
+	l := &TimedCache{
+		cacheList: list.New(),
+		cacheMap:  make(TimedCacheMap),
+	}
+
+	ticker := time.NewTicker(expiry)
+	go func() {
+		for {
+			select {
+			case <-ExitChannel():
+				return
+			case <-ticker.C:
+				l.CleanupExpiredEntries(expiry, func(_ interface{}) {})
+			}
+		}
+	}()
+
+	return l
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/client.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/client.go
new file mode 100644
index 000000000..9af18a435
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/client.go
@@ -0,0 +1,41 @@
+package grpc
+
+import (
+	"flag"
+	"log"
+
+	pb "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/utils"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+// ClientConnection wraps a gRPC+protobuf connection
+type ClientConnection struct {
+	client pb.CollectorClient
+	conn   *grpc.ClientConn
+}
+
+func ConnectClient(hostIP string, hostPort int) (*ClientConnection, error) {
+	flag.Parse()
+	// Set up a connection to the server.
+	socket := utils.GetSocket(hostIP, hostPort)
+	conn, err := grpc.NewClient(socket, grpc.WithTransportCredentials(insecure.NewCredentials()))
+
+	if err != nil {
+		log.Fatalf("did not connect: %v", err)
+	}
+
+	return &ClientConnection{
+		client: pb.NewCollectorClient(conn),
+		conn:   conn,
+	}, nil
+}
+
+func (cp *ClientConnection) Client() pb.CollectorClient {
+	return cp.client
+}
+
+func (cp *ClientConnection) Close() error {
+	return cp.conn.Close()
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap.pb.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap.pb.go
new file mode 100644
index 000000000..a3ba1db8a
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap.pb.go
@@ -0,0 +1,209 @@
+// Code generated by protoc-gen-go. 
DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.6 +// source: proto/genericmap.proto + +package genericmap + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The request message containing the GenericMap +type Flow struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GenericMap *anypb.Any `protobuf:"bytes,1,opt,name=genericMap,proto3" json:"genericMap,omitempty"` +} + +func (x *Flow) Reset() { + *x = Flow{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_genericmap_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Flow) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Flow) ProtoMessage() {} + +func (x *Flow) ProtoReflect() protoreflect.Message { + mi := &file_proto_genericmap_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Flow.ProtoReflect.Descriptor instead. +func (*Flow) Descriptor() ([]byte, []int) { + return file_proto_genericmap_proto_rawDescGZIP(), []int{0} +} + +func (x *Flow) GetGenericMap() *anypb.Any { + if x != nil { + return x.GenericMap + } + return nil +} + +// intentionally empty +type CollectorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CollectorReply) Reset() { + *x = CollectorReply{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_genericmap_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CollectorReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectorReply) ProtoMessage() {} + +func (x *CollectorReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_genericmap_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectorReply.ProtoReflect.Descriptor instead. 
+func (*CollectorReply) Descriptor() ([]byte, []int) { + return file_proto_genericmap_proto_rawDescGZIP(), []int{1} +} + +var File_proto_genericmap_proto protoreflect.FileDescriptor + +var file_proto_genericmap_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x6d, + 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x6d, 0x61, 0x70, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x3c, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x4d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4d, 0x61, 0x70, 0x22, 0x10, 0x0a, + 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, + 0x43, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, + 0x53, 0x65, 0x6e, 0x64, 0x12, 0x10, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x6d, 0x61, + 0x70, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x1a, 0x1a, 0x2e, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x6d, 0x61, 0x70, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0e, 0x5a, 0x0c, 0x2e, 0x2f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x6d, 0x61, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_genericmap_proto_rawDescOnce sync.Once + file_proto_genericmap_proto_rawDescData = file_proto_genericmap_proto_rawDesc +) + +func file_proto_genericmap_proto_rawDescGZIP() []byte { + file_proto_genericmap_proto_rawDescOnce.Do(func() { + file_proto_genericmap_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_genericmap_proto_rawDescData) + }) + return file_proto_genericmap_proto_rawDescData +} + +var file_proto_genericmap_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_genericmap_proto_goTypes = []interface{}{ + (*Flow)(nil), // 0: genericmap.Flow + (*CollectorReply)(nil), // 1: genericmap.CollectorReply + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var file_proto_genericmap_proto_depIdxs = []int32{ + 2, // 0: genericmap.Flow.genericMap:type_name -> google.protobuf.Any + 0, // 1: genericmap.Collector.Send:input_type -> genericmap.Flow + 1, // 2: genericmap.Collector.Send:output_type -> genericmap.CollectorReply + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_genericmap_proto_init() } +func file_proto_genericmap_proto_init() { + if File_proto_genericmap_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_genericmap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Flow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_genericmap_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CollectorReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_genericmap_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_genericmap_proto_goTypes, + DependencyIndexes: file_proto_genericmap_proto_depIdxs, + MessageInfos: file_proto_genericmap_proto_msgTypes, + }.Build() + File_proto_genericmap_proto = out.File + file_proto_genericmap_proto_rawDesc = nil + file_proto_genericmap_proto_goTypes = nil + file_proto_genericmap_proto_depIdxs = nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap_grpc.pb.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap_grpc.pb.go new file mode 100644 index 000000000..12a8d8aaf --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap/genericmap_grpc.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.6 +// source: proto/genericmap.proto + +package genericmap + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// CollectorClient is the client API for Collector service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CollectorClient interface { + Send(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*CollectorReply, error) +} + +type collectorClient struct { + cc grpc.ClientConnInterface +} + +func NewCollectorClient(cc grpc.ClientConnInterface) CollectorClient { + return &collectorClient{cc} +} + +func (c *collectorClient) Send(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*CollectorReply, error) { + out := new(CollectorReply) + err := c.cc.Invoke(ctx, "/genericmap.Collector/Send", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CollectorServer is the server API for Collector service. +// All implementations must embed UnimplementedCollectorServer +// for forward compatibility +type CollectorServer interface { + Send(context.Context, *Flow) (*CollectorReply, error) + mustEmbedUnimplementedCollectorServer() +} + +// UnimplementedCollectorServer must be embedded to have forward compatible implementations. +type UnimplementedCollectorServer struct { +} + +func (UnimplementedCollectorServer) Send(context.Context, *Flow) (*CollectorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Send not implemented") +} +func (UnimplementedCollectorServer) mustEmbedUnimplementedCollectorServer() {} + +// UnsafeCollectorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CollectorServer will +// result in compilation errors. 
+type UnsafeCollectorServer interface { + mustEmbedUnimplementedCollectorServer() +} + +func RegisterCollectorServer(s grpc.ServiceRegistrar, srv CollectorServer) { + s.RegisterService(&Collector_ServiceDesc, srv) +} + +func _Collector_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Flow) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectorServer).Send(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/genericmap.Collector/Send", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectorServer).Send(ctx, req.(*Flow)) + } + return interceptor(ctx, in, info, handler) +} + +// Collector_ServiceDesc is the grpc.ServiceDesc for Collector service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Collector_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "genericmap.Collector", + HandlerType: (*CollectorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Send", + Handler: _Collector_Send_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/genericmap.proto", +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/server.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/server.go new file mode 100644 index 000000000..0a085e924 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/server.go @@ -0,0 +1,77 @@ +package grpc + +import ( + "context" + "fmt" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + + "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap" +) + +// CollectorServer wraps a Flow Collector connection & session +type CollectorServer struct { + grpcServer *grpc.Server +} + +type collectorOptions struct { + grpcServerOptions []grpc.ServerOption +} + +// CollectorOption allows overriding the default configuration of the CollectorServer instance. +// Use them in the StartCollector function. +type CollectorOption func(options *collectorOptions) + +func WithGRPCServerOptions(options ...grpc.ServerOption) CollectorOption { + return func(copt *collectorOptions) { + copt.grpcServerOptions = options + } +} + +// StartCollector listens in background for gRPC+Protobuf flows in the given port, and forwards each +// set of *genericmap.Flow by the provided channel. +func StartCollector( + port int, recordForwarder chan<- *genericmap.Flow, options ...CollectorOption, +) (*CollectorServer, error) { + copts := collectorOptions{} + for _, opt := range options { + opt(&copts) + } + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + return nil, err + } + grpcServer := grpc.NewServer(copts.grpcServerOptions...) 
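+	// The generated Collector service is registered below, together with the
+	// gRPC reflection service; reflection is what lets generic tools such as
+	// grpcurl discover and invoke Collector.Send while debugging.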
+	genericmap.RegisterCollectorServer(grpcServer, &collectorAPI{
+		recordForwarder: recordForwarder,
+	})
+	reflection.Register(grpcServer)
+	go func() {
+		if err := grpcServer.Serve(lis); err != nil {
+			panic("error connecting to server: " + err.Error())
+		}
+	}()
+	return &CollectorServer{
+		grpcServer: grpcServer,
+	}, nil
+}
+
+func (c *CollectorServer) Close() error {
+	c.grpcServer.Stop()
+	return nil
+}
+
+type collectorAPI struct {
+	genericmap.UnimplementedCollectorServer
+	recordForwarder chan<- *genericmap.Flow
+}
+
+var okReply = &genericmap.CollectorReply{}
+
+func (c *collectorAPI) Send(_ context.Context, records *genericmap.Flow) (*genericmap.CollectorReply, error) {
+	c.recordForwarder <- records
+	return okReply, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/metrics.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/metrics.go
new file mode 100644
index 000000000..16e2511b5
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/metrics.go
@@ -0,0 +1,18 @@
+package write
+
+import (
+	"github.com/netobserv/flowlogs-pipeline/pkg/operational"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type metrics struct {
+	*operational.Metrics
+	recordsWritten prometheus.Counter
+}
+
+func newMetrics(opMetrics *operational.Metrics, stage string) *metrics {
+	return &metrics{
+		Metrics:        opMetrics,
+		recordsWritten: opMetrics.CreateRecordsWrittenCounter(stage),
+	}
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
new file mode 100644
index 000000000..2f6ed4775
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write.go
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2021 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+	"sync"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/sirupsen/logrus"
+)
+
+type Writer interface {
+	Write(in config.GenericMap)
+}
+
+type None struct {
+	// synchronized access to avoid race conditions
+	mt          sync.Mutex
+	prevRecords []config.GenericMap
+}
+
+// Write records the entry in memory instead of writing it anywhere
+func (t *None) Write(in config.GenericMap) {
+	logrus.Debugf("entering Write none, in = %v", in)
+	t.mt.Lock()
+	t.prevRecords = append(t.prevRecords, in)
+	t.mt.Unlock()
+}
+
+func (t *None) PrevRecords() []config.GenericMap {
+	t.mt.Lock()
+	defer t.mt.Unlock()
+	var copies []config.GenericMap
+	for _, rec := range t.prevRecords {
+		copies = append(copies, rec.Copy())
+	}
+	return copies
+}
+
+// NewWriteNone creates a new write
+func NewWriteNone() (Writer, error) {
+	return &None{}, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
new file mode 100644
index 000000000..362415e7b
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_fake.go
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2022 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package write
+
+import (
+	"sync"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/sirupsen/logrus"
+)
+
+type Fake struct {
+	// access is locked and copied to avoid race condition errors during tests
+	mt         sync.Mutex
+	allRecords []config.GenericMap
+}
+
+// Write stores in memory all records.
+func (w *Fake) Write(in config.GenericMap) {
+	logrus.Trace("entering writeFake Write")
+	w.mt.Lock()
+	w.allRecords = append(w.allRecords, in.Copy())
+	w.mt.Unlock()
+}
+
+func (w *Fake) AllRecords() []config.GenericMap {
+	w.mt.Lock()
+	defer w.mt.Unlock()
+	var copies []config.GenericMap
+	for _, r := range w.allRecords {
+		copies = append(copies, r.Copy())
+	}
+	return copies
+}
+
+// NewWriteFake creates a new write.
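+//
+// A test-side usage sketch (illustrative): records are copied both on write
+// and on read, so the returned slice can be inspected safely while the
+// pipeline is still running.
+//
+//	w, _ := NewWriteFake(config.StageParam{})
+//	w.Write(config.GenericMap{"SrcAddr": "10.0.0.1"})
+//	records := w.(*Fake).AllRecords()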
+func NewWriteFake(_ config.StageParam) (Writer, error) {
+	logrus.Debugf("entering NewWriteFake")
+	w := &Fake{}
+	return w, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_grpc.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_grpc.go
new file mode 100644
index 000000000..c89d5f361
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_grpc.go
@@ -0,0 +1,55 @@
+package write
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap"
+	"github.com/sirupsen/logrus"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+type writeGRPC struct {
+	hostIP     string
+	hostPort   int
+	clientConn *grpc.ClientConnection
+}
+
+// Write marshals a flow and sends it to the remote collector for storage
+func (t *writeGRPC) Write(v config.GenericMap) {
+	logrus.Tracef("entering writeGRPC Write %s", v)
+	value, _ := json.Marshal(v)
+	if _, err := t.clientConn.Client().Send(context.TODO(), &genericmap.Flow{
+		GenericMap: &anypb.Any{
+			Value: value,
+		},
+	}); err != nil {
+		logrus.Errorf("writeGRPC send error: %v", err)
+	}
+}
+
+// NewWriteGRPC creates a new write
+func NewWriteGRPC(params config.StageParam) (Writer, error) {
+	logrus.Debugf("entering NewWriteGRPC")
+
+	writeGRPC := &writeGRPC{}
+	if params.Write != nil && params.Write.GRPC != nil {
+		if err := params.Write.GRPC.Validate(); err != nil {
+			return nil, fmt.Errorf("the provided config is not valid: %w", err)
+		}
+		writeGRPC.hostIP = params.Write.GRPC.TargetHost
+		writeGRPC.hostPort = params.Write.GRPC.TargetPort
+	} else {
+		return nil, fmt.Errorf("write.grpc param is mandatory: %v", params.Write)
+	}
+	logrus.Debugf("NewWriteGRPC ConnectClient %s:%d...", writeGRPC.hostIP, writeGRPC.hostPort)
+	clientConn, err := grpc.ConnectClient(writeGRPC.hostIP, writeGRPC.hostPort)
+	if err != nil {
+		return nil, err
+	}
+	writeGRPC.clientConn = clientConn
+	return writeGRPC, nil
+}
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
new file mode 100644
index 000000000..49f8d4882
--- /dev/null
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_ipfix.go
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2024 IBM, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package write + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/sirupsen/logrus" + "github.com/vmware/go-ipfix/pkg/entities" + ipfixExporter "github.com/vmware/go-ipfix/pkg/exporter" + "github.com/vmware/go-ipfix/pkg/registry" +) + +type writeIpfix struct { + hostPort string + transport string + templateIDv4 uint16 + templateIDv6 uint16 + enrichEnterpriseID uint32 + exporter *ipfixExporter.ExportingProcess + entitiesV4 []entities.InfoElementWithValue + entitiesV6 []entities.InfoElementWithValue +} + +type FieldMap struct { + Key string + Getter func(entities.InfoElementWithValue) any + Setter func(entities.InfoElementWithValue, any) + Matcher func(entities.InfoElementWithValue, any) bool + Optional bool +} + +// IPv6Type value as defined in IEEE 802: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml +const IPv6Type uint16 = 0x86DD + +var ( + ilog = logrus.WithField("component", "write.Ipfix") + IANAFields = []string{ + "ethernetType", + "flowDirection", + "sourceMacAddress", + "destinationMacAddress", + "protocolIdentifier", + "sourceTransportPort", + "destinationTransportPort", + "octetDeltaCount", + "flowStartMilliseconds", + "flowEndMilliseconds", + "packetDeltaCount", + "interfaceName", + } + IPv4IANAFields = append([]string{ + "sourceIPv4Address", + "destinationIPv4Address", + }, IANAFields...) + IPv6IANAFields = append([]string{ + "sourceIPv6Address", + "destinationIPv6Address", + "nextHeaderIPv6", + }, IANAFields...) + KubeFields = []string{ + "sourcePodNamespace", + "sourcePodName", + "destinationPodNamespace", + "destinationPodName", + "sourceNodeName", + "destinationNodeName", + } + CustomNetworkFields = []string{ + "timeFlowRttNs", + "interfaces", + "directions", + } + + MapIPFIXKeys = map[string]FieldMap{ + "sourceIPv4Address": { + Key: "SrcAddr", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) }, + }, + "destinationIPv4Address": { + Key: "DstAddr", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) }, + }, + "sourceIPv6Address": { + Key: "SrcAddr", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) }, + }, + "destinationIPv6Address": { + Key: "DstAddr", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetIPAddressValue().String() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetIPAddressValue(net.ParseIP(rec.(string))) }, + }, + "nextHeaderIPv6": { + Key: "Proto", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned8Value() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned8Value(rec.(uint8)) }, + }, + "sourceMacAddress": { + Key: "SrcMac", + Setter: func(elt entities.InfoElementWithValue, rec any) { + elt.SetMacAddressValue(net.HardwareAddr(rec.(string))) + }, + Matcher: func(_ entities.InfoElementWithValue, _ any) bool { + // Getting some discrepancies here, need to figure out why + return true + }, + }, + "destinationMacAddress": { + Key: "DstMac", + Setter: 
func(elt entities.InfoElementWithValue, rec any) { + elt.SetMacAddressValue(net.HardwareAddr(rec.(string))) + }, + Matcher: func(_ entities.InfoElementWithValue, _ any) bool { + // Getting some discrepancies here, need to figure out why + return true + }, + }, + "ethernetType": { + Key: "Etype", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) }, + }, + "flowDirection": { + Key: "IfDirections", + Setter: func(elt entities.InfoElementWithValue, rec any) { + if dirs, ok := rec.([]int); ok && len(dirs) > 0 { + elt.SetUnsigned8Value(uint8(dirs[0])) + } + }, + Matcher: func(elt entities.InfoElementWithValue, expected any) bool { + ifdirs := expected.([]int) + return int(elt.GetUnsigned8Value()) == ifdirs[0] + }, + }, + "directions": { + Key: "IfDirections", + Getter: func(elt entities.InfoElementWithValue) any { + var dirs []int + for _, dir := range strings.Split(elt.GetStringValue(), ",") { + d, _ := strconv.Atoi(dir) + dirs = append(dirs, d) + } + return dirs + }, + Setter: func(elt entities.InfoElementWithValue, rec any) { + if dirs, ok := rec.([]int); ok && len(dirs) > 0 { + var asStr []string + for _, dir := range dirs { + asStr = append(asStr, strconv.Itoa(dir)) + } + elt.SetStringValue(strings.Join(asStr, ",")) + } + }, + }, + "protocolIdentifier": { + Key: "Proto", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned8Value() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned8Value(rec.(uint8)) }, + }, + "sourceTransportPort": { + Key: "SrcPort", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) }, + }, + "destinationTransportPort": { + Key: "DstPort", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned16Value() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned16Value(rec.(uint16)) }, + }, + "octetDeltaCount": { + Key: "Bytes", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetUnsigned64Value() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(rec.(uint64)) }, + }, + "flowStartMilliseconds": { + Key: "TimeFlowStartMs", + Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) }, + }, + "flowEndMilliseconds": { + Key: "TimeFlowEndMs", + Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) }, + }, + "packetDeltaCount": { + Key: "Packets", + Getter: func(elt entities.InfoElementWithValue) any { return uint32(elt.GetUnsigned64Value()) }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(uint32))) }, + }, + "interfaceName": { + Key: "Interfaces", + Setter: func(elt entities.InfoElementWithValue, rec any) { + if ifs, ok := rec.([]string); ok && len(ifs) > 0 { + elt.SetStringValue(ifs[0]) + } + }, + Matcher: func(elt entities.InfoElementWithValue, expected any) bool { + ifs := expected.([]string) + return elt.GetStringValue() == ifs[0] + }, + }, + "interfaces": { + Key: "Interfaces", + Getter: func(elt 
entities.InfoElementWithValue) any { return strings.Split(elt.GetStringValue(), ",") }, + Setter: func(elt entities.InfoElementWithValue, rec any) { + if ifs, ok := rec.([]string); ok { + elt.SetStringValue(strings.Join(ifs, ",")) + } + }, + }, + "sourcePodNamespace": { + Key: "SrcK8S_Namespace", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) }, + Optional: true, + }, + "sourcePodName": { + Key: "SrcK8S_Name", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) }, + Optional: true, + }, + "destinationPodNamespace": { + Key: "DstK8S_Namespace", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) }, + Optional: true, + }, + "destinationPodName": { + Key: "DstK8S_Name", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) }, + Optional: true, + }, + "sourceNodeName": { + Key: "SrcK8S_HostName", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) }, + Optional: true, + }, + "destinationNodeName": { + Key: "DstK8S_HostName", + Getter: func(elt entities.InfoElementWithValue) any { return elt.GetStringValue() }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetStringValue(rec.(string)) }, + Optional: true, + }, + "timeFlowRttNs": { + Key: "TimeFlowRttNs", + Getter: func(elt entities.InfoElementWithValue) any { return int64(elt.GetUnsigned64Value()) }, + Setter: func(elt entities.InfoElementWithValue, rec any) { elt.SetUnsigned64Value(uint64(rec.(int64))) }, + Optional: true, + }, + } +) + +func addElementToTemplate(elementName string, value []byte, elements *[]entities.InfoElementWithValue, registryID uint32) error { + element, err := registry.GetInfoElement(elementName, registryID) + if err != nil { + ilog.WithError(err).Errorf("Did not find the element with name %s", elementName) + return err + } + ie, err := entities.DecodeAndCreateInfoElementWithValue(element, value) + if err != nil { + ilog.WithError(err).Errorf("Failed to decode element %s", elementName) + return err + } + *elements = append(*elements, ie) + return nil +} + +func addNetworkEnrichmentToTemplate(elements *[]entities.InfoElementWithValue, registryID uint32) error { + for _, field := range CustomNetworkFields { + if err := addElementToTemplate(field, nil, elements, registryID); err != nil { + return err + } + } + return nil +} + +func addKubeContextToTemplate(elements *[]entities.InfoElementWithValue, registryID uint32) error { + for _, field := range KubeFields { + if err := addElementToTemplate(field, nil, elements, registryID); err != nil { + return err + } + } + return nil +} + +func loadCustomRegistry(enterpriseID uint32) error { + err := registry.InitNewRegistry(enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to initialize registry") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("sourcePodNamespace", 7733, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to 
register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("sourcePodName", 7734, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("destinationPodNamespace", 7735, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("destinationPodName", 7736, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("sourceNodeName", 7737, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("destinationNodeName", 7738, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("timeFlowRttNs", 7740, entities.Unsigned64, enterpriseID, 8)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("interfaces", 7741, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + err = registry.PutInfoElement((*entities.NewInfoElement("directions", 7742, entities.String, enterpriseID, 65535)), enterpriseID) + if err != nil { + ilog.WithError(err).Errorf("Failed to register element") + return err + } + return nil +} + +func SendTemplateRecordv4(exporter *ipfixExporter.ExportingProcess, enrichEnterpriseID uint32) (uint16, []entities.InfoElementWithValue, error) { + templateID := exporter.NewTemplateID() + templateSet := entities.NewSet(false) + err := templateSet.PrepareSet(entities.Template, templateID) + if err != nil { + ilog.WithError(err).Error("Failed in PrepareSet") + return 0, nil, err + } + elements := make([]entities.InfoElementWithValue, 0) + + for _, field := range IPv4IANAFields { + err = addElementToTemplate(field, nil, &elements, registry.IANAEnterpriseID) + if err != nil { + return 0, nil, err + } + } + if enrichEnterpriseID != 0 { + err = addKubeContextToTemplate(&elements, enrichEnterpriseID) + if err != nil { + return 0, nil, err + } + err = addNetworkEnrichmentToTemplate(&elements, enrichEnterpriseID) + if err != nil { + return 0, nil, err + } + } + err = templateSet.AddRecord(elements, templateID) + if err != nil { + ilog.WithError(err).Error("Failed in Add Record") + return 0, nil, err + } + _, err = exporter.SendSet(templateSet) + if err != nil { + ilog.WithError(err).Error("Failed to send template record") + return 0, nil, err + } + + return templateID, elements, nil +} + +func SendTemplateRecordv6(exporter *ipfixExporter.ExportingProcess, enrichEnterpriseID uint32) (uint16, []entities.InfoElementWithValue, error) { + templateID := exporter.NewTemplateID() + templateSet := entities.NewSet(false) + err := templateSet.PrepareSet(entities.Template, templateID) + if err != nil { + return 0, nil, err + } + elements := make([]entities.InfoElementWithValue, 0) + + for _, field := range IPv6IANAFields { + err = 
addElementToTemplate(field, nil, &elements, registry.IANAEnterpriseID) + if err != nil { + return 0, nil, err + } + } + if enrichEnterpriseID != 0 { + err = addKubeContextToTemplate(&elements, enrichEnterpriseID) + if err != nil { + return 0, nil, err + } + err = addNetworkEnrichmentToTemplate(&elements, enrichEnterpriseID) + if err != nil { + return 0, nil, err + } + } + + err = templateSet.AddRecord(elements, templateID) + if err != nil { + return 0, nil, err + } + _, err = exporter.SendSet(templateSet) + if err != nil { + return 0, nil, err + } + + return templateID, elements, nil +} + +//nolint:cyclop +func setElementValue(record config.GenericMap, ieValPtr *entities.InfoElementWithValue) error { + ieVal := *ieValPtr + name := ieVal.GetName() + mapping, ok := MapIPFIXKeys[name] + if !ok { + return nil + } + if value := record[mapping.Key]; value != nil { + mapping.Setter(ieVal, value) + } else if !mapping.Optional { + return fmt.Errorf("unable to find %s (%s) in record", name, mapping.Key) + } + return nil +} + +func setEntities(record config.GenericMap, elements *[]entities.InfoElementWithValue) error { + for _, ieVal := range *elements { + err := setElementValue(record, &ieVal) + if err != nil { + return err + } + } + return nil +} +func (t *writeIpfix) sendDataRecord(record config.GenericMap, v6 bool) error { + dataSet := entities.NewSet(false) + var templateID uint16 + if v6 { + templateID = t.templateIDv6 + err := setEntities(record, &t.entitiesV6) + if err != nil { + return err + } + } else { + templateID = t.templateIDv4 + err := setEntities(record, &t.entitiesV4) + if err != nil { + return err + } + } + err := dataSet.PrepareSet(entities.Data, templateID) + if err != nil { + return err + } + if v6 { + err = dataSet.AddRecord(t.entitiesV6, templateID) + if err != nil { + return err + } + } else { + err = dataSet.AddRecord(t.entitiesV4, templateID) + if err != nil { + return err + } + } + _, err = t.exporter.SendSet(dataSet) + if err != nil { + return err + } + return nil +} + +// Write writes a flow before being stored +func (t *writeIpfix) Write(entry config.GenericMap) { + ilog.Tracef("entering writeIpfix Write") + if IPv6Type == entry["Etype"].(uint16) { + err := t.sendDataRecord(entry, true) + if err != nil { + ilog.WithError(err).Error("Failed in send v6 IPFIX record") + } + } else { + err := t.sendDataRecord(entry, false) + if err != nil { + ilog.WithError(err).Error("Failed in send v4 IPFIX record") + } + } +} + +// NewWriteIpfix creates a new write +func NewWriteIpfix(params config.StageParam) (Writer, error) { + ilog.Debugf("entering NewWriteIpfix") + + ipfixConfigIn := api.WriteIpfix{} + if params.Write != nil && params.Write.Ipfix != nil { + ipfixConfigIn = *params.Write.Ipfix + } + // need to combine defaults with parameters that are provided in the config yaml file + ipfixConfigIn.SetDefaults() + + if err := ipfixConfigIn.Validate(); err != nil { + return nil, fmt.Errorf("the provided config is not valid: %w", err) + } + writeIpfix := &writeIpfix{} + if params.Write != nil && params.Write.Ipfix != nil { + writeIpfix.transport = params.Write.Ipfix.Transport + writeIpfix.hostPort = fmt.Sprintf("%s:%d", params.Write.Ipfix.TargetHost, params.Write.Ipfix.TargetPort) + writeIpfix.enrichEnterpriseID = uint32(params.Write.Ipfix.EnterpriseID) + } + // Initialize IPFIX registry and send templates + registry.LoadRegistry() + var err error + if params.Write != nil && params.Write.Ipfix != nil && params.Write.Ipfix.EnterpriseID != 0 { + err = 
loadCustomRegistry(writeIpfix.enrichEnterpriseID) + if err != nil { + ilog.Fatalf("Failed to load Custom(%d) Registry", writeIpfix.enrichEnterpriseID) + } + } + + // Create exporter using local server info + input := ipfixExporter.ExporterInput{ + CollectorAddress: writeIpfix.hostPort, + CollectorProtocol: writeIpfix.transport, + ObservationDomainID: 1, + TempRefTimeout: 1, + } + writeIpfix.exporter, err = ipfixExporter.InitExportingProcess(input) + if err != nil { + ilog.Fatalf("Got error when connecting to server %s: %v", writeIpfix.hostPort, err) + return nil, err + } + ilog.Infof("Created exporter connecting to server with address: %s", writeIpfix.hostPort) + + writeIpfix.templateIDv4, writeIpfix.entitiesV4, err = SendTemplateRecordv4(writeIpfix.exporter, writeIpfix.enrichEnterpriseID) + if err != nil { + ilog.WithError(err).Error("Failed in send IPFIX template v4 record") + return nil, err + } + + writeIpfix.templateIDv6, writeIpfix.entitiesV6, err = SendTemplateRecordv6(writeIpfix.exporter, writeIpfix.enrichEnterpriseID) + if err != nil { + ilog.WithError(err).Error("Failed in send IPFIX template v6 record") + return nil, err + } + return writeIpfix, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go new file mode 100644 index 000000000..ceea8450d --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_loki.go @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2022 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
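For orientation, a minimal sketch of driving the IPFIX writer defined above. This is hedged: the collector endpoint, the `config.Write` wrapper struct, and all sample field values are assumptions for illustration, and a collector must actually be listening since `NewWriteIpfix` connects at construction time. The record keys and their dynamic types come from `MapIPFIXKeys` (every non-`Optional` mapping must be present, or `setEntities` returns an error):

package main

import (
	"github.com/netobserv/flowlogs-pipeline/pkg/api"
	"github.com/netobserv/flowlogs-pipeline/pkg/config"
	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write"
)

func main() {
	// Hypothetical stage parameters; the collector address is an assumption.
	w, err := write.NewWriteIpfix(config.StageParam{
		Name: "ipfix-export",
		Write: &config.Write{Ipfix: &api.WriteIpfix{
			TargetHost: "127.0.0.1",
			TargetPort: 4739,
			Transport:  "tcp",
		}},
	})
	if err != nil {
		panic(err)
	}
	// "Etype" selects the v4 or v6 template; the other keys map 1:1 onto the
	// non-Optional entries of MapIPFIXKeys, with the types their setters assert.
	w.Write(config.GenericMap{
		"Etype":           uint16(0x0800), // not 0x86DD, so the v4 template is used
		"SrcAddr":         "10.0.0.1",
		"DstAddr":         "10.0.0.2",
		"SrcMac":          "\x02\x00\x00\x00\x00\x01", // raw bytes: the setter casts string to net.HardwareAddr
		"DstMac":          "\x02\x00\x00\x00\x00\x02",
		"Proto":           uint8(6),
		"SrcPort":         uint16(12345),
		"DstPort":         uint16(443),
		"Bytes":           uint64(1500),
		"Packets":         uint32(3),
		"TimeFlowStartMs": int64(1700000000000),
		"TimeFlowEndMs":   int64(1700000000100),
		"Interfaces":      []string{"eth0"},
		"IfDirections":    []int{0},
	})
}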
+ * + */ + +package write + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/operational" + pUtils "github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" + + logAdapter "github.com/go-kit/kit/log/logrus" + jsonIter "github.com/json-iterator/go" + "github.com/netobserv/loki-client-go/loki" + "github.com/netobserv/loki-client-go/pkg/backoff" + "github.com/netobserv/loki-client-go/pkg/urlutil" + "github.com/prometheus/common/model" + "github.com/sirupsen/logrus" +) + +var jsonEncodingConfig = jsonIter.Config{}.Froze() + +var ( + keyReplacer = strings.NewReplacer("/", "_", ".", "_", "-", "_") +) + +var log = logrus.WithField("component", "write.Loki") + +type emitter interface { + Handle(labels model.LabelSet, timestamp time.Time, record string) error +} + +// Loki record writer +type Loki struct { + lokiConfig loki.Config + apiConfig api.WriteLoki + timestampScale float64 + saneLabels map[string]model.LabelName + client emitter + timeNow func() time.Time + exitChan <-chan struct{} + metrics *metrics +} + +func buildLokiConfig(c *api.WriteLoki) (loki.Config, error) { + batchWait, err := time.ParseDuration(c.BatchWait) + if err != nil { + return loki.Config{}, fmt.Errorf("failed in parsing BatchWait : %w", err) + } + + timeout, err := time.ParseDuration(c.Timeout) + if err != nil { + return loki.Config{}, fmt.Errorf("failed in parsing Timeout : %w", err) + } + + minBackoff, err := time.ParseDuration(c.MinBackoff) + if err != nil { + return loki.Config{}, fmt.Errorf("failed in parsing MinBackoff : %w", err) + } + + maxBackoff, err := time.ParseDuration(c.MaxBackoff) + if err != nil { + return loki.Config{}, fmt.Errorf("failed in parsing MaxBackoff : %w", err) + } + + cfg := loki.Config{ + TenantID: c.TenantID, + BatchWait: batchWait, + BatchSize: c.BatchSize, + Timeout: timeout, + BackoffConfig: backoff.BackoffConfig{ + MinBackoff: minBackoff, + MaxBackoff: maxBackoff, + MaxRetries: c.MaxRetries, + }, + } + if c.ClientConfig != nil { + cfg.Client = *c.ClientConfig + } + var clientURL urlutil.URLValue + err = clientURL.Set(strings.TrimSuffix(c.URL, "/") + "/loki/api/v1/push") + if err != nil { + return cfg, fmt.Errorf("failed to parse client URL: %w", err) + } + cfg.URL = clientURL + return cfg, nil +} + +func (l *Loki) ProcessRecord(in config.GenericMap) error { + // copy record before process to avoid alteration on parallel stages + out := in.Copy() + labels := model.LabelSet{} + + // Add static labels from config + for k, v := range l.apiConfig.StaticLabels { + labels[k] = v + } + l.addLabels(in, labels) + + // Remove labels and configured ignore list from record + ignoreList := l.apiConfig.IgnoreList + ignoreList = append(ignoreList, l.apiConfig.Labels...) + for _, label := range ignoreList { + delete(out, label) + } + + js, err := jsonEncodingConfig.Marshal(out) + if err != nil { + return err + } + + timestamp := l.extractTimestamp(out) + err = l.client.Handle(labels, timestamp, string(js)) + if err == nil { + l.metrics.recordsWritten.Inc() + } + return err +} + +func (l *Loki) extractTimestamp(record map[string]interface{}) time.Time { + if l.apiConfig.TimestampLabel == "" { + return l.timeNow() + } + timestamp, ok := record[string(l.apiConfig.TimestampLabel)] + if !ok { + log.WithField("timestampLabel", l.apiConfig.TimestampLabel). + Warnf("Timestamp label not found in record. 
Using local time") + return l.timeNow() + } + ft, ok := getFloat64(timestamp) + if !ok { + log.WithField(string(l.apiConfig.TimestampLabel), timestamp). + Warnf("Invalid timestamp found: float64 expected but got %T. Using local time", timestamp) + return l.timeNow() + } + if ft == 0 { + log.WithField("timestampLabel", l.apiConfig.TimestampLabel). + Warnf("Empty timestamp in record. Using local time") + return l.timeNow() + } + + tsNanos := int64(ft * l.timestampScale) + return time.Unix(tsNanos/int64(time.Second), tsNanos%int64(time.Second)) +} + +func (l *Loki) addLabels(record config.GenericMap, labels model.LabelSet) { + // Add non-static labels from record + for _, label := range l.apiConfig.Labels { + val, ok := record[label] + if !ok { + continue + } + sanitized, ok := l.saneLabels[label] + if !ok { + continue + } + lv := model.LabelValue(utils.ConvertToString(val)) + if !lv.IsValid() { + log.WithFields(logrus.Fields{"key": label, "value": val}). + Debug("Invalid label value. Ignoring it") + continue + } + labels[sanitized] = lv + } +} + +func getFloat64(timestamp interface{}) (ft float64, ok bool) { + switch i := timestamp.(type) { + case float64: + return i, true + case float32: + return float64(i), true + case int64: + return float64(i), true + case int32: + return float64(i), true + case uint64: + return float64(i), true + case uint32: + return float64(i), true + case int: + return float64(i), true + default: + log.Warnf("Type %T is not implemented for float64 conversion\n", i) + return math.NaN(), false + } +} + +// Write writes a flow before being stored +func (l *Loki) Write(entry config.GenericMap) { + log.Tracef("writing entry: %#v", entry) + err := l.ProcessRecord(entry) + if err != nil { + log.WithError(err).Warn("can't write into loki") + } +} + +// NewWriteLoki creates a Loki writer from configuration +func NewWriteLoki(opMetrics *operational.Metrics, params config.StageParam) (*Loki, error) { + log.Debugf("entering NewWriteLoki") + lokiConfigIn := api.WriteLoki{} + if params.Write != nil && params.Write.Loki != nil { + lokiConfigIn = *params.Write.Loki + } + // need to combine defaults with parameters that are provided in the config yaml file + lokiConfigIn.SetDefaults() + + if err := lokiConfigIn.Validate(); err != nil { + return nil, fmt.Errorf("the provided config is not valid: %w", err) + } + + lokiConfig, buildconfigErr := buildLokiConfig(&lokiConfigIn) + if buildconfigErr != nil { + return nil, buildconfigErr + } + client, newWithLoggerErr := loki.NewWithLogger(lokiConfig, logAdapter.NewLogger(log.WithField("module", "export/loki"))) + if newWithLoggerErr != nil { + return nil, newWithLoggerErr + } + + timestampScale, err := time.ParseDuration(lokiConfigIn.TimestampScale) + if err != nil { + return nil, fmt.Errorf("cannot parse TimestampScale: %w", err) + } + + // Sanitize label keys + saneLabels := make(map[string]model.LabelName, len(lokiConfigIn.Labels)) + for _, label := range lokiConfigIn.Labels { + sanitized := model.LabelName(keyReplacer.Replace(label)) + if sanitized.IsValid() { + saneLabels[label] = sanitized + } else { + log.WithFields(logrus.Fields{"key": label, "sanitized": sanitized}). + Debug("Invalid label. 
Ignoring it") + } + } + + l := &Loki{ + lokiConfig: lokiConfig, + apiConfig: lokiConfigIn, + timestampScale: float64(timestampScale), + saneLabels: saneLabels, + client: client, + timeNow: time.Now, + exitChan: pUtils.ExitChannel(), + metrics: newMetrics(opMetrics, params.Name), + } + + return l, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_stdout.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_stdout.go new file mode 100644 index 000000000..5c228305a --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/write_stdout.go @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2021 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package write + +import ( + "encoding/json" + "fmt" + "os" + "sort" + "text/tabwriter" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/sirupsen/logrus" +) + +type writeStdout struct { + format string +} + +// Write writes a flow before being stored +func (t *writeStdout) Write(v config.GenericMap) { + logrus.Tracef("entering writeStdout Write") + if t.format == "json" { + txt, _ := json.Marshal(v) + fmt.Println(string(txt)) + } else if t.format == "fields" { + var order sort.StringSlice + for fieldName := range v { + order = append(order, fieldName) + } + order.Sort() + w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintf(w, "\n\nFlow record at %s:\n", time.Now().Format(time.StampMilli)) + for _, field := range order { + fmt.Fprintf(w, "%v\t=\t%v\n", field, v[field]) + } + w.Flush() + } else { + fmt.Printf("%s: %v\n", time.Now().Format(time.StampMilli), v) + } +} + +// NewWriteStdout create a new write +func NewWriteStdout(params config.StageParam) (Writer, error) { + logrus.Debugf("entering NewWriteStdout") + writeStdout := &writeStdout{} + if params.Write != nil && params.Write.Stdout != nil { + writeStdout.format = params.Write.Stdout.Format + } + return writeStdout, nil +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go new file mode 100644 index 000000000..6353c1801 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/prometheus/prom_server.go @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2023 IBM, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package prometheus + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/http" + "sync" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/server" + prom "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + dto "github.com/prometheus/client_model/go" + "github.com/sirupsen/logrus" +) + +var ( + plog = logrus.WithField("component", "prometheus") + maybePanic = plog.Fatalf + SharedServer = &PromServer{} +) + +type PromServer struct { + httpServer *http.Server + namedRegistries sync.Map +} + +func (ps *PromServer) Gather() ([]*dto.MetricFamily, error) { + all := prom.Gatherers{} + ps.namedRegistries.Range(func(_, value interface{}) bool { + r := value.(prom.Gatherer) + all = append(all, r) + return true + }) + return all.Gather() +} + +func (ps *PromServer) Shutdown(ctx context.Context) error { + return ps.httpServer.Shutdown(ctx) +} + +func (ps *PromServer) SetRegistry(name string, registry prom.Gatherer) { + ps.namedRegistries.Store(name, registry) +} + +// InitializePrometheus starts the global Prometheus server, used for operational metrics and prom-encode stages if they don't override the server settings +func InitializePrometheus(settings *config.MetricsSettings) *PromServer { + if settings.NoPanic { + maybePanic = plog.Errorf + } + if settings.DisableGlobalServer { + plog.Info("Disabled global Prometheus server - no operational metrics will be available") + return nil + } + r := prom.DefaultGatherer + if settings.SuppressGoMetrics { + // set up private prometheus registry + r = prom.NewRegistry() + } + SharedServer = StartServerAsync(&settings.PromConnectionInfo, "", r) + return SharedServer +} + +// StartServerAsync listens for prometheus resource usage requests +func StartServerAsync(conn *api.PromConnectionInfo, regName string, registry prom.Gatherer) *PromServer { + // create prometheus server for operational metrics + // if value of address is empty, then by default it will take 0.0.0.0 + port := conn.Port + if port == 0 { + port = 9090 + } + addr := fmt.Sprintf("%s:%v", conn.Address, port) + plog.Infof("StartServerAsync: addr = %s", addr) + + httpServer := http.Server{ + Addr: addr, + // TLS clients must use TLS 1.2 or higher + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + // The Handler function provides a default handler to expose metrics + // via an HTTP server. "/metrics" is the usual endpoint for that. 
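+ // Note: httpServer.Handler is set before server.Default below, so the hardened defaults (including the 2MB MaxBytesHandler) wrap the mux; the "/metrics" route itself is registered further down, once the PromServer gatherer exists.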
+ mux := http.NewServeMux() + httpServer.Handler = mux + server.Default(&httpServer) + + go func() { + var err error + if conn.TLS != nil { + err = httpServer.ListenAndServeTLS(conn.TLS.CertPath, conn.TLS.KeyPath) + } else { + err = httpServer.ListenAndServe() + } + if err != nil && !errors.Is(err, http.ErrServerClosed) { + maybePanic("error in http.ListenAndServe: %v", err) + } + }() + + p := PromServer{httpServer: &httpServer} + p.namedRegistries.Store(regName, registry) + + mux.Handle("/metrics", promhttp.HandlerFor(&p, promhttp.HandlerOpts{})) + + return &p +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go new file mode 100644 index 000000000..92fdb7fb5 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/server/common.go @@ -0,0 +1,46 @@ +package server + +import ( + "crypto/tls" + "net/http" + "time" + + "github.com/sirupsen/logrus" +) + +var slog = logrus.WithField("module", "server") + +func Default(srv *http.Server) *http.Server { + // defaults taken from https://bruinsslot.jp/post/go-secure-webserver/ can be overridden by the caller + if srv.Handler != nil { + // No more than 2MB body + srv.Handler = http.MaxBytesHandler(srv.Handler, 2<<20) + } else { + slog.Warnf("Handler not yet set on server while securing defaults. Make sure a MaxBytes middleware is used.") + } + if srv.ReadTimeout == 0 { + srv.ReadTimeout = 10 * time.Second + } + if srv.ReadHeaderTimeout == 0 { + srv.ReadHeaderTimeout = 5 * time.Second + } + if srv.WriteTimeout == 0 { + srv.WriteTimeout = 10 * time.Second + } + if srv.IdleTimeout == 0 { + srv.IdleTimeout = 120 * time.Second + } + if srv.MaxHeaderBytes == 0 { + srv.MaxHeaderBytes = 1 << 20 // 1MB + } + if srv.TLSConfig == nil { + srv.TLSConfig = &tls.Config{} + } + if srv.TLSConfig.MinVersion == 0 { + srv.TLSConfig.MinVersion = tls.VersionTLS13 + } + // Disable http/2 + srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0) + + return srv +} diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/utils/filters/filters.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/utils/filters/filters.go new file mode 100644 index 000000000..8bb12fcb6 --- /dev/null +++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/utils/filters/filters.go @@ -0,0 +1,153 @@ +package filters + +import ( + "fmt" + "regexp" + "strings" + + "github.com/netobserv/flowlogs-pipeline/pkg/api" + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/flowlogs-pipeline/pkg/utils" +) + +type Predicate func(config.GenericMap) bool + +var variableExtractor = regexp.MustCompile(`\$\(([^\)]+)\)`) + +func Presence(key string) Predicate { + return func(flow config.GenericMap) bool { + _, found := flow[key] + return found + } +} + +func Absence(key string) Predicate { + return func(flow config.GenericMap) bool { + _, found := flow[key] + return !found + } +} + +func Equal(key string, filterValue any, convertString bool) Predicate { + varLookups := extractVarLookups(filterValue) + if len(varLookups) > 0 { + return func(flow config.GenericMap) bool { + if val, found := flow[key]; found { + // Variable injection => convert to string + sVal, ok := val.(string) + if !ok { + sVal = utils.ConvertToString(val) + } + injected := injectVars(flow, filterValue.(string), varLookups) + return sVal == injected + } + return false + } + } + if convertString { + return func(flow config.GenericMap) bool { + if val, found := flow[key]; found { + sVal, 
ok := val.(string) + if !ok { + sVal = utils.ConvertToString(val) + } + return sVal == filterValue + } + return false + } + } + return func(flow config.GenericMap) bool { + if val, found := flow[key]; found { + return val == filterValue + } + return false + } +} + +func NotEqual(key string, filterValue any, convertString bool) Predicate { + pred := Equal(key, filterValue, convertString) + return func(flow config.GenericMap) bool { return !pred(flow) } +} + +func Regex(key string, filterRegex *regexp.Regexp) Predicate { + return func(flow config.GenericMap) bool { + if val, found := flow[key]; found { + sVal, ok := val.(string) + if !ok { + sVal = utils.ConvertToString(val) + } + return filterRegex.MatchString(sVal) + } + return false + } +} + +func NotRegex(key string, filterRegex *regexp.Regexp) Predicate { + pred := Regex(key, filterRegex) + return func(flow config.GenericMap) bool { return !pred(flow) } +} + +func extractVarLookups(value any) [][]string { + // Extract list of variables to lookup + // E.g: filter "$(SrcAddr):$(SrcPort)" would return [SrcAddr,SrcPort] + if sVal, isString := value.(string); isString { + if len(sVal) > 0 { + return variableExtractor.FindAllStringSubmatch(sVal, -1) + } + } + return nil +} + +func injectVars(flow config.GenericMap, filterValue string, varLookups [][]string) string { + injected := filterValue + for _, matchGroup := range varLookups { + var value string + if rawVal, found := flow[matchGroup[1]]; found { + if sVal, ok := rawVal.(string); ok { + value = sVal + } else { + value = utils.ConvertToString(rawVal) + } + } + injected = strings.ReplaceAll(injected, matchGroup[0], value) + } + return injected +} + +func FromKeepEntry(from *api.KeepEntryRule) (Predicate, error) { + switch from.Type { + case api.KeepEntryIfExists: + return Presence(from.KeepEntry.Input), nil + case api.KeepEntryIfDoesntExist: + return Absence(from.KeepEntry.Input), nil + case api.KeepEntryIfEqual: + return Equal(from.KeepEntry.Input, from.KeepEntry.Value, true), nil + case api.KeepEntryIfNotEqual: + return NotEqual(from.KeepEntry.Input, from.KeepEntry.Value, true), nil + case api.KeepEntryIfRegexMatch: + if r, err := compileRegex(from.KeepEntry); err != nil { + return nil, err + } else { + return Regex(from.KeepEntry.Input, r), nil + } + case api.KeepEntryIfNotRegexMatch: + if r, err := compileRegex(from.KeepEntry); err != nil { + return nil, err + } else { + return NotRegex(from.KeepEntry.Input, r), nil + } + } + return nil, fmt.Errorf("keep entry rule type not recognized: %s", from.Type) +} + +func compileRegex(from *api.TransformFilterGenericRule) (*regexp.Regexp, error) { + s, ok := from.Value.(string) + if !ok { + return nil, fmt.Errorf("invalid regex keep rule: rule value must be a string [%v]", from) + } + r, err := regexp.Compile(s) + if err != nil { + return nil, fmt.Errorf("invalid regex keep rule: cannot compile regex [%w]", err) + } + return r, nil +} diff --git a/vendor/github.com/netobserv/gopipes/LICENSE b/vendor/github.com/netobserv/gopipes/LICENSE new file mode 100644 index 000000000..091e18352 --- /dev/null +++ b/vendor/github.com/netobserv/gopipes/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Mario Macias Lloret + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
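Two usage sketches before the vendored gopipes sources below. First, the variable-injection form of `filters.Equal` from `pkg/utils/filters` above: a `$(...)` pattern in the filter value is resolved against each flow before comparison. The field names and values here are illustrative, not taken from the pipeline:

package main

import (
	"fmt"

	"github.com/netobserv/flowlogs-pipeline/pkg/config"
	"github.com/netobserv/flowlogs-pipeline/pkg/utils/filters"
)

func main() {
	// Matches flows whose "Conn" field equals "<SrcAddr>:<SrcPort>".
	pred := filters.Equal("Conn", "$(SrcAddr):$(SrcPort)", true)
	fmt.Println(pred(config.GenericMap{
		"SrcAddr": "10.0.0.1",
		"SrcPort": "8080",
		"Conn":    "10.0.0.1:8080",
	})) // true
}

Second, a minimal sketch of how the `node` package below is typically wired: a Start node fans out through `connect.Fork`, and `Done()` on the Terminal node blocks until every upstream channel has been closed:

package main

import (
	"fmt"

	"github.com/netobserv/gopipes/pkg/node"
)

func main() {
	start := node.AsStart(func(out chan<- int) {
		for i := 1; i <= 3; i++ {
			out <- i
		}
	})
	double := node.AsMiddle(func(in <-chan int, out chan<- int) {
		for v := range in {
			out <- v * 2
		}
	}, node.ChannelBufferLen(10)) // buffered input channel; the default is unbuffered
	sink := node.AsTerminal(func(in <-chan int) {
		for v := range in {
			fmt.Println(v)
		}
	})
	start.SendsTo(double)
	double.SendsTo(sink)
	start.Start() // starts the whole graph, downstream nodes included
	<-sink.Done() // closed once all inputs are drained and closed
}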
diff --git a/vendor/github.com/netobserv/gopipes/pkg/node/internal/connect/connectors.go b/vendor/github.com/netobserv/gopipes/pkg/node/internal/connect/connectors.go new file mode 100644 index 000000000..b9d7c8c54 --- /dev/null +++ b/vendor/github.com/netobserv/gopipes/pkg/node/internal/connect/connectors.go @@ -0,0 +1,99 @@ +package connect + +import ( + "sync/atomic" +) + +// Joiner provides shared access to the input channel of a node of the type IN +type Joiner[IN any] struct { + totalSenders int32 + bufLen int + channel chan IN +} + +// NewJoiner creates a joiner for a given channel type and buffer length +func NewJoiner[IN any](bufferLength int) Joiner[IN] { + return Joiner[IN]{ + bufLen: bufferLength, + channel: make(chan IN, bufferLength), + } +} + +// Receiver gets access to the channel as a receiver +func (j *Joiner[IN]) Receiver() chan IN { + return j.channel +} + +// AcquireSender gets access to the channel as a sender. The acquirer must finally invoke +// ReleaseSender to make sure that the channel is closed when all the senders have released it. +func (j *Joiner[IN]) AcquireSender() chan IN { + atomic.AddInt32(&j.totalSenders, 1) + return j.channel +} + +// ReleaseSender will close the channel when all the invokers of AcquireSender have invoked +// this function +func (j *Joiner[IN]) ReleaseSender() { + // if no senders, we close the main channel + if atomic.AddInt32(&j.totalSenders, -1) == 0 { + close(j.channel) + } +} + +// Releaser is a function that will allow releasing a forked channel. +type Releaser func() + +// Forker manages the access to a Node's output (send) channel. When a node sends to only +// one node, this will work as a single channel. When a node sends to N nodes, +// it will spawn N channels that are cloned from the original channel in a goroutine. +type Forker[OUT any] struct { + sendCh chan OUT + releaseChannel Releaser +} + +// Fork provides a connection to a group of output Nodes, accessible through their respective +// Joiner instances. +func Fork[T any](joiners ...*Joiner[T]) Forker[T] { + if len(joiners) == 0 { + panic("can't fork 0 joiners") + } + // if there is only one joiner, we directly send the data to the channel, without intermediation + if len(joiners) == 1 { + return Forker[T]{ + sendCh: joiners[0].AcquireSender(), + releaseChannel: joiners[0].ReleaseSender, + } + } + // channel used as input from the source Node + sendCh := make(chan T, joiners[0].bufLen) + + // channels that clone the contents of the sendCh + forwarders := make([]chan T, len(joiners)) + for i := 0; i < len(joiners); i++ { + forwarders[i] = joiners[i].AcquireSender() + } + go func() { + for in := range sendCh { + for i := 0; i < len(joiners); i++ { + forwarders[i] <- in + } + } + for i := 0; i < len(joiners); i++ { + joiners[i].ReleaseSender() + } + }() + return Forker[T]{ + sendCh: sendCh, + releaseChannel: func() { close(sendCh) }, + } +} + +// Sender acquires the channel that will receive the data from the source node +func (f *Forker[OUT]) Sender() chan OUT { + return f.sendCh +} + +// Close closes the input channel and, in cascade, all the forked channels +func (f *Forker[OUT]) Close() { + f.releaseChannel() +} diff --git a/vendor/github.com/netobserv/gopipes/pkg/node/node.go b/vendor/github.com/netobserv/gopipes/pkg/node/node.go new file mode 100644 index 000000000..72b99e792 --- /dev/null +++ b/vendor/github.com/netobserv/gopipes/pkg/node/node.go @@ -0,0 +1,239 @@ +// Package node provides functionalities to create nodes and interconnect them. 
+// A Node is a function container that can be connected via channels to other nodes. +// A node can send data to multiple nodes, and receive data from multiple nodes. +package node + +import ( + "context" + "reflect" + + "github.com/netobserv/gopipes/pkg/node/internal/connect" +) + +// StartFunc is a function that receives a writable channel as its unique argument, and sends +// values to that channel for an indefinite amount of time. +type StartFunc[OUT any] func(out chan<- OUT) + +// StartFuncCtx is a StartFunc that also receives a context as a first argument. If the passed +// context is cancelled via the ctx.Done() function, the implementer function should end, +// so the cancel will be propagated to the later nodes. +type StartFuncCtx[OUT any] func(ctx context.Context, out chan<- OUT) + +// MiddleFunc is a function that receives a readable channel as its first argument, +// and a writable channel as its second argument. +// It must process the inputs from the input channel until it's closed. +type MiddleFunc[IN, OUT any] func(in <-chan IN, out chan<- OUT) + +// TerminalFunc is a function that receives a readable channel as its unique argument. +// It must process the inputs from the input channel until it's closed. +type TerminalFunc[IN any] func(out <-chan IN) + +// TODO: OutType and InType methods are candidates for deprecation + +// Sender is any node that can send data to another node: node.Start and node.Middle +type Sender[OUT any] interface { + // SendsTo connects a sender to a group of receivers + SendsTo(...Receiver[OUT]) + // OutType returns the inner type of the Sender's output channel + OutType() reflect.Type +} + +// Receiver is any node that can receive data from another node: node.Middle and node.Terminal +type Receiver[IN any] interface { + isStarted() bool + start() + joiner() *connect.Joiner[IN] + // InType returns the inner type of the Receiver's input channel + InType() reflect.Type +} + +// Start nodes are the starting points of a graph. That is, all the nodes that bring information +// from outside the graph: e.g. because they generate it or because they acquire it from an +// external source like a Web Service. +// A graph must have at least one Start node. +// A Start node must have at least one output node. +type Start[OUT any] struct { + outs []Receiver[OUT] + fun StartFuncCtx[OUT] + outType reflect.Type +} + +func (s *Start[OUT]) SendsTo(outputs ...Receiver[OUT]) { + s.outs = append(s.outs, outputs...) +} + +// OutType is deprecated. It will be removed in future versions. +func (s *Start[OUT]) OutType() reflect.Type { + return s.outType +} + +// Middle is any intermediate node that receives data from another node, processes/filters it, +// and forwards the data to another node. +// A Middle node must have at least one output node. +type Middle[IN, OUT any] struct { + outs []Receiver[OUT] + inputs connect.Joiner[IN] + started bool + fun MiddleFunc[IN, OUT] + outType reflect.Type + inType reflect.Type +} + +func (i *Middle[IN, OUT]) joiner() *connect.Joiner[IN] { + return &i.inputs +} + +func (i *Middle[IN, OUT]) isStarted() bool { + return i.started +} + +func (s *Middle[IN, OUT]) SendsTo(outputs ...Receiver[OUT]) { + s.outs = append(s.outs, outputs...) 
+} + +func (m *Middle[IN, OUT]) OutType() reflect.Type { + return m.outType +} + +func (m *Middle[IN, OUT]) InType() reflect.Type { + return m.inType +} + +// Terminal is any node that receives data from another node and does not forward it to another node, +// but can process it and send the results outside the graph (e.g. memory, storage, web...) +type Terminal[IN any] struct { + inputs connect.Joiner[IN] + started bool + fun TerminalFunc[IN] + done chan struct{} + inType reflect.Type +} + +func (i *Terminal[IN]) joiner() *connect.Joiner[IN] { + return &i.inputs +} + +func (t *Terminal[IN]) isStarted() bool { + return t.started +} + +// Done returns a channel that is closed when the Terminal node has ended its processing. That +// is, when all its inputs have also been closed. Waiting for all the Terminal nodes to finish +// allows blocking the execution until all the data in the graph has been processed and all the +// previous stages have ended. +func (t *Terminal[IN]) Done() <-chan struct{} { + return t.done +} + +func (m *Terminal[IN]) InType() reflect.Type { + return m.inType +} + +// AsInit wraps a StartFunc into a Start node. +// Deprecated: use AsStart or AsStartCtx instead. +func AsInit[OUT any](fun StartFunc[OUT]) *Start[OUT] { + return AsStart(fun) +} + +// AsStart wraps a StartFunc into a Start node. +func AsStart[OUT any](fun StartFunc[OUT]) *Start[OUT] { + return AsStartCtx(func(_ context.Context, out chan<- OUT) { + fun(out) + }) +} + +// AsStartCtx wraps a StartFuncCtx into a Start node. +func AsStartCtx[OUT any](fun StartFuncCtx[OUT]) *Start[OUT] { + var out OUT + return &Start[OUT]{ + fun: fun, + outType: reflect.TypeOf(out), + } +} + +// AsMiddle wraps a MiddleFunc into a Middle node. +func AsMiddle[IN, OUT any](fun MiddleFunc[IN, OUT], opts ...Option) *Middle[IN, OUT] { + var in IN + var out OUT + options := getOptions(opts...) + return &Middle[IN, OUT]{ + inputs: connect.NewJoiner[IN](options.channelBufferLen), + fun: fun, + inType: reflect.TypeOf(in), + outType: reflect.TypeOf(out), + } +} + +// AsTerminal wraps a TerminalFunc into a Terminal node. +func AsTerminal[IN any](fun TerminalFunc[IN], opts ...Option) *Terminal[IN] { + var i IN + options := getOptions(opts...) + return &Terminal[IN]{ + inputs: connect.NewJoiner[IN](options.channelBufferLen), + fun: fun, + done: make(chan struct{}), + inType: reflect.TypeOf(i), + } +} + +// Start starts the function wrapped in the Start node. Either this method or StartCtx should be invoked +// for all the start nodes of the same graph, so the graph can properly start and finish. +func (i *Start[OUT]) Start() { + i.StartCtx(context.TODO()) +} + +// StartCtx starts the function wrapped in the Start node, allowing a context to be passed to +// the wrapped function. Either this method or Start should be invoked +// for all the start nodes of the same graph, so the graph can properly start and finish. +func (i *Start[OUT]) StartCtx(ctx context.Context) { + if len(i.outs) == 0 { + panic("Start node should have outputs") + } + joiners := make([]*connect.Joiner[OUT], 0, len(i.outs)) + for _, out := range i.outs { + joiners = append(joiners, out.joiner()) + if !out.isStarted() { + out.start() + } + } + forker := connect.Fork(joiners...) 
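+ // The forker fans the wrapped function's output out to every attached joiner; closing it once the function returns cascades the channel closure down the graph.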
+ go func() { + i.fun(ctx, forker.Sender()) + forker.Close() + }() +} + +func (i *Middle[IN, OUT]) start() { + if len(i.outs) == 0 { + panic("Middle node should have outputs") + } + i.started = true + joiners := make([]*connect.Joiner[OUT], 0, len(i.outs)) + for _, out := range i.outs { + joiners = append(joiners, out.joiner()) + if !out.isStarted() { + out.start() + } + } + forker := connect.Fork(joiners...) + go func() { + i.fun(i.inputs.Receiver(), forker.Sender()) + forker.Close() + }() +} + +func (t *Terminal[IN]) start() { + t.started = true + go func() { + t.fun(t.inputs.Receiver()) + close(t.done) + }() +} + +func getOptions(opts ...Option) creationOptions { + options := defaultOptions + for _, opt := range opts { + opt(&options) + } + return options +} diff --git a/vendor/github.com/netobserv/gopipes/pkg/node/options.go b/vendor/github.com/netobserv/gopipes/pkg/node/options.go new file mode 100644 index 000000000..f03027ce5 --- /dev/null +++ b/vendor/github.com/netobserv/gopipes/pkg/node/options.go @@ -0,0 +1,22 @@ +package node + +type creationOptions struct { + // if 0, channel is unbuffered + channelBufferLen int +} + +var defaultOptions = creationOptions{ + channelBufferLen: 0, +} + +// Option allows overriding the default values of node instantiation +type Option func(options *creationOptions) + +// ChannelBufferLen is a node.Option that allows specifying the length of the input +// channels for a given node. The default value is 0, which means that the channels +// are unbuffered. +func ChannelBufferLen(length int) Option { + return func(options *creationOptions) { + options.channelBufferLen = length + } +} diff --git a/vendor/github.com/netobserv/loki-client-go/LICENSE b/vendor/github.com/netobserv/loki-client-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/netobserv/loki-client-go/loki/batch.go b/vendor/github.com/netobserv/loki-client-go/loki/batch.go new file mode 100644 index 000000000..473e05c68 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/loki/batch.go @@ -0,0 +1,107 @@ +package loki + +import ( + "time" + + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + json "github.com/json-iterator/go" + + "github.com/netobserv/loki-client-go/pkg/logproto" +) + +// batch holds pending log streams waiting to be sent to Loki, and it's used +// to reduce the number of push requests to Loki aggregating multiple log streams +// and entries in a single batch request. In case of multi-tenant Promtail, log +// streams for each tenant are stored in a dedicated batch. 
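+// A batch is flushed by the client either when adding another entry would push
+// it over the configured maximum size, or when its age exceeds the configured
+// maximum wait time (see Client.run in client.go).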
+type batch struct { + streams map[string]*logproto.Stream + bytes int + createdAt time.Time +} + +func newBatch(entries ...entry) *batch { + b := &batch{ + streams: map[string]*logproto.Stream{}, + bytes: 0, + createdAt: time.Now(), + } + + // Add entries to the batch + for _, entry := range entries { + b.add(entry) + } + + return b +} + +// add an entry to the batch +func (b *batch) add(entry entry) { + b.bytes += len(entry.Line) + + // Append the entry to an already existing stream (if any) + labels := entry.labels.String() + if stream, ok := b.streams[labels]; ok { + stream.Entries = append(stream.Entries, entry.Entry) + return + } + + // Add the entry as a new stream + b.streams[labels] = &logproto.Stream{ + Labels: labels, + Entries: []logproto.Entry{entry.Entry}, + } +} + +// sizeBytes returns the current batch size in bytes +func (b *batch) sizeBytes() int { + return b.bytes +} + +// sizeBytesAfter returns the size of the batch after the input entry +// will be added to the batch itself +func (b *batch) sizeBytesAfter(entry entry) int { + return b.bytes + len(entry.Line) +} + +// age of the batch since its creation +func (b *batch) age() time.Duration { + return time.Since(b.createdAt) +} + +// encode the batch as snappy-compressed push request, and returns +// the encoded bytes and the number of encoded entries +func (b *batch) encode() ([]byte, int, error) { + req, entriesCount := b.createPushRequest() + buf, err := proto.Marshal(req) + if err != nil { + return nil, 0, err + } + buf = snappy.Encode(nil, buf) + return buf, entriesCount, nil +} + +// encode the batch as json push request, and returns +// the encoded bytes and the number of encoded entries +func (b *batch) encodeJSON() ([]byte, int, error) { + req, entriesCount := b.createPushRequest() + buf, err := json.Marshal(req) + if err != nil { + return nil, 0, err + } + return buf, entriesCount, nil +} + +// creates push request and returns it, together with number of entries +func (b *batch) createPushRequest() (*logproto.PushRequest, int) { + req := logproto.PushRequest{ + Streams: make([]logproto.Stream, 0, len(b.streams)), + } + + entriesCount := 0 + for _, stream := range b.streams { + req.Streams = append(req.Streams, *stream) + entriesCount += len(stream.Entries) + } + return &req, entriesCount +} diff --git a/vendor/github.com/netobserv/loki-client-go/loki/client.go b/vendor/github.com/netobserv/loki-client-go/loki/client.go new file mode 100644 index 000000000..bf3093b5e --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/loki/client.go @@ -0,0 +1,404 @@ +package loki + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "strconv" + "sync" + "time" + + "github.com/netobserv/loki-client-go/pkg/backoff" + "github.com/prometheus/prometheus/promql/parser" + + "github.com/netobserv/loki-client-go/pkg/metric" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/common/version" + + "github.com/netobserv/loki-client-go/pkg/helpers" + "github.com/netobserv/loki-client-go/pkg/logproto" +) + +const ( + protoContentType = "application/x-protobuf" + JSONContentType = "application/json" + maxErrMsgLen = 1024 + + // Label reserved to override the tenant ID while processing + // pipeline stages + ReservedLabelTenantID = "__tenant_id__" + + LatencyLabel = "filename" + HostLabel = "host" + MetricPrefix = "netobserv" 
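+	// MetricPrefix above namespaces every Prometheus metric registered by this
+	// package, and HostLabel identifies the target Loki host on each of them.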
+) + +var ( + encodedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricPrefix, + Name: "loki_encoded_bytes_total", + Help: "Number of bytes encoded and ready to send.", + }, []string{HostLabel}) + sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricPrefix, + Name: "loki_sent_bytes_total", + Help: "Number of bytes sent.", + }, []string{HostLabel}) + droppedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricPrefix, + Name: "loki_dropped_bytes_total", + Help: "Number of bytes dropped because failed to be sent to the ingester after all retries.", + }, []string{HostLabel}) + sentEntries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricPrefix, + Name: "loki_sent_entries_total", + Help: "Number of log entries sent to the ingester.", + }, []string{HostLabel}) + droppedEntries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricPrefix, + Name: "loki_dropped_entries_total", + Help: "Number of log entries dropped because failed to be sent to the ingester after all retries.", + }, []string{HostLabel}) + requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: MetricPrefix, + Name: "loki_request_duration_seconds", + Help: "Duration of send requests.", + }, []string{"status_code", HostLabel}) + batchRetries = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: MetricPrefix, + Name: "loki_batch_retries_total", + Help: "Number of times batches has had to be retried.", + }, []string{HostLabel}) + streamLag *metric.Gauges + + countersWithHost = []*prometheus.CounterVec{ + encodedBytes, sentBytes, droppedBytes, sentEntries, droppedEntries, + } + + UserAgent = fmt.Sprintf("promtail/%s", version.Version) +) + +func init() { + prometheus.MustRegister(encodedBytes) + prometheus.MustRegister(sentBytes) + prometheus.MustRegister(droppedBytes) + prometheus.MustRegister(sentEntries) + prometheus.MustRegister(droppedEntries) + prometheus.MustRegister(requestDuration) + prometheus.MustRegister(batchRetries) + var err error + streamLag, err = metric.NewGauges(MetricPrefix+"_loki_stream_lag_seconds", + "Difference between current time and last batch timestamp for successful sends", + metric.GaugeConfig{Action: "set"}, + int64(1*time.Minute.Seconds()), // This strips out files which update slowly and reduces noise in this metric. + ) + if err != nil { + panic(err) + } + prometheus.MustRegister(streamLag) +} + +// Client for pushing logs in snappy-compressed protos over HTTP. +type Client struct { + logger log.Logger + cfg Config + client *http.Client + quit chan struct{} + once sync.Once + entries chan entry + wg sync.WaitGroup + + externalLabels model.LabelSet +} + +type entry struct { + tenantID string + labels model.LabelSet + logproto.Entry +} + +// New makes a new Client from config +func New(cfg Config) (*Client, error) { + logger := level.NewFilter(log.NewLogfmtLogger(os.Stdout), level.AllowWarn()) + return NewWithLogger(cfg, logger) +} + +// NewWithDefault creates a new client with default configuration. 
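+// It is a convenience wrapper around NewDefaultConfig followed by New.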
+func NewWithDefault(url string) (*Client, error) { + cfg, err := NewDefaultConfig(url) + if err != nil { + return nil, err + } + return New(cfg) +} + +// NewWithLogger makes a new Client from a logger and a config +func NewWithLogger(cfg Config, logger log.Logger) (*Client, error) { + if cfg.URL.URL == nil { + return nil, errors.New("client needs target URL") + } + + c := &Client{ + logger: log.With(logger, "component", "client", "host", cfg.URL.Host), + cfg: cfg, + quit: make(chan struct{}), + entries: make(chan entry), + + externalLabels: cfg.ExternalLabels.LabelSet, + } + + err := cfg.Client.Validate() + if err != nil { + return nil, err + } + + c.client, err = config.NewClientFromConfig(cfg.Client, "promtail", config.WithKeepAlivesDisabled(), config.WithHTTP2Disabled()) + if err != nil { + return nil, err + } + + c.client.Timeout = cfg.Timeout + + // Initialize counters to 0 so the metrics are exported before the first + // occurrence of incrementing to avoid missing metrics. + for _, counter := range countersWithHost { + counter.WithLabelValues(c.cfg.URL.Host).Add(0) + } + + c.wg.Add(1) + go c.run() + return c, nil +} + +func (c *Client) run() { + batches := map[string]*batch{} + + // Given the client handles multiple batches (1 per tenant) and each batch + // can be created at a different point in time, we look for batches whose + // max wait time has been reached every 10 times per BatchWait, so that the + // maximum delay we have sending batches is 10% of the max waiting time. + // We apply a cap of 10ms to the ticker, to avoid too frequent checks in + // case the BatchWait is very low. + minWaitCheckFrequency := 10 * time.Millisecond + maxWaitCheckFrequency := c.cfg.BatchWait / 10 + if maxWaitCheckFrequency < minWaitCheckFrequency { + maxWaitCheckFrequency = minWaitCheckFrequency + } + + maxWaitCheck := time.NewTicker(maxWaitCheckFrequency) + + defer func() { + // Send all pending batches + for tenantID, batch := range batches { + c.sendBatch(tenantID, batch) + } + + c.wg.Done() + }() + + for { + select { + case <-c.quit: + return + + case e := <-c.entries: + batch, ok := batches[e.tenantID] + + // If the batch doesn't exist yet, we create a new one with the entry + if !ok { + batches[e.tenantID] = newBatch(e) + break + } + + // If adding the entry to the batch will increase the size over the max + // size allowed, we do send the current batch and then create a new one + if batch.sizeBytesAfter(e) > c.cfg.BatchSize { + c.sendBatch(e.tenantID, batch) + + batches[e.tenantID] = newBatch(e) + break + } + + // The max size of the batch isn't reached, so we can add the entry + batch.add(e) + + case <-maxWaitCheck.C: + // Send all batches whose max wait time has been reached + for tenantID, batch := range batches { + if batch.age() < c.cfg.BatchWait { + continue + } + + c.sendBatch(tenantID, batch) + delete(batches, tenantID) + } + } + } +} + +func (c *Client) sendBatch(tenantID string, batch *batch) { + var ( + err error + buf []byte + entriesCount int + ) + if c.cfg.EncodeJson { + buf, entriesCount, err = batch.encodeJSON() + } else { + buf, entriesCount, err = batch.encode() + } + + if err != nil { + level.Error(c.logger).Log("msg", "error encoding batch", "error", err) + return + } + bufBytes := float64(len(buf)) + encodedBytes.WithLabelValues(c.cfg.URL.Host).Add(bufBytes) + + ctx := context.Background() + backoff := backoff.New(ctx, c.cfg.BackoffConfig) + var status int + for backoff.Ongoing() { + start := time.Now() + status, err = c.send(ctx, tenantID, buf) + 
requestDuration.WithLabelValues(strconv.Itoa(status), c.cfg.URL.Host).Observe(time.Since(start).Seconds())
+
+		if err == nil {
+			sentBytes.WithLabelValues(c.cfg.URL.Host).Add(bufBytes)
+			sentEntries.WithLabelValues(c.cfg.URL.Host).Add(float64(entriesCount))
+			for _, s := range batch.streams {
+				lbls, err := parser.ParseMetric(s.Labels)
+				if err != nil {
+					// is this possible?
+					level.Warn(c.logger).Log("msg", "error converting stream label string to label.Labels, cannot update lagging metric", "error", err)
+					return
+				}
+				var lblSet model.LabelSet
+				for i := range lbls {
+					if lbls[i].Name == LatencyLabel {
+						lblSet = model.LabelSet{
+							model.LabelName(HostLabel):    model.LabelValue(c.cfg.URL.Host),
+							model.LabelName(LatencyLabel): model.LabelValue(lbls[i].Value),
+						}
+					}
+				}
+				if lblSet != nil {
+					streamLag.With(lblSet).Set(time.Since(s.Entries[len(s.Entries)-1].Timestamp).Seconds())
+				}
+			}
+			return
+		}
+
+		// Only retry 429s, 500s and connection-level errors.
+		if status > 0 && status != 429 && status/100 != 5 {
+			break
+		}
+
+		level.Warn(c.logger).Log("msg", "error sending batch, will retry", "status", status, "error", err)
+		batchRetries.WithLabelValues(c.cfg.URL.Host).Inc()
+		backoff.Wait()
+	}
+
+	if err != nil {
+		level.Error(c.logger).Log("msg", "final error sending batch", "status", status, "error", err)
+		droppedBytes.WithLabelValues(c.cfg.URL.Host).Add(bufBytes)
+		droppedEntries.WithLabelValues(c.cfg.URL.Host).Add(float64(entriesCount))
+	}
+}
+
+func (c *Client) send(ctx context.Context, tenantID string, buf []byte) (int, error) {
+	ctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout)
+	defer cancel()
+	req, err := http.NewRequest("POST", c.cfg.URL.String(), bytes.NewReader(buf))
+	if err != nil {
+		return -1, err
+	}
+	req = req.WithContext(ctx)
+	req.Header.Set("Content-Type", protoContentType)
+	if c.cfg.EncodeJson {
+		req.Header.Set("Content-Type", JSONContentType)
+	}
+	req.Header.Set("User-Agent", UserAgent)
+
+	// If the tenant ID is not empty, promtail is running in multi-tenant mode, so
+	// we should send it to Loki
+	if tenantID != "" {
+		req.Header.Set("X-Scope-OrgID", tenantID)
+	}
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return -1, err
+	}
+	defer helpers.LogError(c.logger, "closing response body", resp.Body.Close)
+
+	if resp.StatusCode/100 != 2 {
+		scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen))
+		line := ""
+		if scanner.Scan() {
+			line = scanner.Text()
+		}
+		err = fmt.Errorf("server returned HTTP status %s (%d): %s", resp.Status, resp.StatusCode, line)
+	}
+	return resp.StatusCode, err
+}
+
+func (c *Client) getTenantID(labels model.LabelSet) string {
+	// Check if it has been overridden while processing the pipeline stages
+	if value, ok := labels[ReservedLabelTenantID]; ok {
+		return string(value)
+	}
+
+	// Check if it has been specified in the config
+	if c.cfg.TenantID != "" {
+		return c.cfg.TenantID
+	}
+
+	// Defaults to an empty string, which means the X-Scope-OrgID header
+	// will not be sent
+	return ""
+}
+
+// Stop the client.
+func (c *Client) Stop() {
+	c.once.Do(func() { close(c.quit) })
+	c.wg.Wait()
+}
+
+// Handle implements EntryHandler; adds a new line to the next batch; send is async.
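+// Note that the entries channel is unbuffered, so Handle blocks until the
+// background sender goroutine has picked the entry up.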
+func (c *Client) Handle(ls model.LabelSet, t time.Time, s string) error {
+	if len(c.externalLabels) > 0 {
+		ls = c.externalLabels.Merge(ls)
+	}
+
+	// Get the tenant ID in case it has been overridden while processing
+	// the pipeline stages, then remove the special label
+	tenantID := c.getTenantID(ls)
+	if _, ok := ls[ReservedLabelTenantID]; ok {
+		// Clone the label set to not manipulate the input one
+		ls = ls.Clone()
+		delete(ls, ReservedLabelTenantID)
+	}
+
+	c.entries <- entry{tenantID, ls, logproto.Entry{
+		Timestamp: t,
+		Line:      s,
+	}}
+	return nil
+}
+
+func (c *Client) UnregisterLatencyMetric(labels model.LabelSet) {
+	labels[HostLabel] = model.LabelValue(c.cfg.URL.Host)
+	streamLag.Delete(labels)
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/loki/config.go b/vendor/github.com/netobserv/loki-client-go/loki/config.go
new file mode 100644
index 000000000..0241f9c1e
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/loki/config.go
@@ -0,0 +1,109 @@
+package loki
+
+import (
+	"flag"
+	"time"
+
+	"github.com/netobserv/loki-client-go/pkg/backoff"
+	"github.com/netobserv/loki-client-go/pkg/labelutil"
+	"github.com/netobserv/loki-client-go/pkg/urlutil"
+	"github.com/prometheus/common/config"
+)
+
+// NOTE: the helm charts for promtail and fluent-bit also have defaults for these values; please update them to match if you make changes here.
+const (
+	BatchWait      = 1 * time.Second
+	BatchSize  int = 1024 * 1024
+	MinBackoff     = 500 * time.Millisecond
+	MaxBackoff     = 5 * time.Minute
+	MaxRetries int = 10
+	Timeout        = 10 * time.Second
+)
+
+// Config describes configuration for an HTTP pusher client.
+type Config struct {
+	URL       urlutil.URLValue
+	BatchWait time.Duration
+	BatchSize int
+
+	Client config.HTTPClientConfig `yaml:",inline"`
+
+	BackoffConfig backoff.BackoffConfig `yaml:"backoff_config"`
+	// The labels to add to any time series or alerts when communicating with loki
+	ExternalLabels labelutil.LabelSet `yaml:"external_labels,omitempty"`
+	Timeout        time.Duration      `yaml:"timeout"`
+
+	// The tenant ID to use when pushing logs to Loki (empty string means
+	// single tenant mode)
+	TenantID string `yaml:"tenant_id"`
+
+	// Use the Loki JSON API as opposed to the snappy protobuf.
+	EncodeJson bool `yaml:"encode_json"`
+}
+
+// NewDefaultConfig creates a default configuration for a given target Loki URL.
+func NewDefaultConfig(url string) (Config, error) {
+	var cfg Config
+	var u urlutil.URLValue
+	f := &flag.FlagSet{}
+	cfg.RegisterFlags(f)
+	if err := f.Parse(nil); err != nil {
+		return cfg, err
+	}
+	if err := u.Set(url); err != nil {
+		return cfg, err
+	}
+	cfg.URL = u
+	return cfg, nil
+}
+
+// RegisterFlagsWithPrefix registers flags, prefixing every name with the given
+// prefix. If prefix is a non-empty string, it should end with a period.
+func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+	f.Var(&c.URL, prefix+"client.url", "URL of log server")
+	f.DurationVar(&c.BatchWait, prefix+"client.batch-wait", BatchWait, "Maximum wait period before sending batch.")
+	f.IntVar(&c.BatchSize, prefix+"client.batch-size-bytes", BatchSize, "Maximum batch size to accrue before sending.")
+	// Default backoff schedule: 0.5s, 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s (4.267m), for a total time of 511.5s (8.5m) before logs are lost
+	f.IntVar(&c.BackoffConfig.MaxRetries, prefix+"client.max-retries", MaxRetries, "Maximum number of retries when sending batches.")
+	f.DurationVar(&c.BackoffConfig.MinBackoff, prefix+"client.min-backoff", MinBackoff, "Initial backoff time between retries.")
+	f.DurationVar(&c.BackoffConfig.MaxBackoff, prefix+"client.max-backoff", MaxBackoff, "Maximum backoff time between retries.")
+	f.DurationVar(&c.Timeout, prefix+"client.timeout", Timeout, "Maximum time to wait for server to respond to a request.")
+	f.Var(&c.ExternalLabels, prefix+"client.external-labels", "list of external labels to add to each log (e.g: --client.external-labels=lb1=v1,lb2=v2)")
+
+	f.StringVar(&c.TenantID, prefix+"client.tenant-id", "", "Tenant ID to use when pushing logs to Loki.")
+	f.BoolVar(&c.EncodeJson, prefix+"client.encode-json", false, "Encode payload in JSON; defaults to snappy protobuf")
+}
+
+// RegisterFlags registers flags.
+func (c *Config) RegisterFlags(flags *flag.FlagSet) {
+	c.RegisterFlagsWithPrefix("", flags)
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	type raw Config
+	var cfg raw
+	if c.URL.URL != nil {
+		// we used flags to set that value, which already has sane default.
+		cfg = raw(*c)
+	} else {
+		// force sane defaults.
+		cfg = raw{
+			BackoffConfig: backoff.BackoffConfig{
+				MaxBackoff: MaxBackoff,
+				MaxRetries: MaxRetries,
+				MinBackoff: MinBackoff,
+			},
+			BatchSize: BatchSize,
+			BatchWait: BatchWait,
+			Timeout:   Timeout,
+		}
+	}
+
+	if err := unmarshal(&cfg); err != nil {
+		return err
+	}
+
+	*c = Config(cfg)
+	return nil
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/backoff/backoff.go b/vendor/github.com/netobserv/loki-client-go/pkg/backoff/backoff.go
new file mode 100644
index 000000000..3c922c1c1
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/backoff/backoff.go
@@ -0,0 +1,117 @@
+package backoff
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+// BackoffConfig configures a Backoff
+type BackoffConfig struct {
+	MinBackoff time.Duration `yaml:"min_period"`  // start backoff at this level
+	MaxBackoff time.Duration `yaml:"max_period"`  // increase exponentially to this level
+	MaxRetries int           `yaml:"max_retries"` // give up after this many; zero means infinite retries
+}
+
+// RegisterFlags for BackoffConfig.
+func (cfg *BackoffConfig) RegisterFlags(prefix string, f *flag.FlagSet) {
+	f.DurationVar(&cfg.MinBackoff, prefix+".backoff-min-period", 100*time.Millisecond, "Minimum delay when backing off.")
+	f.DurationVar(&cfg.MaxBackoff, prefix+".backoff-max-period", 10*time.Second, "Maximum delay when backing off.")
+	f.IntVar(&cfg.MaxRetries, prefix+".backoff-retries", 10, "Number of times to backoff and retry before failing.")
+}
+
+// Backoff implements exponential backoff with randomized wait times
+type Backoff struct {
+	cfg          BackoffConfig
+	ctx          context.Context
+	numRetries   int
+	nextDelayMin time.Duration
+	nextDelayMax time.Duration
+}
+
+// New creates a Backoff object. Pass a Context that can also terminate the operation.
+func New(ctx context.Context, cfg BackoffConfig) *Backoff { + return &Backoff{ + cfg: cfg, + ctx: ctx, + nextDelayMin: cfg.MinBackoff, + nextDelayMax: doubleDuration(cfg.MinBackoff, cfg.MaxBackoff), + } +} + +// Reset the Backoff back to its initial condition +func (b *Backoff) Reset() { + b.numRetries = 0 + b.nextDelayMin = b.cfg.MinBackoff + b.nextDelayMax = doubleDuration(b.cfg.MinBackoff, b.cfg.MaxBackoff) +} + +// Ongoing returns true if caller should keep going +func (b *Backoff) Ongoing() bool { + // Stop if Context has errored or max retry count is exceeded + return b.ctx.Err() == nil && (b.cfg.MaxRetries == 0 || b.numRetries < b.cfg.MaxRetries) +} + +// Err returns the reason for terminating the backoff, or nil if it didn't terminate +func (b *Backoff) Err() error { + if b.ctx.Err() != nil { + return b.ctx.Err() + } + if b.cfg.MaxRetries != 0 && b.numRetries >= b.cfg.MaxRetries { + return fmt.Errorf("terminated after %d retries", b.numRetries) + } + return nil +} + +// NumRetries returns the number of retries so far +func (b *Backoff) NumRetries() int { + return b.numRetries +} + +// Wait sleeps for the backoff time then increases the retry count and backoff time +// Returns immediately if Context is terminated +func (b *Backoff) Wait() { + // Increase the number of retries and get the next delay + sleepTime := b.NextDelay() + + if b.Ongoing() { + select { + case <-b.ctx.Done(): + case <-time.After(sleepTime): + } + } +} + +func (b *Backoff) NextDelay() time.Duration { + b.numRetries++ + + // Handle the edge case the min and max have the same value + // (or due to some misconfig max is < min) + if b.nextDelayMin >= b.nextDelayMax { + return b.nextDelayMin + } + + // Add a jitter within the next exponential backoff range + sleepTime := b.nextDelayMin + time.Duration(rand.Int63n(int64(b.nextDelayMax-b.nextDelayMin))) + + // Apply the exponential backoff to calculate the next jitter + // range, unless we've already reached the max + if b.nextDelayMax < b.cfg.MaxBackoff { + b.nextDelayMin = doubleDuration(b.nextDelayMin, b.cfg.MaxBackoff) + b.nextDelayMax = doubleDuration(b.nextDelayMax, b.cfg.MaxBackoff) + } + + return sleepTime +} + +func doubleDuration(value time.Duration, max time.Duration) time.Duration { + value = value * 2 + + if value <= max { + return value + } + + return max +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/helpers/config.go b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/config.go new file mode 100644 index 000000000..7dce2260f --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/config.go @@ -0,0 +1,18 @@ +package helpers + +import ( + "io/ioutil" + + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" +) + +// LoadConfig read YAML-formatted config from filename into cfg. +func LoadConfig(filename string, cfg interface{}) error { + buf, err := ioutil.ReadFile(filename) + if err != nil { + return errors.Wrap(err, "Error reading config file") + } + + return yaml.UnmarshalStrict(buf, cfg) +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/helpers/logerror.go b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/logerror.go new file mode 100644 index 000000000..dd75ff5e9 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/logerror.go @@ -0,0 +1,13 @@ +package helpers + +import ( + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +// LogError logs any error returned by f; useful when deferring Close etc. 
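+// For example: defer LogError(logger, "closing response body", resp.Body.Close)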
+func LogError(logger log.Logger, message string, f func() error) { + if err := f(); err != nil { + level.Error(logger).Log("message", message, "error", err) + } +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/helpers/math.go b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/math.go new file mode 100644 index 000000000..2902683fa --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/helpers/math.go @@ -0,0 +1,9 @@ +package helpers + +// MinUint32 return the min of a and b. +func MinUint32(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/labelutil/label.go b/vendor/github.com/netobserv/loki-client-go/pkg/labelutil/label.go new file mode 100644 index 000000000..6df0b04d6 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/labelutil/label.go @@ -0,0 +1,99 @@ +package labelutil + +import ( + "bytes" + "encoding/csv" + "fmt" + "strings" + + "github.com/prometheus/common/model" + "gopkg.in/yaml.v2" +) + +// LabelSet is a labelSet that can be used as a flag. +type LabelSet struct { + model.LabelSet `yaml:",inline"` +} + +// String implements flag.Value +// Format: a=1,b=2 +func (v LabelSet) String() string { + if v.LabelSet == nil { + return "" + } + records := make([]string, 0, len(v.LabelSet)>>1) + for k, v := range v.LabelSet { + records = append(records, string(k)+"="+string(v)) + } + + var buf bytes.Buffer + w := csv.NewWriter(&buf) + if err := w.Write(records); err != nil { + panic(err) + } + w.Flush() + return "[" + strings.TrimSpace(buf.String()) + "]" +} + +// Set implements flag.Value +func (v *LabelSet) Set(s string) error { + var ss []string + n := strings.Count(s, "=") + switch n { + case 0: + return fmt.Errorf("%s must be formatted as key=value", s) + case 1: + ss = append(ss, strings.Trim(s, `"`)) + default: + r := csv.NewReader(strings.NewReader(s)) + var err error + ss, err = r.Read() + if err != nil { + return err + } + } + + out := model.LabelSet{} + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + out[model.LabelName(kv[0])] = model.LabelValue(kv[1]) + } + + if err := out.Validate(); err != nil { + return err + } + v.LabelSet = out + return nil +} + +// UnmarshalYAML the Unmarshaler interface of the yaml pkg. +func (v *LabelSet) UnmarshalYAML(unmarshal func(interface{}) error) error { + lbSet := model.LabelSet{} + err := unmarshal(&lbSet) + if err != nil { + return err + } + v.LabelSet = lbSet + return nil +} + +// MarshalYAML implements yaml.Marshaller. 
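+// The label set is converted to a plain map[string]string before being encoded.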
+func (v LabelSet) MarshalYAML() (interface{}, error) { + out, err := yaml.Marshal(ModelLabelSetToMap(v.LabelSet)) + if err != nil { + return nil, err + } + return string(out), nil +} + +// ModelLabelSetToMap convert a model.LabelSet to a map[string]string +func ModelLabelSetToMap(m model.LabelSet) map[string]string { + result := map[string]string{} + for k, v := range m { + result[string(k)] = string(v) + } + return result +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go new file mode 100644 index 000000000..3a68d2614 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/extensions.go @@ -0,0 +1,23 @@ +package logproto + +import "github.com/prometheus/prometheus/pkg/labels" + +// Note, this is not very efficient and use should be minimized as it requires label construction on each comparison +type SeriesIdentifiers []SeriesIdentifier + +func (ids SeriesIdentifiers) Len() int { return len(ids) } +func (ids SeriesIdentifiers) Swap(i, j int) { ids[i], ids[j] = ids[j], ids[i] } +func (ids SeriesIdentifiers) Less(i, j int) bool { + a, b := labels.FromMap(ids[i].Labels), labels.FromMap(ids[j].Labels) + return labels.Compare(a, b) <= 0 +} + +type Streams []Stream + +func (xs Streams) Len() int { return len(xs) } +func (xs Streams) Swap(i, j int) { xs[i], xs[j] = xs[j], xs[i] } +func (xs Streams) Less(i, j int) bool { return xs[i].Labels <= xs[j].Labels } + +func (s Series) Len() int { return len(s.Samples) } +func (s Series) Swap(i, j int) { s.Samples[i], s.Samples[j] = s.Samples[j], s.Samples[i] } +func (s Series) Less(i, j int) bool { return s.Samples[i].Timestamp < s.Samples[j].Timestamp } diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.pb.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.pb.go new file mode 100644 index 000000000..b1acaa0d5 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.pb.go @@ -0,0 +1,8031 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: pkg/logproto/logproto.proto + +package logproto + +import ( + bytes "bytes" + context "context" + encoding_binary "encoding/binary" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + _ "github.com/gogo/protobuf/types" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + grpc "google.golang.org/grpc" + io "io" + math "math" + reflect "reflect" + strconv "strconv" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Direction int32 + +const ( + FORWARD Direction = 0 + BACKWARD Direction = 1 +) + +var Direction_name = map[int32]string{ + 0: "FORWARD", + 1: "BACKWARD", +} + +var Direction_value = map[string]int32{ + "FORWARD": 0, + "BACKWARD": 1, +} + +func (Direction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{0} +} + +type PushRequest struct { + Streams []Stream `protobuf:"bytes,1,rep,name=streams,proto3,customtype=Stream" json:"streams"` +} + +func (m *PushRequest) Reset() { *m = PushRequest{} } +func (*PushRequest) ProtoMessage() {} +func (*PushRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{0} +} +func (m *PushRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PushRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PushRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PushRequest.Merge(m, src) +} +func (m *PushRequest) XXX_Size() int { + return m.Size() +} +func (m *PushRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PushRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PushRequest proto.InternalMessageInfo + +type PushResponse struct { +} + +func (m *PushResponse) Reset() { *m = PushResponse{} } +func (*PushResponse) ProtoMessage() {} +func (*PushResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{1} +} +func (m *PushResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PushResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PushResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PushResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PushResponse.Merge(m, src) +} +func (m *PushResponse) XXX_Size() int { + return m.Size() +} +func (m *PushResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PushResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PushResponse proto.InternalMessageInfo + +type QueryRequest struct { + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + Start time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end"` + Direction Direction `protobuf:"varint,5,opt,name=direction,proto3,enum=logproto.Direction" json:"direction,omitempty"` + Shards []string `protobuf:"bytes,7,rep,name=shards,proto3" json:"shards,omitempty"` +} + +func (m *QueryRequest) Reset() { *m = QueryRequest{} } +func (*QueryRequest) ProtoMessage() {} +func (*QueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{2} +} +func (m *QueryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
+func (m *QueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRequest.Merge(m, src) +} +func (m *QueryRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRequest proto.InternalMessageInfo + +func (m *QueryRequest) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *QueryRequest) GetLimit() uint32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *QueryRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *QueryRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +func (m *QueryRequest) GetDirection() Direction { + if m != nil { + return m.Direction + } + return FORWARD +} + +func (m *QueryRequest) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + +type SampleQueryRequest struct { + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` + Shards []string `protobuf:"bytes,4,rep,name=shards,proto3" json:"shards,omitempty"` +} + +func (m *SampleQueryRequest) Reset() { *m = SampleQueryRequest{} } +func (*SampleQueryRequest) ProtoMessage() {} +func (*SampleQueryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{3} +} +func (m *SampleQueryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleQueryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleQueryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SampleQueryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleQueryRequest.Merge(m, src) +} +func (m *SampleQueryRequest) XXX_Size() int { + return m.Size() +} +func (m *SampleQueryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SampleQueryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleQueryRequest proto.InternalMessageInfo + +func (m *SampleQueryRequest) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *SampleQueryRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *SampleQueryRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +func (m *SampleQueryRequest) GetShards() []string { + if m != nil { + return m.Shards + } + return nil +} + +type SampleQueryResponse struct { + Series []Series `protobuf:"bytes,1,rep,name=series,proto3,customtype=Series" json:"series,omitempty"` +} + +func (m *SampleQueryResponse) Reset() { *m = SampleQueryResponse{} } +func (*SampleQueryResponse) ProtoMessage() {} +func (*SampleQueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{4} +} +func (m *SampleQueryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SampleQueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SampleQueryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*SampleQueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SampleQueryResponse.Merge(m, src) +} +func (m *SampleQueryResponse) XXX_Size() int { + return m.Size() +} +func (m *SampleQueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SampleQueryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SampleQueryResponse proto.InternalMessageInfo + +type QueryResponse struct { + Streams []Stream `protobuf:"bytes,1,rep,name=streams,proto3,customtype=Stream" json:"streams,omitempty"` +} + +func (m *QueryResponse) Reset() { *m = QueryResponse{} } +func (*QueryResponse) ProtoMessage() {} +func (*QueryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{5} +} +func (m *QueryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResponse.Merge(m, src) +} +func (m *QueryResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResponse proto.InternalMessageInfo + +type LabelRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Values bool `protobuf:"varint,2,opt,name=values,proto3" json:"values,omitempty"` + Start *time.Time `protobuf:"bytes,3,opt,name=start,proto3,stdtime" json:"start,omitempty"` + End *time.Time `protobuf:"bytes,4,opt,name=end,proto3,stdtime" json:"end,omitempty"` +} + +func (m *LabelRequest) Reset() { *m = LabelRequest{} } +func (*LabelRequest) ProtoMessage() {} +func (*LabelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{6} +} +func (m *LabelRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelRequest.Merge(m, src) +} +func (m *LabelRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelRequest proto.InternalMessageInfo + +func (m *LabelRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelRequest) GetValues() bool { + if m != nil { + return m.Values + } + return false +} + +func (m *LabelRequest) GetStart() *time.Time { + if m != nil { + return m.Start + } + return nil +} + +func (m *LabelRequest) GetEnd() *time.Time { + if m != nil { + return m.End + } + return nil +} + +type LabelResponse struct { + Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (m *LabelResponse) Reset() { *m = LabelResponse{} } +func (*LabelResponse) ProtoMessage() {} +func (*LabelResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{7} +} +func (m *LabelResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelResponse) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelResponse.Merge(m, src) +} +func (m *LabelResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelResponse proto.InternalMessageInfo + +func (m *LabelResponse) GetValues() []string { + if m != nil { + return m.Values + } + return nil +} + +type StreamAdapter struct { + Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"` + Entries []EntryAdapter `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries"` +} + +func (m *StreamAdapter) Reset() { *m = StreamAdapter{} } +func (*StreamAdapter) ProtoMessage() {} +func (*StreamAdapter) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{8} +} +func (m *StreamAdapter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamAdapter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamAdapter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamAdapter) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamAdapter.Merge(m, src) +} +func (m *StreamAdapter) XXX_Size() int { + return m.Size() +} +func (m *StreamAdapter) XXX_DiscardUnknown() { + xxx_messageInfo_StreamAdapter.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamAdapter proto.InternalMessageInfo + +func (m *StreamAdapter) GetLabels() string { + if m != nil { + return m.Labels + } + return "" +} + +func (m *StreamAdapter) GetEntries() []EntryAdapter { + if m != nil { + return m.Entries + } + return nil +} + +type EntryAdapter struct { + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"` + Line string `protobuf:"bytes,2,opt,name=line,proto3" json:"line"` +} + +func (m *EntryAdapter) Reset() { *m = EntryAdapter{} } +func (*EntryAdapter) ProtoMessage() {} +func (*EntryAdapter) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{9} +} +func (m *EntryAdapter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EntryAdapter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EntryAdapter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EntryAdapter) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntryAdapter.Merge(m, src) +} +func (m *EntryAdapter) XXX_Size() int { + return m.Size() +} +func (m *EntryAdapter) XXX_DiscardUnknown() { + xxx_messageInfo_EntryAdapter.DiscardUnknown(m) +} + +var xxx_messageInfo_EntryAdapter proto.InternalMessageInfo + +func (m *EntryAdapter) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *EntryAdapter) GetLine() string { + if m != nil { + return m.Line + } + return "" +} + +type Sample struct { + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"ts"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value"` + Hash uint64 
`protobuf:"varint,3,opt,name=hash,proto3" json:"hash"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{10} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetHash() uint64 { + if m != nil { + return m.Hash + } + return 0 +} + +type Series struct { + Labels string `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` +} + +func (m *Series) Reset() { *m = Series{} } +func (*Series) ProtoMessage() {} +func (*Series) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{11} +} +func (m *Series) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Series.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Series) XXX_Merge(src proto.Message) { + xxx_messageInfo_Series.Merge(m, src) +} +func (m *Series) XXX_Size() int { + return m.Size() +} +func (m *Series) XXX_DiscardUnknown() { + xxx_messageInfo_Series.DiscardUnknown(m) +} + +var xxx_messageInfo_Series proto.InternalMessageInfo + +func (m *Series) GetLabels() string { + if m != nil { + return m.Labels + } + return "" +} + +func (m *Series) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +type TailRequest struct { + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + DelayFor uint32 `protobuf:"varint,3,opt,name=delayFor,proto3" json:"delayFor,omitempty"` + Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + Start time.Time `protobuf:"bytes,5,opt,name=start,proto3,stdtime" json:"start"` +} + +func (m *TailRequest) Reset() { *m = TailRequest{} } +func (*TailRequest) ProtoMessage() {} +func (*TailRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{12} +} +func (m *TailRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TailRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TailRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TailRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TailRequest.Merge(m, src) +} +func (m *TailRequest) XXX_Size() int { + return m.Size() +} +func (m *TailRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_TailRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TailRequest proto.InternalMessageInfo + +func (m *TailRequest) GetQuery() string { + if m != nil { + return m.Query + } + return "" +} + +func (m *TailRequest) GetDelayFor() uint32 { + if m != nil { + return m.DelayFor + } + return 0 +} + +func (m *TailRequest) GetLimit() uint32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *TailRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +type TailResponse struct { + Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3,customtype=Stream" json:"stream,omitempty"` + DroppedStreams []*DroppedStream `protobuf:"bytes,2,rep,name=droppedStreams,proto3" json:"droppedStreams,omitempty"` +} + +func (m *TailResponse) Reset() { *m = TailResponse{} } +func (*TailResponse) ProtoMessage() {} +func (*TailResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{13} +} +func (m *TailResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TailResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TailResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TailResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TailResponse.Merge(m, src) +} +func (m *TailResponse) XXX_Size() int { + return m.Size() +} +func (m *TailResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TailResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TailResponse proto.InternalMessageInfo + +func (m *TailResponse) GetDroppedStreams() []*DroppedStream { + if m != nil { + return m.DroppedStreams + } + return nil +} + +type SeriesRequest struct { + Start time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end"` + Groups []string `protobuf:"bytes,3,rep,name=groups,proto3" json:"groups,omitempty"` +} + +func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } +func (*SeriesRequest) ProtoMessage() {} +func (*SeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{14} +} +func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesRequest.Merge(m, src) +} +func (m *SeriesRequest) XXX_Size() int { + return m.Size() +} +func (m *SeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesRequest proto.InternalMessageInfo + +func (m *SeriesRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *SeriesRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +func (m *SeriesRequest) GetGroups() []string { + if m != nil { + return m.Groups + } + return nil +} + +type SeriesResponse struct { + Series []SeriesIdentifier `protobuf:"bytes,1,rep,name=series,proto3" json:"series"` +} + +func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } +func (*SeriesResponse) ProtoMessage() {} 
+func (*SeriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{15} +} +func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesResponse.Merge(m, src) +} +func (m *SeriesResponse) XXX_Size() int { + return m.Size() +} +func (m *SeriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesResponse proto.InternalMessageInfo + +func (m *SeriesResponse) GetSeries() []SeriesIdentifier { + if m != nil { + return m.Series + } + return nil +} + +type SeriesIdentifier struct { + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *SeriesIdentifier) Reset() { *m = SeriesIdentifier{} } +func (*SeriesIdentifier) ProtoMessage() {} +func (*SeriesIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{16} +} +func (m *SeriesIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesIdentifier.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SeriesIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesIdentifier.Merge(m, src) +} +func (m *SeriesIdentifier) XXX_Size() int { + return m.Size() +} +func (m *SeriesIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesIdentifier proto.InternalMessageInfo + +func (m *SeriesIdentifier) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +type DroppedStream struct { + From time.Time `protobuf:"bytes,1,opt,name=from,proto3,stdtime" json:"from"` + To time.Time `protobuf:"bytes,2,opt,name=to,proto3,stdtime" json:"to"` + Labels string `protobuf:"bytes,3,opt,name=labels,proto3" json:"labels,omitempty"` +} + +func (m *DroppedStream) Reset() { *m = DroppedStream{} } +func (*DroppedStream) ProtoMessage() {} +func (*DroppedStream) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{17} +} +func (m *DroppedStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DroppedStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DroppedStream.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DroppedStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_DroppedStream.Merge(m, src) +} +func (m *DroppedStream) XXX_Size() int { + return m.Size() +} +func (m *DroppedStream) XXX_DiscardUnknown() { + xxx_messageInfo_DroppedStream.DiscardUnknown(m) +} + +var xxx_messageInfo_DroppedStream proto.InternalMessageInfo + +func (m *DroppedStream) GetFrom() time.Time { + if m != nil { + return m.From + } + return time.Time{} +} + +func (m 
*DroppedStream) GetTo() time.Time { + if m != nil { + return m.To + } + return time.Time{} +} + +func (m *DroppedStream) GetLabels() string { + if m != nil { + return m.Labels + } + return "" +} + +type TimeSeriesChunk struct { + FromIngesterId string `protobuf:"bytes,1,opt,name=from_ingester_id,json=fromIngesterId,proto3" json:"from_ingester_id,omitempty"` + UserId string `protobuf:"bytes,2,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Labels []*LabelPair `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"` + Chunks []*Chunk `protobuf:"bytes,4,rep,name=chunks,proto3" json:"chunks,omitempty"` +} + +func (m *TimeSeriesChunk) Reset() { *m = TimeSeriesChunk{} } +func (*TimeSeriesChunk) ProtoMessage() {} +func (*TimeSeriesChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{18} +} +func (m *TimeSeriesChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesChunk.Merge(m, src) +} +func (m *TimeSeriesChunk) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesChunk) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesChunk proto.InternalMessageInfo + +func (m *TimeSeriesChunk) GetFromIngesterId() string { + if m != nil { + return m.FromIngesterId + } + return "" +} + +func (m *TimeSeriesChunk) GetUserId() string { + if m != nil { + return m.UserId + } + return "" +} + +func (m *TimeSeriesChunk) GetLabels() []*LabelPair { + if m != nil { + return m.Labels + } + return nil +} + +func (m *TimeSeriesChunk) GetChunks() []*Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +type LabelPair struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{19} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Chunk struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{20} +} 
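+// Editorial note (not generator output): the XXX_* methods below are generated
+// wire-format plumbing. XXX_Marshal routes deterministic marshaling through the
+// reflection-based xxx_messageInfo_Chunk helper so the encoding is byte-for-byte
+// stable, while the default path reuses the caller's buffer and calls the fast
+// generated MarshalTo. XXX_Merge and XXX_DiscardUnknown delegate to the same
+// helper, and XXX_Size calls the generated Size.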
+func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +func (m *Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type TransferChunksResponse struct { +} + +func (m *TransferChunksResponse) Reset() { *m = TransferChunksResponse{} } +func (*TransferChunksResponse) ProtoMessage() {} +func (*TransferChunksResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{21} +} +func (m *TransferChunksResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TransferChunksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TransferChunksResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TransferChunksResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferChunksResponse.Merge(m, src) +} +func (m *TransferChunksResponse) XXX_Size() int { + return m.Size() +} +func (m *TransferChunksResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TransferChunksResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferChunksResponse proto.InternalMessageInfo + +type TailersCountRequest struct { +} + +func (m *TailersCountRequest) Reset() { *m = TailersCountRequest{} } +func (*TailersCountRequest) ProtoMessage() {} +func (*TailersCountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{22} +} +func (m *TailersCountRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TailersCountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TailersCountRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TailersCountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TailersCountRequest.Merge(m, src) +} +func (m *TailersCountRequest) XXX_Size() int { + return m.Size() +} +func (m *TailersCountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TailersCountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TailersCountRequest proto.InternalMessageInfo + +type TailersCountResponse struct { + Count uint32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (m *TailersCountResponse) Reset() { *m = TailersCountResponse{} } +func (*TailersCountResponse) ProtoMessage() {} +func (*TailersCountResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{23} +} +func (m *TailersCountResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TailersCountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TailersCountResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if 
err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TailersCountResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TailersCountResponse.Merge(m, src) +} +func (m *TailersCountResponse) XXX_Size() int { + return m.Size() +} +func (m *TailersCountResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TailersCountResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TailersCountResponse proto.InternalMessageInfo + +func (m *TailersCountResponse) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +type GetChunkIDsRequest struct { + Matchers string `protobuf:"bytes,1,opt,name=matchers,proto3" json:"matchers,omitempty"` + Start time.Time `protobuf:"bytes,2,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,3,opt,name=end,proto3,stdtime" json:"end"` +} + +func (m *GetChunkIDsRequest) Reset() { *m = GetChunkIDsRequest{} } +func (*GetChunkIDsRequest) ProtoMessage() {} +func (*GetChunkIDsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{24} +} +func (m *GetChunkIDsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetChunkIDsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetChunkIDsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetChunkIDsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetChunkIDsRequest.Merge(m, src) +} +func (m *GetChunkIDsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetChunkIDsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetChunkIDsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetChunkIDsRequest proto.InternalMessageInfo + +func (m *GetChunkIDsRequest) GetMatchers() string { + if m != nil { + return m.Matchers + } + return "" +} + +func (m *GetChunkIDsRequest) GetStart() time.Time { + if m != nil { + return m.Start + } + return time.Time{} +} + +func (m *GetChunkIDsRequest) GetEnd() time.Time { + if m != nil { + return m.End + } + return time.Time{} +} + +type GetChunkIDsResponse struct { + ChunkIDs []string `protobuf:"bytes,1,rep,name=chunkIDs,proto3" json:"chunkIDs,omitempty"` +} + +func (m *GetChunkIDsResponse) Reset() { *m = GetChunkIDsResponse{} } +func (*GetChunkIDsResponse) ProtoMessage() {} +func (*GetChunkIDsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c28a5f14f1f4c79a, []int{25} +} +func (m *GetChunkIDsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetChunkIDsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetChunkIDsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetChunkIDsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetChunkIDsResponse.Merge(m, src) +} +func (m *GetChunkIDsResponse) XXX_Size() int { + return m.Size() +} +func (m *GetChunkIDsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetChunkIDsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetChunkIDsResponse proto.InternalMessageInfo + +func (m *GetChunkIDsResponse) GetChunkIDs() []string { + if m != nil { + return m.ChunkIDs + } + return nil +} + +func init() { + proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value) + proto.RegisterType((*PushRequest)(nil), 
"logproto.PushRequest") + proto.RegisterType((*PushResponse)(nil), "logproto.PushResponse") + proto.RegisterType((*QueryRequest)(nil), "logproto.QueryRequest") + proto.RegisterType((*SampleQueryRequest)(nil), "logproto.SampleQueryRequest") + proto.RegisterType((*SampleQueryResponse)(nil), "logproto.SampleQueryResponse") + proto.RegisterType((*QueryResponse)(nil), "logproto.QueryResponse") + proto.RegisterType((*LabelRequest)(nil), "logproto.LabelRequest") + proto.RegisterType((*LabelResponse)(nil), "logproto.LabelResponse") + proto.RegisterType((*StreamAdapter)(nil), "logproto.StreamAdapter") + proto.RegisterType((*EntryAdapter)(nil), "logproto.EntryAdapter") + proto.RegisterType((*Sample)(nil), "logproto.Sample") + proto.RegisterType((*Series)(nil), "logproto.Series") + proto.RegisterType((*TailRequest)(nil), "logproto.TailRequest") + proto.RegisterType((*TailResponse)(nil), "logproto.TailResponse") + proto.RegisterType((*SeriesRequest)(nil), "logproto.SeriesRequest") + proto.RegisterType((*SeriesResponse)(nil), "logproto.SeriesResponse") + proto.RegisterType((*SeriesIdentifier)(nil), "logproto.SeriesIdentifier") + proto.RegisterMapType((map[string]string)(nil), "logproto.SeriesIdentifier.LabelsEntry") + proto.RegisterType((*DroppedStream)(nil), "logproto.DroppedStream") + proto.RegisterType((*TimeSeriesChunk)(nil), "logproto.TimeSeriesChunk") + proto.RegisterType((*LabelPair)(nil), "logproto.LabelPair") + proto.RegisterType((*Chunk)(nil), "logproto.Chunk") + proto.RegisterType((*TransferChunksResponse)(nil), "logproto.TransferChunksResponse") + proto.RegisterType((*TailersCountRequest)(nil), "logproto.TailersCountRequest") + proto.RegisterType((*TailersCountResponse)(nil), "logproto.TailersCountResponse") + proto.RegisterType((*GetChunkIDsRequest)(nil), "logproto.GetChunkIDsRequest") + proto.RegisterType((*GetChunkIDsResponse)(nil), "logproto.GetChunkIDsResponse") +} + +func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } + +var fileDescriptor_c28a5f14f1f4c79a = []byte{ + // 1366 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4b, 0x8f, 0x13, 0xc7, + 0x13, 0x77, 0xfb, 0x31, 0xb6, 0xcb, 0x0f, 0xac, 0xde, 0x65, 0xd7, 0x7f, 0x03, 0x63, 0xab, 0x85, + 0xc0, 0xfa, 0x87, 0x78, 0x83, 0xf3, 0xe2, 0x91, 0x87, 0xd6, 0x6c, 0x08, 0x4b, 0x50, 0x80, 0x01, + 0x09, 0x09, 0x29, 0x42, 0xb3, 0x76, 0xaf, 0x3d, 0x5a, 0x7b, 0xc6, 0xcc, 0xb4, 0x91, 0xf6, 0x96, + 0x0f, 0x90, 0x48, 0xdc, 0x72, 0xe0, 0x9a, 0x43, 0x94, 0x43, 0x3e, 0x07, 0x47, 0x94, 0x13, 0xca, + 0xc1, 0x09, 0xe6, 0x12, 0xad, 0x72, 0xe0, 0x23, 0x44, 0xfd, 0x98, 0x99, 0xb6, 0xd9, 0x4d, 0x30, + 0x97, 0x5c, 0x3c, 0x5d, 0xd5, 0xd5, 0xd5, 0x55, 0xbf, 0xfe, 0x55, 0x75, 0x1b, 0x4e, 0x8c, 0xf7, + 0xfa, 0x1b, 0x43, 0xaf, 0x3f, 0xf6, 0x3d, 0xe6, 0x45, 0x83, 0x96, 0xf8, 0xc5, 0xb9, 0x50, 0xae, + 0xd5, 0xfb, 0x9e, 0xd7, 0x1f, 0xd2, 0x0d, 0x21, 0xed, 0x4c, 0x76, 0x37, 0x98, 0x33, 0xa2, 0x01, + 0xb3, 0x47, 0x63, 0x69, 0x5a, 0x7b, 0xb7, 0xef, 0xb0, 0xc1, 0x64, 0xa7, 0xd5, 0xf5, 0x46, 0x1b, + 0x7d, 0xaf, 0xef, 0xc5, 0x96, 0x5c, 0x92, 0xde, 0xf9, 0x48, 0x9a, 0x93, 0x7b, 0x50, 0xb8, 0x35, + 0x09, 0x06, 0x16, 0x7d, 0x38, 0xa1, 0x01, 0xc3, 0xd7, 0x20, 0x1b, 0x30, 0x9f, 0xda, 0xa3, 0xa0, + 0x8a, 0x1a, 0xa9, 0x66, 0xa1, 0xbd, 0xde, 0x8a, 0x42, 0xb9, 0x23, 0x26, 0x36, 0x7b, 0xf6, 0x98, + 0x51, 0xbf, 0x73, 0xfc, 0xb7, 0x69, 0xdd, 0x90, 0xaa, 0x83, 0x69, 0x3d, 0x5c, 0x65, 0x85, 0x03, + 0x52, 0x86, 0xa2, 0x74, 0x1c, 0x8c, 0x3d, 0x37, 0xa0, 0xe4, 0x49, 0x12, 0x8a, 
0xb7, 0x27, 0xd4, + 0xdf, 0x0f, 0xb7, 0xaa, 0x41, 0x2e, 0xa0, 0x43, 0xda, 0x65, 0x9e, 0x5f, 0x45, 0x0d, 0xd4, 0xcc, + 0x5b, 0x91, 0x8c, 0x57, 0x21, 0x33, 0x74, 0x46, 0x0e, 0xab, 0x26, 0x1b, 0xa8, 0x59, 0xb2, 0xa4, + 0x80, 0x2f, 0x41, 0x26, 0x60, 0xb6, 0xcf, 0xaa, 0xa9, 0x06, 0x6a, 0x16, 0xda, 0xb5, 0x96, 0xc4, + 0xa2, 0x15, 0x66, 0xd8, 0xba, 0x1b, 0x62, 0xd1, 0xc9, 0x3d, 0x9d, 0xd6, 0x13, 0x8f, 0x7f, 0xaf, + 0x23, 0x4b, 0x2e, 0xc1, 0x1f, 0x41, 0x8a, 0xba, 0xbd, 0x6a, 0x7a, 0x89, 0x95, 0x7c, 0x01, 0x3e, + 0x0f, 0xf9, 0x9e, 0xe3, 0xd3, 0x2e, 0x73, 0x3c, 0xb7, 0x9a, 0x69, 0xa0, 0x66, 0xb9, 0xbd, 0x12, + 0x43, 0xb2, 0x15, 0x4e, 0x59, 0xb1, 0x15, 0x3e, 0x07, 0x46, 0x30, 0xb0, 0xfd, 0x5e, 0x50, 0xcd, + 0x36, 0x52, 0xcd, 0x7c, 0x67, 0xf5, 0x60, 0x5a, 0xaf, 0x48, 0xcd, 0x39, 0x6f, 0xe4, 0x30, 0x3a, + 0x1a, 0xb3, 0x7d, 0x4b, 0xd9, 0x5c, 0x4f, 0xe7, 0x8c, 0x4a, 0x96, 0xfc, 0x8a, 0x00, 0xdf, 0xb1, + 0x47, 0xe3, 0x21, 0x7d, 0x63, 0x8c, 0x22, 0x34, 0x92, 0x6f, 0x8d, 0x46, 0x6a, 0x59, 0x34, 0xe2, + 0xd4, 0xd2, 0xff, 0x9e, 0x1a, 0xb9, 0x09, 0x2b, 0x73, 0x39, 0x49, 0x26, 0xe0, 0x0b, 0x60, 0x04, + 0xd4, 0x77, 0x68, 0x48, 0xb1, 0x8a, 0x46, 0x31, 0xa1, 0xef, 0x94, 0x9f, 0x4e, 0xeb, 0x48, 0xf0, + 0x4b, 0xc8, 0x96, 0xb2, 0x27, 0x16, 0x94, 0xe6, 0x5d, 0x6d, 0xbe, 0x31, 0x5d, 0x63, 0x97, 0x42, + 0x1d, 0xf3, 0xf4, 0x17, 0x04, 0xc5, 0x1b, 0xf6, 0x0e, 0x1d, 0x86, 0x98, 0x63, 0x48, 0xbb, 0xf6, + 0x88, 0x2a, 0xbc, 0xc5, 0x18, 0xaf, 0x81, 0xf1, 0xc8, 0x1e, 0x4e, 0x68, 0x20, 0xc0, 0xce, 0x59, + 0x4a, 0x5a, 0x96, 0x91, 0xe8, 0xad, 0x19, 0x89, 0xa2, 0x33, 0x20, 0x67, 0xa1, 0xa4, 0xe2, 0x55, + 0x20, 0xc4, 0xc1, 0x71, 0x0c, 0xf2, 0x61, 0x70, 0xe4, 0x11, 0x94, 0xe6, 0x30, 0xc0, 0x04, 0x8c, + 0x21, 0x5f, 0x19, 0xc8, 0xdc, 0x3a, 0x70, 0x30, 0xad, 0x2b, 0x8d, 0xa5, 0xbe, 0x1c, 0x51, 0xea, + 0x32, 0x71, 0x3a, 0x49, 0x81, 0xe8, 0x5a, 0x8c, 0xe8, 0x17, 0x2e, 0xf3, 0xf7, 0x43, 0x40, 0x8f, + 0x71, 0x66, 0xf0, 0xca, 0x57, 0xe6, 0x56, 0x38, 0x20, 0x8f, 0xa0, 0xa8, 0x5b, 0xe2, 0x6b, 0x90, + 0x8f, 0x9a, 0x94, 0xd8, 0xf9, 0x9f, 0xd3, 0x2d, 0x2b, 0xc7, 0x49, 0x16, 0x88, 0xa4, 0xe3, 0xc5, + 0xf8, 0x24, 0xa4, 0x87, 0x8e, 0x4b, 0xc5, 0x21, 0xe4, 0x3b, 0xb9, 0x83, 0x69, 0x5d, 0xc8, 0x96, + 0xf8, 0x25, 0x23, 0x30, 0x24, 0xdd, 0xf0, 0xe9, 0xc5, 0x1d, 0x53, 0x1d, 0x43, 0x7a, 0xd4, 0xbd, + 0xd5, 0x21, 0x23, 0x90, 0x12, 0xee, 0x50, 0x27, 0x7f, 0x30, 0xad, 0x4b, 0x85, 0x25, 0x3f, 0x7c, + 0xbb, 0x81, 0x1d, 0x0c, 0xc4, 0xe1, 0xa6, 0xe5, 0x76, 0x5c, 0xb6, 0xc4, 0x2f, 0x71, 0x40, 0xd1, + 0xf3, 0x8d, 0x70, 0xbd, 0x0c, 0xd9, 0x40, 0x04, 0x17, 0xe2, 0xaa, 0xb3, 0x5e, 0x4c, 0xc4, 0x88, + 0x2a, 0x43, 0x2b, 0x1c, 0x90, 0x1f, 0x10, 0x14, 0xee, 0xda, 0x4e, 0x44, 0xd1, 0x55, 0xc8, 0x3c, + 0xe4, 0x75, 0xa0, 0x38, 0x2a, 0x05, 0xde, 0x2c, 0x7a, 0x74, 0x68, 0xef, 0x5f, 0xf5, 0x7c, 0x11, + 0x72, 0xc9, 0x8a, 0xe4, 0xb8, 0xa1, 0xa6, 0x0f, 0x6d, 0xa8, 0x99, 0xa5, 0x5b, 0xc8, 0xf5, 0x74, + 0x2e, 0x59, 0x49, 0x91, 0xef, 0x10, 0x14, 0x65, 0x64, 0x8a, 0x8c, 0x97, 0xc1, 0x90, 0x95, 0xa5, + 0x4e, 0xfa, 0xc8, 0x82, 0x04, 0xad, 0x18, 0xd5, 0x12, 0xfc, 0x39, 0x94, 0x7b, 0xbe, 0x37, 0x1e, + 0xd3, 0xde, 0x1d, 0x55, 0xd5, 0xc9, 0xc5, 0xaa, 0xde, 0xd2, 0xe7, 0xad, 0x05, 0x73, 0xf2, 0x04, + 0x41, 0x49, 0xf5, 0x0c, 0x05, 0x55, 0x94, 0x22, 0x7a, 0xeb, 0x2e, 0x99, 0x5c, 0xb6, 0x4b, 0xae, + 0x81, 0xd1, 0xf7, 0xbd, 0xc9, 0x38, 0xa8, 0xa6, 0x64, 0x41, 0x4a, 0x89, 0x5c, 0x87, 0x72, 0x18, + 0xdc, 0x11, 0xad, 0xb0, 0xb6, 0xd8, 0x0a, 0xb7, 0x7b, 0xd4, 0x65, 0xce, 0xae, 0x43, 0xfd, 0x4e, + 0x9a, 0x6f, 0x12, 0xb5, 0xc2, 0xef, 0x11, 0x54, 0x16, 0x4d, 0xf0, 0x67, 0x1a, 0x11, 0xb9, 0xbb, + 0x33, 
0x47, 0xbb, 0x6b, 0x89, 0x1e, 0x12, 0x88, 0x42, 0x0d, 0x49, 0x5a, 0xbb, 0x08, 0x05, 0x4d, + 0x8d, 0x2b, 0x90, 0xda, 0xa3, 0x21, 0xc9, 0xf8, 0x90, 0xd3, 0x28, 0x2e, 0x99, 0xbc, 0xaa, 0x93, + 0x4b, 0xc9, 0x0b, 0x88, 0x53, 0xb4, 0x34, 0x77, 0x36, 0xf8, 0x02, 0xa4, 0x77, 0x7d, 0x6f, 0xb4, + 0x14, 0xf0, 0x62, 0x05, 0xfe, 0x00, 0x92, 0xcc, 0x5b, 0x0a, 0xf6, 0x24, 0xf3, 0x38, 0xea, 0x2a, + 0xf9, 0x94, 0x08, 0x4e, 0x49, 0xe4, 0x67, 0x04, 0xc7, 0xf8, 0x1a, 0x89, 0xc0, 0x95, 0xc1, 0xc4, + 0xdd, 0xc3, 0x4d, 0xa8, 0xf0, 0x9d, 0x1e, 0x38, 0x6e, 0x9f, 0x06, 0x8c, 0xfa, 0x0f, 0x9c, 0x9e, + 0x4a, 0xb3, 0xcc, 0xf5, 0xdb, 0x4a, 0xbd, 0xdd, 0xc3, 0xeb, 0x90, 0x9d, 0x04, 0xd2, 0x40, 0xe6, + 0x6c, 0x70, 0x71, 0xbb, 0x87, 0xdf, 0xd1, 0xb6, 0xe3, 0x58, 0x6b, 0xaf, 0x02, 0x81, 0xe1, 0x2d, + 0xdb, 0xf1, 0xa3, 0xea, 0x3f, 0x0b, 0x46, 0x97, 0x6f, 0x2c, 0xef, 0xcd, 0x42, 0xfb, 0x58, 0x6c, + 0x2c, 0x02, 0xb2, 0xd4, 0x34, 0xf9, 0x10, 0xf2, 0xd1, 0xea, 0x43, 0x6f, 0xa2, 0x43, 0x4f, 0x80, + 0x9c, 0x80, 0x8c, 0x4c, 0x0c, 0x43, 0xba, 0x67, 0x33, 0x5b, 0x2c, 0x29, 0x5a, 0x62, 0x4c, 0xaa, + 0xb0, 0x76, 0xd7, 0xb7, 0xdd, 0x60, 0x97, 0xfa, 0xc2, 0x28, 0xa2, 0x1f, 0x39, 0x0e, 0x2b, 0xbc, + 0x78, 0xa9, 0x1f, 0x5c, 0xf1, 0x26, 0x2e, 0x53, 0x35, 0x43, 0xce, 0xc1, 0xea, 0xbc, 0x5a, 0xb1, + 0x75, 0x15, 0x32, 0x5d, 0xae, 0x10, 0xde, 0x4b, 0x96, 0x14, 0xc8, 0x8f, 0x08, 0xf0, 0x97, 0x94, + 0x09, 0xd7, 0xdb, 0x5b, 0x81, 0xf6, 0x74, 0x19, 0xd9, 0xac, 0x3b, 0xa0, 0x7e, 0x10, 0x3e, 0x5d, + 0x42, 0xf9, 0xbf, 0x78, 0xba, 0x90, 0xf3, 0xb0, 0x32, 0x17, 0xa5, 0xca, 0xa9, 0x06, 0xb9, 0xae, + 0xd2, 0xa9, 0xeb, 0x33, 0x92, 0xff, 0x7f, 0x06, 0xf2, 0xd1, 0x03, 0x0f, 0x17, 0x20, 0x7b, 0xf5, + 0xa6, 0x75, 0x6f, 0xd3, 0xda, 0xaa, 0x24, 0x70, 0x11, 0x72, 0x9d, 0xcd, 0x2b, 0x5f, 0x09, 0x09, + 0xb5, 0x37, 0xc1, 0xe0, 0x4f, 0x5d, 0xea, 0xe3, 0x8f, 0x21, 0xcd, 0x47, 0xf8, 0x78, 0x7c, 0xbe, + 0xda, 0xeb, 0xba, 0xb6, 0xb6, 0xa8, 0x56, 0xe7, 0x90, 0x68, 0xff, 0x95, 0x82, 0x2c, 0x7f, 0xda, + 0xf0, 0x2a, 0xfe, 0x04, 0x32, 0xe2, 0x95, 0x83, 0x35, 0x73, 0xfd, 0x55, 0x58, 0x5b, 0x7f, 0x4d, + 0x1f, 0xfa, 0x79, 0x0f, 0xe1, 0xaf, 0xa1, 0x20, 0x94, 0xea, 0x2a, 0x3c, 0xb9, 0x78, 0xcd, 0xcc, + 0x79, 0x3a, 0x75, 0xc4, 0xac, 0xe6, 0xef, 0x12, 0x64, 0x04, 0x23, 0xf5, 0x68, 0xf4, 0xf7, 0x92, + 0x1e, 0xcd, 0xdc, 0xbb, 0x84, 0x24, 0xf0, 0x45, 0x48, 0x73, 0x22, 0xe9, 0x70, 0x68, 0xd7, 0x98, + 0x0e, 0x87, 0x7e, 0x87, 0x88, 0x6d, 0x3f, 0x8d, 0x6e, 0xd7, 0xf5, 0xc5, 0x26, 0x16, 0x2e, 0xaf, + 0xbe, 0x3e, 0x11, 0xed, 0x7c, 0x53, 0x5e, 0x4b, 0x21, 0x85, 0xf1, 0xa9, 0xf9, 0xad, 0x16, 0x18, + 0x5f, 0x33, 0x8f, 0x9a, 0x8e, 0x1c, 0xde, 0x80, 0x82, 0x46, 0x1f, 0x1d, 0xd6, 0xd7, 0xb9, 0xaf, + 0xc3, 0x7a, 0x08, 0xe7, 0x48, 0xa2, 0xfd, 0x0d, 0xe4, 0xc2, 0x1e, 0x83, 0x6f, 0x43, 0x79, 0xbe, + 0x3c, 0xf1, 0xff, 0xb4, 0x68, 0xe6, 0x1b, 0x57, 0xad, 0xa1, 0x4d, 0x1d, 0x5e, 0xd3, 0x89, 0x26, + 0xea, 0xdc, 0x7f, 0xf6, 0xc2, 0x4c, 0x3c, 0x7f, 0x61, 0x26, 0x5e, 0xbd, 0x30, 0xd1, 0xb7, 0x33, + 0x13, 0xfd, 0x34, 0x33, 0xd1, 0xd3, 0x99, 0x89, 0x9e, 0xcd, 0x4c, 0xf4, 0xc7, 0xcc, 0x44, 0x7f, + 0xce, 0xcc, 0xc4, 0xab, 0x99, 0x89, 0x1e, 0xbf, 0x34, 0x13, 0xcf, 0x5e, 0x9a, 0x89, 0xe7, 0x2f, + 0xcd, 0xc4, 0xfd, 0xd3, 0xfa, 0x3f, 0x47, 0xdf, 0xde, 0xb5, 0x5d, 0x7b, 0x63, 0xe8, 0xed, 0x39, + 0x1b, 0xfa, 0x3f, 0xd3, 0x1d, 0x43, 0x7c, 0xde, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x21, + 0xc8, 0x3d, 0xb0, 0x0e, 0x00, 0x00, +} + +func (x Direction) String() string { + s, ok := Direction_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *PushRequest) Equal(that interface{}) bool 
{ + if that == nil { + return this == nil + } + + that1, ok := that.(*PushRequest) + if !ok { + that2, ok := that.(PushRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Streams) != len(that1.Streams) { + return false + } + for i := range this.Streams { + if !this.Streams[i].Equal(that1.Streams[i]) { + return false + } + } + return true +} +func (this *PushResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PushResponse) + if !ok { + that2, ok := that.(PushResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *QueryRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryRequest) + if !ok { + that2, ok := that.(QueryRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Selector != that1.Selector { + return false + } + if this.Limit != that1.Limit { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + if !this.End.Equal(that1.End) { + return false + } + if this.Direction != that1.Direction { + return false + } + if len(this.Shards) != len(that1.Shards) { + return false + } + for i := range this.Shards { + if this.Shards[i] != that1.Shards[i] { + return false + } + } + return true +} +func (this *SampleQueryRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleQueryRequest) + if !ok { + that2, ok := that.(SampleQueryRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Selector != that1.Selector { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + if !this.End.Equal(that1.End) { + return false + } + if len(this.Shards) != len(that1.Shards) { + return false + } + for i := range this.Shards { + if this.Shards[i] != that1.Shards[i] { + return false + } + } + return true +} +func (this *SampleQueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SampleQueryResponse) + if !ok { + that2, ok := that.(SampleQueryResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Series) != len(that1.Series) { + return false + } + for i := range this.Series { + if !this.Series[i].Equal(that1.Series[i]) { + return false + } + } + return true +} +func (this *QueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QueryResponse) + if !ok { + that2, ok := that.(QueryResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Streams) != len(that1.Streams) { + return false + } + for i := range this.Streams { + if !this.Streams[i].Equal(that1.Streams[i]) { + return false + } + } + return true +} +func (this *LabelRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelRequest) + if !ok { + that2, ok := that.(LabelRequest) + if ok { + 
that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Values != that1.Values { + return false + } + if that1.Start == nil { + if this.Start != nil { + return false + } + } else if !this.Start.Equal(*that1.Start) { + return false + } + if that1.End == nil { + if this.End != nil { + return false + } + } else if !this.End.Equal(*that1.End) { + return false + } + return true +} +func (this *LabelResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelResponse) + if !ok { + that2, ok := that.(LabelResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if this.Values[i] != that1.Values[i] { + return false + } + } + return true +} +func (this *StreamAdapter) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamAdapter) + if !ok { + that2, ok := that.(StreamAdapter) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Labels != that1.Labels { + return false + } + if len(this.Entries) != len(that1.Entries) { + return false + } + for i := range this.Entries { + if !this.Entries[i].Equal(&that1.Entries[i]) { + return false + } + } + return true +} +func (this *EntryAdapter) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EntryAdapter) + if !ok { + that2, ok := that.(EntryAdapter) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Timestamp.Equal(that1.Timestamp) { + return false + } + if this.Line != that1.Line { + return false + } + return true +} +func (this *Sample) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Sample) + if !ok { + that2, ok := that.(Sample) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Timestamp != that1.Timestamp { + return false + } + if this.Value != that1.Value { + return false + } + if this.Hash != that1.Hash { + return false + } + return true +} +func (this *Series) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Series) + if !ok { + that2, ok := that.(Series) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Labels != that1.Labels { + return false + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { + return false + } + } + return true +} +func (this *TailRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TailRequest) + if !ok { + that2, ok := that.(TailRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Query != that1.Query { + return false + } + if this.DelayFor != that1.DelayFor { + return 
false + } + if this.Limit != that1.Limit { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + return true +} +func (this *TailResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TailResponse) + if !ok { + that2, ok := that.(TailResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Stream == nil { + if this.Stream != nil { + return false + } + } else if !this.Stream.Equal(*that1.Stream) { + return false + } + if len(this.DroppedStreams) != len(that1.DroppedStreams) { + return false + } + for i := range this.DroppedStreams { + if !this.DroppedStreams[i].Equal(that1.DroppedStreams[i]) { + return false + } + } + return true +} +func (this *SeriesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesRequest) + if !ok { + that2, ok := that.(SeriesRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + if !this.End.Equal(that1.End) { + return false + } + if len(this.Groups) != len(that1.Groups) { + return false + } + for i := range this.Groups { + if this.Groups[i] != that1.Groups[i] { + return false + } + } + return true +} +func (this *SeriesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse) + if !ok { + that2, ok := that.(SeriesResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Series) != len(that1.Series) { + return false + } + for i := range this.Series { + if !this.Series[i].Equal(&that1.Series[i]) { + return false + } + } + return true +} +func (this *SeriesIdentifier) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesIdentifier) + if !ok { + that2, ok := that.(SeriesIdentifier) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if this.Labels[i] != that1.Labels[i] { + return false + } + } + return true +} +func (this *DroppedStream) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DroppedStream) + if !ok { + that2, ok := that.(DroppedStream) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.From.Equal(that1.From) { + return false + } + if !this.To.Equal(that1.To) { + return false + } + if this.Labels != that1.Labels { + return false + } + return true +} +func (this *TimeSeriesChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesChunk) + if !ok { + that2, ok := that.(TimeSeriesChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FromIngesterId != that1.FromIngesterId { + return false + } + if this.UserId != that1.UserId { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range 
this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + if len(this.Chunks) != len(that1.Chunks) { + return false + } + for i := range this.Chunks { + if !this.Chunks[i].Equal(that1.Chunks[i]) { + return false + } + } + return true +} +func (this *LabelPair) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LabelPair) + if !ok { + that2, ok := that.(LabelPair) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Chunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Chunk) + if !ok { + that2, ok := that.(Chunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *TransferChunksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TransferChunksResponse) + if !ok { + that2, ok := that.(TransferChunksResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *TailersCountRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TailersCountRequest) + if !ok { + that2, ok := that.(TailersCountRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *TailersCountResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TailersCountResponse) + if !ok { + that2, ok := that.(TailersCountResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Count != that1.Count { + return false + } + return true +} +func (this *GetChunkIDsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetChunkIDsRequest) + if !ok { + that2, ok := that.(GetChunkIDsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Matchers != that1.Matchers { + return false + } + if !this.Start.Equal(that1.Start) { + return false + } + if !this.End.Equal(that1.End) { + return false + } + return true +} +func (this *GetChunkIDsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetChunkIDsResponse) + if !ok { + that2, ok := that.(GetChunkIDsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.ChunkIDs) != len(that1.ChunkIDs) { + return false + } + for i := range this.ChunkIDs { + if this.ChunkIDs[i] != that1.ChunkIDs[i] { + return false + } + } + return true +} +func (this *PushRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.PushRequest{") + s = append(s, "Streams: "+fmt.Sprintf("%#v", 
this.Streams)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PushResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&logproto.PushResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&logproto.QueryRequest{") + s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "Direction: "+fmt.Sprintf("%#v", this.Direction)+",\n") + s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SampleQueryRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.SampleQueryRequest{") + s = append(s, "Selector: "+fmt.Sprintf("%#v", this.Selector)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "Shards: "+fmt.Sprintf("%#v", this.Shards)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SampleQueryResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.SampleQueryResponse{") + s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QueryResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.QueryResponse{") + s = append(s, "Streams: "+fmt.Sprintf("%#v", this.Streams)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.LabelRequest{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.LabelResponse{") + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StreamAdapter) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.StreamAdapter{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Entries != nil { + vs := make([]*EntryAdapter, len(this.Entries)) + for i := range vs { + vs[i] = &this.Entries[i] + } + s = append(s, "Entries: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EntryAdapter) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.EntryAdapter{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "Line: "+fmt.Sprintf("%#v", this.Line)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Sample) GoString() string { + if this == nil { + 
return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.Sample{") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "Hash: "+fmt.Sprintf("%#v", this.Hash)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Series) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.Series{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + if this.Samples != nil { + vs := make([]*Sample, len(this.Samples)) + for i := range vs { + vs[i] = &this.Samples[i] + } + s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TailRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.TailRequest{") + s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") + s = append(s, "DelayFor: "+fmt.Sprintf("%#v", this.DelayFor)+",\n") + s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TailResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.TailResponse{") + s = append(s, "Stream: "+fmt.Sprintf("%#v", this.Stream)+",\n") + if this.DroppedStreams != nil { + s = append(s, "DroppedStreams: "+fmt.Sprintf("%#v", this.DroppedStreams)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SeriesRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.SeriesRequest{") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "Groups: "+fmt.Sprintf("%#v", this.Groups)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SeriesResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.SeriesResponse{") + if this.Series != nil { + vs := make([]*SeriesIdentifier, len(this.Series)) + for i := range vs { + vs[i] = &this.Series[i] + } + s = append(s, "Series: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SeriesIdentifier) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.SeriesIdentifier{") + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%#v: %#v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + if this.Labels != nil { + s = append(s, "Labels: "+mapStringForLabels+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DroppedStream) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.DroppedStream{") + s = append(s, "From: "+fmt.Sprintf("%#v", this.From)+",\n") + s = append(s, "To: "+fmt.Sprintf("%#v", this.To)+",\n") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeriesChunk) 
GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&logproto.TimeSeriesChunk{") + s = append(s, "FromIngesterId: "+fmt.Sprintf("%#v", this.FromIngesterId)+",\n") + s = append(s, "UserId: "+fmt.Sprintf("%#v", this.UserId)+",\n") + if this.Labels != nil { + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + } + if this.Chunks != nil { + s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LabelPair) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&logproto.LabelPair{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Chunk) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.Chunk{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TransferChunksResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&logproto.TransferChunksResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TailersCountRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&logproto.TailersCountRequest{") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TailersCountResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.TailersCountResponse{") + s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetChunkIDsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&logproto.GetChunkIDsRequest{") + s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n") + s = append(s, "Start: "+fmt.Sprintf("%#v", this.Start)+",\n") + s = append(s, "End: "+fmt.Sprintf("%#v", this.End)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetChunkIDsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&logproto.GetChunkIDsResponse{") + s = append(s, "ChunkIDs: "+fmt.Sprintf("%#v", this.ChunkIDs)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringLogproto(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// PusherClient is the client API for Pusher service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type PusherClient interface { + Push(ctx context.Context, in *PushRequest, opts ...grpc.CallOption) (*PushResponse, error) +} + +type pusherClient struct { + cc *grpc.ClientConn +} + +func NewPusherClient(cc *grpc.ClientConn) PusherClient { + return &pusherClient{cc} +} + +func (c *pusherClient) Push(ctx context.Context, in *PushRequest, opts ...grpc.CallOption) (*PushResponse, error) { + out := new(PushResponse) + err := c.cc.Invoke(ctx, "/logproto.Pusher/Push", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PusherServer is the server API for Pusher service. +type PusherServer interface { + Push(context.Context, *PushRequest) (*PushResponse, error) +} + +func RegisterPusherServer(s *grpc.Server, srv PusherServer) { + s.RegisterService(&_Pusher_serviceDesc, srv) +} + +func _Pusher_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PushRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PusherServer).Push(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/logproto.Pusher/Push", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PusherServer).Push(ctx, req.(*PushRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Pusher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "logproto.Pusher", + HandlerType: (*PusherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Push", + Handler: _Pusher_Push_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/logproto/logproto.proto", +} + +// QuerierClient is the client API for Querier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QuerierClient interface { + Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Querier_QueryClient, error) + QuerySample(ctx context.Context, in *SampleQueryRequest, opts ...grpc.CallOption) (Querier_QuerySampleClient, error) + Label(ctx context.Context, in *LabelRequest, opts ...grpc.CallOption) (*LabelResponse, error) + Tail(ctx context.Context, in *TailRequest, opts ...grpc.CallOption) (Querier_TailClient, error) + Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (*SeriesResponse, error) + TailersCount(ctx context.Context, in *TailersCountRequest, opts ...grpc.CallOption) (*TailersCountResponse, error) + GetChunkIDs(ctx context.Context, in *GetChunkIDsRequest, opts ...grpc.CallOption) (*GetChunkIDsResponse, error) +} + +type querierClient struct { + cc *grpc.ClientConn +} + +func NewQuerierClient(cc *grpc.ClientConn) QuerierClient { + return &querierClient{cc} +} + +func (c *querierClient) Query(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Querier_QueryClient, error) { + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[0], "/logproto.Querier/Query", opts...) 
+ if err != nil { + return nil, err + } + x := &querierQueryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Querier_QueryClient interface { + Recv() (*QueryResponse, error) + grpc.ClientStream +} + +type querierQueryClient struct { + grpc.ClientStream +} + +func (x *querierQueryClient) Recv() (*QueryResponse, error) { + m := new(QueryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *querierClient) QuerySample(ctx context.Context, in *SampleQueryRequest, opts ...grpc.CallOption) (Querier_QuerySampleClient, error) { + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[1], "/logproto.Querier/QuerySample", opts...) + if err != nil { + return nil, err + } + x := &querierQuerySampleClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Querier_QuerySampleClient interface { + Recv() (*SampleQueryResponse, error) + grpc.ClientStream +} + +type querierQuerySampleClient struct { + grpc.ClientStream +} + +func (x *querierQuerySampleClient) Recv() (*SampleQueryResponse, error) { + m := new(SampleQueryResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *querierClient) Label(ctx context.Context, in *LabelRequest, opts ...grpc.CallOption) (*LabelResponse, error) { + out := new(LabelResponse) + err := c.cc.Invoke(ctx, "/logproto.Querier/Label", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *querierClient) Tail(ctx context.Context, in *TailRequest, opts ...grpc.CallOption) (Querier_TailClient, error) { + stream, err := c.cc.NewStream(ctx, &_Querier_serviceDesc.Streams[2], "/logproto.Querier/Tail", opts...) + if err != nil { + return nil, err + } + x := &querierTailClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Querier_TailClient interface { + Recv() (*TailResponse, error) + grpc.ClientStream +} + +type querierTailClient struct { + grpc.ClientStream +} + +func (x *querierTailClient) Recv() (*TailResponse, error) { + m := new(TailResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *querierClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (*SeriesResponse, error) { + out := new(SeriesResponse) + err := c.cc.Invoke(ctx, "/logproto.Querier/Series", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *querierClient) TailersCount(ctx context.Context, in *TailersCountRequest, opts ...grpc.CallOption) (*TailersCountResponse, error) { + out := new(TailersCountResponse) + err := c.cc.Invoke(ctx, "/logproto.Querier/TailersCount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *querierClient) GetChunkIDs(ctx context.Context, in *GetChunkIDsRequest, opts ...grpc.CallOption) (*GetChunkIDsResponse, error) { + out := new(GetChunkIDsResponse) + err := c.cc.Invoke(ctx, "/logproto.Querier/GetChunkIDs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QuerierServer is the server API for Querier service. 
+type QuerierServer interface {
+	Query(*QueryRequest, Querier_QueryServer) error
+	QuerySample(*SampleQueryRequest, Querier_QuerySampleServer) error
+	Label(context.Context, *LabelRequest) (*LabelResponse, error)
+	Tail(*TailRequest, Querier_TailServer) error
+	Series(context.Context, *SeriesRequest) (*SeriesResponse, error)
+	TailersCount(context.Context, *TailersCountRequest) (*TailersCountResponse, error)
+	GetChunkIDs(context.Context, *GetChunkIDsRequest) (*GetChunkIDsResponse, error)
+}
+
+func RegisterQuerierServer(s *grpc.Server, srv QuerierServer) {
+	s.RegisterService(&_Querier_serviceDesc, srv)
+}
+
+func _Querier_Query_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(QueryRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(QuerierServer).Query(m, &querierQueryServer{stream})
+}
+
+type Querier_QueryServer interface {
+	Send(*QueryResponse) error
+	grpc.ServerStream
+}
+
+type querierQueryServer struct {
+	grpc.ServerStream
+}
+
+func (x *querierQueryServer) Send(m *QueryResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Querier_QuerySample_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(SampleQueryRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(QuerierServer).QuerySample(m, &querierQuerySampleServer{stream})
+}
+
+type Querier_QuerySampleServer interface {
+	Send(*SampleQueryResponse) error
+	grpc.ServerStream
+}
+
+type querierQuerySampleServer struct {
+	grpc.ServerStream
+}
+
+func (x *querierQuerySampleServer) Send(m *SampleQueryResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Querier_Label_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LabelRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QuerierServer).Label(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/logproto.Querier/Label",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QuerierServer).Label(ctx, req.(*LabelRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Querier_Tail_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(TailRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(QuerierServer).Tail(m, &querierTailServer{stream})
+}
+
+type Querier_TailServer interface {
+	Send(*TailResponse) error
+	grpc.ServerStream
+}
+
+type querierTailServer struct {
+	grpc.ServerStream
+}
+
+func (x *querierTailServer) Send(m *TailResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Querier_Series_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SeriesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QuerierServer).Series(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/logproto.Querier/Series",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QuerierServer).Series(ctx, req.(*SeriesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Querier_TailersCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TailersCountRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QuerierServer).TailersCount(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/logproto.Querier/TailersCount",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QuerierServer).TailersCount(ctx, req.(*TailersCountRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Querier_GetChunkIDs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetChunkIDsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(QuerierServer).GetChunkIDs(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/logproto.Querier/GetChunkIDs",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(QuerierServer).GetChunkIDs(ctx, req.(*GetChunkIDsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Querier_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "logproto.Querier",
+	HandlerType: (*QuerierServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Label",
+			Handler:    _Querier_Label_Handler,
+		},
+		{
+			MethodName: "Series",
+			Handler:    _Querier_Series_Handler,
+		},
+		{
+			MethodName: "TailersCount",
+			Handler:    _Querier_TailersCount_Handler,
+		},
+		{
+			MethodName: "GetChunkIDs",
+			Handler:    _Querier_GetChunkIDs_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Query",
+			Handler:       _Querier_Query_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "QuerySample",
+			Handler:       _Querier_QuerySample_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "Tail",
+			Handler:       _Querier_Tail_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "pkg/logproto/logproto.proto",
+}
+
+// IngesterClient is the client API for Ingester service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type IngesterClient interface {
+	TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error)
+}
+
+type ingesterClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewIngesterClient(cc *grpc.ClientConn) IngesterClient {
+	return &ingesterClient{cc}
+}
+
+func (c *ingesterClient) TransferChunks(ctx context.Context, opts ...grpc.CallOption) (Ingester_TransferChunksClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/logproto.Ingester/TransferChunks", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &ingesterTransferChunksClient{stream}
+	return x, nil
+}
+
+type Ingester_TransferChunksClient interface {
+	Send(*TimeSeriesChunk) error
+	CloseAndRecv() (*TransferChunksResponse, error)
+	grpc.ClientStream
+}
+
+type ingesterTransferChunksClient struct {
+	grpc.ClientStream
+}
+
+func (x *ingesterTransferChunksClient) Send(m *TimeSeriesChunk) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *ingesterTransferChunksClient) CloseAndRecv() (*TransferChunksResponse, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(TransferChunksResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// IngesterServer is the server API for Ingester service.
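+//
+// TransferChunks is a client-streaming RPC: the server receives a stream of
+// TimeSeriesChunk messages and replies exactly once when the sender is done.
+// A minimal implementation that drains the stream and acknowledges the sender
+// might look like the following sketch (`noopIngester` is illustrative only,
+// not part of this package; io.EOF is the standard end-of-stream signal from
+// package io):
+//
+//	type noopIngester struct{}
+//
+//	func (noopIngester) TransferChunks(s Ingester_TransferChunksServer) error {
+//		for {
+//			if _, err := s.Recv(); err == io.EOF {
+//				// The client called CloseAndRecv; acknowledge the transfer.
+//				return s.SendAndClose(&TransferChunksResponse{})
+//			} else if err != nil {
+//				return err
+//			}
+//		}
+//	}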
+type IngesterServer interface { + TransferChunks(Ingester_TransferChunksServer) error +} + +func RegisterIngesterServer(s *grpc.Server, srv IngesterServer) { + s.RegisterService(&_Ingester_serviceDesc, srv) +} + +func _Ingester_TransferChunks_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(IngesterServer).TransferChunks(&ingesterTransferChunksServer{stream}) +} + +type Ingester_TransferChunksServer interface { + SendAndClose(*TransferChunksResponse) error + Recv() (*TimeSeriesChunk, error) + grpc.ServerStream +} + +type ingesterTransferChunksServer struct { + grpc.ServerStream +} + +func (x *ingesterTransferChunksServer) SendAndClose(m *TransferChunksResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *ingesterTransferChunksServer) Recv() (*TimeSeriesChunk, error) { + m := new(TimeSeriesChunk) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Ingester_serviceDesc = grpc.ServiceDesc{ + ServiceName: "logproto.Ingester", + HandlerType: (*IngesterServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "TransferChunks", + Handler: _Ingester_TransferChunks_Handler, + ClientStreams: true, + }, + }, + Metadata: "pkg/logproto/logproto.proto", +} + +func (m *PushRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Streams) > 0 { + for _, msg := range m.Streams { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PushResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PushResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *QueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Selector) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + if m.Limit != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Direction != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Direction)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) 
+ } + } + return i, nil +} + +func (m *SampleQueryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleQueryRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Selector) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Shards) > 0 { + for _, s := range m.Shards { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *SampleQueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SampleQueryResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Series) > 0 { + for _, msg := range m.Series { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *QueryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Streams) > 0 { + for _, msg := range m.Streams { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Values { + dAtA[i] = 0x10 + i++ + if m.Values { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Start != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start))) + n5, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.End != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(*m.End))) + n6, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *LabelResponse) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *StreamAdapter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamAdapter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) + i += copy(dAtA[i:], m.Labels) + } + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EntryAdapter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EntryAdapter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp))) + n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Line) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) + } + return i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Timestamp != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Timestamp)) + } + if m.Value != 0 { + dAtA[i] = 0x11 + i++ + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 + } + if m.Hash != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Hash)) + } + return i, nil +} + +func (m *Series) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Series) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) + i += copy(dAtA[i:], m.Labels) + } + if len(m.Samples) > 0 { + for _, msg := range m.Samples { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TailRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TailRequest) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if len(m.Query) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Query))) + i += copy(dAtA[i:], m.Query) + } + if m.DelayFor != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.DelayFor)) + } + if m.Limit != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Limit)) + } + dAtA[i] = 0x2a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *TailResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TailResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Stream != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Stream.Size())) + n9, err := m.Stream.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if len(m.DroppedStreams) > 0 { + for _, msg := range m.DroppedStreams { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SeriesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *SeriesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Series) > 0 { + for _, msg := range m.Series { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SeriesIdentifier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SeriesIdentifier) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0xa + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovLogproto(uint64(len(k))) + 1 + len(v) + 
sovLogproto(uint64(len(v))) + i = encodeVarintLogproto(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *DroppedStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DroppedStream) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.From))) + n12, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.From, dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.To))) + n13, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.To, dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Labels) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels))) + i += copy(dAtA[i:], m.Labels) + } + return i, nil +} + +func (m *TimeSeriesChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesChunk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FromIngesterId) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.FromIngesterId))) + i += copy(dAtA[i:], m.FromIngesterId) + } + if len(m.UserId) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.UserId))) + i += copy(dAtA[i:], m.UserId) + } + if len(m.Labels) > 0 { + for _, msg := range m.Labels { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Chunks) > 0 { + for _, msg := range m.Chunks { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *TransferChunksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err 
:= m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TransferChunksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TailersCountRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TailersCountRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TailersCountResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TailersCountResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Count != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(m.Count)) + } + return i, nil +} + +func (m *GetChunkIDsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetChunkIDsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Matchers) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Matchers))) + i += copy(dAtA[i:], m.Matchers) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Start))) + n14, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintLogproto(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.End))) + n15, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *GetChunkIDsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetChunkIDsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ChunkIDs) > 0 { + for _, s := range m.ChunkIDs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintLogproto(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PushRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *PushResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovLogproto(uint64(m.Limit)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) + if m.Direction != 
0 { + n += 1 + sovLogproto(uint64(m.Direction)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *SampleQueryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *SampleQueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Series) > 0 { + for _, e := range m.Series { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *QueryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *LabelRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if m.Values { + n += 2 + } + if m.Start != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start) + n += 1 + l + sovLogproto(uint64(l)) + } + if m.End != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.End) + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *LabelResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *StreamAdapter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Labels) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *EntryAdapter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovLogproto(uint64(l)) + l = len(m.Line) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovLogproto(uint64(m.Timestamp)) + } + if m.Value != 0 { + n += 9 + } + if m.Hash != 0 { + n += 1 + sovLogproto(uint64(m.Hash)) + } + return n +} + +func (m *Series) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Labels) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *TailRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if m.DelayFor != 0 { + n += 1 + sovLogproto(uint64(m.DelayFor)) + } + if m.Limit != 0 { + n += 1 + sovLogproto(uint64(m.Limit)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + return n +} + +func (m *TailResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stream != nil { + l = m.Stream.Size() + n += 1 + l + 
sovLogproto(uint64(l)) + } + if len(m.DroppedStreams) > 0 { + for _, e := range m.DroppedStreams { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *SeriesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *SeriesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Series) > 0 { + for _, e := range m.Series { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *SeriesIdentifier) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovLogproto(uint64(len(k))) + 1 + len(v) + sovLogproto(uint64(len(v))) + n += mapEntrySize + 1 + sovLogproto(uint64(mapEntrySize)) + } + } + return n +} + +func (m *DroppedStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.From) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.To) + n += 1 + l + sovLogproto(uint64(l)) + l = len(m.Labels) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *TimeSeriesChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromIngesterId) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + l = len(m.UserId) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *TransferChunksResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TailersCountRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TailersCountResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != 0 { + n += 1 + sovLogproto(uint64(m.Count)) + } + return n +} + +func (m *GetChunkIDsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Matchers) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) + return n +} + +func (m *GetChunkIDsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChunkIDs) > 0 { + for _, s := range m.ChunkIDs { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func sovLogproto(x uint64) (n int) { + 
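+	// A protobuf varint stores 7 payload bits per byte, so the encoded size
+	// is the number of 7-bit groups needed for x, with a minimum of one byte:
+	// for example, sovLogproto(0) == 1 and sovLogproto(300) == 2. sozLogproto
+	// below zig-zag encodes first, so small negative values also stay short.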
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozLogproto(x uint64) (n int) {
+	return sovLogproto(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *PushRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PushRequest{`,
+		`Streams:` + fmt.Sprintf("%v", this.Streams) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PushResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PushResponse{`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *QueryRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&QueryRequest{`,
+		`Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
+		`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+		`Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Direction:` + fmt.Sprintf("%v", this.Direction) + `,`,
+		`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SampleQueryRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SampleQueryRequest{`,
+		`Selector:` + fmt.Sprintf("%v", this.Selector) + `,`,
+		`Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Shards:` + fmt.Sprintf("%v", this.Shards) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SampleQueryResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SampleQueryResponse{`,
+		`Series:` + fmt.Sprintf("%v", this.Series) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *QueryResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&QueryResponse{`,
+		`Streams:` + fmt.Sprintf("%v", this.Streams) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LabelRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LabelRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1) + `,`,
+		`End:` + strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LabelResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LabelResponse{`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StreamAdapter) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StreamAdapter{`,
+		`Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+		`Entries:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Entries), "EntryAdapter", "EntryAdapter", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EntryAdapter) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EntryAdapter{`,
+		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Line:` + fmt.Sprintf("%v", this.Line) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Sample) String() string {
+	if this == nil {
+		return "nil"
} + s := strings.Join([]string{`&Sample{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Hash:` + fmt.Sprintf("%v", this.Hash) + `,`, + `}`, + }, "") + return s +} +func (this *Series) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Series{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `Samples:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Samples), "Sample", "Sample", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TailRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TailRequest{`, + `Query:` + fmt.Sprintf("%v", this.Query) + `,`, + `DelayFor:` + fmt.Sprintf("%v", this.DelayFor) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TailResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TailResponse{`, + `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, + `DroppedStreams:` + strings.Replace(fmt.Sprintf("%v", this.DroppedStreams), "DroppedStream", "DroppedStream", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeriesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeriesRequest{`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *SeriesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeriesResponse{`, + `Series:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Series), "SeriesIdentifier", "SeriesIdentifier", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeriesIdentifier) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&SeriesIdentifier{`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *DroppedStream) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DroppedStream{`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} +func (this *TimeSeriesChunk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TimeSeriesChunk{`, + `FromIngesterId:` + fmt.Sprintf("%v", this.FromIngesterId) + `,`, + `UserId:` + fmt.Sprintf("%v", this.UserId) + `,`, + `Labels:` + strings.Replace(fmt.Sprintf("%v", this.Labels), "LabelPair", "LabelPair", 1) + `,`, + `Chunks:` + strings.Replace(fmt.Sprintf("%v", this.Chunks), "Chunk", "Chunk", 1) + `,`, + 
`}`, + }, "") + return s +} +func (this *LabelPair) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelPair{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Chunk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Chunk{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *TransferChunksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TransferChunksResponse{`, + `}`, + }, "") + return s +} +func (this *TailersCountRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TailersCountRequest{`, + `}`, + }, "") + return s +} +func (this *TailersCountResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TailersCountResponse{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `}`, + }, "") + return s +} +func (this *GetChunkIDsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetChunkIDsRequest{`, + `Matchers:` + fmt.Sprintf("%v", this.Matchers) + `,`, + `Start:` + strings.Replace(strings.Replace(this.Start.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(this.End.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetChunkIDsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetChunkIDsResponse{`, + `ChunkIDs:` + fmt.Sprintf("%v", this.ChunkIDs) + `,`, + `}`, + }, "") + return s +} +func valueToStringLogproto(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PushRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, Stream{}) + if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PushResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PushResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PushResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
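+			// Field 3 is a length-delimited google.protobuf.Timestamp; because
+			// logproto marks it with the gogoproto stdtime option, it is decoded
+			// directly into the time.Time field rather than an intermediate
+			// types.Timestamp.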
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Direction", wireType) + } + m.Direction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Direction |= Direction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleQueryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleQueryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
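+				// postIndex can only be negative if iNdEx+intStringLen overflowed
+				// int, so treat it the same as an invalid length.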
return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SampleQueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SampleQueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SampleQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
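+					// More than ten varint bytes (shift >= 64) cannot fit in a
+					// uint64, so the input is rejected as corrupt.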
return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Series = append(m.Series, Series{}) + if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, Stream{}) + if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Start == nil { + m.Start = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.End == nil { + m.End = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamAdapter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamAdapter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamAdapter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, EntryAdapter{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*EntryAdapter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EntryAdapter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EntryAdapter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + m.Hash = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hash |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Series) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Series: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Series: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TailRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TailRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TailRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelayFor", wireType) + } + m.DelayFor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelayFor |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TailResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TailResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TailResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stream == nil { + m.Stream = &Stream{} + } + if err := m.Stream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedStreams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DroppedStreams = append(m.DroppedStreams, &DroppedStream{}) + if err := m.DroppedStreams[len(m.DroppedStreams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Series = append(m.Series, SeriesIdentifier{}) + if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
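
Reviewer note (illustrative sketch, editor's addition): the `Start`/`End` fields here are nested `google.protobuf.Timestamp` messages, but thanks to the `(gogoproto.stdtime)` option the generated code decodes them directly into `time.Time` via `StdTimeUnmarshal`. The underlying conversion is just seconds plus nanos, matching `timestampProto`/`timestampFromProto` defined in timestamp.go further down:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	t := time.Date(2024, 5, 1, 12, 0, 0, 500, time.UTC)
	ts := types.Timestamp{Seconds: t.Unix(), Nanos: int32(t.Nanosecond())}
	fmt.Println(time.Unix(ts.Seconds, int64(ts.Nanos)).UTC().Equal(t)) // true
}
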
io.ErrUnexpectedEOF + } + return nil +} +func (m *SeriesIdentifier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesIdentifier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesIdentifier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthLogproto + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthLogproto + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthLogproto + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthLogproto + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err 
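
Reviewer note (illustrative sketch, editor's addition): the `SeriesIdentifier` case above hand-decodes a proto map. On the wire, `map<string, string>` is a repeated submessage whose key is field 1 and whose value is field 2, which is exactly the mapkey/mapvalue loop in the generated code. A tiny encoder for the same layout (single-byte length prefixes limit this sketch to strings shorter than 128 bytes):

// encodeLabelsEntry builds the wire bytes for one labels map entry,
// then wraps it as field 1 of SeriesIdentifier.
func encodeLabelsEntry(key, value string) []byte {
	entry := append([]byte{0x0A, byte(len(key))}, key...) // tag (1<<3)|2, key
	entry = append(entry, 0x12, byte(len(value)))         // tag (2<<3)|2
	entry = append(entry, value...)
	return append([]byte{0x0A, byte(len(entry))}, entry...)
}
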
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DroppedStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DroppedStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DroppedStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.From, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.To, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIngesterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromIngesterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, &LabelPair{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, &Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TransferChunksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TransferChunksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TransferChunksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TailersCountRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TailersCountRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TailersCountRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TailersCountResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
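
Reviewer note (illustrative sketch, editor's addition): the `Chunk.Data` case above uses the `append(m.Data[:0], ...)` idiom, which copies the payload out of the shared input buffer instead of aliasing `dAtA`, reusing the destination's backing array when it is big enough; the trailing nil check keeps a present-but-empty bytes field distinguishable from an absent one. Distilled:

// copyBytesField mirrors the generated bytes-field decode: always copy,
// never alias the source buffer.
func copyBytesField(dst, payload []byte) []byte {
	dst = append(dst[:0], payload...)
	if dst == nil { // nil dst plus empty payload stays non-nil
		dst = []byte{}
	}
	return dst
}
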
return fmt.Errorf("proto: TailersCountResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TailersCountResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetChunkIDsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetChunkIDsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetChunkIDsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetChunkIDsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetChunkIDsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetChunkIDsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkIDs = append(m.ChunkIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogproto(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogproto + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogproto + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogproto + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLogproto + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthLogproto + } + return iNdEx, nil + case 3: 
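
Reviewer note (illustrative sketch, editor's addition): `skipLogproto`, being defined at this point, is what lets these decoders tolerate unknown fields from newer schema versions; the wire type alone tells it how far to skip. Varints (0) are scanned byte by byte, length-delimited fields (2) skip their declared length, groups (3/4, long deprecated) recurse until the matching end-group, and only the fixed-width types have static sizes:

// fixedSkip reports the skip distance for the fixed-width wire types;
// every other type needs scanning, as the function below does.
func fixedSkip(wireType int) (n int, ok bool) {
	switch wireType {
	case 1:
		return 8, true // fixed64
	case 5:
		return 4, true // fixed32
	default:
		return 0, false // varint, length-delimited, groups
	}
}
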
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowLogproto
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipLogproto(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthLogproto
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthLogproto = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowLogproto   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.proto b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.proto
new file mode 100644
index 000000000..deac4f94c
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/logproto.proto
@@ -0,0 +1,166 @@
+syntax = "proto3";
+
+package logproto;
+
+option go_package = "github.com/netobserv/loki-client-go/pkg/logproto";
+
+import "google/protobuf/timestamp.proto";
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+service Pusher {
+  rpc Push(PushRequest) returns (PushResponse) {};
+}
+
+service Querier {
+  rpc Query(QueryRequest) returns (stream QueryResponse) {};
+  rpc QuerySample(SampleQueryRequest) returns (stream SampleQueryResponse) {};
+  rpc Label(LabelRequest) returns (LabelResponse) {};
+  rpc Tail(TailRequest) returns (stream TailResponse) {};
+  rpc Series(SeriesRequest) returns (SeriesResponse) {};
+  rpc TailersCount(TailersCountRequest) returns (TailersCountResponse) {};
+  rpc GetChunkIDs(GetChunkIDsRequest) returns (GetChunkIDsResponse) {}; // GetChunkIDs returns ChunkIDs from the index store holding logs for the given selectors and time range.
+}
+
+service Ingester {
+  rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {};
+}
+
+message PushRequest {
+  repeated StreamAdapter streams = 1 [(gogoproto.jsontag) = "streams", (gogoproto.customtype) = "Stream"];
+}
+
+message PushResponse {
+}
+
+message QueryRequest {
+  string selector = 1;
+  uint32 limit = 2;
+  google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  Direction direction = 5;
+  reserved 6;
+  repeated string shards = 7 [(gogoproto.jsontag) = "shards,omitempty"];
+}
+
+message SampleQueryRequest {
+  string selector = 1;
+  google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  repeated string shards = 4 [(gogoproto.jsontag) = "shards,omitempty"];
+}
+
+message SampleQueryResponse {
+  repeated Series series = 1 [(gogoproto.customtype) = "Series", (gogoproto.nullable) = true];
+}
+
+enum Direction {
+  FORWARD = 0;
+  BACKWARD = 1;
+}
+
+message QueryResponse {
+  repeated StreamAdapter streams = 1 [(gogoproto.customtype) = "Stream", (gogoproto.nullable) = true];
+}
+
+message LabelRequest {
+  string name = 1;
+  bool values = 2; // True to fetch label values, false to fetch label names.
+  google.protobuf.Timestamp start = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
+  google.protobuf.Timestamp end = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true];
+}
+
+message LabelResponse {
+  repeated string values = 1;
+}
+
+message StreamAdapter {
+  string labels = 1 [(gogoproto.jsontag) = "labels"];
+  repeated EntryAdapter entries = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "entries"];
+}
+
+message EntryAdapter {
+  google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false, (gogoproto.jsontag) = "ts"];
+  string line = 2 [(gogoproto.jsontag) = "line"];
+}
+
+message Sample {
+  int64 timestamp = 1 [(gogoproto.jsontag) = "ts"];
+  double value = 2 [(gogoproto.jsontag) = "value"];
+  uint64 hash = 3 [(gogoproto.jsontag) = "hash"];
+}
+
+message Series {
+  string labels = 1 [(gogoproto.jsontag) = "labels"];
+  repeated Sample samples = 2 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "samples"];
+}
+
+message TailRequest {
+  string query = 1;
+  reserved 2;
+  uint32 delayFor = 3;
+  uint32 limit = 4;
+  google.protobuf.Timestamp start = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message TailResponse {
+  StreamAdapter stream = 1 [(gogoproto.customtype) = "Stream"];
+  repeated DroppedStream droppedStreams = 2;
+}
+
+message SeriesRequest {
+  google.protobuf.Timestamp start = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp end = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  repeated string groups = 3;
+}
+
+message SeriesResponse {
+  repeated SeriesIdentifier series = 1 [(gogoproto.nullable) = false];
+}
+
+message SeriesIdentifier {
+  map<string, string> labels = 1;
+}
+
+message DroppedStream {
+  google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  string labels = 3;
+}
+
+message TimeSeriesChunk {
+  string from_ingester_id = 1;
+  string user_id = 2;
+  repeated LabelPair labels = 3;
+  repeated Chunk chunks = 4;
+}
+
+message LabelPair {
+  string name = 1;
+  string value = 2;
+}
+
+message Chunk {
+  bytes data = 1;
+}
+
+message TransferChunksResponse {
+
+}
+
+message TailersCountRequest {
+
+}
+
+message TailersCountResponse {
+  uint32 count = 1;
+}
+
+message GetChunkIDsRequest {
+  string matchers = 1;
+  google.protobuf.Timestamp start = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+  google.protobuf.Timestamp end = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message GetChunkIDsResponse {
+  repeated string chunkIDs = 1;
+}
\ No newline at end of file
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/timestamp.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/timestamp.go
new file mode 100644
index 000000000..f2fb323f9
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/timestamp.go
@@ -0,0 +1,106 @@
+package logproto
+
+import (
+	"errors"
+	strconv "strconv"
+	time "time"
+
+	"github.com/gogo/protobuf/types"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *types.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return errors.New("timestamp: " + formatTimestamp(ts) + " before 0001-01-01")
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return errors.New("timestamp: " + formatTimestamp(ts) + " after 10000-01-01")
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return errors.New("timestamp: " + formatTimestamp(ts) + ": nanos not in range [0, 1e9)")
+	}
+	return nil
+}
+
+// formatTimestamp is equivalent to fmt.Sprintf("%#v", ts)
+// but avoids the escape incurred by using fmt.Sprintf, eliminating
+// unnecessary heap allocations.
+func formatTimestamp(ts *types.Timestamp) string {
+	if ts == nil {
+		return "nil"
+	}
+
+	seconds := strconv.FormatInt(ts.Seconds, 10)
+	nanos := strconv.FormatInt(int64(ts.Nanos), 10)
+	return "&types.Timestamp{Seconds: " + seconds + ",\nNanos: " + nanos + ",\n}"
+}
+
+func SizeOfStdTime(t time.Time) int {
+	ts, err := timestampProto(t)
+	if err != nil {
+		return 0
+	}
+	return ts.Size()
+}
+
+func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
+	ts, err := timestampProto(t)
+	if err != nil {
+		return 0, err
+	}
+	return ts.MarshalTo(data)
+}
+
+func StdTimeUnmarshal(t *time.Time, data []byte) error {
+	ts := &types.Timestamp{}
+	if err := ts.Unmarshal(data); err != nil {
+		return err
+	}
+	tt, err := timestampFromProto(ts)
+	if err != nil {
+		return err
+	}
+	*t = tt
+	return nil
+}
+
+func timestampFromProto(ts *types.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+func timestampProto(t time.Time) (types.Timestamp, error) {
+	ts := types.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	return ts, validateTimestamp(&ts)
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/logproto/types.go b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/types.go
new file mode 100644
index 000000000..f2a2ac064
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/logproto/types.go
@@ -0,0 +1,475 @@
+package logproto
+
+import (
+	"fmt"
+	"io"
+	"time"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/prometheus/prometheus/promql/parser"
+)
+
+// Stream contains a unique labels set as a string and a set of entries for it.
+// We are not using the proto generated version but this custom one so that we
+// can improve serialization; see the benchmarks.
+type Stream struct {
+	Labels  string  `protobuf:"bytes,1,opt,name=labels,proto3" json:"labels"`
+	Entries []Entry `protobuf:"bytes,2,rep,name=entries,proto3,customtype=EntryAdapter" json:"entries"`
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (r *PushRequest) MarshalJSON() ([]byte, error) {
+	stream := jsoniter.ConfigDefault.BorrowStream(nil)
+	defer jsoniter.ConfigDefault.ReturnStream(stream)
+
+	stream.WriteObjectStart()
+	stream.WriteObjectField("streams")
+	stream.WriteArrayStart()
+	for i, s := range r.Streams {
+		stream.WriteObjectStart()
+		stream.WriteObjectField("stream")
+		stream.WriteObjectStart()
+		lbs, err := parser.ParseMetric(s.Labels)
+		if err != nil {
+			continue
+		}
+		for i, lb := range lbs {
+			stream.WriteObjectField(lb.Name)
+			stream.WriteStringWithHTMLEscaped(lb.Value)
+			if i != len(lbs)-1 {
+				stream.WriteMore()
+			}
+		}
+		stream.WriteObjectEnd()
+		stream.WriteMore()
+		stream.WriteObjectField("values")
+		stream.WriteArrayStart()
+		for i, entry := range s.Entries {
+			stream.WriteArrayStart()
+			stream.WriteRaw(fmt.Sprintf(`"%d"`, entry.Timestamp.UnixNano()))
+			stream.WriteMore()
+			stream.WriteStringWithHTMLEscaped(entry.Line)
+			stream.WriteArrayEnd()
+			if i != len(s.Entries)-1 {
+				stream.WriteMore()
+			}
+		}
+		stream.WriteArrayEnd()
+		stream.WriteObjectEnd()
+		if i != len(r.Streams)-1 {
+			stream.WriteMore()
+		}
+	}
+	stream.WriteArrayEnd()
+	stream.WriteObjectEnd()
+
+	return stream.Buffer(), nil
+}
+
+// Entry is a log entry with a timestamp.
+type Entry struct {
+	Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"ts"`
+	Line      string    `protobuf:"bytes,2,opt,name=line,proto3" json:"line"`
+}
+
+func (m *Stream) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Stream) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Labels) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintLogproto(dAtA, i, uint64(len(m.Labels)))
+		i += copy(dAtA[i:], m.Labels)
+	}
+	if len(m.Entries) > 0 {
+		for _, msg := range m.Entries {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintLogproto(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintLogproto(dAtA, i, uint64(SizeOfStdTime(m.Timestamp)))
+	n5, err := StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n5
+	if len(m.Line) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintLogproto(dAtA, i, uint64(len(m.Line)))
+		i += copy(dAtA[i:], m.Line)
+	}
+	return i, nil
+}
+
+func (m *Stream) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowLogproto
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Stream: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowLogproto
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Labels = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowLogproto
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Entries = append(m.Entries, Entry{})
+			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipLogproto(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func (m *Entry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowLogproto
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowLogproto
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowLogproto
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthLogproto
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+ return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogproto(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthLogproto + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func (m *Stream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Labels) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovLogproto(uint64(l)) + } + } + return n +} + +func (m *Entry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = SizeOfStdTime(m.Timestamp) + n += 1 + l + sovLogproto(uint64(l)) + l = len(m.Line) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } + return n +} + +func (m *Stream) Equal(that interface{}) bool { + if that == nil { + return m == nil + } + + that1, ok := that.(*Stream) + if !ok { + that2, ok := that.(Stream) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return m == nil + } else if m == nil { + return false + } + if m.Labels != that1.Labels { + return false + } + if len(m.Entries) != len(that1.Entries) { + return false + } + for i := range m.Entries { + if !m.Entries[i].Equal(that1.Entries[i]) { + return false + } + } + return true +} + +func (m *Entry) Equal(that interface{}) bool { + if that == nil { + return m == nil + } + + that1, ok := that.(*Entry) + if !ok { + that2, ok := that.(Entry) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return m == nil + } else if m == nil { + return false + } + if !m.Timestamp.Equal(that1.Timestamp) { + return false + } + if m.Line != that1.Line { + return false + } + return true +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/counters.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/counters.go new file mode 100644 index 000000000..bfab117d4 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/counters.go @@ -0,0 +1,117 @@ +package metric + +import ( + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" +) + +const ( + CounterInc = "inc" + CounterAdd = "add" + + ErrCounterActionRequired = "counter action must be defined as either `inc` or `add`" + ErrCounterInvalidAction = "action %s is not valid, action must be either `inc` or `add`" + ErrCounterInvalidMatchAll = "`match_all: true` cannot be combined with `value`, please remove `match_all` or `value`" + ErrCounterInvalidCountBytes = "`count_entry_bytes: true` can only be set with `match_all: true`" + ErrCounterInvalidCountBytesAction = "`count_entry_bytes: true` can only be used with `action: add`" +) + +type CounterConfig struct { + MatchAll *bool `mapstructure:"match_all"` + CountBytes *bool `mapstructure:"count_entry_bytes"` + Value *string `mapstructure:"value"` + Action string `mapstructure:"action"` +} + +func validateCounterConfig(config *CounterConfig) error { + if config.Action == "" { + return errors.New(ErrCounterActionRequired) + } + config.Action = strings.ToLower(config.Action) + if config.Action != CounterInc && config.Action 
!= CounterAdd {
+		return errors.Errorf(ErrCounterInvalidAction, config.Action)
+	}
+	if config.MatchAll != nil && *config.MatchAll && config.Value != nil {
+		return errors.Errorf(ErrCounterInvalidMatchAll)
+	}
+	if config.CountBytes != nil && *config.CountBytes && (config.MatchAll == nil || !*config.MatchAll) {
+		return errors.New(ErrCounterInvalidCountBytes)
+	}
+	if config.CountBytes != nil && *config.CountBytes && config.Action != CounterAdd {
+		return errors.New(ErrCounterInvalidCountBytesAction)
+	}
+	return nil
+}
+
+func parseCounterConfig(config interface{}) (*CounterConfig, error) {
+	cfg := &CounterConfig{}
+	err := mapstructure.Decode(config, cfg)
+	if err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
+// Counters is a vector of counters for each log stream.
+type Counters struct {
+	*metricVec
+	Cfg *CounterConfig
+}
+
+// NewCounters creates a new counter vec.
+func NewCounters(name, help string, config interface{}, maxIdleSec int64) (*Counters, error) {
+	cfg, err := parseCounterConfig(config)
+	if err != nil {
+		return nil, err
+	}
+	err = validateCounterConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return &Counters{
+		metricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {
+			return &expiringCounter{prometheus.NewCounter(prometheus.CounterOpts{
+				Help:        help,
+				Name:        name,
+				ConstLabels: labels,
+			}),
+				0,
+			}
+		}, maxIdleSec),
+		Cfg: cfg,
+	}, nil
+}
+
+// With returns the counter associated with a stream labelset.
+func (c *Counters) With(labels model.LabelSet) prometheus.Counter {
+	return c.metricVec.With(labels).(prometheus.Counter)
+}
+
+type expiringCounter struct {
+	prometheus.Counter
+	lastModSec int64
+}
+
+// Inc increments the counter by 1. Use Add to increment it by arbitrary
+// non-negative values.
+func (e *expiringCounter) Inc() {
+	e.Counter.Inc()
+	e.lastModSec = time.Now().Unix()
+}
+
+// Add adds the given value to the counter. It panics if the value is <
+// 0.
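+// For example (illustrative sketch, not upstream code; metric name, help text
+// and labels below are assumptions):
+//   c, _ := NewCounters("loki_lines_total", "counted lines", map[string]interface{}{"action": "add"}, 300)
+//   c.With(model.LabelSet{"app": "foo"}).Add(42)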
+func (e *expiringCounter) Add(val float64) {
+	e.Counter.Add(val)
+	e.lastModSec = time.Now().Unix()
+}
+
+// HasExpired implements Expirable
+func (e *expiringCounter) HasExpired(currentTimeSec int64, maxAgeSec int64) bool {
+	return currentTimeSec-e.lastModSec >= maxAgeSec
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/gauges.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/gauges.go
new file mode 100644
index 000000000..2b5b8b84b
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/gauges.go
@@ -0,0 +1,136 @@
+package metric
+
+import (
+	"strings"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+)
+
+const (
+	GaugeSet = "set"
+	GaugeInc = "inc"
+	GaugeDec = "dec"
+	GaugeAdd = "add"
+	GaugeSub = "sub"
+
+	ErrGaugeActionRequired = "gauge action must be defined as `set`, `inc`, `dec`, `add`, or `sub`"
+	ErrGaugeInvalidAction  = "action %s is not valid, action must be `set`, `inc`, `dec`, `add`, or `sub`"
+)
+
+type GaugeConfig struct {
+	Value  *string `mapstructure:"value"`
+	Action string  `mapstructure:"action"`
+}
+
+func validateGaugeConfig(config *GaugeConfig) error {
+	if config.Action == "" {
+		return errors.New(ErrGaugeActionRequired)
+	}
+	config.Action = strings.ToLower(config.Action)
+	if config.Action != GaugeSet &&
+		config.Action != GaugeInc &&
+		config.Action != GaugeDec &&
+		config.Action != GaugeAdd &&
+		config.Action != GaugeSub {
+		return errors.Errorf(ErrGaugeInvalidAction, config.Action)
+	}
+	return nil
+}
+
+func parseGaugeConfig(config interface{}) (*GaugeConfig, error) {
+	cfg := &GaugeConfig{}
+	err := mapstructure.Decode(config, cfg)
+	if err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
+// Gauges is a vector of gauges for each log stream.
+type Gauges struct {
+	*metricVec
+	Cfg *GaugeConfig
+}
+
+// NewGauges creates a new gauge vec.
+func NewGauges(name, help string, config interface{}, maxIdleSec int64) (*Gauges, error) {
+	cfg, err := parseGaugeConfig(config)
+	if err != nil {
+		return nil, err
+	}
+	err = validateGaugeConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return &Gauges{
+		metricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {
+			return &expiringGauge{prometheus.NewGauge(prometheus.GaugeOpts{
+				Help:        help,
+				Name:        name,
+				ConstLabels: labels,
+			}),
+				0,
+			}
+		}, maxIdleSec),
+		Cfg: cfg,
+	}, nil
+}
+
+// With returns the gauge associated with a stream labelset.
+func (g *Gauges) With(labels model.LabelSet) prometheus.Gauge {
+	return g.metricVec.With(labels).(prometheus.Gauge)
+}
+
+type expiringGauge struct {
+	prometheus.Gauge
+	lastModSec int64
+}
+
+// Set sets the Gauge to an arbitrary value.
+func (g *expiringGauge) Set(val float64) {
+	g.Gauge.Set(val)
+	g.lastModSec = time.Now().Unix()
+}
+
+// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+// values.
+func (g *expiringGauge) Inc() {
+	g.Gauge.Inc()
+	g.lastModSec = time.Now().Unix()
+}
+
+// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+// values.
+func (g *expiringGauge) Dec() {
+	g.Gauge.Dec()
+	g.lastModSec = time.Now().Unix()
+}
+
+// Add adds the given value to the Gauge. (The value can be negative,
+// resulting in a decrease of the Gauge.)
+func (g *expiringGauge) Add(val float64) {
+	g.Gauge.Add(val)
+	g.lastModSec = time.Now().Unix()
+}
+
+// Sub subtracts the given value from the Gauge. (The value can be
+// negative, resulting in an increase of the Gauge.)
+func (g *expiringGauge) Sub(val float64) {
+	g.Gauge.Sub(val)
+	g.lastModSec = time.Now().Unix()
+}
+
+// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+func (g *expiringGauge) SetToCurrentTime() {
+	g.Gauge.SetToCurrentTime()
+	g.lastModSec = time.Now().Unix()
+}
+
+// HasExpired implements Expirable
+func (g *expiringGauge) HasExpired(currentTimeSec int64, maxAgeSec int64) bool {
+	return currentTimeSec-g.lastModSec >= maxAgeSec
+}
diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/histograms.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/histograms.go
new file mode 100644
index 000000000..c920e0a0c
--- /dev/null
+++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/histograms.go
@@ -0,0 +1,79 @@
+package metric
+
+import (
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+)
+
+type HistogramConfig struct {
+	Value   *string   `mapstructure:"value"`
+	Buckets []float64 `mapstructure:"buckets"`
+}
+
+func validateHistogramConfig(config *HistogramConfig) error {
+	return nil
+}
+
+func parseHistogramConfig(config interface{}) (*HistogramConfig, error) {
+	cfg := &HistogramConfig{}
+	err := mapstructure.Decode(config, cfg)
+	if err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
+
+// Histograms is a vector of histograms for each log stream.
+type Histograms struct {
+	*metricVec
+	Cfg *HistogramConfig
+}
+
+// NewHistograms creates a new histogram vec.
+func NewHistograms(name, help string, config interface{}, maxIdleSec int64) (*Histograms, error) {
+	cfg, err := parseHistogramConfig(config)
+	if err != nil {
+		return nil, err
+	}
+	err = validateHistogramConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return &Histograms{
+		metricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {
+			return &expiringHistogram{prometheus.NewHistogram(prometheus.HistogramOpts{
+				Help:        help,
+				Name:        name,
+				ConstLabels: labels,
+				Buckets:     cfg.Buckets,
+			}),
+				0,
+			}
+		}, maxIdleSec),
+		Cfg: cfg,
+	}, nil
+}
+
+// With returns the histogram associated with a stream labelset.
+func (h *Histograms) With(labels model.LabelSet) prometheus.Histogram {
+	return h.metricVec.With(labels).(prometheus.Histogram)
+}
+
+type expiringHistogram struct {
+	prometheus.Histogram
+	lastModSec int64
+}
+
+// Observe adds a single observation to the histogram.
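+// For example (illustrative sketch, not upstream code; metric name, help text,
+// buckets and labels below are assumptions):
+//   h, _ := NewHistograms("line_size_bytes", "log line sizes", map[string]interface{}{"buckets": []float64{64, 256, 1024}}, 300)
+//   h.With(model.LabelSet{"app": "foo"}).Observe(128)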
+func (h *expiringHistogram) Observe(val float64) { + h.Histogram.Observe(val) + h.lastModSec = time.Now().Unix() +} + +// HasExpired implements Expirable +func (h *expiringHistogram) HasExpired(currentTimeSec int64, maxAgeSec int64) bool { + return currentTimeSec-h.lastModSec >= maxAgeSec +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/metric/metricvec.go b/vendor/github.com/netobserv/loki-client-go/pkg/metric/metricvec.go new file mode 100644 index 000000000..e56a6ccc3 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/metric/metricvec.go @@ -0,0 +1,82 @@ +package metric + +import ( + "sync" + "time" + + "github.com/netobserv/loki-client-go/pkg/labelutil" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" +) + +// Expirable allows checking if something has exceeded the provided maxAge based on the provided currentTime +type Expirable interface { + HasExpired(currentTimeSec int64, maxAgeSec int64) bool +} + +type metricVec struct { + factory func(labels map[string]string) prometheus.Metric + mtx sync.Mutex + metrics map[model.Fingerprint]prometheus.Metric + maxAgeSec int64 +} + +func newMetricVec(factory func(labels map[string]string) prometheus.Metric, maxAgeSec int64) *metricVec { + return &metricVec{ + metrics: map[model.Fingerprint]prometheus.Metric{}, + factory: factory, + maxAgeSec: maxAgeSec, + } +} + +// Describe implements prometheus.Collector and doesn't declare any metrics on purpose to bypass prometheus validation. +// see https://godoc.org/github.com/prometheus/client_golang/prometheus#hdr-Custom_Collectors_and_constant_Metrics search for "unchecked" +func (c *metricVec) Describe(ch chan<- *prometheus.Desc) {} + +// Collect implements prometheus.Collector +func (c *metricVec) Collect(ch chan<- prometheus.Metric) { + c.mtx.Lock() + defer c.mtx.Unlock() + for _, m := range c.metrics { + ch <- m + } + c.prune() +} + +// With returns the metric associated with the labelset. +func (c *metricVec) With(labels model.LabelSet) prometheus.Metric { + c.mtx.Lock() + defer c.mtx.Unlock() + fp := labels.Fingerprint() + var ok bool + var metric prometheus.Metric + if metric, ok = c.metrics[fp]; !ok { + metric = c.factory(labelutil.ModelLabelSetToMap(labels)) + c.metrics[fp] = metric + } + return metric +} + +func (c *metricVec) Delete(labels model.LabelSet) bool { + c.mtx.Lock() + defer c.mtx.Unlock() + fp := labels.Fingerprint() + _, ok := c.metrics[fp] + if ok { + delete(c.metrics, fp) + } + return ok +} + +// prune will remove all metrics which implement the Expirable interface and have expired +// it does not take out a lock on the metrics map so whoever calls this function should do so. +func (c *metricVec) prune() { + currentTimeSec := time.Now().Unix() + for fp, m := range c.metrics { + if em, ok := m.(Expirable); ok { + if em.HasExpired(currentTimeSec, c.maxAgeSec) { + delete(c.metrics, fp) + } + } + } +} diff --git a/vendor/github.com/netobserv/loki-client-go/pkg/urlutil/url.go b/vendor/github.com/netobserv/loki-client-go/pkg/urlutil/url.go new file mode 100644 index 000000000..a403f0059 --- /dev/null +++ b/vendor/github.com/netobserv/loki-client-go/pkg/urlutil/url.go @@ -0,0 +1,59 @@ +package urlutil + +import "net/url" + +// URLValue is a url.URL that can be used as a flag. 
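+// In YAML it round-trips as a plain string; on marshal, any password in the
+// URL is masked (see MarshalYAML below). For example (illustrative host and
+// path, assumed for this sketch):
+//   url: http://user:********@loki.example:3100/loki/api/v1/push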
+type URLValue struct { + *url.URL +} + +// String implements flag.Value +func (v URLValue) String() string { + if v.URL == nil { + return "" + } + return v.URL.String() +} + +// Set implements flag.Value +func (v *URLValue) Set(s string) error { + u, err := url.Parse(s) + if err != nil { + return err + } + v.URL = u + return nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + + // An empty string means no URL has been configured. + if s == "" { + v.URL = nil + return nil + } + + return v.Set(s) +} + +// MarshalYAML implements yaml.Marshaler. +func (v URLValue) MarshalYAML() (interface{}, error) { + if v.URL == nil { + return "", nil + } + + // Mask out passwords when marshalling URLs back to YAML. + u := *v.URL + if u.User != nil { + if _, set := u.User.Password(); set { + u.User = url.UserPassword(u.User.Username(), "********") + } + } + + return u.String(), nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/LICENSE b/vendor/github.com/netobserv/netobserv-ebpf-agent/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/agent.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/agent.go new file mode 100644 index 000000000..47fab9897 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/agent.go @@ -0,0 +1,587 @@ +package agent + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "time" + + "github.com/netobserv/gopipes/pkg/node" + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + "github.com/netobserv/netobserv-ebpf-agent/pkg/exporter" + "github.com/netobserv/netobserv-ebpf-agent/pkg/flow" + "github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces" + "github.com/netobserv/netobserv-ebpf-agent/pkg/kernel" + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + promo "github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus" + "github.com/netobserv/netobserv-ebpf-agent/pkg/tracer" + + "github.com/cilium/ebpf/ringbuf" + "github.com/gavv/monotime" + ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder" + kafkago "github.com/segmentio/kafka-go" + "github.com/segmentio/kafka-go/compress" + "github.com/sirupsen/logrus" +) + +var alog = logrus.WithField("component", "agent.Flows") +var plog = logrus.WithField("component", "agent.Packets") + +// Status of the agent service. Helps on the health report as well as making some asynchronous +// tests waiting for the agent to accept flows. +type Status int + +const ( + StatusNotStarted Status = iota + StatusStarting + StatusStarted + StatusStopping + StatusStopped +) + +const ( + networkEventsDBPath = "/var/run/ovn/ovnnb_db.sock" + networkEventsOwnerName = "netobservAgent" +) + +func (s Status) String() string { + switch s { + case StatusNotStarted: + return "StatusNotStarted" + case StatusStarting: + return "StatusStarting" + case StatusStarted: + return "StatusStarted" + case StatusStopping: + return "StatusStopping" + case StatusStopped: + return "StatusStopped" + default: + return "invalid" + } +} + +func configureInformer(cfg *Config, log *logrus.Entry) ifaces.Informer { + var informer ifaces.Informer + switch cfg.ListenInterfaces { + case ListenPoll: + log.WithField("period", cfg.ListenPollPeriod). + Debug("listening for new interfaces: use polling") + informer = ifaces.NewPoller(cfg.ListenPollPeriod, cfg.BuffersLength) + case ListenWatch: + log.Debug("listening for new interfaces: use watching") + informer = ifaces.NewWatcher(cfg.BuffersLength) + default: + log.WithField("providedValue", cfg.ListenInterfaces). + Warn("wrong interface listen method. 
Using file watcher as default") + informer = ifaces.NewWatcher(cfg.BuffersLength) + } + return informer + +} + +func interfaceListener(ctx context.Context, ifaceEvents <-chan ifaces.Event, slog *logrus.Entry, processEvent func(iface ifaces.Interface, add bool)) { + for { + select { + case <-ctx.Done(): + slog.Debug("stopping interfaces' listener") + return + case event := <-ifaceEvents: + slog.WithField("event", event).Debug("received event") + switch event.Type { + case ifaces.EventAdded: + processEvent(event.Interface, true) + case ifaces.EventDeleted: + processEvent(event.Interface, false) + default: + slog.WithField("event", event).Warn("unknown event type") + } + } + } +} + +// Flows reporting agent +type Flows struct { + cfg *Config + + // input data providers + interfaces ifaces.Informer + filter InterfaceFilter + ebpf ebpfFlowFetcher + + // processing nodes to be wired in the buildAndStartPipeline method + mapTracer *flow.MapTracer + rbTracer *flow.RingBufTracer + accounter *flow.Accounter + limiter *flow.CapacityLimiter + deduper node.MiddleFunc[[]*model.Record, []*model.Record] + exporter node.TerminalFunc[[]*model.Record] + + // elements used to decorate flows with extra information + interfaceNamer flow.InterfaceNamer + agentIP net.IP + + status Status + promoServer *http.Server + sampleDecoder *ovnobserv.SampleDecoder +} + +// ebpfFlowFetcher abstracts the interface of ebpf.FlowFetcher to allow dependency injection in tests +type ebpfFlowFetcher interface { + io.Closer + Register(iface ifaces.Interface) error + UnRegister(iface ifaces.Interface) error + AttachTCX(iface ifaces.Interface) error + DetachTCX(iface ifaces.Interface) error + + LookupAndDeleteMap(*metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent + DeleteMapsStaleEntries(timeOut time.Duration) + ReadRingBuf() (ringbuf.Record, error) +} + +// FlowsAgent instantiates a new agent, given a configuration. 
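+// A minimal sketch of how the agent is driven (illustrative, not upstream
+// code; building the Config from the environment is elided):
+//   agent, err := FlowsAgent(cfg)
+//   if err != nil { /* handle */ }
+//   err = agent.Run(ctx) // blocks until ctx is canceled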
+func FlowsAgent(cfg *Config) (*Flows, error) { + alog.Info("initializing Flows agent") + + // manage deprecated configs + manageDeprecatedConfigs(cfg) + + // configure informer for new interfaces + var informer = configureInformer(cfg, alog) + + alog.Debug("acquiring Agent IP") + agentIP, err := fetchAgentIP(cfg) + if err != nil { + return nil, fmt.Errorf("acquiring Agent IP: %w", err) + } + alog.Debug("agent IP: " + agentIP.String()) + + // initialize metrics + metricsSettings := &metrics.Settings{ + PromConnectionInfo: metrics.PromConnectionInfo{ + Address: cfg.MetricsServerAddress, + Port: cfg.MetricsPort, + }, + Prefix: cfg.MetricsPrefix, + } + if cfg.MetricsTLSCertPath != "" && cfg.MetricsTLSKeyPath != "" { + metricsSettings.PromConnectionInfo.TLS = &metrics.PromTLS{ + CertPath: cfg.MetricsTLSCertPath, + KeyPath: cfg.MetricsTLSKeyPath, + } + } + m := metrics.NewMetrics(metricsSettings) + + var s *ovnobserv.SampleDecoder + if cfg.EnableNetworkEventsMonitoring { + if !kernel.IsKernelOlderThan("5.14.0") { + if s, err = ovnobserv.NewSampleDecoderWithDefaultCollector(context.Background(), networkEventsDBPath, + networkEventsOwnerName, cfg.NetworkEventsMonitoringGroupID); err != nil { + alog.Warnf("failed to create Network Events sample decoder: %v for id: %d", err, cfg.NetworkEventsMonitoringGroupID) + } + } else { + alog.Warn("old kernel doesn't support network events monitoring skip") + } + } + + // configure selected exporter + exportFunc, err := buildFlowExporter(cfg, m, s) + if err != nil { + return nil, err + } + + ingress, egress := flowDirections(cfg) + debug := false + if cfg.LogLevel == logrus.TraceLevel.String() || cfg.LogLevel == logrus.DebugLevel.String() { + debug = true + } + filterRules := make([]*tracer.FilterConfig, 0) + if cfg.EnableFlowFilter { + var flowFilters []*FlowFilter + if err := json.Unmarshal([]byte(cfg.FlowFilterRules), &flowFilters); err != nil { + return nil, err + } + + for _, r := range flowFilters { + filterRules = append(filterRules, &tracer.FilterConfig{ + FilterAction: r.FilterAction, + FilterDirection: r.FilterDirection, + FilterIPCIDR: r.FilterIPCIDR, + FilterProtocol: r.FilterProtocol, + FilterPeerIP: r.FilterPeerIP, + FilterDestinationPort: tracer.ConvertFilterPortsToInstr(r.FilterDestinationPort, r.FilterDestinationPortRange, r.FilterDestinationPorts), + FilterSourcePort: tracer.ConvertFilterPortsToInstr(r.FilterSourcePort, r.FilterSourcePortRange, r.FilterSourcePorts), + FilterPort: tracer.ConvertFilterPortsToInstr(r.FilterPort, r.FilterPortRange, r.FilterPorts), + FilterTCPFlags: r.FilterTCPFlags, + FilterDrops: r.FilterDrops, + FilterSample: r.FilterSample, + }) + } + } + ebpfConfig := &tracer.FlowFetcherConfig{ + EnableIngress: ingress, + EnableEgress: egress, + Debug: debug, + Sampling: cfg.Sampling, + CacheMaxSize: cfg.CacheMaxFlows, + EnablePktDrops: cfg.EnablePktDrops, + EnableDNSTracker: cfg.EnableDNSTracking, + DNSTrackerPort: cfg.DNSTrackingPort, + EnableRTT: cfg.EnableRTT, + EnableNetworkEventsMonitoring: cfg.EnableNetworkEventsMonitoring, + NetworkEventsMonitoringGroupID: cfg.NetworkEventsMonitoringGroupID, + EnableFlowFilter: cfg.EnableFlowFilter, + EnablePktTranslation: cfg.EnablePktTranslationTracking, + UseEbpfManager: cfg.EbpfProgramManagerMode, + BpfManBpfFSPath: cfg.BpfManBpfFSPath, + FilterConfig: filterRules, + } + + fetcher, err := tracer.NewFlowFetcher(ebpfConfig) + if err != nil { + return nil, err + } + + return flowsAgent(cfg, m, informer, fetcher, exportFunc, agentIP, s) +} + +// flowsAgent is a private constructor 
with injectable dependencies, usable for tests +func flowsAgent(cfg *Config, m *metrics.Metrics, + informer ifaces.Informer, + fetcher ebpfFlowFetcher, + exporter node.TerminalFunc[[]*model.Record], + agentIP net.IP, + s *ovnobserv.SampleDecoder, +) (*Flows, error) { + var filter InterfaceFilter + + switch { + case len(cfg.InterfaceIPs) > 0 && (len(cfg.Interfaces) > 0 || len(cfg.ExcludeInterfaces) > 0): + return nil, fmt.Errorf("INTERFACES/EXCLUDE_INTERFACES and INTERFACE_IPS are mutually exclusive") + + case len(cfg.InterfaceIPs) > 0: + // configure ip interface filter + f, err := initIPInterfaceFilter(cfg.InterfaceIPs, IPsFromInterface) + if err != nil { + return nil, fmt.Errorf("configuring interface ip filter: %w", err) + } + filter = &f + + default: + // configure allow/deny regexp interfaces filter + f, err := initRegexpInterfaceFilter(cfg.Interfaces, cfg.ExcludeInterfaces) + if err != nil { + return nil, fmt.Errorf("configuring interface filters: %w", err) + } + filter = &f + } + + registerer := ifaces.NewRegisterer(informer, cfg.BuffersLength) + + interfaceNamer := func(ifIndex int) string { + iface, ok := registerer.IfaceNameForIndex(ifIndex) + if !ok { + return "unknown" + } + return iface + } + var promoServer *http.Server + if cfg.MetricsEnable { + promoServer = promo.InitializePrometheus(m.Settings) + } + + samplingGauge := m.CreateSamplingRate() + samplingGauge.Set(float64(cfg.Sampling)) + + mapTracer := flow.NewMapTracer(fetcher, cfg.CacheActiveTimeout, cfg.StaleEntriesEvictTimeout, m) + rbTracer := flow.NewRingBufTracer(fetcher, mapTracer, cfg.CacheActiveTimeout, m) + accounter := flow.NewAccounter(cfg.CacheMaxFlows, cfg.CacheActiveTimeout, time.Now, monotime.Now, m) + limiter := flow.NewCapacityLimiter(m) + var deduper node.MiddleFunc[[]*model.Record, []*model.Record] + if cfg.Deduper == DeduperFirstCome { + deduper = flow.Dedupe(cfg.DeduperFCExpiry, cfg.DeduperJustMark, cfg.DeduperMerge, interfaceNamer, m) + } + + return &Flows{ + ebpf: fetcher, + exporter: exporter, + interfaces: registerer, + filter: filter, + cfg: cfg, + mapTracer: mapTracer, + rbTracer: rbTracer, + accounter: accounter, + limiter: limiter, + deduper: deduper, + agentIP: agentIP, + interfaceNamer: interfaceNamer, + promoServer: promoServer, + sampleDecoder: s, + }, nil +} + +func flowDirections(cfg *Config) (ingress, egress bool) { + switch cfg.Direction { + case DirectionIngress: + return true, false + case DirectionEgress: + return false, true + case DirectionBoth: + return true, true + default: + alog.Warnf("unknown DIRECTION %q. 
Tracing both ingress and egress traffic", cfg.Direction) + return true, true + } +} + +func buildFlowExporter(cfg *Config, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (node.TerminalFunc[[]*model.Record], error) { + switch cfg.Export { + case "grpc": + return buildGRPCExporter(cfg, m, s) + case "kafka": + return buildKafkaExporter(cfg, m, s) + case "ipfix+udp": + return buildIPFIXExporter(cfg, "udp") + case "ipfix+tcp": + return buildIPFIXExporter(cfg, "tcp") + case "direct-flp": + return buildFlowDirectFLPExporter(cfg) + default: + return nil, fmt.Errorf("wrong flow export type %s", cfg.Export) + } +} + +func buildGRPCExporter(cfg *Config, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (node.TerminalFunc[[]*model.Record], error) { + if cfg.TargetHost == "" || cfg.TargetPort == 0 { + return nil, fmt.Errorf("missing target host or port: %s:%d", + cfg.TargetHost, cfg.TargetPort) + } + grpcExporter, err := exporter.StartGRPCProto(cfg.TargetHost, cfg.TargetPort, cfg.GRPCMessageMaxFlows, m, s) + if err != nil { + return nil, err + } + return grpcExporter.ExportFlows, nil +} + +func buildFlowDirectFLPExporter(cfg *Config) (node.TerminalFunc[[]*model.Record], error) { + flpExporter, err := exporter.StartDirectFLP(cfg.FLPConfig, cfg.BuffersLength) + if err != nil { + return nil, err + } + return flpExporter.ExportFlows, nil +} + +func buildKafkaExporter(cfg *Config, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (node.TerminalFunc[[]*model.Record], error) { + if len(cfg.KafkaBrokers) == 0 { + return nil, errors.New("at least one Kafka broker is needed") + } + var compression compress.Compression + if err := compression.UnmarshalText([]byte(cfg.KafkaCompression)); err != nil { + return nil, fmt.Errorf("wrong Kafka compression value %s. Admitted values are "+ + "none, gzip, snappy, lz4, zstd: %w", cfg.KafkaCompression, err) + } + transport := kafkago.Transport{} + if cfg.KafkaEnableTLS { + tlsConfig, err := buildTLSConfig(cfg) + if err != nil { + return nil, err + } + transport.TLS = tlsConfig + } + if cfg.KafkaEnableSASL { + mechanism, err := buildSASLConfig(cfg) + if err != nil { + return nil, err + } + transport.SASL = mechanism + } + return (&exporter.KafkaProto{ + Writer: &kafkago.Writer{ + Addr: kafkago.TCP(cfg.KafkaBrokers...), + Topic: cfg.KafkaTopic, + BatchSize: cfg.KafkaBatchMessages, + // Assigning KafkaBatchSize to BatchBytes instead of BatchSize might be confusing here. + // The reason is that the "standard" Kafka name for this variable is "batch.size", + // which specifies the size of messages in terms of bytes, and not in terms of entries. + // We have decided to hide this library implementation detail and expose to the + // customer the common, standard name and meaning for batch.size + BatchBytes: int64(cfg.KafkaBatchSize), + // Segmentio's Kafka-go does not behave as standard Kafka library, and would + // throttle any Write invocation until reaching the timeout. 
+ // Since we invoke write once each CacheActiveTimeout, we can safely disable this + // timeout throttling + // https://github.com/netobserv/flowlogs-pipeline/pull/233#discussion_r897830057 + BatchTimeout: time.Nanosecond, + Async: cfg.KafkaAsync, + Compression: compression, + Transport: &transport, + Balancer: &kafkago.Hash{}, + }, + Metrics: m, + SampleDecoder: s, + }).ExportFlows, nil +} + +func buildIPFIXExporter(cfg *Config, proto string) (node.TerminalFunc[[]*model.Record], error) { + if cfg.TargetHost == "" || cfg.TargetPort == 0 { + return nil, fmt.Errorf("missing target host or port: %s:%d", + cfg.TargetHost, cfg.TargetPort) + } + ipfix, err := exporter.StartIPFIXExporter(cfg.TargetHost, cfg.TargetPort, proto) + if err != nil { + return nil, err + } + return ipfix.ExportFlows, nil +} + +// Run a Flows agent. The function will keep running in the same thread +// until the passed context is canceled +func (f *Flows) Run(ctx context.Context) error { + f.status = StatusStarting + alog.Info("starting Flows agent") + graph, err := f.buildAndStartPipeline(ctx) + if err != nil { + return fmt.Errorf("starting processing graph: %w", err) + } + + f.status = StatusStarted + alog.Info("Flows agent successfully started") + <-ctx.Done() + + f.status = StatusStopping + alog.Info("stopping Flows agent") + if err := f.ebpf.Close(); err != nil { + alog.WithError(err).Warn("eBPF resources not correctly closed") + } + + alog.Debug("waiting for all nodes to finish their pending work") + <-graph.Done() + if f.promoServer != nil { + alog.Debug("closing prometheus server") + if err := f.promoServer.Close(); err != nil { + alog.WithError(err).Warn("error when closing prometheus server") + } + } + if f.sampleDecoder != nil { + f.sampleDecoder.Shutdown() + } + f.status = StatusStopped + alog.Info("Flows agent stopped") + return nil +} + +func (f *Flows) Status() Status { + return f.status +} + +// interfacesManager uses an informer to check new/deleted network interfaces. For each running +// interface, it registers a flow ebpfFetcher that will forward new flows to the returned channel +// TODO: consider move this method and "onInterfaceEvent" to another type +func (f *Flows) interfacesManager(ctx context.Context) error { + slog := alog.WithField("function", "interfacesManager") + + slog.Debug("subscribing for network interface events") + ifaceEvents, err := f.interfaces.Subscribe(ctx) + if err != nil { + return fmt.Errorf("instantiating interfaces' informer: %w", err) + } + + go interfaceListener(ctx, ifaceEvents, slog, f.onInterfaceEvent) + + return nil +} + +// buildAndStartPipeline creates the ETL flow processing graph. +// For a more visual view, check the docs/architecture.md document. 
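+// The wiring built below, roughly: rbTracer feeds accounter; accounter and
+// mapTracer both feed the optional deduper (or the limiter directly); then
+// limiter -> decorator -> exporter.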
+func (f *Flows) buildAndStartPipeline(ctx context.Context) (*node.Terminal[[]*model.Record], error) { + + if !f.cfg.EbpfProgramManagerMode { + alog.Debug("registering interfaces' listener in background") + err := f.interfacesManager(ctx) + if err != nil { + return nil, err + } + } + alog.Debug("connecting flows' processing graph") + mapTracer := node.AsStart(f.mapTracer.TraceLoop(ctx, f.cfg.ForceGC)) + rbTracer := node.AsStart(f.rbTracer.TraceLoop(ctx)) + + accounter := node.AsMiddle(f.accounter.Account, + node.ChannelBufferLen(f.cfg.BuffersLength)) + + limiter := node.AsMiddle(f.limiter.Limit, + node.ChannelBufferLen(f.cfg.BuffersLength)) + + decorator := node.AsMiddle(flow.Decorate(f.agentIP, f.interfaceNamer), + node.ChannelBufferLen(f.cfg.BuffersLength)) + + ebl := f.cfg.ExporterBufferLength + if ebl == 0 { + ebl = f.cfg.BuffersLength + } + + export := node.AsTerminal(f.exporter, + node.ChannelBufferLen(ebl)) + + rbTracer.SendsTo(accounter) + + if f.deduper != nil { + deduper := node.AsMiddle(f.deduper, node.ChannelBufferLen(f.cfg.BuffersLength)) + mapTracer.SendsTo(deduper) + accounter.SendsTo(deduper) + deduper.SendsTo(limiter) + } else { + mapTracer.SendsTo(limiter) + accounter.SendsTo(limiter) + } + limiter.SendsTo(decorator) + decorator.SendsTo(export) + + alog.Debug("starting graph") + mapTracer.Start() + rbTracer.Start() + return export, nil +} + +func (f *Flows) onInterfaceEvent(iface ifaces.Interface, add bool) { + // ignore interfaces that do not match the user configuration acceptance/exclusion lists + allowed, err := f.filter.Allowed(iface.Name) + if err != nil { + alog.WithField("interface", iface).Errorf("encountered error determining if interface is allowed: %v", err) + return + } + if !allowed { + alog.WithField("interface", iface). + Debug("interface does not match the allow/exclusion filters. Ignoring") + return + } + if add { + alog.WithField("interface", iface).Info("interface detected. trying to attach TCX hook") + if err := f.ebpf.AttachTCX(iface); err != nil { + alog.WithField("interface", iface).WithError(err). + Info("can't attach to TCx hook flow ebpfFetcher. fall back to use legacy TC hook") + if err := f.ebpf.Register(iface); err != nil { + alog.WithField("interface", iface).WithError(err). + Warn("can't register flow ebpfFetcher. Ignoring") + return + } + } + } else { + alog.WithField("interface", iface).Info("interface deleted. trying to detach TCX hook") + if err := f.ebpf.DetachTCX(iface); err != nil { + alog.WithField("interface", iface).WithError(err). + Info("can't detach from TCx hook flow ebpfFetcher. fall back to use legacy TC hook") + if err := f.ebpf.UnRegister(iface); err != nil { + alog.WithField("interface", iface).WithError(err). + Warn("can't unregister flow ebpfFetcher. 
Ignoring") + return + } + } + + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/config.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/config.go new file mode 100644 index 000000000..9968b01a5 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/config.go @@ -0,0 +1,277 @@ +package agent + +import ( + "time" + + "github.com/sirupsen/logrus" +) + +var clog = logrus.WithField("component", "config") + +const ( + ListenPoll = "poll" + ListenWatch = "watch" + DeduperNone = "none" + DeduperFirstCome = "firstCome" + DirectionIngress = "ingress" + DirectionEgress = "egress" + DirectionBoth = "both" + + IPTypeAny = "any" + IPTypeIPV4 = "ipv4" + IPTypeIPV6 = "ipv6" + + IPIfaceExternal = "external" + IPIfaceLocal = "local" + IPIfaceNamedPrefix = "name:" +) + +type FlowFilter struct { + // FilterDirection is the direction of the flow filter. + // Possible values are "Ingress" or "Egress". + FilterDirection string `json:"direction,omitempty"` + // FilterIPCIDR is the IP CIDR to filter flows. + // Example: 10.10.10.0/24 or 100:100:100:100::/64, default is 0.0.0.0/0 + FilterIPCIDR string `json:"ip_cidr,omitempty"` + // FilterProtocol is the protocol to filter flows. + // supported protocols: TCP, UDP, SCTP, ICMP, ICMPv6 + FilterProtocol string `json:"protocol,omitempty"` + // FilterSourcePort is the source port to filter flows. + FilterSourcePort int32 `json:"source_port,omitempty"` + // FilterDestinationPort is the destination port to filter flows. + FilterDestinationPort int32 `json:"destination_port,omitempty"` + // FilterPort is the port to filter flows, can be use for either source or destination port. + FilterPort int32 `json:"port,omitempty"` + // FilterSourcePortRange is the source port range to filter flows. + // Example: 8000-8010 + FilterSourcePortRange string `json:"source_port_range,omitempty"` + // FilterSourcePorts is two source ports to filter flows. + // Example: 8000,8010 + FilterSourcePorts string `json:"source_ports,omitempty"` + // FilterDestinationPortRange is the destination port range to filter flows. + // Example: 8000-8010 + FilterDestinationPortRange string `json:"destination_port_range,omitempty"` + // FilterDestinationPorts is two destination ports to filter flows. + // Example: 8000,8010 + FilterDestinationPorts string `json:"destination_ports,omitempty"` + // FilterPortRange is the port range to filter flows, can be used for either source or destination port. + // Example: 8000-8010 + FilterPortRange string `json:"port_range,omitempty"` + // FilterPorts is two ports option to filter flows, can be used for either source or destination port. + // Example: 8000,8010 + FilterPorts string `json:"ports,omitempty"` + // FilterICMPType is the ICMP type to filter flows. + FilterICMPType int `json:"icmp_type,omitempty"` + // FilterICMPCode is the ICMP code to filter flows. + FilterICMPCode int `json:"icmp_code,omitempty"` + // FilterPeerIP is the IP to filter flows. + // Example: 10.10.10.10 + FilterPeerIP string `json:"peer_ip,omitempty"` + // FilterAction is the action to filter flows. + // Possible values are "Accept" or "Reject". + FilterAction string `json:"action,omitempty"` + // FilterTCPFlags is the TCP flags to filter flows. + // possible values are: SYN, SYN-ACK, ACK, FIN, RST, PSH, URG, ECE, CWR, FIN-ACK, RST-ACK + FilterTCPFlags string `json:"tcp_flags,omitempty"` + // FilterDrops allow filtering flows with packet drops, default is false. 
+	FilterDrops bool `json:"drops,omitempty"`
+	// FilterSample is the sample rate this matching flow will use
+	FilterSample uint32 `json:"sample,omitempty"`
+}
+
+type Config struct {
+	// AgentIP allows overriding the reported Agent IP address on each flow.
+	AgentIP string `env:"AGENT_IP"`
+	// AgentIPIface specifies which interface the agent should pick the IP address from in order to
+	// report it in the AgentIP field on each flow. Accepted values are: external (default), local,
+	// or name: (e.g. name:eth0).
+	// If the AgentIP configuration property is set, this property has no effect.
+	AgentIPIface string `env:"AGENT_IP_IFACE" envDefault:"external"`
+	// AgentIPType specifies which type of IP address (IPv4, IPv6, or any) the agent should report
+	// in the AgentID field of each flow. Accepted values are: any (default), ipv4, ipv6.
+	// If the AgentIP configuration property is set, this property has no effect.
+	AgentIPType string `env:"AGENT_IP_TYPE" envDefault:"any"`
+	// Export selects the exporter protocol.
+	// Accepted values for Flows are: grpc (default), kafka, ipfix+udp, ipfix+tcp or direct-flp.
+	// Accepted values for Packets are: grpc (default) or direct-flp
+	Export string `env:"EXPORT" envDefault:"grpc"`
+	// TargetHost is the host name or IP of the flow or packet collector, when the EXPORT variable is
+	// set to "grpc"
+	TargetHost string `env:"TARGET_HOST"`
+	// TargetPort is the port of the flow or packet collector, when the EXPORT variable is set to "grpc"
+	TargetPort int `env:"TARGET_PORT"`
+	// GRPCMessageMaxFlows specifies the limit, in number of flows, of each GRPC message. Messages
+	// larger than that number will be split and submitted sequentially.
+	GRPCMessageMaxFlows int `env:"GRPC_MESSAGE_MAX_FLOWS" envDefault:"10000"`
+	// Interfaces contains the interface names from which flows will be collected. If empty, the agent
+	// will fetch all the interfaces in the system, except the ones listed in ExcludeInterfaces.
+	// If an entry is enclosed by slashes (e.g. `/br-/`), it will match as a regular expression,
+	// otherwise it will be matched as a case-sensitive string.
+	Interfaces []string `env:"INTERFACES" envSeparator:","`
+	// ExcludeInterfaces contains the interface names that will be excluded from flow tracing. Default:
+	// "lo" (loopback).
+	// If an entry is enclosed by slashes (e.g. `/br-/`), it will match as a regular expression,
+	// otherwise it will be matched as a case-sensitive string.
+	ExcludeInterfaces []string `env:"EXCLUDE_INTERFACES" envSeparator:"," envDefault:"lo"`
+	// BuffersLength establishes the length of the communication channels between the different processing
+	// stages
+	BuffersLength int `env:"BUFFERS_LENGTH" envDefault:"50"`
+	// InterfaceIPs is a list of CIDR-notation IPs/Subnets where any interface containing an IP in the given ranges
+	// should be listened on. This allows users to specify interfaces without knowing the OS-assigned interface names.
+	// Exclusive with Interfaces/ExcludeInterfaces.
+	InterfaceIPs []string `env:"INTERFACE_IPS" envSeparator:","`
+	// ExporterBufferLength establishes the length of the buffer of flow batches (not individual flows)
+	// that can be accumulated before the Kafka or GRPC exporter. When this buffer is full (e.g.
+	// because the Kafka or GRPC endpoint is slow), incoming flow batches will be dropped. If unset,
+	// its value is the same as the BUFFERS_LENGTH property.
+	ExporterBufferLength int `env:"EXPORTER_BUFFER_LENGTH"`
+	// CacheMaxFlows specifies how many flows can be accumulated in the accounting cache before
+	// being flushed for their later export
+	CacheMaxFlows int `env:"CACHE_MAX_FLOWS" envDefault:"5000"`
+	// CacheActiveTimeout specifies the maximum duration that flows are kept in the accounting
+	// cache before being flushed for their later export
+	CacheActiveTimeout time.Duration `env:"CACHE_ACTIVE_TIMEOUT" envDefault:"5s"`
+	// Deduper specifies the deduper type. Accepted values are "none" (disabled) and "firstCome".
+	// When enabled, it will detect duplicate flows (flows that have been detected e.g. through
+	// both the physical and a virtual interface).
+	// "firstCome" will forward only flows from the first interface the flows are received from.
+	Deduper string `env:"DEDUPER" envDefault:"none"`
+	// DeduperFCExpiry specifies the expiry duration of the flows "firstCome" deduplicator. After
+	// a flow hasn't been received for that expiry time, the deduplicator forgets it. That means
+	// that a flow from a connection that has been inactive during that period could be forwarded
+	// again from a different interface.
+	// If the value is not set, it will default to 2 * CacheActiveTimeout
+	DeduperFCExpiry time.Duration `env:"DEDUPER_FC_EXPIRY"`
+	// DeduperJustMark will just mark duplicates (boolean field) instead of dropping them.
+	DeduperJustMark bool `env:"DEDUPER_JUST_MARK" envDefault:"false"`
+	// DeduperMerge will merge duplicated flows and generate a list of interface and direction pairs
+	DeduperMerge bool `env:"DEDUPER_MERGE" envDefault:"true"`
+	// Direction allows selecting which flows to trace according to their direction. Accepted values
+	// are "ingress", "egress" or "both" (default).
+	Direction string `env:"DIRECTION" envDefault:"both"`
+	// Logger level. From more to less verbose: trace, debug, info, warn, error, fatal, panic.
+	LogLevel string `env:"LOG_LEVEL" envDefault:"info"`
+	// Sampling holds the rate at which packets should be sampled and sent to the target collector.
+	// E.g. if set to 100, one out of 100 packets, on average, will be sent to the target collector.
+	Sampling int `env:"SAMPLING" envDefault:"0"`
+	// ListenInterfaces specifies the mechanism used by the agent to listen for added or removed
+	// network interfaces. Accepted values are "watch" (default) or "poll".
+	// If the value is "watch", interfaces are traced immediately after they are created. This is
+	// the recommended setting for most configurations. The "poll" value is a fallback mechanism that
+	// periodically queries the current network interfaces (frequency specified by ListenPollPeriod).
+	ListenInterfaces string `env:"LISTEN_INTERFACES" envDefault:"watch"`
+	// ListenPollPeriod specifies the periodicity to query the network interfaces when the
+	// ListenInterfaces value is set to "poll".
+	ListenPollPeriod time.Duration `env:"LISTEN_POLL_PERIOD" envDefault:"10s"`
+	// KafkaBrokers is a comma-separated list of the addresses of the brokers of the Kafka cluster
+	// that this agent is configured to send messages to.
+	KafkaBrokers []string `env:"KAFKA_BROKERS" envSeparator:","`
+	// KafkaTopic is the name of the topic where the flows' processor will receive the flows from.
+	KafkaTopic string `env:"KAFKA_TOPIC" envDefault:"network-flows"`
+	// KafkaBatchMessages sets the limit on how many messages will be buffered before being sent to a
+	// partition.
+	KafkaBatchMessages int `env:"KAFKA_BATCH_MESSAGES" envDefault:"1000"`
+	// KafkaBatchSize sets the limit, in bytes, of the maximum size of a request before being sent
+	// to a partition.
+	KafkaBatchSize int `env:"KAFKA_BATCH_SIZE" envDefault:"1048576"`
+	// KafkaAsync, if true, makes the message writing process never block. It also means that
+	// errors are ignored, since the caller will not receive the returned value.
+	KafkaAsync bool `env:"KAFKA_ASYNC" envDefault:"true"`
+	// KafkaCompression sets the compression codec to be used to compress messages. The accepted
+	// values are: none (default), gzip, snappy, lz4, zstd.
+	KafkaCompression string `env:"KAFKA_COMPRESSION" envDefault:"none"`
+	// KafkaEnableTLS, when set to true, enables TLS.
+	KafkaEnableTLS bool `env:"KAFKA_ENABLE_TLS" envDefault:"false"`
+	// KafkaTLSInsecureSkipVerify skips server certificate verification in TLS connections.
+	KafkaTLSInsecureSkipVerify bool `env:"KAFKA_TLS_INSECURE_SKIP_VERIFY" envDefault:"false"`
+	// KafkaTLSCACertPath is the path to the Kafka server certificate for TLS connections.
+	KafkaTLSCACertPath string `env:"KAFKA_TLS_CA_CERT_PATH"`
+	// KafkaTLSUserCertPath is the path to the user (client) certificate for mTLS connections.
+	KafkaTLSUserCertPath string `env:"KAFKA_TLS_USER_CERT_PATH"`
+	// KafkaTLSUserKeyPath is the path to the user (client) private key for mTLS connections.
+	KafkaTLSUserKeyPath string `env:"KAFKA_TLS_USER_KEY_PATH"`
+	// KafkaEnableSASL, when set to true, enables SASL authentication.
+	KafkaEnableSASL bool `env:"KAFKA_ENABLE_SASL" envDefault:"false"`
+	// KafkaSASLType is the type of SASL mechanism: plain or scramSHA512.
+	KafkaSASLType string `env:"KAFKA_SASL_TYPE" envDefault:"plain"`
+	// KafkaSASLClientIDPath is the path to the client ID (username) for SASL auth.
+	KafkaSASLClientIDPath string `env:"KAFKA_SASL_CLIENT_ID_PATH"`
+	// KafkaSASLClientSecretPath is the path to the client secret (password) for SASL auth.
+	KafkaSASLClientSecretPath string `env:"KAFKA_SASL_CLIENT_SECRET_PATH"`
+	// ProfilePort sets the listening port for Go's Pprof tool. If it is not set, profiling is disabled.
+	ProfilePort int `env:"PROFILE_PORT"`
+	// Flowlogs-pipeline configuration as YAML or JSON, used when export is "direct-flp". Cf https://github.com/netobserv/flowlogs-pipeline
+	// The "ingest" stage must be omitted from this configuration, since it is handled internally by the agent. The first stage should follow "preset-ingester".
+	// E.g.: {"pipeline":[{"name": "writer","follows": "preset-ingester"}],"parameters":[{"name": "writer","write": {"type": "stdout"}}]}.
+	FLPConfig string `env:"FLP_CONFIG"`
+	// EnableRTT enables RTT calculations for the flows; default is false (disabled), set to true to enable.
+	// This feature requires the flows agent to attach at both Ingress and Egress hookpoints.
+	// If Ingress and Egress are not both enabled, this feature stays disabled even if set to true via env.
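+	// For example (illustrative): ENABLE_RTT=true with DIRECTION=both activates RTT calculation,
+	// whereas ENABLE_RTT=true with DIRECTION=ingress leaves the feature disabled.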
+	EnableRTT bool `env:"ENABLE_RTT" envDefault:"false"`
+	// ForceGC forces a Go garbage collection run at the end of every map eviction; default is true.
+	ForceGC bool `env:"FORCE_GARBAGE_COLLECTION" envDefault:"true"`
+	// EnablePktDrops enables the packet drops eBPF hook, to account for dropped flows.
+	EnablePktDrops bool `env:"ENABLE_PKT_DROPS" envDefault:"false"`
+	// EnableDNSTracking enables the DNS tracking eBPF hook, to track DNS query/response flows.
+	EnableDNSTracking bool `env:"ENABLE_DNS_TRACKING" envDefault:"false"`
+	// DNSTrackingPort defines which port the DNS service is mapped to at the pod level,
+	// so that DNS can be tracked at the pod level.
+	DNSTrackingPort uint16 `env:"DNS_TRACKING_PORT" envDefault:"53"`
+	// StaleEntriesEvictTimeout specifies the maximum duration that stale entries are kept
+	// before being deleted, default is 5 seconds.
+	StaleEntriesEvictTimeout time.Duration `env:"STALE_ENTRIES_EVICT_TIMEOUT" envDefault:"5s"`
+	// EnablePCA enables the Packet Capture Agent (PCA). By default, PCA is off.
+	EnablePCA bool `env:"ENABLE_PCA" envDefault:"false"`
+	// MetricsEnable enables the HTTP server that collects eBPF agent metrics; default is false.
+	MetricsEnable bool `env:"METRICS_ENABLE" envDefault:"false"`
+	// MetricsServerAddress is the address of the server that collects eBPF agent metrics.
+	MetricsServerAddress string `env:"METRICS_SERVER_ADDRESS"`
+	// MetricsPort is the port of the server that collects eBPF agent metrics.
+	MetricsPort int `env:"METRICS_SERVER_PORT" envDefault:"9090"`
+	// MetricsTLSCertPath is the path to the server certificate for TLS connections.
+	MetricsTLSCertPath string `env:"METRICS_TLS_CERT_PATH"`
+	// MetricsTLSKeyPath is the path to the server private key for TLS connections.
+	MetricsTLSKeyPath string `env:"METRICS_TLS_KEY_PATH"`
+	// MetricsPrefix is the prefix of the metrics that are sent to the server.
+	MetricsPrefix string `env:"METRICS_PREFIX" envDefault:"ebpf_agent_"`
+	// EnableFlowFilter enables the flow filter; default is false.
+	EnableFlowFilter bool `env:"ENABLE_FLOW_FILTER" envDefault:"false"`
+	// FlowFilterRules is a JSON-encoded list of flow filter rules.
+	FlowFilterRules string `env:"FLOW_FILTER_RULES"`
+	// EnableNetworkEventsMonitoring enables monitoring network plugin events; default is false.
+	EnableNetworkEventsMonitoring bool `env:"ENABLE_NETWORK_EVENTS_MONITORING" envDefault:"false"`
+	// NetworkEventsMonitoringGroupID allows the eBPF hook to process samples only for a specific
+	// group ID, ignoring the rest.
+	NetworkEventsMonitoringGroupID int `env:"NETWORK_EVENTS_MONITORING_GROUP_ID" envDefault:"10"`
+	// EnablePktTranslationTracking allows tracking packets after translation (for example, NAT); default is false.
+	EnablePktTranslationTracking bool `env:"ENABLE_PKT_TRANSLATION" envDefault:"false"`
+	// EbpfProgramManagerMode is enabled when the eBPF manager handles the NetObserv eBPF programs life cycle; default is false.
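+	// When enabled, the agent does not subscribe to network interface events itself (see
+	// buildAndStartPipeline in packets_agent.go) and leaves program attachment to the manager.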
+	EbpfProgramManagerMode bool `env:"EBPF_PROGRAM_MANAGER_MODE" envDefault:"false"`
+	// BpfManBpfFSPath is the user-configurable eBPF manager mount path.
+	BpfManBpfFSPath string `env:"BPFMAN_BPF_FS_PATH" envDefault:"/run/netobserv/maps"`
+
+	/* Deprecated configs are listed below this line
+	 * See manageDeprecatedConfigs function for details
+	 */
+
+	// Deprecated: FlowsTargetHost is replaced by TargetHost
+	FlowsTargetHost string `env:"FLOWS_TARGET_HOST"`
+	// Deprecated: FlowsTargetPort is replaced by TargetPort
+	FlowsTargetPort int `env:"FLOWS_TARGET_PORT"`
+	// Deprecated: PCAServerPort is replaced by TargetPort
+	PCAServerPort int `env:"PCA_SERVER_PORT"`
+}
+
+func manageDeprecatedConfigs(cfg *Config) {
+	if len(cfg.FlowsTargetHost) != 0 {
+		clog.Infof("Using deprecated FlowsTargetHost %s", cfg.FlowsTargetHost)
+		cfg.TargetHost = cfg.FlowsTargetHost
+	}
+
+	if cfg.FlowsTargetPort != 0 {
+		clog.Infof("Using deprecated FlowsTargetPort %d", cfg.FlowsTargetPort)
+		cfg.TargetPort = cfg.FlowsTargetPort
+	} else if cfg.PCAServerPort != 0 {
+		clog.Infof("Using deprecated PCAServerPort %d", cfg.PCAServerPort)
+		cfg.TargetPort = cfg.PCAServerPort
+	}
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/filter.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/filter.go
new file mode 100644
index 000000000..366bcd25d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/filter.go
@@ -0,0 +1,152 @@
+package agent
+
+import (
+	"fmt"
+	"net"
+	"net/netip"
+	"regexp"
+	"strings"
+)
+
+type InterfaceFilter interface {
+	Allowed(iface string) (bool, error)
+}
+
+type ipInterfaceFilter struct {
+	allowedIPs []netip.Prefix
+	// Almost always going to be a wrapper around getting
+	// the interface from net.InterfaceByName and then calling
+	// .Addrs() on the interface
+	ipsFromIface func(ifaceName string) ([]netip.Addr, error)
+}
+
+// Default function for getting the list of IPs configured
+// for a specific network interface
+func IPsFromInterface(ifaceName string) ([]netip.Addr, error) {
+	iface, err := net.InterfaceByName(ifaceName)
+	if err != nil {
+		return []netip.Addr{}, fmt.Errorf("error retrieving interface by name: %w", err)
+	}
+	addrs, err := iface.Addrs()
+	if err != nil {
+		return []netip.Addr{}, fmt.Errorf("error retrieving addresses from interface: %w", err)
+	}
+
+	interfaceAddrs := []netip.Addr{}
+	for _, addr := range addrs {
+		prefix, err := netip.ParsePrefix(addr.String())
+		if err != nil {
+			return []netip.Addr{}, fmt.Errorf("parsing given ip to netip.Addr: %w", err)
+		}
+		interfaceAddrs = append(interfaceAddrs, prefix.Addr())
+	}
+	return interfaceAddrs, nil
+}
+
+// initIPInterfaceFilter allows filtering network interfaces that are accepted/excluded by the user,
+// according to the provided INTERFACE_IPS from the configuration. It allows an interface when at
+// least one of its IP addresses belongs to one of the provided CIDRs.
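+// For example (illustrative):
+//
+//	f, _ := initIPInterfaceFilter([]string{"10.10.0.0/16"}, IPsFromInterface)
+//	ok, _ := f.Allowed("eth0") // true if eth0 holds an address within 10.10.0.0/16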
+func initIPInterfaceFilter(ips []string, ipsFromIface func(ifaceName string) ([]netip.Addr, error)) (ipInterfaceFilter, error) {
+	ipIfaceFilter := ipInterfaceFilter{}
+	ipIfaceFilter.ipsFromIface = ipsFromIface
+
+	for _, ip := range ips {
+		prefix, err := netip.ParsePrefix(ip)
+		if err != nil {
+			return ipInterfaceFilter{}, fmt.Errorf("error parsing given ip: %s: %w", ip, err)
+		}
+		ipIfaceFilter.allowedIPs = append(ipIfaceFilter.allowedIPs, prefix)
+	}
+
+	return ipIfaceFilter, nil
+}
+
+func (f *ipInterfaceFilter) Allowed(iface string) (bool, error) {
+	ifaceAddrs, err := f.ipsFromIface(iface)
+	if err != nil {
+		return false, fmt.Errorf("error calling ipsFromIface(): %w", err)
+	}
+
+	for _, ifaceAddr := range ifaceAddrs {
+		for _, allowedPrefix := range f.allowedIPs {
+			if allowedPrefix.Contains(ifaceAddr) {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+type regexpInterfaceFilter struct {
+	allowedRegexpes  []*regexp.Regexp
+	allowedMatches   []string
+	excludedRegexpes []*regexp.Regexp
+	excludedMatches  []string
+}
+
+// initRegexpInterfaceFilter allows filtering network interfaces that are accepted/excluded by the user,
+// according to the provided allowed and excluded interfaces from the configuration. It allows
+// matching by exact string or by regular expression
+func initRegexpInterfaceFilter(allowed, excluded []string) (regexpInterfaceFilter, error) {
+	var isRegexp = regexp.MustCompile("^/(.*)/$")
+
+	itf := regexpInterfaceFilter{}
+	for _, definition := range allowed {
+		definition = strings.Trim(definition, " ")
+		// the user defined a /regexp/ between slashes: compile and store it as regular expression
+		if sm := isRegexp.FindStringSubmatch(definition); len(sm) > 1 {
+			re, err := regexp.Compile(sm[1])
+			if err != nil {
+				return itf, fmt.Errorf("wrong interface regexp %q: %w", definition, err)
+			}
+			itf.allowedRegexpes = append(itf.allowedRegexpes, re)
+		} else {
+			// otherwise, store it as exact match definition
+			itf.allowedMatches = append(itf.allowedMatches, definition)
+		}
+	}
+
+	for _, definition := range excluded {
+		definition = strings.Trim(definition, " ")
+		// the user defined a /regexp/ between slashes: compile and store it as regexp
+		if sm := isRegexp.FindStringSubmatch(definition); len(sm) > 1 {
+			re, err := regexp.Compile(sm[1])
+			if err != nil {
+				return itf, fmt.Errorf("wrong excluded interface regexp %q: %w", definition, err)
+			}
+			itf.excludedRegexpes = append(itf.excludedRegexpes, re)
+		} else {
+			// otherwise, store it as exact match definition
+			itf.excludedMatches = append(itf.excludedMatches, definition)
+		}
+	}
+
+	return itf, nil
+}
+
+func (itf *regexpInterfaceFilter) Allowed(name string) (bool, error) {
+	// if the allowed list is empty, any interface is allowed except if it matches the exclusion list
+	allowed := len(itf.allowedRegexpes)+len(itf.allowedMatches) == 0
+	// otherwise, we check if it appears in the allowed lists (both exact match and regexp)
+	for i := 0; !allowed && i < len(itf.allowedMatches); i++ {
+		allowed = allowed || name == itf.allowedMatches[i]
+	}
+	for i := 0; !allowed && i < len(itf.allowedRegexpes); i++ {
+		allowed = allowed || itf.allowedRegexpes[i].MatchString(name)
+	}
+	if !allowed {
+		return false, nil
+	}
+	// if the interface matches the allow lists, we still need to check that it is not excluded
+	for _, match := range itf.excludedMatches {
+		if name == match {
+			return false, nil
+		}
+	}
+	for _, re := range itf.excludedRegexpes {
+		if re.MatchString(name) {
+			return false, nil
+		}
+	}
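+	// nothing in the exclusion lists matched: the interface is allowed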
+	return true, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/ip.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/ip.go
new file mode 100644
index 000000000..ef52d01d9
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/ip.go
@@ -0,0 +1,139 @@
+package agent
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// dependencies that can be injected from testing
+var (
+	interfaceByName = net.InterfaceByName
+	interfaceAddrs  = net.InterfaceAddrs
+	dial            = net.Dial
+	ifaceAddrs      = func(iface *net.Interface) ([]net.Addr, error) {
+		return iface.Addrs()
+	}
+)
+
+// fetchAgentIP guesses the non-loopback IP address of the Agent host, according to the
+// user-provided configuration:
+// - If AgentIP is provided, this value is used regardless of the real IP of the Agent.
+// - AgentIPIface specifies which interface this function should look into in order to pick up an address.
+// - AgentIPType specifies which type of IP address the agent should pick up ("any" to pick up whichever
+// IPv4 or IPv6 address is found first)
+func fetchAgentIP(cfg *Config) (net.IP, error) {
+	if cfg.AgentIP != "" {
+		if ip := net.ParseIP(cfg.AgentIP); ip != nil {
+			return ip, nil
+		}
+		return nil, fmt.Errorf("can't parse provided IP %v", cfg.AgentIP)
+	}
+
+	if cfg.AgentIPType != IPTypeAny &&
+		cfg.AgentIPType != IPTypeIPV6 &&
+		cfg.AgentIPType != IPTypeIPV4 {
+		return nil, fmt.Errorf("invalid IP type %q. Valid values are: %s, %s or %s",
+			cfg.AgentIPType, IPTypeIPV4, IPTypeIPV6, IPTypeAny)
+	}
+
+	switch cfg.AgentIPIface {
+	case IPIfaceLocal:
+		return fromLocal(cfg.AgentIPType)
+	case IPIfaceExternal:
+		return fromExternal(cfg.AgentIPType)
+	default:
+		if !strings.HasPrefix(cfg.AgentIPIface, IPIfaceNamedPrefix) {
+			return nil, fmt.Errorf(
+				"invalid IP interface %q. Valid values are: %s, %s or %s",
+				cfg.AgentIPIface, IPIfaceLocal, IPIfaceExternal, IPIfaceNamedPrefix)
+		}
+		return fromInterface(cfg.AgentIPIface[len(IPIfaceNamedPrefix):], cfg.AgentIPType)
+	}
+}
+
+func fromInterface(ifName, ipType string) (net.IP, error) {
+	iface, err := interfaceByName(ifName)
+	if err != nil {
+		return nil, err
+	}
+	addrs, err := ifaceAddrs(iface)
+	if err != nil {
+		return nil, err
+	}
+	if ip, ok := findAddress(addrs, ipType); ok {
+		return ip, nil
+	}
+	return nil, fmt.Errorf("no matching %q addresses found at interface %v", ipType, ifName)
+}
+
+func fromLocal(ipType string) (net.IP, error) {
+	addrs, err := interfaceAddrs()
+	if err != nil {
+		return nil, err
+	}
+	if ip, ok := findAddress(addrs, ipType); ok {
+		return ip, nil
+	}
+	return nil, fmt.Errorf("no matching local %q addresses found", ipType)
+}
+
+func fromExternal(ipType string) (net.IP, error) {
+	// We don't really care about the existence or nonexistence of the addresses.
+	// This will just establish an external dialer where we can pick up the external
+	// host address
+	addrStr := "8.8.8.8:80"
+	// When IPType is "any" and the host only has interfaces with IPv6 addresses, use an IPv6 DNS address
+	ip, _ := fromLocal(IPTypeIPV4)
+	if ipType == IPTypeIPV6 || (ipType == IPTypeAny && ip == nil) {
+		addrStr = "[2001:4860:4860::8888]:80"
+	}
+	conn, err := dial("udp", addrStr)
+	if err != nil {
+		return nil, fmt.Errorf("can't establish an external connection %w", err)
+	}
+	if addr, ok := conn.LocalAddr().(*net.UDPAddr); !ok {
+		return nil, fmt.Errorf("unexpected local address type %T for external connection",
+			conn.LocalAddr())
+	} else if ip, ok := getIP(addr.IP, ipType); ok {
+		return ip, nil
+	}
+	return nil, fmt.Errorf("no matching %q external addresses found", ipType)
+}
+
+func findAddress(addrs []net.Addr, ipType string) (net.IP, bool) {
+	for _, addr := range addrs {
+		if ipnet, ok := addr.(*net.IPNet); ok && ipnet != nil {
+			if ip, ok := getIP(ipnet.IP, ipType); ok {
+				return ip, true
+			}
+		}
+	}
+	return nil, false
+}
+
+func getIP(pip net.IP, ipType string) (net.IP, bool) {
+	if pip == nil || pip.IsLoopback() || pip.IsLinkLocalUnicast() {
+		return nil, false
+	}
+	switch ipType {
+	case IPTypeIPV4:
+		if ip := pip.To4(); ip != nil {
+			return ip, true
+		}
+	case IPTypeIPV6:
+		// as any IPv4 address can be converted to IPv6, we only return an
+		// address that can be converted to IPv6 but not to IPv4
+		if ip := pip.To16(); ip != nil && pip.To4() == nil {
+			return ip, true
+		}
+	default: // Any
+		if ip := pip.To4(); ip != nil {
+			return ip, true
+		}
+		if ip := pip.To16(); ip != nil {
+			return ip, true
+		}
+	}
+	return nil, false
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/packets_agent.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/packets_agent.go
new file mode 100644
index 000000000..1157901c3
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/packets_agent.go
@@ -0,0 +1,326 @@
+package agent
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+
+	"github.com/netobserv/gopipes/pkg/node"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/exporter"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/flow"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/tracer"
+
+	"github.com/cilium/ebpf/perf"
+	"github.com/sirupsen/logrus"
+)
+
+// Packets reporting agent
+type Packets struct {
+	cfg *Config
+
+	// input data providers
+	interfaces ifaces.Informer
+	filter     InterfaceFilter
+	ebpf       ebpfPacketFetcher
+
+	// processing nodes to be wired in the buildAndStartPipeline method
+	perfTracer   *flow.PerfTracer
+	packetbuffer *flow.PerfBuffer
+	exporter     node.TerminalFunc[[]*model.PacketRecord]
+
+	// elements used to decorate flows with extra information
+	interfaceNamer flow.InterfaceNamer
+	agentIP        net.IP
+
+	status Status
+}
+
+type ebpfPacketFetcher interface {
+	io.Closer
+	Register(iface ifaces.Interface) error
+	UnRegister(iface ifaces.Interface) error
+	AttachTCX(iface ifaces.Interface) error
+	DetachTCX(iface ifaces.Interface) error
+	LookupAndDeleteMap(*metrics.Metrics) map[int][]*byte
+	ReadPerf() (perf.Record, error)
+}
+
+// PacketsAgent instantiates a new agent, given a configuration.
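+// A minimal usage sketch (assumed caller code, not part of this file):
+//
+//	cfg := &Config{Export: "grpc", TargetHost: "127.0.0.1", TargetPort: 9999}
+//	agent, err := PacketsAgent(cfg)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = agent.Run(context.Background())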
+func PacketsAgent(cfg *Config) (*Packets, error) {
+	plog.Info("initializing Packets agent")
+
+	// manage deprecated configs
+	manageDeprecatedConfigs(cfg)
+
+	// configure informer for new interfaces
+	informer := configureInformer(cfg, plog)
+
+	plog.Info("[PCA]acquiring Agent IP")
+	agentIP, err := fetchAgentIP(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("acquiring Agent IP: %w", err)
+	}
+
+	// configure selected exporter
+	packetexportFunc, err := buildPacketExporter(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	ingress, egress := flowDirections(cfg)
+	debug := false
+	if cfg.LogLevel == logrus.TraceLevel.String() || cfg.LogLevel == logrus.DebugLevel.String() {
+		debug = true
+	}
+	filterRules := make([]*tracer.FilterConfig, 0)
+	if cfg.EnableFlowFilter {
+		var flowFilters []*FlowFilter
+		if err := json.Unmarshal([]byte(cfg.FlowFilterRules), &flowFilters); err != nil {
+			return nil, err
+		}
+
+		for _, r := range flowFilters {
+			filterRules = append(filterRules, &tracer.FilterConfig{
+				FilterAction:          r.FilterAction,
+				FilterDirection:       r.FilterDirection,
+				FilterIPCIDR:          r.FilterIPCIDR,
+				FilterProtocol:        r.FilterProtocol,
+				FilterPeerIP:          r.FilterPeerIP,
+				FilterDestinationPort: tracer.ConvertFilterPortsToInstr(r.FilterDestinationPort, r.FilterDestinationPortRange, r.FilterDestinationPorts),
+				FilterSourcePort:      tracer.ConvertFilterPortsToInstr(r.FilterSourcePort, r.FilterSourcePortRange, r.FilterSourcePorts),
+				FilterPort:            tracer.ConvertFilterPortsToInstr(r.FilterPort, r.FilterPortRange, r.FilterPorts),
+				FilterTCPFlags:        r.FilterTCPFlags,
+				FilterDrops:           r.FilterDrops,
+				FilterSample:          r.FilterSample,
+			})
+		}
+	}
+	ebpfConfig := &tracer.FlowFetcherConfig{
+		EnableIngress:  ingress,
+		EnableEgress:   egress,
+		Debug:          debug,
+		Sampling:       cfg.Sampling,
+		CacheMaxSize:   cfg.CacheMaxFlows,
+		EnablePCA:      cfg.EnablePCA,
+		UseEbpfManager: cfg.EbpfProgramManagerMode,
+		FilterConfig:   filterRules,
+	}
+
+	fetcher, err := tracer.NewPacketFetcher(ebpfConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return packetsAgent(cfg, informer, fetcher, packetexportFunc, agentIP)
+}
+
+// packetsAgent is a private constructor with injectable dependencies, usable for tests
+func packetsAgent(cfg *Config,
+	informer ifaces.Informer,
+	fetcher ebpfPacketFetcher,
+	packetexporter node.TerminalFunc[[]*model.PacketRecord],
+	agentIP net.IP,
+) (*Packets, error) {
+	var filter InterfaceFilter
+
+	switch {
+	case len(cfg.InterfaceIPs) > 0 && (len(cfg.Interfaces) > 0 || len(cfg.ExcludeInterfaces) > 0):
+		return nil, fmt.Errorf("INTERFACES/EXCLUDE_INTERFACES and INTERFACE_IPS are mutually exclusive")
+
+	case len(cfg.InterfaceIPs) > 0:
+		// configure ip interface filter
+		f, err := initIPInterfaceFilter(cfg.InterfaceIPs, IPsFromInterface)
+		if err != nil {
+			return nil, fmt.Errorf("configuring interface ip filter: %w", err)
+		}
+		filter = &f
+
+	default:
+		// configure allow/deny regexp interfaces filter
+		f, err := initRegexpInterfaceFilter(cfg.Interfaces, cfg.ExcludeInterfaces)
+		if err != nil {
+			return nil, fmt.Errorf("configuring interface filters: %w", err)
+		}
+		filter = &f
+	}
+
+	registerer := ifaces.NewRegisterer(informer, cfg.BuffersLength)
+
+	interfaceNamer := func(ifIndex int) string {
+		iface, ok := registerer.IfaceNameForIndex(ifIndex)
+		if !ok {
+			return "unknown"
+		}
+		return iface
+	}
+
+	perfTracer := flow.NewPerfTracer(fetcher, cfg.CacheActiveTimeout)
+
+	packetbuffer := flow.NewPerfBuffer(cfg.CacheMaxFlows, cfg.CacheActiveTimeout)
+
+	return &Packets{
+		ebpf:           fetcher,
+		interfaces:     registerer,
+
filter: filter, + cfg: cfg, + packetbuffer: packetbuffer, + perfTracer: perfTracer, + agentIP: agentIP, + interfaceNamer: interfaceNamer, + exporter: packetexporter, + }, nil +} + +func buildGRPCPacketExporter(cfg *Config) (node.TerminalFunc[[]*model.PacketRecord], error) { + if cfg.TargetHost == "" || cfg.TargetPort == 0 { + return nil, fmt.Errorf("missing target host or port for PCA: %s:%d", + cfg.TargetHost, cfg.TargetPort) + } + plog.Info("starting gRPC Packet send") + pcapStreamer, err := exporter.StartGRPCPacketSend(cfg.TargetHost, cfg.TargetPort) + if err != nil { + return nil, err + } + + return pcapStreamer.ExportGRPCPackets, nil +} + +func buildPacketExporter(cfg *Config) (node.TerminalFunc[[]*model.PacketRecord], error) { + switch cfg.Export { + case "grpc": + return buildGRPCPacketExporter(cfg) + case "direct-flp": + return buildPacketDirectFLPExporter(cfg) + default: + return nil, fmt.Errorf("unsupported packet export type %s", cfg.Export) + } +} + +func buildPacketDirectFLPExporter(cfg *Config) (node.TerminalFunc[[]*model.PacketRecord], error) { + flpExporter, err := exporter.StartDirectFLP(cfg.FLPConfig, cfg.BuffersLength) + if err != nil { + return nil, err + } + return flpExporter.ExportPackets, nil +} + +// Run a Packets agent. The function will keep running in the same thread +// until the passed context is canceled +func (p *Packets) Run(ctx context.Context) error { + p.status = StatusStarting + plog.Info("Starting Packets agent") + graph, err := p.buildAndStartPipeline(ctx) + if err != nil { + return fmt.Errorf("error starting processing graph: %w", err) + } + + p.status = StatusStarted + plog.Info("Packets agent successfully started") + <-ctx.Done() + + p.status = StatusStopping + plog.Info("stopping Packets agent") + if err := p.ebpf.Close(); err != nil { + plog.WithError(err).Warn("eBPF resources not correctly closed") + } + + plog.Debug("waiting for all nodes to finish their pending work") + <-graph.Done() + + p.status = StatusStopped + plog.Info("Packets agent stopped") + return nil +} + +func (p *Packets) Status() Status { + return p.status +} + +func (p *Packets) interfacesManager(ctx context.Context) error { + slog := plog.WithField("function", "interfacesManager") + + slog.Debug("subscribing for network interface events") + ifaceEvents, err := p.interfaces.Subscribe(ctx) + if err != nil { + return fmt.Errorf("instantiating interfaces' informer: %w", err) + } + + go interfaceListener(ctx, ifaceEvents, slog, p.onInterfaceAdded) + + return nil +} + +func (p *Packets) buildAndStartPipeline(ctx context.Context) (*node.Terminal[[]*model.PacketRecord], error) { + + if !p.cfg.EbpfProgramManagerMode { + plog.Debug("registering interfaces' listener in background") + err := p.interfacesManager(ctx) + if err != nil { + return nil, err + } + } + plog.Debug("connecting packets' processing graph") + + perfTracer := node.AsStart(p.perfTracer.TraceLoop(ctx)) + + ebl := p.cfg.ExporterBufferLength + if ebl == 0 { + ebl = p.cfg.BuffersLength + } + + packetbuffer := node.AsMiddle(p.packetbuffer.PBuffer, + node.ChannelBufferLen(p.cfg.BuffersLength)) + + perfTracer.SendsTo(packetbuffer) + + export := node.AsTerminal(p.exporter, + node.ChannelBufferLen(ebl)) + + packetbuffer.SendsTo(export) + perfTracer.Start() + + return export, nil +} + +func (p *Packets) onInterfaceAdded(iface ifaces.Interface, add bool) { + // ignore interfaces that do not match the user configuration acceptance/exclusion lists + allowed, err := p.filter.Allowed(iface.Name) + if err != nil { + 
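+		// the filters return false on error, so the interface is also skipped by the check below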
plog.WithField("[PCA]interface", iface).WithError(err). + Warn("couldn't determine if interface is allowed. Ignoring") + } + if !allowed { + plog.WithField("interface", iface). + Debug("[PCA]interface does not match the allow/exclusion filters. Ignoring") + return + } + if add { + plog.WithField("interface", iface).Info("interface detected. trying to attach TCX hook") + if err := p.ebpf.AttachTCX(iface); err != nil { + plog.WithField("[PCA]interface", iface).WithError(err). + Info("can't attach to TCx hook packet ebpfFetcher. fall back to use legacy TC hook") + if err := p.ebpf.Register(iface); err != nil { + plog.WithField("[PCA]interface", iface).WithError(err). + Warn("can't register packet ebpfFetcher. Ignoring") + return + } + } + } else { + plog.WithField("interface", iface).Info("interface deleted. trying to detach TCX hook") + if err := p.ebpf.DetachTCX(iface); err != nil { + plog.WithField("[PCA]interface", iface).WithError(err). + Info("can't detach from TCx hook packet ebpfFetcher. check if there is any legacy TC hook") + if err := p.ebpf.UnRegister(iface); err != nil { + plog.WithField("[PCA]interface", iface).WithError(err). + Warn("can't unregister packet ebpfFetcher. Ignoring") + return + } + } + + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/sasl.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/sasl.go new file mode 100644 index 000000000..a8b228d5b --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/sasl.go @@ -0,0 +1,39 @@ +package agent + +import ( + "fmt" + "os" + "strings" + + "github.com/segmentio/kafka-go/sasl" + "github.com/segmentio/kafka-go/sasl/plain" + "github.com/segmentio/kafka-go/sasl/scram" +) + +func buildSASLConfig(cfg *Config) (sasl.Mechanism, error) { + // Read client ID + id, err := os.ReadFile(cfg.KafkaSASLClientIDPath) + if err != nil { + return nil, err + } + strID := strings.TrimSpace(string(id)) + // Read password + pwd, err := os.ReadFile(cfg.KafkaSASLClientSecretPath) + if err != nil { + return nil, err + } + strPwd := strings.TrimSpace(string(pwd)) + var mechanism sasl.Mechanism + switch cfg.KafkaSASLType { + case "plain": + mechanism = plain.Mechanism{Username: strID, Password: strPwd} + case "scramSHA512": + mechanism, err = scram.Mechanism(scram.SHA512, strID, strPwd) + default: + err = fmt.Errorf("unknown SASL type: %s", cfg.KafkaSASLType) + } + if err != nil { + return nil, err + } + return mechanism, nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/tls.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/tls.go new file mode 100644 index 000000000..c25b26a9b --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/agent/tls.go @@ -0,0 +1,38 @@ +package agent + +import ( + "crypto/tls" + "crypto/x509" + "os" +) + +func buildTLSConfig(cfg *Config) (*tls.Config, error) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: cfg.KafkaTLSInsecureSkipVerify, + } + if cfg.KafkaTLSCACertPath != "" { + caCert, err := os.ReadFile(cfg.KafkaTLSCACertPath) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = x509.NewCertPool() + tlsConfig.RootCAs.AppendCertsFromPEM(caCert) + + if cfg.KafkaTLSUserCertPath != "" && cfg.KafkaTLSUserKeyPath != "" { + userCert, err := os.ReadFile(cfg.KafkaTLSUserCertPath) + if err != nil { + return nil, err + } + userKey, err := os.ReadFile(cfg.KafkaTLSUserKeyPath) + if err != nil { + return nil, err + } + pair, err := tls.X509KeyPair([]byte(userCert), []byte(userKey)) + if err != nil { + 
return nil, err
+			}
+			tlsConfig.Certificates = []tls.Certificate{pair}
+		}
+	}
+	return tlsConfig, nil
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/decode_protobuf.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/decode_protobuf.go
new file mode 100644
index 000000000..f98e6b340
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/decode_protobuf.go
@@ -0,0 +1,421 @@
+package decode
+
+import (
+	"fmt"
+	"syscall"
+	"time"
+
+	"github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+
+	"github.com/mdlayher/ethernet"
+	log "github.com/sirupsen/logrus"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	skbDropReasonSubsystemShift    = 16
+	skbDropReasonSubSysCore        = (0 << skbDropReasonSubsystemShift)
+	skbDropReasonSubSysOpenVSwitch = (3 << skbDropReasonSubsystemShift)
+)
+
+// Protobuf decodes protobuf flow record definitions, as forwarded by
+// ingest.NetObservAgent, into a GenericMap that follows the same naming conventions
+// as the IPFIX flows from ingest.IngestCollector
+type Protobuf struct {
+}
+
+func NewProtobuf() (*Protobuf, error) {
+	log.Debugf("entering NewProtobuf")
+	return &Protobuf{}, nil
+}
+
+// Decode decodes a raw protobuf flow record and returns a GenericMap representing it
+func (p *Protobuf) Decode(rawFlow []byte) (config.GenericMap, error) {
+	record := pbflow.Record{}
+	if err := proto.Unmarshal(rawFlow, &record); err != nil {
+		return nil, fmt.Errorf("unmarshaling ProtoBuf record: %w", err)
+	}
+	return PBFlowToMap(&record), nil
+}
+
+func PBFlowToMap(pb *pbflow.Record) config.GenericMap {
+	flow := pbflow.PBToFlow(pb)
+	if flow == nil {
+		return config.GenericMap{}
+	}
+	return RecordToMap(flow)
+}
+
+// RecordToMap converts the flow from the Agent inner model into the FLP GenericMap model
+// nolint:golint,cyclop
+func RecordToMap(fr *model.Record) config.GenericMap {
+	if fr == nil {
+		return config.GenericMap{}
+	}
+	srcMAC := model.MacAddr(fr.Metrics.SrcMac)
+	dstMAC := model.MacAddr(fr.Metrics.DstMac)
+	out := config.GenericMap{
+		"SrcMac":          srcMAC.String(),
+		"DstMac":          dstMAC.String(),
+		"Etype":           fr.Metrics.EthProtocol,
+		"TimeFlowStartMs": fr.TimeFlowStart.UnixMilli(),
+		"TimeFlowEndMs":   fr.TimeFlowEnd.UnixMilli(),
+		"TimeReceived":    time.Now().Unix(),
+		"AgentIP":         fr.AgentIP.String(),
+	}
+
+	if fr.Duplicate {
+		out["Duplicate"] = true
+	}
+
+	if fr.Metrics.Bytes != 0 {
+		out["Bytes"] = fr.Metrics.Bytes
+	}
+
+	if fr.Metrics.Packets != 0 {
+		out["Packets"] = fr.Metrics.Packets
+	}
+
+	if fr.Metrics.Sampling != 0 {
+		out["Sampling"] = fr.Metrics.Sampling
+	}
+	var interfaces []string
+	var directions []int
+	if len(fr.DupList) != 0 {
+		for _, m := range fr.DupList {
+			for key, value := range m {
+				interfaces = append(interfaces, key)
+				directions = append(directions, int(model.Direction(value)))
+			}
+		}
+	} else {
+		interfaces = append(interfaces, fr.Interface)
+		directions = append(directions, int(fr.ID.Direction))
+	}
+	out["Interfaces"] = interfaces
+	out["IfDirections"] = directions
+
+	if fr.Metrics.EthProtocol == uint16(ethernet.EtherTypeIPv4) || fr.Metrics.EthProtocol == uint16(ethernet.EtherTypeIPv6) {
+		out["SrcAddr"] = model.IP(fr.ID.SrcIp).String()
+		out["DstAddr"] = model.IP(fr.ID.DstIp).String()
+		out["Proto"] = fr.ID.TransportProtocol
+		out["Dscp"] = fr.Metrics.Dscp
+
+		if fr.ID.TransportProtocol == syscall.IPPROTO_ICMP ||
fr.ID.TransportProtocol == syscall.IPPROTO_ICMPV6 { + out["IcmpType"] = fr.ID.IcmpType + out["IcmpCode"] = fr.ID.IcmpCode + } else if fr.ID.TransportProtocol == syscall.IPPROTO_TCP || fr.ID.TransportProtocol == syscall.IPPROTO_UDP || fr.ID.TransportProtocol == syscall.IPPROTO_SCTP { + out["SrcPort"] = fr.ID.SrcPort + out["DstPort"] = fr.ID.DstPort + if fr.ID.TransportProtocol == syscall.IPPROTO_TCP { + out["Flags"] = fr.Metrics.Flags + } + } + } + + if fr.Metrics.AdditionalMetrics != nil { + if fr.Metrics.AdditionalMetrics.DnsRecord.Errno != 0 { + out["DnsErrno"] = fr.Metrics.AdditionalMetrics.DnsRecord.Errno + } + if fr.Metrics.AdditionalMetrics.DnsRecord.Id != 0 { + out["DnsId"] = fr.Metrics.AdditionalMetrics.DnsRecord.Id + out["DnsFlags"] = fr.Metrics.AdditionalMetrics.DnsRecord.Flags + out["DnsFlagsResponseCode"] = DNSRcodeToStr(uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Flags) & 0xF) + out["DnsLatencyMs"] = fr.DNSLatency.Milliseconds() + } + + if fr.Metrics.AdditionalMetrics.PktDrops.LatestDropCause != 0 { + out["PktDropBytes"] = fr.Metrics.AdditionalMetrics.PktDrops.Bytes + out["PktDropPackets"] = fr.Metrics.AdditionalMetrics.PktDrops.Packets + out["PktDropLatestFlags"] = fr.Metrics.AdditionalMetrics.PktDrops.LatestFlags + out["PktDropLatestState"] = TCPStateToStr(uint32(fr.Metrics.AdditionalMetrics.PktDrops.LatestState)) + out["PktDropLatestDropCause"] = PktDropCauseToStr(fr.Metrics.AdditionalMetrics.PktDrops.LatestDropCause) + } + if !model.AllZeroIP(model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr)) && + !model.AllZeroIP(model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr)) { + out["ZoneId"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.ZoneId + out["XlatSrcPort"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.Sport + out["XlatDstPort"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.Dport + out["XlatSrcAddr"] = model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr).String() + out["XlatDstAddr"] = model.IP(fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr).String() + out["XlatIcmpId"] = fr.Metrics.AdditionalMetrics.TranslatedFlow.IcmpId + } + } + + if fr.TimeFlowRtt != 0 { + out["TimeFlowRttNs"] = fr.TimeFlowRtt.Nanoseconds() + } + + if len(fr.NetworkMonitorEventsMD) != 0 { + out["NetworkEvents"] = fr.NetworkMonitorEventsMD + } + + return out +} + +// TCPStateToStr is based on kernel TCP state definition +// https://elixir.bootlin.com/linux/v6.3/source/include/net/tcp_states.h#L12 +func TCPStateToStr(state uint32) string { + switch state { + case 1: + return "TCP_ESTABLISHED" + case 2: + return "TCP_SYN_SENT" + case 3: + return "TCP_SYN_RECV" + case 4: + return "TCP_FIN_WAIT1" + case 5: + return "TCP_FIN_WAIT2" + case 6: + return "TCP_CLOSE" + case 7: + return "TCP_CLOSE_WAIT" + case 8: + return "TCP_LAST_ACK" + case 9: + return "TCP_LISTEN" + case 10: + return "TCP_CLOSING" + case 11: + return "TCP_NEW_SYN_RECV" + } + return "TCP_INVALID_STATE" +} + +// PktDropCauseToStr is based on kernel drop cause definition +// https://elixir.bootlin.com/linux/latest/source/include/net/dropreason.h#L88 +// nolint:cyclop +func PktDropCauseToStr(dropCause uint32) string { + switch dropCause { + case skbDropReasonSubSysCore + 2: + return "SKB_DROP_REASON_NOT_SPECIFIED" + case skbDropReasonSubSysCore + 3: + return "SKB_DROP_REASON_NO_SOCKET" + case skbDropReasonSubSysCore + 4: + return "SKB_DROP_REASON_PKT_TOO_SMALL" + case skbDropReasonSubSysCore + 5: + return "SKB_DROP_REASON_TCP_CSUM" + case skbDropReasonSubSysCore + 6: + return "SKB_DROP_REASON_SOCKET_FILTER" + case 
skbDropReasonSubSysCore + 7: + return "SKB_DROP_REASON_UDP_CSUM" + case skbDropReasonSubSysCore + 8: + return "SKB_DROP_REASON_NETFILTER_DROP" + case skbDropReasonSubSysCore + 9: + return "SKB_DROP_REASON_OTHERHOST" + case skbDropReasonSubSysCore + 10: + return "SKB_DROP_REASON_IP_CSUM" + case skbDropReasonSubSysCore + 11: + return "SKB_DROP_REASON_IP_INHDR" + case skbDropReasonSubSysCore + 12: + return "SKB_DROP_REASON_IP_RPFILTER" + case skbDropReasonSubSysCore + 13: + return "SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST" + case skbDropReasonSubSysCore + 14: + return "SKB_DROP_REASON_XFRM_POLICY" + case skbDropReasonSubSysCore + 15: + return "SKB_DROP_REASON_IP_NOPROTO" + case skbDropReasonSubSysCore + 16: + return "SKB_DROP_REASON_SOCKET_RCVBUFF" + case skbDropReasonSubSysCore + 17: + return "SKB_DROP_REASON_PROTO_MEM" + case skbDropReasonSubSysCore + 18: + return "SKB_DROP_REASON_TCP_MD5NOTFOUND" + case skbDropReasonSubSysCore + 19: + return "SKB_DROP_REASON_TCP_MD5UNEXPECTED" + case skbDropReasonSubSysCore + 20: + return "SKB_DROP_REASON_TCP_MD5FAILURE" + case skbDropReasonSubSysCore + 21: + return "SKB_DROP_REASON_SOCKET_BACKLOG" + case skbDropReasonSubSysCore + 22: + return "SKB_DROP_REASON_TCP_FLAGS" + case skbDropReasonSubSysCore + 23: + return "SKB_DROP_REASON_TCP_ZEROWINDOW" + case skbDropReasonSubSysCore + 24: + return "SKB_DROP_REASON_TCP_OLD_DATA" + case skbDropReasonSubSysCore + 25: + return "SKB_DROP_REASON_TCP_OVERWINDOW" + case skbDropReasonSubSysCore + 26: + return "SKB_DROP_REASON_TCP_OFOMERGE" + case skbDropReasonSubSysCore + 27: + return "SKB_DROP_REASON_TCP_RFC7323_PAWS" + case skbDropReasonSubSysCore + 28: + return "SKB_DROP_REASON_TCP_INVALID_SEQUENCE" + case skbDropReasonSubSysCore + 29: + return "SKB_DROP_REASON_TCP_RESET" + case skbDropReasonSubSysCore + 30: + return "SKB_DROP_REASON_TCP_INVALID_SYN" + case skbDropReasonSubSysCore + 31: + return "SKB_DROP_REASON_TCP_CLOSE" + case skbDropReasonSubSysCore + 32: + return "SKB_DROP_REASON_TCP_FASTOPEN" + case skbDropReasonSubSysCore + 33: + return "SKB_DROP_REASON_TCP_OLD_ACK" + case skbDropReasonSubSysCore + 34: + return "SKB_DROP_REASON_TCP_TOO_OLD_ACK" + case skbDropReasonSubSysCore + 35: + return "SKB_DROP_REASON_TCP_ACK_UNSENT_DATA" + case skbDropReasonSubSysCore + 36: + return "SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE" + case skbDropReasonSubSysCore + 37: + return "SKB_DROP_REASON_TCP_OFO_DROP" + case skbDropReasonSubSysCore + 38: + return "SKB_DROP_REASON_IP_OUTNOROUTES" + case skbDropReasonSubSysCore + 39: + return "SKB_DROP_REASON_BPF_CGROUP_EGRESS" + case skbDropReasonSubSysCore + 40: + return "SKB_DROP_REASON_IPV6DISABLED" + case skbDropReasonSubSysCore + 41: + return "SKB_DROP_REASON_NEIGH_CREATEFAIL" + case skbDropReasonSubSysCore + 42: + return "SKB_DROP_REASON_NEIGH_FAILED" + case skbDropReasonSubSysCore + 43: + return "SKB_DROP_REASON_NEIGH_QUEUEFULL" + case skbDropReasonSubSysCore + 44: + return "SKB_DROP_REASON_NEIGH_DEAD" + case skbDropReasonSubSysCore + 45: + return "SKB_DROP_REASON_TC_EGRESS" + case skbDropReasonSubSysCore + 46: + return "SKB_DROP_REASON_QDISC_DROP" + case skbDropReasonSubSysCore + 47: + return "SKB_DROP_REASON_CPU_BACKLOG" + case skbDropReasonSubSysCore + 48: + return "SKB_DROP_REASON_XDP" + case skbDropReasonSubSysCore + 49: + return "SKB_DROP_REASON_TC_INGRESS" + case skbDropReasonSubSysCore + 50: + return "SKB_DROP_REASON_UNHANDLED_PROTO" + case skbDropReasonSubSysCore + 51: + return "SKB_DROP_REASON_SKB_CSUM" + case skbDropReasonSubSysCore + 52: + return "SKB_DROP_REASON_SKB_GSO_SEG" 
+	case skbDropReasonSubSysCore + 53:
+		return "SKB_DROP_REASON_SKB_UCOPY_FAULT"
+	case skbDropReasonSubSysCore + 54:
+		return "SKB_DROP_REASON_DEV_HDR"
+	case skbDropReasonSubSysCore + 55:
+		return "SKB_DROP_REASON_DEV_READY"
+	case skbDropReasonSubSysCore + 56:
+		return "SKB_DROP_REASON_FULL_RING"
+	case skbDropReasonSubSysCore + 57:
+		return "SKB_DROP_REASON_NOMEM"
+	case skbDropReasonSubSysCore + 58:
+		return "SKB_DROP_REASON_HDR_TRUNC"
+	case skbDropReasonSubSysCore + 59:
+		return "SKB_DROP_REASON_TAP_FILTER"
+	case skbDropReasonSubSysCore + 60:
+		return "SKB_DROP_REASON_TAP_TXFILTER"
+	case skbDropReasonSubSysCore + 61:
+		return "SKB_DROP_REASON_ICMP_CSUM"
+	case skbDropReasonSubSysCore + 62:
+		return "SKB_DROP_REASON_INVALID_PROTO"
+	case skbDropReasonSubSysCore + 63:
+		return "SKB_DROP_REASON_IP_INADDRERRORS"
+	case skbDropReasonSubSysCore + 64:
+		return "SKB_DROP_REASON_IP_INNOROUTES"
+	case skbDropReasonSubSysCore + 65:
+		return "SKB_DROP_REASON_PKT_TOO_BIG"
+	case skbDropReasonSubSysCore + 66:
+		return "SKB_DROP_REASON_DUP_FRAG"
+	case skbDropReasonSubSysCore + 67:
+		return "SKB_DROP_REASON_FRAG_REASM_TIMEOUT"
+	case skbDropReasonSubSysCore + 68:
+		return "SKB_DROP_REASON_FRAG_TOO_FAR"
+	case skbDropReasonSubSysCore + 69:
+		return "SKB_DROP_REASON_TCP_MINTTL"
+	case skbDropReasonSubSysCore + 70:
+		return "SKB_DROP_REASON_IPV6_BAD_EXTHDR"
+	case skbDropReasonSubSysCore + 71:
+		return "SKB_DROP_REASON_IPV6_NDISC_FRAG"
+	case skbDropReasonSubSysCore + 72:
+		return "SKB_DROP_REASON_IPV6_NDISC_HOP_LIMIT"
+	case skbDropReasonSubSysCore + 73:
+		return "SKB_DROP_REASON_IPV6_NDISC_BAD_CODE"
+	case skbDropReasonSubSysCore + 74:
+		return "SKB_DROP_REASON_IPV6_NDISC_BAD_OPTIONS"
+	case skbDropReasonSubSysCore + 75:
+		return "SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST"
+	case skbDropReasonSubSysCore + 76:
+		return "SKB_DROP_REASON_QUEUE_PURGE"
+	case skbDropReasonSubSysCore + 77:
+		return "SKB_DROP_REASON_TC_COOKIE_ERROR"
+	case skbDropReasonSubSysCore + 78:
+		return "SKB_DROP_REASON_PACKET_SOCK_ERROR"
+	case skbDropReasonSubSysCore + 79:
+		return "SKB_DROP_REASON_TC_CHAIN_NOTFOUND"
+	case skbDropReasonSubSysCore + 80:
+		return "SKB_DROP_REASON_TC_RECLASSIFY_LOOP"
+
+	// ovs drop causes
+	// https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git/tree/net/openvswitch/drop.h
+	case skbDropReasonSubSysOpenVSwitch + 1:
+		return "OVS_DROP_LAST_ACTION"
+	case skbDropReasonSubSysOpenVSwitch + 2:
+		return "OVS_DROP_ACTION_ERROR"
+	case skbDropReasonSubSysOpenVSwitch + 3:
+		return "OVS_DROP_EXPLICIT"
+	case skbDropReasonSubSysOpenVSwitch + 4:
+		return "OVS_DROP_EXPLICIT_WITH_ERROR"
+	case skbDropReasonSubSysOpenVSwitch + 5:
+		return "OVS_DROP_METER"
+	case skbDropReasonSubSysOpenVSwitch + 6:
+		return "OVS_DROP_RECURSION_LIMIT"
+	case skbDropReasonSubSysOpenVSwitch + 7:
+		return "OVS_DROP_DEFERRED_LIMIT"
+	case skbDropReasonSubSysOpenVSwitch + 8:
+		return "OVS_DROP_FRAG_L2_TOO_LONG"
+	case skbDropReasonSubSysOpenVSwitch + 9:
+		return "OVS_DROP_FRAG_INVALID_PROTO"
+	case skbDropReasonSubSysOpenVSwitch + 10:
+		return "OVS_DROP_CONNTRACK"
+	case skbDropReasonSubSysOpenVSwitch + 11:
+		return "OVS_DROP_IP_TTL"
+	}
+	return "SKB_DROP_UNKNOWN_CAUSE"
+}
+
+// DNSRcodeToStr decodes the DNS flags response code bits and returns a string
+// https://datatracker.ietf.org/doc/html/rfc2929#section-2.3
+func DNSRcodeToStr(rcode uint32) string {
+	switch rcode {
+	case 0:
+		return "NoError"
+	case 1:
+		return "FormErr"
+	case 2:
+		return "ServFail"
+	case 3:
+		return "NXDomain"
+	case 4:
+		return "NotImp"
+	case 5:
+ return "Refused" + case 6: + return "YXDomain" + case 7: + return "YXRRSet" + case 8: + return "NXRRSet" + case 9: + return "NotAuth" + case 10: + return "NotZone" + case 16: + return "BADVERS" + case 17: + return "BADKEY" + case 18: + return "BADTIME" + case 19: + return "BADMODE" + case 20: + return "BADNAME" + case 21: + return "BADALG" + } + return "UnDefined" +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets/decode_protobuf_packets.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets/decode_protobuf_packets.go new file mode 100644 index 000000000..ee8be8cdf --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets/decode_protobuf_packets.go @@ -0,0 +1,76 @@ +package packets + +import ( + "encoding/base64" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + + "github.com/gopacket/gopacket" + "github.com/gopacket/gopacket/layers" +) + +func PacketToMap(pr *model.PacketRecord) config.GenericMap { + out := config.GenericMap{} + + if pr == nil { + return out + } + + packet := gopacket.NewPacket(pr.Stream, layers.LayerTypeEthernet, gopacket.Lazy) + if ethLayer := packet.Layer(layers.LayerTypeEthernet); ethLayer != nil { + eth, _ := ethLayer.(*layers.Ethernet) + out["SrcMac"] = eth.SrcMAC.String() + out["DstMac"] = eth.DstMAC.String() + } + + if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil { + tcp, _ := tcpLayer.(*layers.TCP) + out["SrcPort"] = tcp.SrcPort.String() + out["DstPort"] = tcp.DstPort.String() + } else if udpLayer := packet.Layer(layers.LayerTypeUDP); udpLayer != nil { + udp, _ := udpLayer.(*layers.UDP) + out["SrcPort"] = udp.SrcPort.String() + out["DstPort"] = udp.DstPort.String() + } else if sctpLayer := packet.Layer(layers.LayerTypeSCTP); sctpLayer != nil { + sctp, _ := sctpLayer.(*layers.SCTP) + out["SrcPort"] = sctp.SrcPort.String() + out["DstPort"] = sctp.DstPort.String() + } + + if ipv4Layer := packet.Layer(layers.LayerTypeIPv4); ipv4Layer != nil { + ipv4, _ := ipv4Layer.(*layers.IPv4) + out["SrcAddr"] = ipv4.SrcIP.String() + out["DstAddr"] = ipv4.DstIP.String() + out["Proto"] = ipv4.Protocol + } else if ipv6Layer := packet.Layer(layers.LayerTypeIPv6); ipv6Layer != nil { + ipv6, _ := ipv6Layer.(*layers.IPv6) + out["SrcAddr"] = ipv6.SrcIP.String() + out["DstAddr"] = ipv6.DstIP.String() + out["Proto"] = ipv6.NextHeader + } + + if icmpv4Layer := packet.Layer(layers.LayerTypeICMPv4); icmpv4Layer != nil { + icmpv4, _ := icmpv4Layer.(*layers.ICMPv4) + out["IcmpType"] = icmpv4.TypeCode.Type() + out["IcmpCode"] = icmpv4.TypeCode.Code() + } else if icmpv6Layer := packet.Layer(layers.LayerTypeICMPv6); icmpv6Layer != nil { + icmpv6, _ := icmpv6Layer.(*layers.ICMPv6) + out["IcmpType"] = icmpv6.TypeCode.Type() + out["IcmpCode"] = icmpv6.TypeCode.Code() + } + + if dnsLayer := packet.Layer(layers.LayerTypeDNS); dnsLayer != nil { + dns, _ := dnsLayer.(*layers.DNS) + out["DnsId"] = dns.ID + out["DnsFlagsResponseCode"] = dns.ResponseCode.String() + //TODO: add DNS questions / answers / authorities + } + + out["Bytes"] = len(pr.Stream) + // Data is base64 encoded to avoid marshal / unmarshal issues + out["Data"] = base64.StdEncoding.EncodeToString(packet.Data()) + out["Time"] = pr.Time.Unix() + + return out +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.go new file mode 100644 index 000000000..f12dda23f --- 
/dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.go @@ -0,0 +1,338 @@ +// Code generated by bpf2go; DO NOT EDIT. +//go:build arm64 + +package ebpf + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type BpfAdditionalMetrics struct { + DnsRecord BpfDnsRecordT + PktDrops BpfPktDropsT + FlowRtt uint64 + NetworkEventsIdx uint8 + NetworkEvents [4][8]uint8 + _ [1]byte + TranslatedFlow BpfTranslatedFlowT + _ [6]byte +} + +type BpfDirectionT uint32 + +const ( + BpfDirectionTINGRESS BpfDirectionT = 0 + BpfDirectionTEGRESS BpfDirectionT = 1 + BpfDirectionTMAX_DIRECTION BpfDirectionT = 2 +) + +type BpfDnsFlowId struct { + SrcPort uint16 + DstPort uint16 + SrcIp [16]uint8 + DstIp [16]uint8 + Id uint16 + Protocol uint8 + _ [1]byte +} + +type BpfDnsRecordT struct { + Id uint16 + Flags uint16 + _ [4]byte + Latency uint64 + Errno uint8 + _ [7]byte +} + +type BpfFilterActionT uint32 + +const ( + BpfFilterActionTACCEPT BpfFilterActionT = 0 + BpfFilterActionTREJECT BpfFilterActionT = 1 + BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2 +) + +type BpfFilterKeyT struct { + PrefixLen uint32 + IpData [16]uint8 +} + +type BpfFilterValueT struct { + Protocol uint8 + DstPortStart uint16 + DstPortEnd uint16 + DstPort1 uint16 + DstPort2 uint16 + SrcPortStart uint16 + SrcPortEnd uint16 + SrcPort1 uint16 + SrcPort2 uint16 + PortStart uint16 + PortEnd uint16 + Port1 uint16 + Port2 uint16 + IcmpType uint8 + IcmpCode uint8 + Direction BpfDirectionT + Action BpfFilterActionT + TcpFlags BpfTcpFlagsT + FilterDrops uint8 + Sample uint32 + Ip [16]uint8 +} + +type BpfFlowId BpfFlowIdT + +type BpfFlowIdT struct { + Direction uint8 + SrcIp [16]uint8 + DstIp [16]uint8 + _ [1]byte + SrcPort uint16 + DstPort uint16 + TransportProtocol uint8 + IcmpType uint8 + IcmpCode uint8 + _ [3]byte + IfIndex uint32 +} + +type BpfFlowMetrics BpfFlowMetricsT + +type BpfFlowMetricsT struct { + Lock struct{ Val uint32 } + EthProtocol uint16 + SrcMac [6]uint8 + DstMac [6]uint8 + _ [2]byte + Packets uint32 + Bytes uint64 + StartMonoTimeTs uint64 + EndMonoTimeTs uint64 + Flags uint16 + Errno uint8 + Dscp uint8 + Sampling uint32 +} + +type BpfFlowRecordT struct { + Id BpfFlowId + Metrics BpfFlowMetrics +} + +type BpfGlobalCountersKeyT uint32 + +const ( + BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0 + BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1 + BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2 + BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3 + BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7 + BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8 + BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9 +) + +type BpfPktDropsT struct { + Packets uint32 + _ [4]byte + Bytes uint64 + LatestFlags uint16 + LatestState uint8 + _ [1]byte + LatestDropCause uint32 +} + +type BpfTcpFlagsT uint32 + +const ( + BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1 + BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2 + BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4 + BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8 + BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16 + BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32 + BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64 + BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128 + 
BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256 + BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512 + BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024 +) + +type BpfTranslatedFlowT struct { + Saddr [16]uint8 + Daddr [16]uint8 + Sport uint16 + Dport uint16 + ZoneId uint16 + IcmpId uint8 + _ [1]byte +} + +// LoadBpf returns the embedded CollectionSpec for Bpf. +func LoadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load Bpf: %w", err) + } + + return spec, err +} + +// LoadBpfObjects loads Bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *BpfObjects +// *BpfPrograms +// *BpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := LoadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// BpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfSpecs struct { + BpfProgramSpecs + BpfMapSpecs +} + +// BpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfProgramSpecs struct { + KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"` +} + +// BpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfMapSpecs struct { + AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"` + DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"` + DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"` + FilterMap *ebpf.MapSpec `ebpf:"filter_map"` + GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"` + PacketRecord *ebpf.MapSpec `ebpf:"packet_record"` +} + +// BpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfObjects struct { + BpfPrograms + BpfMaps +} + +func (o *BpfObjects) Close() error { + return _BpfClose( + &o.BpfPrograms, + &o.BpfMaps, + ) +} + +// BpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type BpfMaps struct { + AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"` + DirectFlows *ebpf.Map `ebpf:"direct_flows"` + DnsFlows *ebpf.Map `ebpf:"dns_flows"` + FilterMap *ebpf.Map `ebpf:"filter_map"` + GlobalCounters *ebpf.Map `ebpf:"global_counters"` + PacketRecord *ebpf.Map `ebpf:"packet_record"` +} + +func (m *BpfMaps) Close() error { + return _BpfClose( + m.AdditionalFlowMetrics, + m.AggregatedFlows, + m.DirectFlows, + m.DnsFlows, + m.FilterMap, + m.GlobalCounters, + m.PacketRecord, + ) +} + +// BpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfPrograms struct { + KfreeSkb *ebpf.Program `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"` +} + +func (p *BpfPrograms) Close() error { + return _BpfClose( + p.KfreeSkb, + p.RhNetworkEventsMonitoring, + p.TcEgressFlowParse, + p.TcEgressPcaParse, + p.TcIngressFlowParse, + p.TcIngressPcaParse, + p.TcpRcvFentry, + p.TcpRcvKprobe, + p.TcxEgressFlowParse, + p.TcxEgressPcaParse, + p.TcxIngressFlowParse, + p.TcxIngressPcaParse, + p.TrackNatManipPkt, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_arm64_bpfel.o +var _BpfBytes []byte diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.o new file mode 100644 index 000000000..240486c52 Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_arm64_bpfel.o differ diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.go new file mode 100644 index 000000000..a8b825750 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.go @@ -0,0 +1,338 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build ppc64le + +package ebpf + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type BpfAdditionalMetrics struct { + DnsRecord BpfDnsRecordT + PktDrops BpfPktDropsT + FlowRtt uint64 + NetworkEventsIdx uint8 + NetworkEvents [4][8]uint8 + _ [1]byte + TranslatedFlow BpfTranslatedFlowT + _ [6]byte +} + +type BpfDirectionT uint32 + +const ( + BpfDirectionTINGRESS BpfDirectionT = 0 + BpfDirectionTEGRESS BpfDirectionT = 1 + BpfDirectionTMAX_DIRECTION BpfDirectionT = 2 +) + +type BpfDnsFlowId struct { + SrcPort uint16 + DstPort uint16 + SrcIp [16]uint8 + DstIp [16]uint8 + Id uint16 + Protocol uint8 + _ [1]byte +} + +type BpfDnsRecordT struct { + Id uint16 + Flags uint16 + _ [4]byte + Latency uint64 + Errno uint8 + _ [7]byte +} + +type BpfFilterActionT uint32 + +const ( + BpfFilterActionTACCEPT BpfFilterActionT = 0 + BpfFilterActionTREJECT BpfFilterActionT = 1 + BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2 +) + +type BpfFilterKeyT struct { + PrefixLen uint32 + IpData [16]uint8 +} + +type BpfFilterValueT struct { + Protocol uint8 + DstPortStart uint16 + DstPortEnd uint16 + DstPort1 uint16 + DstPort2 uint16 + SrcPortStart uint16 + SrcPortEnd uint16 + SrcPort1 uint16 + SrcPort2 uint16 + PortStart uint16 + PortEnd uint16 + Port1 uint16 + Port2 uint16 + IcmpType uint8 + IcmpCode uint8 + Direction BpfDirectionT + Action BpfFilterActionT + TcpFlags BpfTcpFlagsT + FilterDrops uint8 + Sample uint32 + Ip [16]uint8 +} + +type BpfFlowId BpfFlowIdT + +type BpfFlowIdT struct { + Direction uint8 + SrcIp [16]uint8 + DstIp [16]uint8 + _ [1]byte + SrcPort uint16 + DstPort uint16 + TransportProtocol uint8 + IcmpType uint8 + IcmpCode uint8 + _ [3]byte + IfIndex uint32 +} + +type BpfFlowMetrics BpfFlowMetricsT + +type BpfFlowMetricsT struct { + Lock struct{ Val uint32 } + EthProtocol uint16 + SrcMac [6]uint8 + DstMac [6]uint8 + _ [2]byte + Packets uint32 + Bytes uint64 + StartMonoTimeTs uint64 + EndMonoTimeTs uint64 + Flags uint16 + Errno uint8 + Dscp uint8 + Sampling uint32 +} + +type BpfFlowRecordT struct { + Id BpfFlowId + Metrics BpfFlowMetrics +} + +type BpfGlobalCountersKeyT uint32 + +const ( + BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0 + BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1 + BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2 + BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3 + BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7 + BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8 + BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9 +) + +type BpfPktDropsT struct { + Packets uint32 + _ [4]byte + Bytes uint64 + LatestFlags uint16 + LatestState uint8 + _ [1]byte + LatestDropCause uint32 +} + +type BpfTcpFlagsT uint32 + +const ( + BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1 + BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2 + BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4 + BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8 + BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16 + BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32 + BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64 + BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128 + BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256 + BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512 + BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024 +) + +type BpfTranslatedFlowT 
struct { + Saddr [16]uint8 + Daddr [16]uint8 + Sport uint16 + Dport uint16 + ZoneId uint16 + IcmpId uint8 + _ [1]byte +} + +// LoadBpf returns the embedded CollectionSpec for Bpf. +func LoadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load Bpf: %w", err) + } + + return spec, err +} + +// LoadBpfObjects loads Bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *BpfObjects +// *BpfPrograms +// *BpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := LoadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// BpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfSpecs struct { + BpfProgramSpecs + BpfMapSpecs +} + +// BpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfProgramSpecs struct { + KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"` +} + +// BpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfMapSpecs struct { + AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"` + DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"` + DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"` + FilterMap *ebpf.MapSpec `ebpf:"filter_map"` + GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"` + PacketRecord *ebpf.MapSpec `ebpf:"packet_record"` +} + +// BpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfObjects struct { + BpfPrograms + BpfMaps +} + +func (o *BpfObjects) Close() error { + return _BpfClose( + &o.BpfPrograms, + &o.BpfMaps, + ) +} + +// BpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type BpfMaps struct { + AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"` + DirectFlows *ebpf.Map `ebpf:"direct_flows"` + DnsFlows *ebpf.Map `ebpf:"dns_flows"` + FilterMap *ebpf.Map `ebpf:"filter_map"` + GlobalCounters *ebpf.Map `ebpf:"global_counters"` + PacketRecord *ebpf.Map `ebpf:"packet_record"` +} + +func (m *BpfMaps) Close() error { + return _BpfClose( + m.AdditionalFlowMetrics, + m.AggregatedFlows, + m.DirectFlows, + m.DnsFlows, + m.FilterMap, + m.GlobalCounters, + m.PacketRecord, + ) +} + +// BpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfPrograms struct { + KfreeSkb *ebpf.Program `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"` +} + +func (p *BpfPrograms) Close() error { + return _BpfClose( + p.KfreeSkb, + p.RhNetworkEventsMonitoring, + p.TcEgressFlowParse, + p.TcEgressPcaParse, + p.TcIngressFlowParse, + p.TcIngressPcaParse, + p.TcpRcvFentry, + p.TcpRcvKprobe, + p.TcxEgressFlowParse, + p.TcxEgressPcaParse, + p.TcxIngressFlowParse, + p.TcxIngressPcaParse, + p.TrackNatManipPkt, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_powerpc_bpfel.o +var _BpfBytes []byte diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.o new file mode 100644 index 000000000..fae0677f0 Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_powerpc_bpfel.o differ diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.go new file mode 100644 index 000000000..3372ed20b --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.go @@ -0,0 +1,338 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build s390x + +package ebpf + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type BpfAdditionalMetrics struct { + DnsRecord BpfDnsRecordT + PktDrops BpfPktDropsT + FlowRtt uint64 + NetworkEventsIdx uint8 + NetworkEvents [4][8]uint8 + _ [1]byte + TranslatedFlow BpfTranslatedFlowT + _ [6]byte +} + +type BpfDirectionT uint32 + +const ( + BpfDirectionTINGRESS BpfDirectionT = 0 + BpfDirectionTEGRESS BpfDirectionT = 1 + BpfDirectionTMAX_DIRECTION BpfDirectionT = 2 +) + +type BpfDnsFlowId struct { + SrcPort uint16 + DstPort uint16 + SrcIp [16]uint8 + DstIp [16]uint8 + Id uint16 + Protocol uint8 + _ [1]byte +} + +type BpfDnsRecordT struct { + Id uint16 + Flags uint16 + _ [4]byte + Latency uint64 + Errno uint8 + _ [7]byte +} + +type BpfFilterActionT uint32 + +const ( + BpfFilterActionTACCEPT BpfFilterActionT = 0 + BpfFilterActionTREJECT BpfFilterActionT = 1 + BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2 +) + +type BpfFilterKeyT struct { + PrefixLen uint32 + IpData [16]uint8 +} + +type BpfFilterValueT struct { + Protocol uint8 + DstPortStart uint16 + DstPortEnd uint16 + DstPort1 uint16 + DstPort2 uint16 + SrcPortStart uint16 + SrcPortEnd uint16 + SrcPort1 uint16 + SrcPort2 uint16 + PortStart uint16 + PortEnd uint16 + Port1 uint16 + Port2 uint16 + IcmpType uint8 + IcmpCode uint8 + Direction BpfDirectionT + Action BpfFilterActionT + TcpFlags BpfTcpFlagsT + FilterDrops uint8 + Sample uint32 + Ip [16]uint8 +} + +type BpfFlowId BpfFlowIdT + +type BpfFlowIdT struct { + Direction uint8 + SrcIp [16]uint8 + DstIp [16]uint8 + _ [1]byte + SrcPort uint16 + DstPort uint16 + TransportProtocol uint8 + IcmpType uint8 + IcmpCode uint8 + _ [3]byte + IfIndex uint32 +} + +type BpfFlowMetrics BpfFlowMetricsT + +type BpfFlowMetricsT struct { + Lock struct{ Val uint32 } + EthProtocol uint16 + SrcMac [6]uint8 + DstMac [6]uint8 + _ [2]byte + Packets uint32 + Bytes uint64 + StartMonoTimeTs uint64 + EndMonoTimeTs uint64 + Flags uint16 + Errno uint8 + Dscp uint8 + Sampling uint32 +} + +type BpfFlowRecordT struct { + Id BpfFlowId + Metrics BpfFlowMetrics +} + +type BpfGlobalCountersKeyT uint32 + +const ( + BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0 + BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1 + BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2 + BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3 + BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7 + BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8 + BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9 +) + +type BpfPktDropsT struct { + Packets uint32 + _ [4]byte + Bytes uint64 + LatestFlags uint16 + LatestState uint8 + _ [1]byte + LatestDropCause uint32 +} + +type BpfTcpFlagsT uint32 + +const ( + BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1 + BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2 + BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4 + BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8 + BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16 + BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32 + BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64 + BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128 + BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256 + BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512 + BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024 +) + +type BpfTranslatedFlowT 
struct { + Saddr [16]uint8 + Daddr [16]uint8 + Sport uint16 + Dport uint16 + ZoneId uint16 + IcmpId uint8 + _ [1]byte +} + +// LoadBpf returns the embedded CollectionSpec for Bpf. +func LoadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load Bpf: %w", err) + } + + return spec, err +} + +// LoadBpfObjects loads Bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *BpfObjects +// *BpfPrograms +// *BpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := LoadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// BpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfSpecs struct { + BpfProgramSpecs + BpfMapSpecs +} + +// BpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfProgramSpecs struct { + KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"` +} + +// BpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfMapSpecs struct { + AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"` + DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"` + DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"` + FilterMap *ebpf.MapSpec `ebpf:"filter_map"` + GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"` + PacketRecord *ebpf.MapSpec `ebpf:"packet_record"` +} + +// BpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfObjects struct { + BpfPrograms + BpfMaps +} + +func (o *BpfObjects) Close() error { + return _BpfClose( + &o.BpfPrograms, + &o.BpfMaps, + ) +} + +// BpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type BpfMaps struct { + AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"` + DirectFlows *ebpf.Map `ebpf:"direct_flows"` + DnsFlows *ebpf.Map `ebpf:"dns_flows"` + FilterMap *ebpf.Map `ebpf:"filter_map"` + GlobalCounters *ebpf.Map `ebpf:"global_counters"` + PacketRecord *ebpf.Map `ebpf:"packet_record"` +} + +func (m *BpfMaps) Close() error { + return _BpfClose( + m.AdditionalFlowMetrics, + m.AggregatedFlows, + m.DirectFlows, + m.DnsFlows, + m.FilterMap, + m.GlobalCounters, + m.PacketRecord, + ) +} + +// BpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfPrograms struct { + KfreeSkb *ebpf.Program `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"` +} + +func (p *BpfPrograms) Close() error { + return _BpfClose( + p.KfreeSkb, + p.RhNetworkEventsMonitoring, + p.TcEgressFlowParse, + p.TcEgressPcaParse, + p.TcIngressFlowParse, + p.TcIngressPcaParse, + p.TcpRcvFentry, + p.TcpRcvKprobe, + p.TcxEgressFlowParse, + p.TcxEgressPcaParse, + p.TcxIngressFlowParse, + p.TcxIngressPcaParse, + p.TrackNatManipPkt, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_s390_bpfeb.o +var _BpfBytes []byte diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.o new file mode 100644 index 000000000..aae1b0003 Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_s390_bpfeb.o differ diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go new file mode 100644 index 000000000..a5839acdd --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.go @@ -0,0 +1,338 @@ +// Code generated by bpf2go; DO NOT EDIT. 
+//go:build 386 || amd64 + +package ebpf + +import ( + "bytes" + _ "embed" + "fmt" + "io" + + "github.com/cilium/ebpf" +) + +type BpfAdditionalMetrics struct { + DnsRecord BpfDnsRecordT + PktDrops BpfPktDropsT + FlowRtt uint64 + NetworkEventsIdx uint8 + NetworkEvents [4][8]uint8 + _ [1]byte + TranslatedFlow BpfTranslatedFlowT + _ [6]byte +} + +type BpfDirectionT uint32 + +const ( + BpfDirectionTINGRESS BpfDirectionT = 0 + BpfDirectionTEGRESS BpfDirectionT = 1 + BpfDirectionTMAX_DIRECTION BpfDirectionT = 2 +) + +type BpfDnsFlowId struct { + SrcPort uint16 + DstPort uint16 + SrcIp [16]uint8 + DstIp [16]uint8 + Id uint16 + Protocol uint8 + _ [1]byte +} + +type BpfDnsRecordT struct { + Id uint16 + Flags uint16 + _ [4]byte + Latency uint64 + Errno uint8 + _ [7]byte +} + +type BpfFilterActionT uint32 + +const ( + BpfFilterActionTACCEPT BpfFilterActionT = 0 + BpfFilterActionTREJECT BpfFilterActionT = 1 + BpfFilterActionTMAX_FILTER_ACTIONS BpfFilterActionT = 2 +) + +type BpfFilterKeyT struct { + PrefixLen uint32 + IpData [16]uint8 +} + +type BpfFilterValueT struct { + Protocol uint8 + DstPortStart uint16 + DstPortEnd uint16 + DstPort1 uint16 + DstPort2 uint16 + SrcPortStart uint16 + SrcPortEnd uint16 + SrcPort1 uint16 + SrcPort2 uint16 + PortStart uint16 + PortEnd uint16 + Port1 uint16 + Port2 uint16 + IcmpType uint8 + IcmpCode uint8 + Direction BpfDirectionT + Action BpfFilterActionT + TcpFlags BpfTcpFlagsT + FilterDrops uint8 + Sample uint32 + Ip [16]uint8 +} + +type BpfFlowId BpfFlowIdT + +type BpfFlowIdT struct { + Direction uint8 + SrcIp [16]uint8 + DstIp [16]uint8 + _ [1]byte + SrcPort uint16 + DstPort uint16 + TransportProtocol uint8 + IcmpType uint8 + IcmpCode uint8 + _ [3]byte + IfIndex uint32 +} + +type BpfFlowMetrics BpfFlowMetricsT + +type BpfFlowMetricsT struct { + Lock struct{ Val uint32 } + EthProtocol uint16 + SrcMac [6]uint8 + DstMac [6]uint8 + _ [2]byte + Packets uint32 + Bytes uint64 + StartMonoTimeTs uint64 + EndMonoTimeTs uint64 + Flags uint16 + Errno uint8 + Dscp uint8 + Sampling uint32 +} + +type BpfFlowRecordT struct { + Id BpfFlowId + Metrics BpfFlowMetrics +} + +type BpfGlobalCountersKeyT uint32 + +const ( + BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED BpfGlobalCountersKeyT = 0 + BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS BpfGlobalCountersKeyT = 1 + BpfGlobalCountersKeyTFILTER_REJECT BpfGlobalCountersKeyT = 2 + BpfGlobalCountersKeyTFILTER_ACCEPT BpfGlobalCountersKeyT = 3 + BpfGlobalCountersKeyTFILTER_NOMATCH BpfGlobalCountersKeyT = 4 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR BpfGlobalCountersKeyT = 5 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH BpfGlobalCountersKeyT = 6 + BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS BpfGlobalCountersKeyT = 7 + BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD BpfGlobalCountersKeyT = 8 + BpfGlobalCountersKeyTMAX_COUNTERS BpfGlobalCountersKeyT = 9 +) + +type BpfPktDropsT struct { + Packets uint32 + _ [4]byte + Bytes uint64 + LatestFlags uint16 + LatestState uint8 + _ [1]byte + LatestDropCause uint32 +} + +type BpfTcpFlagsT uint32 + +const ( + BpfTcpFlagsTFIN_FLAG BpfTcpFlagsT = 1 + BpfTcpFlagsTSYN_FLAG BpfTcpFlagsT = 2 + BpfTcpFlagsTRST_FLAG BpfTcpFlagsT = 4 + BpfTcpFlagsTPSH_FLAG BpfTcpFlagsT = 8 + BpfTcpFlagsTACK_FLAG BpfTcpFlagsT = 16 + BpfTcpFlagsTURG_FLAG BpfTcpFlagsT = 32 + BpfTcpFlagsTECE_FLAG BpfTcpFlagsT = 64 + BpfTcpFlagsTCWR_FLAG BpfTcpFlagsT = 128 + BpfTcpFlagsTSYN_ACK_FLAG BpfTcpFlagsT = 256 + BpfTcpFlagsTFIN_ACK_FLAG BpfTcpFlagsT = 512 + BpfTcpFlagsTRST_ACK_FLAG BpfTcpFlagsT = 1024 +) + +type 
BpfTranslatedFlowT struct { + Saddr [16]uint8 + Daddr [16]uint8 + Sport uint16 + Dport uint16 + ZoneId uint16 + IcmpId uint8 + _ [1]byte +} + +// LoadBpf returns the embedded CollectionSpec for Bpf. +func LoadBpf() (*ebpf.CollectionSpec, error) { + reader := bytes.NewReader(_BpfBytes) + spec, err := ebpf.LoadCollectionSpecFromReader(reader) + if err != nil { + return nil, fmt.Errorf("can't load Bpf: %w", err) + } + + return spec, err +} + +// LoadBpfObjects loads Bpf and converts it into a struct. +// +// The following types are suitable as obj argument: +// +// *BpfObjects +// *BpfPrograms +// *BpfMaps +// +// See ebpf.CollectionSpec.LoadAndAssign documentation for details. +func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { + spec, err := LoadBpf() + if err != nil { + return err + } + + return spec.LoadAndAssign(obj, opts) +} + +// BpfSpecs contains maps and programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfSpecs struct { + BpfProgramSpecs + BpfMapSpecs +} + +// BpfSpecs contains programs before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfProgramSpecs struct { + KfreeSkb *ebpf.ProgramSpec `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.ProgramSpec `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.ProgramSpec `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.ProgramSpec `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.ProgramSpec `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.ProgramSpec `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.ProgramSpec `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.ProgramSpec `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.ProgramSpec `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.ProgramSpec `ebpf:"track_nat_manip_pkt"` +} + +// BpfMapSpecs contains maps before they are loaded into the kernel. +// +// It can be passed ebpf.CollectionSpec.Assign. +type BpfMapSpecs struct { + AdditionalFlowMetrics *ebpf.MapSpec `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.MapSpec `ebpf:"aggregated_flows"` + DirectFlows *ebpf.MapSpec `ebpf:"direct_flows"` + DnsFlows *ebpf.MapSpec `ebpf:"dns_flows"` + FilterMap *ebpf.MapSpec `ebpf:"filter_map"` + GlobalCounters *ebpf.MapSpec `ebpf:"global_counters"` + PacketRecord *ebpf.MapSpec `ebpf:"packet_record"` +} + +// BpfObjects contains all objects after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfObjects struct { + BpfPrograms + BpfMaps +} + +func (o *BpfObjects) Close() error { + return _BpfClose( + &o.BpfPrograms, + &o.BpfMaps, + ) +} + +// BpfMaps contains all maps after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. 
+type BpfMaps struct { + AdditionalFlowMetrics *ebpf.Map `ebpf:"additional_flow_metrics"` + AggregatedFlows *ebpf.Map `ebpf:"aggregated_flows"` + DirectFlows *ebpf.Map `ebpf:"direct_flows"` + DnsFlows *ebpf.Map `ebpf:"dns_flows"` + FilterMap *ebpf.Map `ebpf:"filter_map"` + GlobalCounters *ebpf.Map `ebpf:"global_counters"` + PacketRecord *ebpf.Map `ebpf:"packet_record"` +} + +func (m *BpfMaps) Close() error { + return _BpfClose( + m.AdditionalFlowMetrics, + m.AggregatedFlows, + m.DirectFlows, + m.DnsFlows, + m.FilterMap, + m.GlobalCounters, + m.PacketRecord, + ) +} + +// BpfPrograms contains all programs after they have been loaded into the kernel. +// +// It can be passed to LoadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. +type BpfPrograms struct { + KfreeSkb *ebpf.Program `ebpf:"kfree_skb"` + RhNetworkEventsMonitoring *ebpf.Program `ebpf:"rh_network_events_monitoring"` + TcEgressFlowParse *ebpf.Program `ebpf:"tc_egress_flow_parse"` + TcEgressPcaParse *ebpf.Program `ebpf:"tc_egress_pca_parse"` + TcIngressFlowParse *ebpf.Program `ebpf:"tc_ingress_flow_parse"` + TcIngressPcaParse *ebpf.Program `ebpf:"tc_ingress_pca_parse"` + TcpRcvFentry *ebpf.Program `ebpf:"tcp_rcv_fentry"` + TcpRcvKprobe *ebpf.Program `ebpf:"tcp_rcv_kprobe"` + TcxEgressFlowParse *ebpf.Program `ebpf:"tcx_egress_flow_parse"` + TcxEgressPcaParse *ebpf.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressFlowParse *ebpf.Program `ebpf:"tcx_ingress_flow_parse"` + TcxIngressPcaParse *ebpf.Program `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *ebpf.Program `ebpf:"track_nat_manip_pkt"` +} + +func (p *BpfPrograms) Close() error { + return _BpfClose( + p.KfreeSkb, + p.RhNetworkEventsMonitoring, + p.TcEgressFlowParse, + p.TcEgressPcaParse, + p.TcIngressFlowParse, + p.TcIngressPcaParse, + p.TcpRcvFentry, + p.TcpRcvKprobe, + p.TcxEgressFlowParse, + p.TcxEgressPcaParse, + p.TcxIngressFlowParse, + p.TcxIngressPcaParse, + p.TrackNatManipPkt, + ) +} + +func _BpfClose(closers ...io.Closer) error { + for _, closer := range closers { + if err := closer.Close(); err != nil { + return err + } + } + return nil +} + +// Do not access this directly. +// +//go:embed bpf_x86_bpfel.o +var _BpfBytes []byte diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o new file mode 100644 index 000000000..9399eaff3 Binary files /dev/null and b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/bpf_x86_bpfel.o differ diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/gen.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/gen.go new file mode 100644 index 000000000..3ec45b49c --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf/gen.go @@ -0,0 +1,4 @@ +package ebpf + +// $BPF_CLANG and $BPF_CFLAGS are set by the Makefile. 
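+// Running `go generate` below regenerates the per-architecture bpf_*_bpfel*.go
+// bindings and their embedded .o objects vendored above (amd64, arm64, ppc64le, s390x).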
+//go:generate bpf2go -cc $BPF_CLANG -cflags $BPF_CFLAGS -target amd64,arm64,ppc64le,s390x -type flow_metrics_t -type flow_id_t -type flow_record_t -type pkt_drops_t -type dns_record_t -type global_counters_key_t -type direction_t -type filter_action_t -type tcp_flags_t -type translated_flow_t Bpf ../../bpf/flows.c -- -I../../bpf/headers
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/direct_flp.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/direct_flp.go
new file mode 100644
index 000000000..3f8822b23
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/direct_flp.go
@@ -0,0 +1,63 @@
+package exporter
+
+import (
+	"fmt"
+
+	flpconfig "github.com/netobserv/flowlogs-pipeline/pkg/config"
+	"github.com/netobserv/flowlogs-pipeline/pkg/pipeline"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/decode"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+	"gopkg.in/yaml.v2"
+)
+
+// DirectFLP is a flow exporter that feeds records directly into an in-process flowlogs-pipeline.
+type DirectFLP struct {
+	fwd chan flpconfig.GenericMap
+}
+
+func StartDirectFLP(jsonConfig string, bufLen int) (*DirectFLP, error) {
+	var cfg flpconfig.ConfigFileStruct
+	// Note that, despite jsonConfig being JSON, we use the YAML unmarshaler because the JSON one
+	// is broken for HTTPClientConfig in github.com/prometheus/common/config (used for Loki).
+	// This is fine since YAML is a superset of JSON.
+	// E.g. try unmarshaling `{"clientConfig":{"proxy_url":null}}` as an api.WriteLoki.
+	// See also https://github.com/prometheus/prometheus/issues/11816
+	if err := yaml.Unmarshal([]byte(jsonConfig), &cfg); err != nil {
+		return nil, fmt.Errorf("failed to read config: %w", err)
+	}
+
+	fwd := make(chan flpconfig.GenericMap, bufLen)
+	err := pipeline.StartFLPInProcess(&cfg, fwd)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize pipeline: %w", err)
+	}
+
+	return &DirectFLP{fwd: fwd}, nil
+}
+
+// ExportFlows accepts slices of *model.Record by its input channel, converts them
+// to GenericMap instances, and forwards them to the in-process flowlogs-pipeline.
+func (d *DirectFLP) ExportFlows(input <-chan []*model.Record) {
+	for inputRecords := range input {
+		for _, rec := range inputRecords {
+			d.fwd <- decode.RecordToMap(rec)
+		}
+	}
+}
+
+// ExportPackets accepts slices of *model.PacketRecord by its input channel, converts them
+// to GenericMap instances, and forwards them to the in-process flowlogs-pipeline.
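+// Packets with an empty payload stream are skipped.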
+func (d *DirectFLP) ExportPackets(input <-chan []*model.PacketRecord) { + for inputPackets := range input { + for _, packet := range inputPackets { + if len(packet.Stream) != 0 { + d.fwd <- packets.PacketToMap(packet) + } + } + } +} + +func (d *DirectFLP) Close() { + close(d.fwd) +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_packets.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_packets.go new file mode 100644 index 000000000..b78ce1e34 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_packets.go @@ -0,0 +1,67 @@ +package exporter + +import ( + "context" + "time" + + grpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket" + "github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets" + + "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/anypb" +) + +type GRPCPacketProto struct { + hostIP string + hostPort int + clientConn *grpc.ClientConnection +} + +var gplog = logrus.WithField("component", "packet/GRPCPackets") + +// WritePacket writes the given packet data out to gRPC. +func writeGRPCPacket(time time.Time, data []byte, conn *grpc.ClientConnection) error { + bytes, err := packets.GetPacketBytesWithHeader(time, data) + if err != nil { + return err + } + _, err = conn.Client().Send(context.TODO(), &pbpacket.Packet{ + Pcap: &anypb.Any{ + Value: bytes, + }, + }) + return err +} + +func StartGRPCPacketSend(hostIP string, hostPort int) (*GRPCPacketProto, error) { + clientConn, err := grpc.ConnectClient(hostIP, hostPort) + if err != nil { + return nil, err + } + return &GRPCPacketProto{ + hostIP: hostIP, + hostPort: hostPort, + clientConn: clientConn, + }, nil +} + +func (p *GRPCPacketProto) ExportGRPCPackets(in <-chan []*model.PacketRecord) { + for packetRecord := range in { + var errs []error + for _, packet := range packetRecord { + if len(packet.Stream) != 0 { + if err := writeGRPCPacket(packet.Time, packet.Stream, p.clientConn); err != nil { + errs = append(errs, err) + } + } + } + if len(errs) != 0 { + gplog.Errorf("%d errors while sending packets:\n%s", len(errs), errs) + } + } + if err := p.clientConn.Close(); err != nil { + gplog.WithError(err).Warn("couldn't close packet export client") + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_proto.go new file mode 100644 index 000000000..761f0053c --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/grpc_proto.go @@ -0,0 +1,74 @@ +package exporter + +import ( + "context" + + grpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow" + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow" + "github.com/netobserv/netobserv-ebpf-agent/pkg/utils" + + ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +var glog = logrus.WithField("component", "exporter/GRPCProto") + +const componentGRPC = "grpc" + +// GRPCProto flow exporter. Its ExportFlows method accepts slices of *model.Record +// by its input channel, converts them to *pbflow.Records instances, and submits +// them to the collector. 
+type GRPCProto struct { + hostIP string + hostPort int + clientConn *grpc.ClientConnection + // maxFlowsPerMessage limits the maximum number of flows per GRPC message. + // If a message contains more flows than this number, the GRPC message will be split into + // multiple messages. + maxFlowsPerMessage int + metrics *metrics.Metrics + batchCounter prometheus.Counter + sampler *ovnobserv.SampleDecoder +} + +func StartGRPCProto(hostIP string, hostPort int, maxFlowsPerMessage int, m *metrics.Metrics, s *ovnobserv.SampleDecoder) (*GRPCProto, error) { + clientConn, err := grpc.ConnectClient(hostIP, hostPort) + if err != nil { + return nil, err + } + return &GRPCProto{ + hostIP: hostIP, + hostPort: hostPort, + clientConn: clientConn, + maxFlowsPerMessage: maxFlowsPerMessage, + metrics: m, + batchCounter: m.CreateBatchCounter(componentGRPC), + sampler: s, + }, nil +} + +// ExportFlows accepts slices of *model.Record by its input channel, converts them +// to *pbflow.Records instances, and submits them to the collector. +func (g *GRPCProto) ExportFlows(input <-chan []*model.Record) { + socket := utils.GetSocket(g.hostIP, g.hostPort) + log := glog.WithField("collector", socket) + for inputRecords := range input { + g.metrics.EvictionCounter.WithSource(componentGRPC).Inc() + for _, pbRecords := range pbflow.FlowsToPB(inputRecords, g.maxFlowsPerMessage, g.sampler) { + log.Debugf("sending %d records", len(pbRecords.Entries)) + if _, err := g.clientConn.Client().Send(context.TODO(), pbRecords); err != nil { + g.metrics.Errors.WithErrorName(componentGRPC, "CannotWriteMessage").Inc() + log.WithError(err).Error("couldn't send flow records to collector") + } + g.batchCounter.Inc() + g.metrics.EvictedFlowsCounter.WithSource(componentGRPC).Add(float64(len(pbRecords.Entries))) + } + } + if err := g.clientConn.Close(); err != nil { + log.WithError(err).Warn("couldn't close flow export client") + g.metrics.Errors.WithErrorName(componentGRPC, "CannotCloseClient").Inc() + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/ipfix.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/ipfix.go new file mode 100644 index 000000000..379401ccf --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/ipfix.go @@ -0,0 +1,383 @@ +package exporter + +import ( + "net" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + "github.com/netobserv/netobserv-ebpf-agent/pkg/utils" + "github.com/sirupsen/logrus" + "github.com/vmware/go-ipfix/pkg/entities" + ipfixExporter "github.com/vmware/go-ipfix/pkg/exporter" + "github.com/vmware/go-ipfix/pkg/registry" +) + +var ilog = logrus.WithField("component", "exporter/IPFIXProto") + +// TODO: encode also the equivalent of the Protobuf's AgentIP field in a format that is binary- +// compatible with OVN-K. 
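+//
+// A typical wiring of this exporter, sketched for illustration only (the address,
+// port and channel below are hypothetical examples, not part of this package):
+//
+//	ipf, err := StartIPFIXExporter("10.0.0.1", 4739, "tcp")
+//	if err != nil {
+//		// handle the error
+//	}
+//	flows := make(chan []*model.Record)
+//	go ipf.ExportFlows(flows)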
+ +type IPFIX struct { + hostIP string + hostPort int + exporter *ipfixExporter.ExportingProcess + templateIDv4 uint16 + templateIDv6 uint16 + entitiesV4 []entities.InfoElementWithValue + entitiesV6 []entities.InfoElementWithValue +} + +func addElementToTemplate(log *logrus.Entry, elementName string, value []byte, elements *[]entities.InfoElementWithValue) error { + element, err := registry.GetInfoElement(elementName, registry.IANAEnterpriseID) + if err != nil { + log.WithError(err).Errorf("Did not find the element with name %s", elementName) + return err + } + ie, err := entities.DecodeAndCreateInfoElementWithValue(element, value) + if err != nil { + log.WithError(err).Errorf("Failed to decode element %s", elementName) + return err + } + *elements = append(*elements, ie) + return nil +} + +func AddRecordValuesToTemplate(log *logrus.Entry, elements *[]entities.InfoElementWithValue) error { + err := addElementToTemplate(log, "octetDeltaCount", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "tcpControlBits", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "flowStartSeconds", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "flowStartMilliseconds", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "flowEndSeconds", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "flowEndMilliseconds", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "packetDeltaCount", nil, elements) + if err != nil { + return err + } + err = addElementToTemplate(log, "interfaceName", nil, elements) + if err != nil { + return err + } + return nil +} + +func SendTemplateRecordv4(log *logrus.Entry, exporter *ipfixExporter.ExportingProcess) (uint16, []entities.InfoElementWithValue, error) { + templateID := exporter.NewTemplateID() + templateSet := entities.NewSet(false) + err := templateSet.PrepareSet(entities.Template, templateID) + if err != nil { + return 0, nil, err + } + elements := make([]entities.InfoElementWithValue, 0) + + err = addElementToTemplate(log, "ethernetType", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "flowDirection", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "sourceMacAddress", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "destinationMacAddress", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "sourceIPv4Address", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "destinationIPv4Address", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "protocolIdentifier", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "sourceTransportPort", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "destinationTransportPort", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "icmpTypeIPv4", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "icmpCodeIPv4", nil, &elements) + if err != nil { + return 0, nil, err + } + err = AddRecordValuesToTemplate(log, &elements) + if err != nil { + return 0, nil, err + } + err = templateSet.AddRecord(elements, templateID) + if err != nil { + 
return 0, nil, err + } + _, err = exporter.SendSet(templateSet) + if err != nil { + return 0, nil, err + } + + return templateID, elements, nil +} + +func SendTemplateRecordv6(log *logrus.Entry, exporter *ipfixExporter.ExportingProcess) (uint16, []entities.InfoElementWithValue, error) { + templateID := exporter.NewTemplateID() + templateSet := entities.NewSet(false) + err := templateSet.PrepareSet(entities.Template, templateID) + if err != nil { + return 0, nil, err + } + elements := make([]entities.InfoElementWithValue, 0) + + err = addElementToTemplate(log, "ethernetType", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "flowDirection", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "sourceMacAddress", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "destinationMacAddress", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "sourceIPv6Address", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "destinationIPv6Address", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "nextHeaderIPv6", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "sourceTransportPort", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "destinationTransportPort", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "icmpTypeIPv6", nil, &elements) + if err != nil { + return 0, nil, err + } + err = addElementToTemplate(log, "icmpCodeIPv6", nil, &elements) + if err != nil { + return 0, nil, err + } + err = AddRecordValuesToTemplate(log, &elements) + if err != nil { + return 0, nil, err + } + + err = templateSet.AddRecord(elements, templateID) + if err != nil { + return 0, nil, err + } + _, err = exporter.SendSet(templateSet) + if err != nil { + return 0, nil, err + } + + return templateID, elements, nil +} + +// Sends out Template record to the IPFIX collector +func StartIPFIXExporter(hostIP string, hostPort int, transportProto string) (*IPFIX, error) { + socket := utils.GetSocket(hostIP, hostPort) + log := ilog.WithField("collector", socket) + + registry.LoadRegistry() + // Create exporter using local server info + input := ipfixExporter.ExporterInput{ + CollectorAddress: socket, + CollectorProtocol: transportProto, + ObservationDomainID: 1, + TempRefTimeout: 1, + } + exporter, err := ipfixExporter.InitExportingProcess(input) + if err != nil { + log.Fatalf("Got error when connecting to local server %s: %v", socket, err) + return nil, err + } + log.Infof("Created exporter connecting to local server with address: %s", socket) + + templateIDv4, entitiesV4, err := SendTemplateRecordv4(log, exporter) + if err != nil { + log.WithError(err).Error("Failed in send IPFIX template v4 record") + return nil, err + } + + templateIDv6, entitiesV6, err := SendTemplateRecordv6(log, exporter) + if err != nil { + log.WithError(err).Error("Failed in send IPFIX template v6 record") + return nil, err + } + log.Infof("entities v4 %+v", entitiesV4) + log.Infof("entities v6 %+v", entitiesV6) + + return &IPFIX{ + hostIP: hostIP, + hostPort: hostPort, + exporter: exporter, + templateIDv4: templateIDv4, + templateIDv6: templateIDv6, + entitiesV4: entitiesV4, + entitiesV6: entitiesV6, + }, nil +} + +func setIPv4Address(ieValPtr *entities.InfoElementWithValue, 
ipAddress net.IP) { + ieVal := *ieValPtr + if ipAddress == nil { + ieVal.SetIPAddressValue(net.ParseIP("0.0.0.0")) + } else { + ieVal.SetIPAddressValue(ipAddress) + } +} +func setIERecordValue(record *model.Record, ieValPtr *entities.InfoElementWithValue) { + ieVal := *ieValPtr + switch ieVal.GetName() { + case "octetDeltaCount": + ieVal.SetUnsigned64Value(record.Metrics.Bytes) + case "tcpControlBits": + ieVal.SetUnsigned16Value(record.Metrics.Flags) + case "flowStartSeconds": + ieVal.SetUnsigned32Value(uint32(record.TimeFlowStart.Unix())) + case "flowStartMilliseconds": + ieVal.SetUnsigned64Value(uint64(record.TimeFlowStart.UnixMilli())) + case "flowEndSeconds": + ieVal.SetUnsigned32Value(uint32(record.TimeFlowEnd.Unix())) + case "flowEndMilliseconds": + ieVal.SetUnsigned64Value(uint64(record.TimeFlowEnd.UnixMilli())) + case "packetDeltaCount": + ieVal.SetUnsigned64Value(uint64(record.Metrics.Packets)) + case "interfaceName": + ieVal.SetStringValue(record.Interface) + } +} +func setIEValue(record *model.Record, ieValPtr *entities.InfoElementWithValue) { + ieVal := *ieValPtr + switch ieVal.GetName() { + case "ethernetType": + ieVal.SetUnsigned16Value(record.Metrics.EthProtocol) + case "flowDirection": + ieVal.SetUnsigned8Value(record.ID.Direction) + case "sourceMacAddress": + ieVal.SetMacAddressValue(record.Metrics.SrcMac[:]) + case "destinationMacAddress": + ieVal.SetMacAddressValue(record.Metrics.DstMac[:]) + case "sourceIPv4Address": + setIPv4Address(ieValPtr, model.IP(record.ID.SrcIp).To4()) + case "destinationIPv4Address": + setIPv4Address(ieValPtr, model.IP(record.ID.DstIp).To4()) + case "sourceIPv6Address": + ieVal.SetIPAddressValue(record.ID.SrcIp[:]) + case "destinationIPv6Address": + ieVal.SetIPAddressValue(record.ID.DstIp[:]) + case "protocolIdentifier", "nextHeaderIPv6": + ieVal.SetUnsigned8Value(record.ID.TransportProtocol) + case "sourceTransportPort": + ieVal.SetUnsigned16Value(record.ID.SrcPort) + case "destinationTransportPort": + ieVal.SetUnsigned16Value(record.ID.DstPort) + case "icmpTypeIPv4", "icmpTypeIPv6": + ieVal.SetUnsigned8Value(record.ID.IcmpType) + case "icmpCodeIPv4", "icmpCodeIPv6": + ieVal.SetUnsigned8Value(record.ID.IcmpCode) + } +} +func setEntities(record *model.Record, elements *[]entities.InfoElementWithValue) { + for _, ieVal := range *elements { + setIEValue(record, &ieVal) + setIERecordValue(record, &ieVal) + } +} +func (ipf *IPFIX) sendDataRecord(_ *logrus.Entry, record *model.Record, v6 bool) error { + dataSet := entities.NewSet(false) + var templateID uint16 + if v6 { + templateID = ipf.templateIDv6 + setEntities(record, &ipf.entitiesV6) + } else { + templateID = ipf.templateIDv4 + setEntities(record, &ipf.entitiesV4) + } + err := dataSet.PrepareSet(entities.Data, templateID) + if err != nil { + return err + } + if v6 { + err = dataSet.AddRecord(ipf.entitiesV6, templateID) + if err != nil { + return err + } + } else { + err = dataSet.AddRecord(ipf.entitiesV4, templateID) + if err != nil { + return err + } + } + _, err = ipf.exporter.SendSet(dataSet) + if err != nil { + return err + } + return nil +} + +// ExportFlows accepts slices of *model.Record by its input channel, converts them +// to IPFIX Records, and submits them to the collector. 
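+// IPv6 flows are sent with the v6 template; all other flows use the v4 template.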
+func (ipf *IPFIX) ExportFlows(input <-chan []*model.Record) {
+	socket := utils.GetSocket(ipf.hostIP, ipf.hostPort)
+	log := ilog.WithField("collector", socket)
+	for inputRecords := range input {
+		for _, record := range inputRecords {
+			if record.Metrics.EthProtocol == model.IPv6Type {
+				err := ipf.sendDataRecord(log, record, true)
+				if err != nil {
+					log.WithError(err).Error("Failed to send IPFIX data record")
+				}
+			} else {
+				err := ipf.sendDataRecord(log, record, false)
+				if err != nil {
+					log.WithError(err).Error("Failed to send IPFIX data record")
+				}
+			}
+		}
+	}
+	ipf.exporter.CloseConnToCollector()
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/kafka_proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/kafka_proto.go
new file mode 100644
index 000000000..50b5ee3f1
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/exporter/kafka_proto.go
@@ -0,0 +1,78 @@
+package exporter
+
+import (
+	"context"
+
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
+
+	ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder"
+	kafkago "github.com/segmentio/kafka-go"
+	"github.com/sirupsen/logrus"
+	"google.golang.org/protobuf/proto"
+)
+
+var klog = logrus.WithField("component", "exporter/KafkaProto")
+
+const componentKafka = "kafka"
+
+type kafkaWriter interface {
+	WriteMessages(ctx context.Context, msgs ...kafkago.Message) error
+}
+
+// KafkaProto exports flows over Kafka, encoded as a protobuf that is understandable by the
+// Flowlogs-Pipeline collector.
+type KafkaProto struct {
+	Writer        kafkaWriter
+	Metrics       *metrics.Metrics
+	SampleDecoder *ovnobserv.SampleDecoder
+}
+
+func (kp *KafkaProto) ExportFlows(input <-chan []*model.Record) {
+	klog.Info("starting Kafka exporter")
+	for records := range input {
+		kp.batchAndSubmit(records)
+	}
+}
+
+func getFlowKey(record *model.Record) []byte {
+	// We sort the IP addresses so that flows from one IP to a second IP get the same key, whatever the direction is
+	for k := range record.ID.SrcIp {
+		if record.ID.SrcIp[k] < record.ID.DstIp[k] {
+			return append(record.ID.SrcIp[:], record.ID.DstIp[:]...)
+		} else if record.ID.SrcIp[k] > record.ID.DstIp[k] {
+			return append(record.ID.DstIp[:], record.ID.SrcIp[:]...)
+		}
+	}
+	return append(record.ID.SrcIp[:], record.ID.DstIp[:]...)
+}
+
+func (kp *KafkaProto) batchAndSubmit(records []*model.Record) {
+	klog.Debugf("sending %d records", len(records))
+	msgs := make([]kafkago.Message, 0, len(records))
+	for _, record := range records {
+		pbBytes, err := proto.Marshal(pbflow.FlowToPB(record, kp.SampleDecoder))
+		if err != nil {
+			klog.WithError(err).Debug("can't encode protobuf message. Ignoring")
+			kp.Metrics.Errors.WithErrorName(componentKafka, "CannotEncodeMessage").Inc()
+			continue
+		}
+		msgs = append(msgs, kafkago.Message{Value: pbBytes, Key: getFlowKey(record)})
+	}
+
+	if err := kp.Writer.WriteMessages(context.TODO(), msgs...); err != nil {
+		klog.WithError(err).Error("can't write messages into Kafka")
+		kp.Metrics.Errors.WithErrorName(componentKafka, "CannotWriteMessage").Inc()
+	}
+	kp.Metrics.EvictionCounter.WithSource(componentKafka).Inc()
+	kp.Metrics.EvictedFlowsCounter.WithSource(componentKafka).Add(float64(len(records)))
+}
+
+type JSONRecord struct {
+	*model.Record
+	TimeFlowStart   int64
+	TimeFlowEnd     int64
+	TimeFlowStartMs int64
+	TimeFlowEndMs   int64
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/account.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/account.go
new file mode 100644
index 000000000..f850f960d
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/account.go
@@ -0,0 +1,105 @@
+package flow
+
+import (
+	"time"
+
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Accounter accumulates flow metrics in memory and eventually evicts them via an evictor channel.
+// The accounting process is usually done in kernel space. This type reimplements it in user space
+// for the edge case where packets are submitted directly via ring-buffer because the kernel-side
+// accounting map is full.
+type Accounter struct {
+	maxEntries   int
+	evictTimeout time.Duration
+	entries      map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics
+	clock        func() time.Time
+	monoClock    func() time.Duration
+	metrics      *metrics.Metrics
+}
+
+var alog = logrus.WithField("component", "flow/Accounter")
+
+// NewAccounter creates a new Accounter.
+// Entries are evicted when the cache reaches maxEntries or when evictTimeout expires.
+func NewAccounter(
+	maxEntries int, evictTimeout time.Duration,
+	clock func() time.Time,
+	monoClock func() time.Duration,
+	m *metrics.Metrics,
+) *Accounter {
+	acc := Accounter{
+		maxEntries:   maxEntries,
+		evictTimeout: evictTimeout,
+		entries:      map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{},
+		clock:        clock,
+		monoClock:    monoClock,
+		metrics:      m,
+	}
+	return &acc
+}
+
+// Account runs in a new goroutine. It reads all the records from the input channel
+// and accumulates their metrics internally. Once the cache reaches its maximum size
+// or the eviction timer fires, it evicts all the accumulated flows through the output channel.
+func (c *Accounter) Account(in <-chan *model.RawRecord, out chan<- []*model.Record) {
+	evictTick := time.NewTicker(c.evictTimeout)
+	defer evictTick.Stop()
+	for {
+		select {
+		case <-evictTick.C:
+			if len(c.entries) == 0 {
+				break
+			}
+			evictingEntries := c.entries
+			c.entries = map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{}
+			logrus.WithField("flows", len(evictingEntries)).
+				Debug("evicting flows from userspace accounter on timeout")
+			c.evict(evictingEntries, out, "timeout")
+		case record, ok := <-in:
+			if !ok {
+				alog.Debug("input channel closed. Evicting entries")
+				// if the records channel is closed, we evict the entries in the
+				// same goroutine to wait for all the entries to be sent before
+				// closing the channel
+				c.evict(c.entries, out, "closing")
+				alog.Debug("exiting account routine")
+				return
+			}
+			if stored, ok := c.entries[record.Id]; ok {
+				model.AccumulateBase(stored, &record.Metrics)
+			} else {
+				if len(c.entries) >= c.maxEntries {
+					evictingEntries := c.entries
+					c.entries = map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics{}
+					logrus.WithField("flows", len(evictingEntries)).
+						Debug("evicting flows from userspace accounter after reaching cache max length")
+					c.evict(evictingEntries, out, "full")
+					// Since we evict flows because cacheMaxFlows was reached, reset the
+					// eviction timer to avoid another, unnecessary eviction when it expires.
+					evictTick.Reset(c.evictTimeout)
+				}
+				c.entries[record.Id] = &record.Metrics
+			}
+		}
+		c.metrics.BufferSizeGauge.WithBufferName("accounter-entries").Set(float64(len(c.entries)))
+	}
+}
+
+func (c *Accounter) evict(entries map[ebpf.BpfFlowId]*ebpf.BpfFlowMetrics, evictor chan<- []*model.Record, reason string) {
+	now := c.clock()
+	monotonicNow := uint64(c.monoClock())
+	records := make([]*model.Record, 0, len(entries))
+	for key, metrics := range entries {
+		records = append(records, model.NewRecord(key, &model.BpfFlowContent{BpfFlowMetrics: metrics}, now, monotonicNow))
+	}
+	c.metrics.EvictionCounter.WithSourceAndReason("accounter", reason).Inc()
+	c.metrics.EvictedFlowsCounter.WithSourceAndReason("accounter", reason).Add(float64(len(records)))
+	alog.WithField("numEntries", len(records)).Debug("records evicted from userspace accounter")
+	evictor <- records
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/decorator.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/decorator.go
new file mode 100644
index 000000000..75d90bdd3
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/decorator.go
@@ -0,0 +1,24 @@
+package flow
+
+import (
+	"net"
+
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+)
+
+type InterfaceNamer func(ifIndex int) string
+
+// Decorate adds extra metadata fields to the flows that are not directly fetched by eBPF:
+// - The interface name (corresponding to the interface index in the flow).
+// - The IP address of the agent host.
+func Decorate(agentIP net.IP, ifaceNamer InterfaceNamer) func(in <-chan []*model.Record, out chan<- []*model.Record) {
+	return func(in <-chan []*model.Record, out chan<- []*model.Record) {
+		for flows := range in {
+			for _, flow := range flows {
+				flow.Interface = ifaceNamer(int(flow.ID.IfIndex))
+				flow.AgentIP = agentIP
+			}
+			out <- flows
+		}
+	}
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go
new file mode 100644
index 000000000..06f7b07f5
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/deduper.go
@@ -0,0 +1,177 @@
+package flow
+
+import (
+	"container/list"
+	"reflect"
+	"time"
+
+	"github.com/sirupsen/logrus"
+
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/metrics"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+)
+
+var dlog = logrus.WithField("component", "flow/Deduper")
+var timeNow = time.Now
+
+// deduperCache implements an LRU cache whose elements are evicted if they haven't been accessed
+// during the expire duration.
+// It is not safe for concurrent access. +type deduperCache struct { + expire time.Duration + // key: ebpf.BpfFlowId with the interface and MACs erased, to detect duplicates + // value: listElement pointing to a struct entry + ifaces map[ebpf.BpfFlowId]*list.Element + // element: entry structs of the ifaces map ordered by expiry time + entries *list.List +} + +type entry struct { + key *ebpf.BpfFlowId + dnsRecord *ebpf.BpfDnsRecordT + flowRTT *uint64 + networkEvents *[model.MaxNetworkEvents][model.NetworkEventsMaxEventsMD]uint8 + ifIndex uint32 + expiryTime time.Time + dupList *[]map[string]uint8 +} + +// Dedupe receives flows and filters these belonging to duplicate interfaces. It will forward +// the flows from the first interface coming to it, until that flow expires in the cache +// (no activity for it during the expiration time) +// The justMark argument tells that the deduper should not drop the duplicate flows but +// set their Duplicate field. +func Dedupe(expireTime time.Duration, justMark, mergeDup bool, ifaceNamer InterfaceNamer, m *metrics.Metrics) func(in <-chan []*model.Record, out chan<- []*model.Record) { + cache := &deduperCache{ + expire: expireTime, + entries: list.New(), + ifaces: map[ebpf.BpfFlowId]*list.Element{}, + } + return func(in <-chan []*model.Record, out chan<- []*model.Record) { + for records := range in { + cache.removeExpired() + fwd := make([]*model.Record, 0, len(records)) + for _, record := range records { + cache.checkDupe(record, justMark, mergeDup, &fwd, ifaceNamer) + } + if len(fwd) > 0 { + out <- fwd + m.EvictionCounter.WithSource("deduper").Inc() + m.EvictedFlowsCounter.WithSource("deduper").Add(float64(len(fwd))) + } + m.BufferSizeGauge.WithBufferName("deduper-list").Set(float64(cache.entries.Len())) + m.BufferSizeGauge.WithBufferName("deduper-map").Set(float64(len(cache.ifaces))) + } + } +} + +// checkDupe check current record if its already available nad if not added to fwd records list +func (c *deduperCache) checkDupe(r *model.Record, justMark, mergeDup bool, fwd *[]*model.Record, ifaceNamer InterfaceNamer) { + mergeEntry := make(map[string]uint8) + rk := r.ID + // zeroes fields from key that should be ignored from the flow comparison + rk.IfIndex = 0 + rk.Direction = 0 + if r.Metrics.AdditionalMetrics == nil { + r.Metrics.AdditionalMetrics = &ebpf.BpfAdditionalMetrics{} + } + // If a flow has been accounted previously, whatever its interface was, + // it updates the expiry time for that flow + if ele, ok := c.ifaces[rk]; ok { + fEntry := ele.Value.(*entry) + fEntry.expiryTime = timeNow().Add(c.expire) + c.entries.MoveToFront(ele) + // The input flow is duplicate if its interface is different to the interface + // of the non-duplicate flow that was first registered in the cache + // except if the new flow has DNS enrichment in this case will enrich the flow in the cache + // with DNS info and mark the current flow as duplicate + if r.Metrics.AdditionalMetrics.DnsRecord.Latency != 0 && fEntry.dnsRecord.Latency == 0 { + // copy DNS record to the cached entry and mark it as duplicate + fEntry.dnsRecord.Flags = r.Metrics.AdditionalMetrics.DnsRecord.Flags + fEntry.dnsRecord.Id = r.Metrics.AdditionalMetrics.DnsRecord.Id + fEntry.dnsRecord.Latency = r.Metrics.AdditionalMetrics.DnsRecord.Latency + fEntry.dnsRecord.Errno = r.Metrics.AdditionalMetrics.DnsRecord.Errno + } + // If the new flow has flowRTT then enrich the flow in the case with the same RTT and mark it duplicate + if r.Metrics.AdditionalMetrics.FlowRtt != 0 && *fEntry.flowRTT == 0 { + 
*fEntry.flowRTT = r.Metrics.AdditionalMetrics.FlowRtt + } + // If the new flows have network events, then enrich the flow in the cache and mark the flow as duplicate + for i, md := range r.Metrics.AdditionalMetrics.NetworkEvents { + if !model.AllZerosMetaData(md) && model.AllZerosMetaData(fEntry.networkEvents[i]) { + copy(fEntry.networkEvents[i][:], md[:]) + } + } + if fEntry.ifIndex != r.ID.IfIndex { + if justMark { + r.Duplicate = true + *fwd = append(*fwd, r) + } + if mergeDup { + ifName := ifaceNamer(int(r.ID.IfIndex)) + mergeEntry[ifName] = r.ID.Direction + if dupEntryNew(*fEntry.dupList, mergeEntry) { + *fEntry.dupList = append(*fEntry.dupList, mergeEntry) + dlog.Debugf("merge list entries dump:") + for _, entry := range *fEntry.dupList { + for k, v := range entry { + dlog.Debugf("interface %s dir %d", k, v) + } + } + } + } + return + } + *fwd = append(*fwd, r) + return + } + // The flow has not been accounted previously (or was forgotten after expiration) + // so we register it for that concrete interface + e := entry{ + key: &rk, + dnsRecord: &r.Metrics.AdditionalMetrics.DnsRecord, + flowRTT: &r.Metrics.AdditionalMetrics.FlowRtt, + networkEvents: &r.Metrics.AdditionalMetrics.NetworkEvents, + ifIndex: r.ID.IfIndex, + expiryTime: timeNow().Add(c.expire), + } + if mergeDup { + ifName := ifaceNamer(int(r.ID.IfIndex)) + mergeEntry[ifName] = r.ID.Direction + r.DupList = append(r.DupList, mergeEntry) + e.dupList = &r.DupList + } + c.ifaces[rk] = c.entries.PushFront(&e) + *fwd = append(*fwd, r) +} + +func dupEntryNew(dupList []map[string]uint8, mergeEntry map[string]uint8) bool { + for _, entry := range dupList { + if reflect.DeepEqual(entry, mergeEntry) { + return false + } + } + return true +} + +func (c *deduperCache) removeExpired() { + now := timeNow() + ele := c.entries.Back() + evicted := 0 + for ele != nil && now.After(ele.Value.(*entry).expiryTime) { + evicted++ + c.entries.Remove(ele) + fEntry := ele.Value.(*entry) + fEntry.dupList = nil + delete(c.ifaces, *fEntry.key) + ele = c.entries.Back() + } + if evicted > 0 { + dlog.WithFields(logrus.Fields{ + "current": c.entries.Len(), + "evicted": evicted, + "expiryTime": c.expire, + }).Debug("entries evicted from the deduper cache") + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/limiter.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/limiter.go new file mode 100644 index 000000000..282ad5761 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/limiter.go @@ -0,0 +1,67 @@ +package flow + +import ( + "time" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + "github.com/sirupsen/logrus" +) + +const initialLogPeriod = time.Minute +const maxLogPeriod = time.Hour + +var cllog = logrus.WithField("component", "capacity.Limiter") + +// CapacityLimiter forwards the flows between two nodes but checks the status of the destination +// node's buffered channel. If it is already full, it drops the incoming flow and periodically will +// log a message about the number of lost flows. 
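The capacity limiter described above forwards a batch only if the destination channel still has room, otherwise it drops it and counts the loss. Its length check (len against cap) can also be written as the more common select-with-default idiom; a small sketch, where forwardOrDrop is a hypothetical helper and not part of the agent:

```go
package main

import "fmt"

// forwardOrDrop sends batch to out only if the send would not block.
// Note one behavioral difference: for an unbuffered channel this drops
// unless a receiver is already waiting, whereas the limiter's
// "len(out) < cap(out) || cap(out) == 0" check always forwards (and may
// block) when cap == 0.
func forwardOrDrop(out chan []int, batch []int, dropped *int) {
	select {
	case out <- batch:
	default:
		*dropped += len(batch)
	}
}

func main() {
	out := make(chan []int, 1)
	dropped := 0
	forwardOrDrop(out, []int{1, 2, 3}, &dropped) // fits in the buffer
	forwardOrDrop(out, []int{4, 5}, &dropped)    // buffer full: dropped
	fmt.Println(len(<-out), dropped)             // prints: 3 2
}
```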
+type CapacityLimiter struct { + droppedFlows int + metrics *metrics.Metrics +} + +func NewCapacityLimiter(m *metrics.Metrics) *CapacityLimiter { + return &CapacityLimiter{metrics: m} +} + +func (c *CapacityLimiter) Limit(in <-chan []*model.Record, out chan<- []*model.Record) { + go c.logDroppedFlows() + for i := range in { + if len(out) < cap(out) || cap(out) == 0 { + out <- i + } else { + c.metrics.DroppedFlowsCounter.WithSourceAndReason("limiter", "full").Add(float64(len(i))) + c.droppedFlows += len(i) + } + } +} + +func (c *CapacityLimiter) logDroppedFlows() { + logPeriod := initialLogPeriod + debugging := logrus.IsLevelEnabled(logrus.DebugLevel) + for { + time.Sleep(logPeriod) + + // a race condition might happen in this counter but it's not important as it's just for + // logging purposes + df := c.droppedFlows + if df > 0 { + c.droppedFlows = 0 + cllog.Warnf("%d flows were dropped during the last %s because the agent is forwarding "+ + "more flows than the remote ingestor is able to process. You might "+ + "want to increase the CACHE_MAX_FLOWS and CACHE_ACTIVE_TIMEOUT property", + df, logPeriod) + + // if not debug logs, backoff to avoid flooding the log with warning messages + if !debugging && logPeriod < maxLogPeriod { + logPeriod *= 2 + if logPeriod > maxLogPeriod { + logPeriod = maxLogPeriod + } + } + } else { + logPeriod = initialLogPeriod + } + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/perfbuffer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/perfbuffer.go new file mode 100644 index 000000000..a795abbaf --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/perfbuffer.go @@ -0,0 +1,70 @@ +package flow + +import ( + "time" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + "github.com/sirupsen/logrus" +) + +var plog = logrus.WithField("component", "packet/PerfBuffer") + +type PerfBuffer struct { + maxEntries int + evictTimeout time.Duration + entries [](*model.PacketRecord) +} + +func NewPerfBuffer( + maxEntries int, evictTimeout time.Duration, +) *PerfBuffer { + return &PerfBuffer{ + maxEntries: maxEntries, + evictTimeout: evictTimeout, + entries: []*model.PacketRecord{}, + } +} + +func (c *PerfBuffer) PBuffer(in <-chan *model.PacketRecord, out chan<- []*model.PacketRecord) { + evictTick := time.NewTicker(c.evictTimeout) + defer evictTick.Stop() + ind := 0 + for { + select { + case <-evictTick.C: + if len(c.entries) == 0 { + break + } + evictingEntries := c.entries + c.entries = []*model.PacketRecord{} + logrus.WithField("packets", len(evictingEntries)). + Debug("evicting packets from userspace on timeout") + c.evict(evictingEntries, out) + case packet, ok := <-in: + if !ok { + plog.Debug("input channel closed. Evicting entries") + c.evict(c.entries, out) + plog.Debug("exiting perfbuffer routine") + return + } + if len(c.entries) >= c.maxEntries { + evictingEntries := c.entries + c.entries = []*model.PacketRecord{} + logrus.WithField("packets", len(evictingEntries)). 
+ Debug("evicting packets from userspace accounter after reaching cache max length") + c.evict(evictingEntries, out) + } + c.entries = append(c.entries, model.NewPacketRecord(packet.Stream, (uint32)(len(packet.Stream)), packet.Time)) + ind++ + } + } +} + +func (c *PerfBuffer) evict(entries [](*model.PacketRecord), evictor chan<- []*model.PacketRecord) { + packets := make([]*model.PacketRecord, 0, len(entries)) + for _, payload := range entries { + packets = append(packets, model.NewPacketRecord(payload.Stream, (uint32)(len(payload.Stream)), payload.Time)) + } + alog.WithField("numEntries", len(packets)).Debug("packets evicted from userspace accounter") + evictor <- packets +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go new file mode 100644 index 000000000..fe9d711ec --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_map.go @@ -0,0 +1,127 @@ +package flow + +import ( + "context" + "runtime" + "sync" + "time" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + + "github.com/gavv/monotime" + "github.com/netobserv/gopipes/pkg/node" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +var mtlog = logrus.WithField("component", "flow.MapTracer") + +// MapTracer accesses a mapped source of flows (the eBPF PerCPU HashMap), deserializes it into +// a flow model.Record structure, and performs the accumulation of each perCPU-record into a single flow +type MapTracer struct { + mapFetcher mapFetcher + evictionTimeout time.Duration + staleEntriesEvictTimeout time.Duration + // manages the access to the eviction routines, avoiding two evictions happening at the same time + evictionCond *sync.Cond + metrics *metrics.Metrics + timeSpentinLookupAndDelete prometheus.Histogram +} + +type mapFetcher interface { + LookupAndDeleteMap(*metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent + DeleteMapsStaleEntries(timeOut time.Duration) +} + +func NewMapTracer(fetcher mapFetcher, evictionTimeout, staleEntriesEvictTimeout time.Duration, m *metrics.Metrics) *MapTracer { + return &MapTracer{ + mapFetcher: fetcher, + evictionTimeout: evictionTimeout, + evictionCond: sync.NewCond(&sync.Mutex{}), + staleEntriesEvictTimeout: staleEntriesEvictTimeout, + metrics: m, + timeSpentinLookupAndDelete: m.CreateTimeSpendInLookupAndDelete(), + } +} + +// Flush forces reading (and removing) all the flows from the source eBPF map +// and sending the entries to the next stage in the pipeline +func (m *MapTracer) Flush() { + m.evictionCond.Broadcast() +} + +func (m *MapTracer) TraceLoop(ctx context.Context, forceGC bool) node.StartFunc[[]*model.Record] { + return func(out chan<- []*model.Record) { + evictionTicker := time.NewTicker(m.evictionTimeout) + go m.evictionSynchronization(ctx, forceGC, out) + for { + select { + case <-ctx.Done(): + evictionTicker.Stop() + mtlog.Debug("exiting trace loop due to context cancellation") + return + case <-evictionTicker.C: + mtlog.Debug("triggering flow eviction on timer") + m.Flush() + } + } + } +} + +// evictionSynchronization loop just waits for the evictionCond to happen +// and triggers the actual eviction. 
+// It makes sure that only one eviction is being triggered at the same time.
+func (m *MapTracer) evictionSynchronization(ctx context.Context, forceGC bool, out chan<- []*model.Record) {
+	// flow eviction loop. It just keeps waiting for eviction until someone triggers the
+	// evictionCond.Broadcast signal
+	for {
+		// make sure we only evict once at a time, even if there are multiple eviction signals
+		m.evictionCond.L.Lock()
+		m.evictionCond.Wait()
+		select {
+		case <-ctx.Done():
+			mtlog.Debug("context canceled. Stopping goroutine before evicting flows")
+			return
+		default:
+			mtlog.Debug("evictionSynchronization signal received")
+			m.evictFlows(ctx, forceGC, out)
+		}
+		m.evictionCond.L.Unlock()
+	}
+}
+
+func (m *MapTracer) evictFlows(ctx context.Context, forceGC bool, forwardFlows chan<- []*model.Record) {
+	// it's important that this monotonic timer reports same or approximate values as kernel-side bpf_ktime_get_ns()
+	monotonicTimeNow := monotime.Now()
+	currentTime := time.Now()
+
+	var forwardingFlows []*model.Record
+	flows := m.mapFetcher.LookupAndDeleteMap(m.metrics)
+	elapsed := time.Since(currentTime)
+	for flowKey, flowMetrics := range flows {
+		forwardingFlows = append(forwardingFlows, model.NewRecord(
+			flowKey,
+			&flowMetrics,
+			currentTime,
+			uint64(monotonicTimeNow),
+		))
+	}
+	m.mapFetcher.DeleteMapsStaleEntries(m.staleEntriesEvictTimeout)
+	select {
+	case <-ctx.Done():
+		mtlog.Debug("skipping flow eviction as agent is being stopped")
+	default:
+		forwardFlows <- forwardingFlows
+	}
+
+	if forceGC {
+		runtime.GC()
+	}
+	m.metrics.EvictionCounter.WithSource("hashmap").Inc()
+	m.metrics.EvictedFlowsCounter.WithSource("hashmap").Add(float64(len(forwardingFlows)))
+	m.timeSpentinLookupAndDelete.Observe(elapsed.Seconds())
+	mtlog.Debugf("%d flows evicted", len(forwardingFlows))
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_perf.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_perf.go
new file mode 100644
index 000000000..237cbe946
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_perf.go
@@ -0,0 +1,73 @@
+package flow
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/cilium/ebpf/perf"
+	"github.com/netobserv/gopipes/pkg/node"
+	"github.com/netobserv/netobserv-ebpf-agent/pkg/model"
+	"github.com/sirupsen/logrus"
+)
+
+var pblog = logrus.WithField("component", "flow.PerfTracer")
+
+// PerfTracer receives single-packet flows via a perf buffer (usually, those that couldn't be
+// added in the eBPF kernel space due to the map being full or busy) and submits them to the
+// userspace Aggregator map
+type PerfTracer struct {
+	perfArray perfReader
+	stats     stats
+}
+
+type perfReader interface {
+	ReadPerf() (perf.Record, error)
+}
+
+func NewPerfTracer(
+	reader perfReader, logTimeout time.Duration,
+) *PerfTracer {
+	return &PerfTracer{
+		perfArray: reader,
+		stats:     stats{loggingTimeout: logTimeout},
+	}
+}
+
+func (m *PerfTracer) TraceLoop(ctx context.Context) node.StartFunc[*model.PacketRecord] {
+	return func(out chan<- *model.PacketRecord) {
+		for {
+			select {
+			case <-ctx.Done():
+				pblog.Debug("exiting trace loop due to context cancellation")
+				return
+			default:
+				if err := m.listenAndForwardPerf(out); err != nil {
+					if errors.Is(err, perf.ErrClosed) {
+						pblog.Debug("Received signal, exiting..")
+						return
+					}
+					pblog.WithError(err).Warn("ignoring packet event")
+					continue
+				}
+			}
+		}
+	}
+}
+
+func (m *PerfTracer) listenAndForwardPerf(forwardCh chan<-
*model.PacketRecord) error { + event, err := m.perfArray.ReadPerf() + if err != nil { + return fmt.Errorf("reading from perf event array: %w", err) + } + // Parses the perf event entry into an Event structure. + readFlow, err := model.ReadRawPacket(bytes.NewBuffer(event.RawSample)) + if err != nil { + return fmt.Errorf("parsing data received from the perf event array: %w", err) + } + forwardCh <- readFlow + return nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_ringbuf.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_ringbuf.go new file mode 100644 index 000000000..a92f7d38d --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/flow/tracer_ringbuf.go @@ -0,0 +1,128 @@ +package flow + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync/atomic" + "syscall" + "time" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + + "github.com/cilium/ebpf/ringbuf" + "github.com/netobserv/gopipes/pkg/node" + "github.com/sirupsen/logrus" +) + +var rtlog = logrus.WithField("component", "flow.RingBufTracer") + +// RingBufTracer receives single-packet flows via ringbuffer (usually, these that couldn't be +// added in the eBPF kernel space due to the map being full or busy) and submits them to the +// userspace Aggregator map +type RingBufTracer struct { + mapFlusher mapFlusher + ringBuffer ringBufReader + stats stats + metrics *metrics.Metrics +} + +type ringBufReader interface { + ReadRingBuf() (ringbuf.Record, error) +} + +// stats supports atomic logging of ringBuffer metrics +type stats struct { + loggingTimeout time.Duration + isForwarding int32 + forwardedFlows int32 + mapFullErrs int32 +} + +type mapFlusher interface { + Flush() +} + +func NewRingBufTracer(reader ringBufReader, flusher mapFlusher, logTimeout time.Duration, m *metrics.Metrics) *RingBufTracer { + return &RingBufTracer{ + mapFlusher: flusher, + ringBuffer: reader, + stats: stats{loggingTimeout: logTimeout}, + metrics: m, + } +} + +func (m *RingBufTracer) TraceLoop(ctx context.Context) node.StartFunc[*model.RawRecord] { + return func(out chan<- *model.RawRecord) { + debugging := logrus.IsLevelEnabled(logrus.DebugLevel) + for { + select { + case <-ctx.Done(): + rtlog.Debug("exiting trace loop due to context cancellation") + return + default: + if err := m.listenAndForwardRingBuffer(debugging, out); err != nil { + if errors.Is(err, ringbuf.ErrClosed) { + rtlog.Debug("Received signal, exiting..") + return + } + rtlog.WithError(err).Warn("ignoring flow event") + continue + } + } + } + } +} + +func (m *RingBufTracer) listenAndForwardRingBuffer(debugging bool, forwardCh chan<- *model.RawRecord) error { + event, err := m.ringBuffer.ReadRingBuf() + if err != nil { + m.metrics.Errors.WithErrorName("ringbuffer", "CannotReadRingbuffer").Inc() + return fmt.Errorf("reading from ring buffer: %w", err) + } + // Parses the ringbuf event entry into an Event structure. + readFlow, err := model.ReadFrom(bytes.NewBuffer(event.RawSample)) + if err != nil { + m.metrics.Errors.WithErrorName("ringbuffer", "CannotParseRingbuffer").Inc() + return fmt.Errorf("parsing data received from the ring buffer: %w", err) + } + mapFullError := readFlow.Metrics.Errno == uint8(syscall.E2BIG) + if debugging { + m.stats.logRingBufferFlows(mapFullError) + } + errno := syscall.Errno(readFlow.Metrics.Errno) + // In ringbuffer, a "flow" is a 1-packet flow, it hasn't gone through aggregation yet. So we use the packet counter metric. 
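(The counter update that follows this note tags every ringbuffer read with its kernel errno.) As a side note on the decoding itself: both tracers turn the raw sample bytes into a record with binary.Read over a fixed-layout struct. A self-contained sketch with an invented layout (rawFlow); real structs must match the kernel side byte for byte, padding included:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// rawFlow mimics a fixed-layout struct coming from the kernel, decoded the
// way the tracers above decode BpfFlowRecordT. The fields are invented.
type rawFlow struct {
	SrcPort uint16
	DstPort uint16
	Bytes   uint64
}

func main() {
	// Simulate the bytes produced by the kernel side (little endian).
	var buf bytes.Buffer
	_ = binary.Write(&buf, binary.LittleEndian, rawFlow{SrcPort: 443, DstPort: 51234, Bytes: 1500})

	var f rawFlow
	if err := binary.Read(&buf, binary.LittleEndian, &f); err != nil {
		fmt.Println("parsing record:", err)
		return
	}
	fmt.Printf("%d -> %d, %d bytes\n", f.SrcPort, f.DstPort, f.Bytes)
}
```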
+ m.metrics.EvictedPacketsCounter.WithSourceAndReason("ringbuffer", errno.Error()).Inc() + // Will need to send it to accounter anyway to account regardless of complete/ongoing flow + forwardCh <- readFlow + return nil +} + +// logRingBufferFlows avoids flooding logs on long series of evicted flows by grouping how +// many flows are forwarded +func (m *stats) logRingBufferFlows(mapFullErr bool) { + atomic.AddInt32(&m.forwardedFlows, 1) + if mapFullErr { + atomic.AddInt32(&m.mapFullErrs, 1) + } + if atomic.CompareAndSwapInt32(&m.isForwarding, 0, 1) { + go func() { + time.Sleep(m.loggingTimeout) + mfe := atomic.LoadInt32(&m.mapFullErrs) + l := rtlog.WithFields(logrus.Fields{ + "flows": atomic.LoadInt32(&m.forwardedFlows), + "mapFullErrs": mfe, + }) + if mfe == 0 { + l.Debug("received flows via ringbuffer") + } else { + l.Debug("received flows via ringbuffer. You might want to increase the CACHE_MAX_FLOWS value") + } + atomic.StoreInt32(&m.forwardedFlows, 0) + atomic.StoreInt32(&m.isForwarding, 0) + atomic.StoreInt32(&m.mapFullErrs, 0) + }() + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/client.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/client.go new file mode 100644 index 000000000..350bc619b --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/client.go @@ -0,0 +1,37 @@ +// Package flowgrpc provides the basic interfaces to build a gRPC+Protobuf flows client & server +package flowgrpc + +import ( + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow" + "github.com/netobserv/netobserv-ebpf-agent/pkg/utils" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// ClientConnection wraps a gRPC+protobuf connection +type ClientConnection struct { + client pbflow.CollectorClient + conn *grpc.ClientConn +} + +func ConnectClient(hostIP string, hostPort int) (*ClientConnection, error) { + // TODO: allow configuring some options (keepalive, backoff...) + socket := utils.GetSocket(hostIP, hostPort) + conn, err := grpc.NewClient(socket, + grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + return &ClientConnection{ + client: pbflow.NewCollectorClient(conn), + conn: conn, + }, nil +} + +func (cp *ClientConnection) Client() pbflow.CollectorClient { + return cp.client +} + +func (cp *ClientConnection) Close() error { + return cp.conn.Close() +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/server.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/server.go new file mode 100644 index 000000000..ca5ee8dae --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow/server.go @@ -0,0 +1,77 @@ +package flowgrpc + +import ( + "context" + "fmt" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow" +) + +// CollectorServer wraps a Flow Collector connection & session +type CollectorServer struct { + grpcServer *grpc.Server +} + +type collectorOptions struct { + grpcServerOptions []grpc.ServerOption +} + +// CollectorOption allows overriding the default configuration of the CollectorServer instance. +// Use them in the StartCollector function. 
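Putting this package's pieces together, a rough usage sketch: start a collector, connect a client, send one batch. It assumes the pbflow.Records message generated elsewhere in this PR; port choice and error handling are simplified for illustration:

```go
package main

import (
	"context"
	"log"

	flowgrpc "github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow"
	"github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow"
)

func main() {
	records := make(chan *pbflow.Records, 10)
	server, err := flowgrpc.StartCollector(9999, records)
	if err != nil {
		log.Fatal(err)
	}
	defer server.Close()

	conn, err := flowgrpc.ConnectClient("127.0.0.1", 9999)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Send an empty batch; the server forwards it on the records channel.
	if _, err := conn.Client().Send(context.Background(), &pbflow.Records{}); err != nil {
		log.Fatal(err)
	}
	log.Printf("collector received %d entries", len((<-records).GetEntries()))
}
```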
+type CollectorOption func(options *collectorOptions) + +func WithGRPCServerOptions(options ...grpc.ServerOption) CollectorOption { + return func(copt *collectorOptions) { + copt.grpcServerOptions = options + } +} + +// StartCollector listens in background for gRPC+Protobuf flows in the given port, and forwards each +// set of *pbflow.Records by the provided channel. +func StartCollector( + port int, recordForwarder chan<- *pbflow.Records, options ...CollectorOption, +) (*CollectorServer, error) { + copts := collectorOptions{} + for _, opt := range options { + opt(&copts) + } + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + return nil, err + } + grpcServer := grpc.NewServer(copts.grpcServerOptions...) + pbflow.RegisterCollectorServer(grpcServer, &collectorAPI{ + recordForwarder: recordForwarder, + }) + reflection.Register(grpcServer) + go func() { + if err := grpcServer.Serve(lis); err != nil { + panic("error connecting to server: " + err.Error()) + } + }() + return &CollectorServer{ + grpcServer: grpcServer, + }, nil +} + +func (c *CollectorServer) Close() error { + c.grpcServer.Stop() + return nil +} + +type collectorAPI struct { + pbflow.UnimplementedCollectorServer + recordForwarder chan<- *pbflow.Records +} + +var okReply = &pbflow.CollectorReply{} + +func (c *collectorAPI) Send(_ context.Context, records *pbflow.Records) (*pbflow.CollectorReply, error) { + c.recordForwarder <- records + return okReply, nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/client.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/client.go new file mode 100644 index 000000000..52d2fef0a --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/client.go @@ -0,0 +1,37 @@ +// Package pktgrpc provides the basic interfaces to build a gRPC+Protobuf packet client & server +package pktgrpc + +import ( + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket" + "github.com/netobserv/netobserv-ebpf-agent/pkg/utils" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// ClientConnection wraps a gRPC+protobuf connection +type ClientConnection struct { + client pbpacket.CollectorClient + conn *grpc.ClientConn +} + +func ConnectClient(hostIP string, hostPort int) (*ClientConnection, error) { + // TODO: allow configuring some options (keepalive, backoff...) 
+ socket := utils.GetSocket(hostIP, hostPort) + conn, err := grpc.NewClient(socket, + grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + return &ClientConnection{ + client: pbpacket.NewCollectorClient(conn), + conn: conn, + }, nil +} + +func (cp *ClientConnection) Client() pbpacket.CollectorClient { + return cp.client +} + +func (cp *ClientConnection) Close() error { + return cp.conn.Close() +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/server.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/server.go new file mode 100644 index 000000000..272814857 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet/server.go @@ -0,0 +1,77 @@ +package pktgrpc + +import ( + "context" + "fmt" + "net" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket" + + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +// CollectorServer wraps a Flow Collector connection & session +type CollectorServer struct { + grpcServer *grpc.Server +} + +type collectorOptions struct { + grpcServerOptions []grpc.ServerOption +} + +// CollectorOption allows overriding the default configuration of the CollectorServer instance. +// Use them in the StartCollector function. +type CollectorOption func(options *collectorOptions) + +func WithGRPCServerOptions(options ...grpc.ServerOption) CollectorOption { + return func(copt *collectorOptions) { + copt.grpcServerOptions = options + } +} + +// StartCollector listens in background for gRPC+Protobuf flows in the given port, and forwards each +// set of *pbpacket.Packet by the provided channel. +func StartCollector( + port int, pktForwarder chan<- *pbpacket.Packet, options ...CollectorOption, +) (*CollectorServer, error) { + copts := collectorOptions{} + for _, opt := range options { + opt(&copts) + } + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + return nil, err + } + grpcServer := grpc.NewServer(copts.grpcServerOptions...) 
+ pbpacket.RegisterCollectorServer(grpcServer, &collectorAPI{ + pktForwarder: pktForwarder, + }) + reflection.Register(grpcServer) + go func() { + if err := grpcServer.Serve(lis); err != nil { + panic("error connecting to server: " + err.Error()) + } + }() + return &CollectorServer{ + grpcServer: grpcServer, + }, nil +} + +func (c *CollectorServer) Close() error { + c.grpcServer.Stop() + return nil +} + +type collectorAPI struct { + pbpacket.UnimplementedCollectorServer + pktForwarder chan<- *pbpacket.Packet +} + +var okReply = &pbpacket.CollectorReply{} + +func (c *collectorAPI) Send(_ context.Context, pkts *pbpacket.Packet) (*pbpacket.CollectorReply, error) { + c.pktForwarder <- pkts + return okReply, nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go new file mode 100644 index 000000000..518010112 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/informer.go @@ -0,0 +1,69 @@ +package ifaces + +import ( + "context" + "fmt" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +// EventType for an interface: added, deleted +type EventType int + +const ( + EventAdded EventType = iota + EventDeleted +) + +func (e EventType) String() string { + switch e { + case EventAdded: + return "Added" + case EventDeleted: + return "Deleted" + default: + return fmt.Sprintf("Unknown (%d)", e) + } +} + +var ilog = logrus.WithField("component", "ifaces.Informer") + +// Event of a network interface, given the type (added, removed) and the interface name +type Event struct { + Type EventType + Interface Interface +} + +type Interface struct { + Name string + Index int + NetNS netns.NsHandle +} + +// Informer provides notifications about each network interface that is added or removed +// from the host. Production implementations: Poller and Watcher. +type Informer interface { + // Subscribe returns a channel that sends Event instances. + Subscribe(ctx context.Context) (<-chan Event, error) +} + +func netInterfaces(nsh netns.NsHandle) ([]Interface, error) { + handle, err := netlink.NewHandleAt(nsh) + if err != nil { + return nil, fmt.Errorf("failed to create handle for netns (%s): %w", nsh.String(), err) + } + defer handle.Close() + + // Get a list of interfaces in the namespace + links, err := handle.LinkList() + if err != nil { + return nil, fmt.Errorf("failed to list interfaces in netns (%s): %w", nsh.String(), err) + } + + names := make([]Interface, len(links)) + for i, link := range links { + names[i] = Interface{Name: link.Attrs().Name, Index: link.Attrs().Index, NetNS: nsh} + } + return names, nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/poller.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/poller.go new file mode 100644 index 000000000..e655b9e23 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/poller.go @@ -0,0 +1,105 @@ +package ifaces + +import ( + "context" + "time" + + "github.com/sirupsen/logrus" + "github.com/vishvananda/netns" +) + +// Poller periodically looks for the network interfaces in the system and forwards Event +// notifications when interfaces are added or deleted. 
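A minimal consumer of the Informer interface defined above, using the Poller implementation this comment introduces (period and buffer size are arbitrary):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	var informer ifaces.Informer = ifaces.NewPoller(time.Second, 10)
	events, err := informer.Subscribe(ctx)
	if err != nil {
		panic(err)
	}
	// On hosts without named network namespaces, the channel closes when
	// ctx expires, ending the loop.
	for ev := range events {
		fmt.Printf("%s: %s (index %d)\n", ev.Type, ev.Interface.Name, ev.Interface.Index)
	}
}
```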
+type Poller struct { + period time.Duration + current map[Interface]struct{} + interfaces func(handle netns.NsHandle) ([]Interface, error) + bufLen int +} + +func NewPoller(period time.Duration, bufLen int) *Poller { + return &Poller{ + period: period, + bufLen: bufLen, + interfaces: netInterfaces, + current: map[Interface]struct{}{}, + } +} + +func (np *Poller) Subscribe(ctx context.Context) (<-chan Event, error) { + + out := make(chan Event, np.bufLen) + netns, err := getNetNS() + if err != nil { + go np.pollForEvents(ctx, "", out) + } else { + for _, n := range netns { + go np.pollForEvents(ctx, n, out) + } + } + return out, nil +} + +func (np *Poller) pollForEvents(ctx context.Context, ns string, out chan Event) { + log := logrus.WithField("component", "ifaces.Poller") + log.WithField("period", np.period).Debug("subscribing to Interface events") + ticker := time.NewTicker(np.period) + var netnsHandle netns.NsHandle + var err error + + if ns == "" { + netnsHandle = netns.None() + } else { + netnsHandle, err = netns.GetFromName(ns) + if err != nil { + return + } + } + + defer ticker.Stop() + for { + if ifaces, err := np.interfaces(netnsHandle); err != nil { + log.WithError(err).Warn("fetching interface names") + } else { + log.WithField("names", ifaces).Debug("fetched interface names") + np.diffNames(out, ifaces) + } + select { + case <-ctx.Done(): + log.Debug("stopped") + close(out) + return + case <-ticker.C: + // continue after a period + } + } +} + +// diffNames compares and updates the internal account of interfaces with the latest list of +// polled interfaces. It forwards Events for any detected addition or removal of interfaces. +func (np *Poller) diffNames(events chan Event, ifaces []Interface) { + // Check for new interfaces + acquired := map[Interface]struct{}{} + for _, iface := range ifaces { + acquired[iface] = struct{}{} + if _, ok := np.current[iface]; !ok { + ilog.WithField("interface", iface).Debug("added network interface") + np.current[iface] = struct{}{} + events <- Event{ + Type: EventAdded, + Interface: iface, + } + } + } + // Check for deleted interfaces + for iface := range np.current { + if _, ok := acquired[iface]; !ok { + delete(np.current, iface) + ilog.WithField("interface", iface).Debug("deleted network interface") + events <- Event{ + Type: EventDeleted, + Interface: iface, + } + } + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/registerer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/registerer.go new file mode 100644 index 000000000..e006c6c4b --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/registerer.go @@ -0,0 +1,73 @@ +package ifaces + +import ( + "context" + "net" + "sync" +) + +// Registerer is an informer that wraps another informer implementation, and keeps track of +// the currently existing interfaces in the system, accessible through the IfaceNameForIndex method. 
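Before the Registerer itself: the add/remove bookkeeping of diffNames above reduces to a set comparison between the known interfaces and the freshly polled ones. A self-contained sketch of that logic, with string keys standing in for the Interface struct:

```go
package main

import "fmt"

// diff reproduces diffNames' bookkeeping: compare the freshly polled set
// against the currently known one and report additions and removals,
// updating the known set in place.
func diff(current map[string]struct{}, polled []string) (added, removed []string) {
	seen := make(map[string]struct{}, len(polled))
	for _, name := range polled {
		seen[name] = struct{}{}
		if _, ok := current[name]; !ok {
			current[name] = struct{}{}
			added = append(added, name)
		}
	}
	for name := range current {
		if _, ok := seen[name]; !ok {
			delete(current, name) // deleting during range is safe in Go
			removed = append(removed, name)
		}
	}
	return added, removed
}

func main() {
	current := map[string]struct{}{}
	fmt.Println(diff(current, []string{"eth0", "lo"})) // [eth0 lo] []
	fmt.Println(diff(current, []string{"eth0"}))       // [] [lo]
}
```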
+type Registerer struct { + m sync.RWMutex + inner Informer + ifaces map[int]string + bufLen int +} + +func NewRegisterer(inner Informer, bufLen int) *Registerer { + return &Registerer{ + inner: inner, + bufLen: bufLen, + ifaces: map[int]string{}, + } +} + +func (r *Registerer) Subscribe(ctx context.Context) (<-chan Event, error) { + innerCh, err := r.inner.Subscribe(ctx) + if err != nil { + return nil, err + } + out := make(chan Event, r.bufLen) + go func() { + for ev := range innerCh { + switch ev.Type { + case EventAdded: + r.m.Lock() + r.ifaces[ev.Interface.Index] = ev.Interface.Name + r.m.Unlock() + case EventDeleted: + r.m.Lock() + name, ok := r.ifaces[ev.Interface.Index] + // prevent removing an interface with the same index but different name + // e.g. due to an out-of-order add/delete signaling + if ok && name == ev.Interface.Name { + delete(r.ifaces, ev.Interface.Index) + } + r.m.Unlock() + } + out <- ev + } + }() + return out, nil +} + +// IfaceNameForIndex gets the interface name given an index as recorded by the underlying +// interfaces' informer. It backs up into the net.InterfaceByIndex function if the interface +// has not been previously registered +func (r *Registerer) IfaceNameForIndex(idx int) (string, bool) { + r.m.RLock() + name, ok := r.ifaces[idx] + r.m.RUnlock() + if !ok { + iface, err := net.InterfaceByIndex(idx) + if err != nil { + return "", false + } + name = iface.Name + r.m.Lock() + r.ifaces[idx] = name + r.m.Unlock() + } + return name, ok +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go new file mode 100644 index 000000000..e35bb9c90 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces/watcher.go @@ -0,0 +1,232 @@ +package ifaces + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + netnsVolume = "/var/run/netns" +) + +// Watcher uses system's netlink to get real-time information events about network interfaces' +// addition or removal. 
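The Registerer above resolves names with a read-mostly, double-checked pattern: RLock on the fast path, a full Lock only when falling back to the slower lookup. A sketch of that pattern with a stubbed lookup in place of net.InterfaceByIndex (nameCache and its fields are invented):

```go
package main

import (
	"fmt"
	"sync"
)

type nameCache struct {
	m      sync.RWMutex
	byIdx  map[int]string
	lookup func(int) (string, bool) // slow path, e.g. a syscall
}

func (c *nameCache) name(idx int) (string, bool) {
	c.m.RLock()
	name, ok := c.byIdx[idx]
	c.m.RUnlock()
	if ok {
		return name, true
	}
	name, ok = c.lookup(idx)
	if !ok {
		return "", false
	}
	c.m.Lock()
	c.byIdx[idx] = name
	c.m.Unlock()
	return name, true
}

func main() {
	c := &nameCache{
		byIdx:  map[int]string{1: "lo"},
		lookup: func(idx int) (string, bool) { return fmt.Sprintf("eth%d", idx), true },
	}
	fmt.Println(c.name(1)) // cached: lo true
	fmt.Println(c.name(2)) // fallback, then cached: eth2 true
}
```

One difference worth noting: the sketch returns true after a successful fallback, while IfaceNameForIndex above returns the original ok, which is false in that case; callers that only use the returned name are unaffected.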
+type Watcher struct { + bufLen int + current map[Interface]struct{} + interfaces func(handle netns.NsHandle) ([]Interface, error) + // linkSubscriber abstracts netlink.LinkSubscribe implementation, allowing the injection of + // mocks for unit testing + linkSubscriberAt func(ns netns.NsHandle, ch chan<- netlink.LinkUpdate, done <-chan struct{}) error + mutex *sync.Mutex + netnsWatcher *fsnotify.Watcher + nsDone map[string]chan struct{} +} + +func NewWatcher(bufLen int) *Watcher { + return &Watcher{ + bufLen: bufLen, + current: map[Interface]struct{}{}, + interfaces: netInterfaces, + linkSubscriberAt: netlink.LinkSubscribeAt, + mutex: &sync.Mutex{}, + netnsWatcher: &fsnotify.Watcher{}, + nsDone: make(map[string]chan struct{}), + } +} + +func (w *Watcher) Subscribe(ctx context.Context) (<-chan Event, error) { + out := make(chan Event, w.bufLen) + netns, err := getNetNS() + if err != nil { + w.nsDone[""] = make(chan struct{}) + go w.sendUpdates(ctx, "", out) + } else { + for _, n := range netns { + w.nsDone[n] = make(chan struct{}) + go w.sendUpdates(ctx, n, out) + } + } + // register to get notification when netns is created or deleted and register for link update for new netns + w.netnsNotify(ctx, out) + return out, nil +} + +func (w *Watcher) sendUpdates(ctx context.Context, ns string, out chan Event) { + var netnsHandle netns.NsHandle + var err error + log := logrus.WithField("component", "ifaces.Watcher") + doneChan := w.nsDone[ns] + defer func() { + close(doneChan) + delete(w.nsDone, ns) + }() + // subscribe for interface events + links := make(chan netlink.LinkUpdate) + if err = wait.PollUntilContextTimeout(ctx, 50*time.Microsecond, time.Second, true, func(_ context.Context) (done bool, err error) { + if ns == "" { + netnsHandle = netns.None() + } else { + if netnsHandle, err = netns.GetFromName(ns); err != nil { + return false, nil + } + } + + if err = w.linkSubscriberAt(netnsHandle, links, doneChan); err != nil { + log.WithFields(logrus.Fields{ + "netns": ns, + "netnsHandle": netnsHandle.String(), + "error": err, + }).Debug("linkSubscribe failed retry") + if err := netnsHandle.Close(); err != nil { + log.WithError(err).Warn("netnsHandle close failed") + } + return false, nil + } + + log.WithFields(logrus.Fields{ + "netns": ns, + "netnsHandle": netnsHandle.String(), + }).Debug("linkSubscribe to receive links update") + return true, nil + }); err != nil { + log.WithError(err).Errorf("can't subscribe to links netns %s netnsHandle %s", ns, netnsHandle.String()) + return + } + + // before sending netlink updates, send all the existing interfaces at the moment of starting + // the Watcher + if netnsHandle.IsOpen() || netnsHandle.Equal(netns.None()) { + if names, err := w.interfaces(netnsHandle); err != nil { + log.WithError(err).Error("can't fetch network interfaces. You might be missing flows") + } else { + for _, name := range names { + iface := Interface{Name: name.Name, Index: name.Index, NetNS: netnsHandle} + w.mutex.Lock() + w.current[iface] = struct{}{} + w.mutex.Unlock() + out <- Event{Type: EventAdded, Interface: iface} + } + } + } + + for link := range links { + attrs := link.Attrs() + if attrs == nil { + log.WithField("link", link).Debug("received link update without attributes. 
Ignoring") + continue + } + iface := Interface{Name: attrs.Name, Index: attrs.Index, NetNS: netnsHandle} + w.mutex.Lock() + if link.Flags&(syscall.IFF_UP|syscall.IFF_RUNNING) != 0 && attrs.OperState == netlink.OperUp { + log.WithFields(logrus.Fields{ + "operstate": attrs.OperState, + "flags": attrs.Flags, + "name": attrs.Name, + "netns": netnsHandle.String(), + }).Debug("Interface up and running") + if _, ok := w.current[iface]; !ok { + w.current[iface] = struct{}{} + out <- Event{Type: EventAdded, Interface: iface} + } + } else { + log.WithFields(logrus.Fields{ + "operstate": attrs.OperState, + "flags": attrs.Flags, + "name": attrs.Name, + "netns": netnsHandle.String(), + }).Debug("Interface down or not running") + if _, ok := w.current[iface]; ok { + delete(w.current, iface) + out <- Event{Type: EventDeleted, Interface: iface} + } + } + w.mutex.Unlock() + } +} + +func getNetNS() ([]string, error) { + log := logrus.WithField("component", "ifaces.Watcher") + files, err := os.ReadDir(netnsVolume) + if err != nil { + log.Warningf("can't detect any network-namespaces err: %v [Ignore if the agent privileged flag is not set]", err) + return nil, fmt.Errorf("failed to list network-namespaces: %w", err) + } + + netns := []string{""} + if len(files) == 0 { + log.WithField("netns", files).Debug("empty network-namespaces list") + return netns, nil + } + for _, f := range files { + ns := f.Name() + netns = append(netns, ns) + log.WithFields(logrus.Fields{ + "netns": ns, + }).Debug("Detected network-namespace") + } + + return netns, nil +} + +func (w *Watcher) netnsNotify(ctx context.Context, out chan Event) { + var err error + log := logrus.WithField("component", "ifaces.Watcher") + + w.netnsWatcher, err = fsnotify.NewWatcher() + if err != nil { + log.WithError(err).Error("can't subscribe fsnotify") + return + } + // Start a goroutine to handle netns events + go func() { + for { + select { + case event, ok := <-w.netnsWatcher.Events: + if !ok { + return + } + if event.Op&fsnotify.Create == fsnotify.Create { + ns := filepath.Base(event.Name) + log.WithField("netns", ns).Debug("netns create notification") + if _, ok := w.nsDone[ns]; ok { + log.WithField("netns", ns).Debug("netns channel already exists, delete it") + delete(w.nsDone, ns) + } + w.nsDone[ns] = make(chan struct{}) + go w.sendUpdates(ctx, ns, out) + } + if event.Op&fsnotify.Remove == fsnotify.Remove { + ns := filepath.Base(event.Name) + log.WithField("netns", ns).Debug("netns delete notification") + if _, ok := w.nsDone[ns]; ok { + w.nsDone[ns] <- struct{}{} + } else { + log.WithField("netns", ns).Debug("netns delete but there is no channel to send events to") + } + } + case err, ok := <-w.netnsWatcher.Errors: + if !ok { + return + } + log.WithError(err).Error("netns watcher detected an error") + } + } + }() + + err = w.netnsWatcher.Add(netnsVolume) + if err != nil { + log.Warningf("failed to add watcher to netns directory err: %v [Ignore if the agent privileged flag is not set]", err) + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/kernel/kernel_utils.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/kernel/kernel_utils.go new file mode 100644 index 000000000..4f81c7fa4 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/kernel/kernel_utils.go @@ -0,0 +1,122 @@ +package kernel + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/sirupsen/logrus" +) + +var ( + versionRegex = regexp.MustCompile(`^(\d+)\.(\d+)\.(\d+)(?:-(\d+))?`) + rtRegex = 
regexp.MustCompile(`[.-]rt`) + kernelVersion uint32 + log = logrus.WithField("component", "kernel") +) + +func init() { + var err error + kernelVersion, err = currentKernelVersion() + if err != nil { + log.Errorf("failed to get current kernel version: %v", err) + } +} + +func IsKernelOlderThan(version string) bool { + refVersion, err := kernelVersionFromReleaseString(version) + if err != nil { + log.Warnf("failed to get kernel version from release string: %v", err) + return false + } + return kernelVersion != 0 && kernelVersion < refVersion +} + +// kernelVersionFromReleaseString converts a release string with format +// 4.4.2[-1] to a kernel version number in LINUX_VERSION_CODE format. +// That is, for kernel "a.b.c-d", the version number will be (a<<24 + b<<16 + c<<8 + d) +func kernelVersionFromReleaseString(releaseString string) (uint32, error) { + versionParts := versionRegex.FindStringSubmatch(releaseString) + if len(versionParts) == 0 { + return 0, fmt.Errorf("got invalid release version %q (expected format '4.3.2-1')", releaseString) + } + major, err := strconv.Atoi(versionParts[1]) + if err != nil { + return 0, err + } + + minor, err := strconv.Atoi(versionParts[2]) + if err != nil { + return 0, err + } + + patch, err := strconv.Atoi(versionParts[3]) + if err != nil { + return 0, err + } + extraNumeric := 0 + if versionParts[4] != "" { + extraNumeric, err = strconv.Atoi(versionParts[4]) + if err != nil { + return 0, err + } + } + out := major*256*256*256 + minor*256*256 + patch*256 + (extraNumeric & 0xFF) + return uint32(out), nil +} + +func currentKernelVersion() (uint32, error) { + var buf syscall.Utsname + if err := syscall.Uname(&buf); err != nil { + return 0, err + } + releaseString := strings.Trim(utsnameStr(buf.Release[:]), "\x00") + return kernelVersionFromReleaseString(releaseString) +} + +func utsnameStr[T int8 | uint8](in []T) string { + out := make([]byte, len(in)) + for i := 0; i < len(in); i++ { + if in[i] == 0 { + break + } + out = append(out, byte(in[i])) + } + return string(out) +} + +func getKernelVersion() (string, error) { + output, err := exec.Command("uname", "-r").Output() + if err != nil { + return "", err + } + if len(output) == 0 { + return "", fmt.Errorf("kernel version not found") + } + return strings.TrimSpace(string(output)), nil +} + +func isRealTimeKernel(kernelVersion string) bool { + rt := rtRegex.FindStringSubmatch(kernelVersion) + return len(rt) != 0 +} + +func IsRealTimeKernel() bool { + version, err := getKernelVersion() + if err != nil { + log.Errorf("failed to get kernel version: %v", err) + return false + } + + if len(version) == 0 { + return false + } + if isRealTimeKernel(version) { + log.Debugf("kernel version: %v is realtime", version) + return true + } + return false +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go new file mode 100644 index 000000000..77e3ac28d --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/metrics/metrics.go @@ -0,0 +1,285 @@ +package metrics + +import ( + "errors" + + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" +) + +type MetricDefinition struct { + Name string + Help string + Type metricType + Labels []string +} + +type PromTLS struct { + CertPath string + KeyPath string +} + +type PromConnectionInfo struct { + Address string + Port int + TLS *PromTLS +} + +type Settings struct { + PromConnectionInfo + Prefix string +} + +type metricType string 
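Returning briefly to the kernel helpers above: kernelVersionFromReleaseString packs a release "a.b.c-d" into the LINUX_VERSION_CODE shape, so version comparisons become plain integer comparisons. A tiny sketch of the arithmetic; note that, as in the original, the extra part is masked to one byte, so a build number like 305 wraps to 49:

```go
package main

import "fmt"

// versionCode mirrors the LINUX_VERSION_CODE arithmetic:
// a<<24 + b<<16 + c<<8 + (extra & 0xFF).
func versionCode(major, minor, patch, extra int) uint32 {
	return uint32(major<<24 + minor<<16 + patch<<8 + (extra & 0xFF))
}

func main() {
	v514 := versionCode(5, 14, 0, 0)
	v418 := versionCode(4, 18, 0, 305)
	// Codes compare like versions do, which is the whole point:
	fmt.Println(v418 < v514)    // true
	fmt.Printf("0x%08x\n", v514) // 0x050e0000
}
```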
+ +const ( + TypeCounter metricType = "counter" + TypeGauge metricType = "gauge" + TypeHistogram metricType = "histogram" +) + +var allMetrics = []MetricDefinition{} + +func defineMetric(name, help string, t metricType, labels ...string) MetricDefinition { + def := MetricDefinition{ + Name: name, + Help: help, + Type: t, + Labels: labels, + } + allMetrics = append(allMetrics, def) + return def +} + +var ( + evictionsTotal = defineMetric( + "evictions_total", + "Number of eviction events", + TypeCounter, + "source", + "reason", + ) + evictedFlowsTotal = defineMetric( + "evicted_flows_total", + "Number of evicted flows", + TypeCounter, + "source", + "reason", + ) + evictedPktTotal = defineMetric( + "evicted_packets_total", + "Number of evicted packets", + TypeCounter, + "source", + "reason", + ) + lookupAndDeleteMapDurationSeconds = defineMetric( + "lookup_and_delete_map_duration_seconds", + "Lookup and delete map duration in seconds", + TypeHistogram, + ) + droppedFlows = defineMetric( + "dropped_flows_total", + "Number of dropped flows", + TypeCounter, + "source", + "reason", + ) + filterFlows = defineMetric( + "filtered_flows_total", + "Number of filtered flows", + TypeCounter, + "source", + "reason", + ) + networkEvents = defineMetric( + "network_events_total", + "Number of Network Events flows", + TypeCounter, + "source", + "reason", + ) + bufferSize = defineMetric( + "buffer_size", + "Buffer size", + TypeGauge, + "name", + ) + exportedBatchCounterTotal = defineMetric( + "exported_batch_total", + "Exported batches", + TypeCounter, + "exporter", + ) + samplingRate = defineMetric( + "sampling_rate", + "Sampling rate", + TypeGauge, + ) + errorsCounter = defineMetric( + "errors_total", + "errors counter", + TypeCounter, + "component", + "error", + ) +) + +func (def *MetricDefinition) mapLabels(labels []string) prometheus.Labels { + if len(labels) != len(def.Labels) { + logrus.Errorf("Could not map labels, length differ in def %s [%v / %v]", def.Name, def.Labels, labels) + } + labelsMap := prometheus.Labels{} + for i, label := range labels { + labelsMap[def.Labels[i]] = label + } + return labelsMap +} + +func verifyMetricType(def *MetricDefinition, t metricType) { + if def.Type != t { + logrus.Panicf("operational metric %q is of type %q but is being registered as %q", def.Name, def.Type, t) + } +} + +type Metrics struct { + Settings *Settings + + // Shared metrics: + EvictionCounter *EvictionCounter + EvictedFlowsCounter *EvictionCounter + EvictedPacketsCounter *EvictionCounter + DroppedFlowsCounter *EvictionCounter + FilteredFlowsCounter *EvictionCounter + NetworkEventsCounter *EvictionCounter + BufferSizeGauge *BufferSizeGauge + Errors *ErrorCounter +} + +func NewMetrics(settings *Settings) *Metrics { + m := &Metrics{ + Settings: settings, + } + m.EvictionCounter = &EvictionCounter{vec: m.NewCounterVec(&evictionsTotal)} + m.EvictedFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&evictedFlowsTotal)} + m.EvictedPacketsCounter = &EvictionCounter{vec: m.NewCounterVec(&evictedPktTotal)} + m.DroppedFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&droppedFlows)} + m.FilteredFlowsCounter = &EvictionCounter{vec: m.NewCounterVec(&filterFlows)} + m.NetworkEventsCounter = &EvictionCounter{vec: m.NewCounterVec(&networkEvents)} + m.BufferSizeGauge = &BufferSizeGauge{vec: m.NewGaugeVec(&bufferSize)} + m.Errors = &ErrorCounter{vec: m.NewCounterVec(&errorsCounter)} + return m +} + +// register will register against the default registry. 
+// May panic or not, depending on settings.
+func (m *Metrics) register(c prometheus.Collector, name string) {
+	err := prometheus.DefaultRegisterer.Register(c)
+	if err != nil {
+		if errors.As(err, &prometheus.AlreadyRegisteredError{}) {
+			logrus.Warningf("metrics registration error [%s]: %v", name, err)
+		} else {
+			logrus.Panicf("metrics registration error [%s]: %v", name, err)
+		}
+	}
+}
+
+func (m *Metrics) NewCounter(def *MetricDefinition, labels ...string) prometheus.Counter {
+	verifyMetricType(def, TypeCounter)
+	fullName := m.Settings.Prefix + def.Name
+	c := prometheus.NewCounter(prometheus.CounterOpts{
+		Name:        fullName,
+		Help:        def.Help,
+		ConstLabels: def.mapLabels(labels),
+	})
+	m.register(c, fullName)
+	return c
+}
+
+func (m *Metrics) NewCounterVec(def *MetricDefinition) *prometheus.CounterVec {
+	verifyMetricType(def, TypeCounter)
+	fullName := m.Settings.Prefix + def.Name
+	c := prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name: fullName,
+		Help: def.Help,
+	}, def.Labels)
+	m.register(c, fullName)
+	return c
+}
+
+func (m *Metrics) NewGauge(def *MetricDefinition, labels ...string) prometheus.Gauge {
+	verifyMetricType(def, TypeGauge)
+	fullName := m.Settings.Prefix + def.Name
+	c := prometheus.NewGauge(prometheus.GaugeOpts{
+		Name:        fullName,
+		Help:        def.Help,
+		ConstLabels: def.mapLabels(labels),
+	})
+	m.register(c, fullName)
+	return c
+}
+
+func (m *Metrics) NewGaugeVec(def *MetricDefinition) *prometheus.GaugeVec {
+	verifyMetricType(def, TypeGauge)
+	fullName := m.Settings.Prefix + def.Name
+	g := prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Name: fullName,
+		Help: def.Help,
+	}, def.Labels)
+	m.register(g, fullName)
+	return g
+}
+
+func (m *Metrics) NewHistogram(def *MetricDefinition, buckets []float64, labels ...string) prometheus.Histogram {
+	verifyMetricType(def, TypeHistogram)
+	fullName := m.Settings.Prefix + def.Name
+	c := prometheus.NewHistogram(prometheus.HistogramOpts{
+		Name:        fullName,
+		Help:        def.Help,
+		Buckets:     buckets,
+		ConstLabels: def.mapLabels(labels),
+	})
+	m.register(c, fullName)
+	return c
+}
+
+// EvictionCounter provides syntactic sugar hiding prom's counter for eviction purposes
+type EvictionCounter struct {
+	vec *prometheus.CounterVec
+}
+
+func (c *EvictionCounter) WithSourceAndReason(source, reason string) prometheus.Counter {
+	return c.vec.WithLabelValues(source, reason)
+}
+
+func (c *EvictionCounter) WithSource(source string) prometheus.Counter {
+	return c.vec.WithLabelValues(source, "")
+}
+
+func (m *Metrics) CreateTimeSpendInLookupAndDelete() prometheus.Histogram {
+	return m.NewHistogram(&lookupAndDeleteMapDurationSeconds, []float64{.001, .01, .1, 1, 10, 100, 1000, 10000})
+}
+
+// BufferSizeGauge provides syntactic sugar hiding prom's gauge tailored for buffer size
+type BufferSizeGauge struct {
+	vec *prometheus.GaugeVec
+}
+
+func (g *BufferSizeGauge) WithBufferName(bufferName string) prometheus.Gauge {
+	return g.vec.WithLabelValues(bufferName)
+}
+
+func (m *Metrics) CreateBatchCounter(exporter string) prometheus.Counter {
+	return m.NewCounter(&exportedBatchCounterTotal, exporter)
+}
+
+func (m *Metrics) CreateSamplingRate() prometheus.Gauge {
+	return m.NewGauge(&samplingRate)
+}
+
+type ErrorCounter struct {
+	vec *prometheus.CounterVec
+}
+
+func (c *ErrorCounter) WithErrorName(component, errName string) prometheus.Counter {
+	return c.vec.WithLabelValues(component, errName)
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/flow_content.go
b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/flow_content.go new file mode 100644 index 000000000..111303565 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/flow_content.go @@ -0,0 +1,110 @@ +package model + +import "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + +type BpfFlowContent struct { + *ebpf.BpfFlowMetrics + AdditionalMetrics *ebpf.BpfAdditionalMetrics +} + +type BpfFlowContents []BpfFlowContent + +func (a *BpfFlowContents) Accumulate() BpfFlowContent { + res := BpfFlowContent{} + for _, p := range *a { + res.AccumulateBase(p.BpfFlowMetrics) + res.AccumulateAdditional(p.AdditionalMetrics) + } + return res +} + +func (p *BpfFlowContent) AccumulateBase(other *ebpf.BpfFlowMetrics) { + p.BpfFlowMetrics = AccumulateBase(p.BpfFlowMetrics, other) +} + +func AccumulateBase(p *ebpf.BpfFlowMetrics, other *ebpf.BpfFlowMetrics) *ebpf.BpfFlowMetrics { + if other == nil { + return p + } + if p == nil { + return other + } + // time == 0 if the value has not been yet set + if p.StartMonoTimeTs == 0 || (p.StartMonoTimeTs > other.StartMonoTimeTs && other.StartMonoTimeTs != 0) { + p.StartMonoTimeTs = other.StartMonoTimeTs + } + if p.EndMonoTimeTs == 0 || p.EndMonoTimeTs < other.EndMonoTimeTs { + p.EndMonoTimeTs = other.EndMonoTimeTs + } + p.Bytes += other.Bytes + p.Packets += other.Packets + p.Flags |= other.Flags + if other.EthProtocol != 0 { + p.EthProtocol = other.EthProtocol + } + if allZerosMac(p.SrcMac) { + p.SrcMac = other.SrcMac + } + if allZerosMac(p.DstMac) { + p.DstMac = other.DstMac + } + if other.Dscp != 0 { + p.Dscp = other.Dscp + } + if other.Sampling != 0 { + p.Sampling = other.Sampling + } + return p +} + +func (p *BpfFlowContent) AccumulateAdditional(other *ebpf.BpfAdditionalMetrics) { + if other == nil { + return + } + if p.AdditionalMetrics == nil { + p.AdditionalMetrics = other + return + } + // DNS + p.AdditionalMetrics.DnsRecord.Flags |= other.DnsRecord.Flags + if other.DnsRecord.Id != 0 { + p.AdditionalMetrics.DnsRecord.Id = other.DnsRecord.Id + } + if p.AdditionalMetrics.DnsRecord.Errno != other.DnsRecord.Errno { + p.AdditionalMetrics.DnsRecord.Errno = other.DnsRecord.Errno + } + if p.AdditionalMetrics.DnsRecord.Latency < other.DnsRecord.Latency { + p.AdditionalMetrics.DnsRecord.Latency = other.DnsRecord.Latency + } + // Drop statistics + p.AdditionalMetrics.PktDrops.Bytes += other.PktDrops.Bytes + p.AdditionalMetrics.PktDrops.Packets += other.PktDrops.Packets + p.AdditionalMetrics.PktDrops.LatestFlags |= other.PktDrops.LatestFlags + if other.PktDrops.LatestDropCause != 0 { + p.AdditionalMetrics.PktDrops.LatestDropCause = other.PktDrops.LatestDropCause + } + // RTT + if p.AdditionalMetrics.FlowRtt < other.FlowRtt { + p.AdditionalMetrics.FlowRtt = other.FlowRtt + } + // Network events + for _, md := range other.NetworkEvents { + if !AllZerosMetaData(md) && !networkEventsMDExist(p.AdditionalMetrics.NetworkEvents, md) { + copy(p.AdditionalMetrics.NetworkEvents[p.AdditionalMetrics.NetworkEventsIdx][:], md[:]) + p.AdditionalMetrics.NetworkEventsIdx = (p.AdditionalMetrics.NetworkEventsIdx + 1) % MaxNetworkEvents + } + } + // Packet Translations + if !AllZeroIP(IP(other.TranslatedFlow.Saddr)) && !AllZeroIP(IP(other.TranslatedFlow.Daddr)) { + p.AdditionalMetrics.TranslatedFlow = other.TranslatedFlow + } +} + +func allZerosMac(s [6]uint8) bool { + for _, v := range s { + if v != 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/packet_record.go 
b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/packet_record.go new file mode 100644 index 000000000..85af86136 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/packet_record.go @@ -0,0 +1,52 @@ +package model + +import ( + "encoding/binary" + "io" + "time" + + "github.com/gavv/monotime" +) + +type RawByte byte + +type PacketRecord struct { + Stream []byte + Time time.Time +} + +// NewPacketRecord contains packet bytes +func NewPacketRecord( + stream []byte, + len uint32, + ts time.Time, +) *PacketRecord { + pr := PacketRecord{} + pr.Time = ts + pr.Stream = make([]byte, len) + pr.Stream = stream + return &pr +} + +// ReadRawPacket reads a PacketRecord from a binary source, in LittleEndian order +func ReadRawPacket(reader io.Reader) (*PacketRecord, error) { + var pr PacketRecord + currentTime := time.Now() + monotonicTimeNow := monotime.Now() + getLen := make([]byte, 4) + packetTimestamp := make([]byte, 8) + // Read IfIndex and discard it: To be used in other usecases + _ = binary.Read(reader, binary.LittleEndian, make([]byte, 4)) + // Read Length of packet + _ = binary.Read(reader, binary.LittleEndian, getLen) + pr.Stream = make([]byte, binary.LittleEndian.Uint32(getLen)) + // Read TimeStamp of packet + _ = binary.Read(reader, binary.LittleEndian, packetTimestamp) + // The assumption is monotonic time should be as close to time recorded by ebpf. + // The difference is considered the delta time from current time. + tsDelta := time.Duration(uint64(monotonicTimeNow) - binary.LittleEndian.Uint64(packetTimestamp)) + pr.Time = currentTime.Add(-tsDelta) + + err := binary.Read(reader, binary.LittleEndian, &pr.Stream) + return &pr, err +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/record.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/record.go new file mode 100644 index 000000000..8f6b83230 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/model/record.go @@ -0,0 +1,157 @@ +package model + +import ( + "encoding/binary" + "fmt" + "io" + "net" + "reflect" + "time" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" +) + +// Values according to field 61 in https://www.iana.org/assignments/ipfix/ipfix.xhtml +const ( + DirectionIngress = uint8(0) + DirectionEgress = uint8(1) +) +const MacLen = 6 + +// IPv4Type / IPv6Type value as defined in IEEE 802: https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml +const ( + IPv6Type = 0x86DD + NetworkEventsMaxEventsMD = 8 + MaxNetworkEvents = 4 +) + +type HumanBytes uint64 +type MacAddr [MacLen]uint8 +type Direction uint8 + +// IPAddr encodes v4 and v6 IPs with a fixed length. +// IPv4 addresses are encoded as IPv6 addresses with prefix ::ffff/96 +// as described in https://datatracker.ietf.org/doc/html/rfc4038#section-4.2 +// (same behavior as Go's net.IP type) +type IPAddr [net.IPv6len]uint8 + +// record structure as parsed from eBPF +type RawRecord ebpf.BpfFlowRecordT + +// Record contains accumulated metrics from a flow +type Record struct { + ID ebpf.BpfFlowId + Metrics BpfFlowContent + + // TODO: redundant field from RecordMetrics. Reorganize structs + TimeFlowStart time.Time + TimeFlowEnd time.Time + DNSLatency time.Duration + Interface string + // Duplicate tells whether this flow has another duplicate so it has to be excluded from + // any metrics' aggregation (e.g. bytes/second rates between two pods). 
+	// The reason for this field is that the same flow can be observed from multiple interfaces,
+	// so the agent needs to keep only one view of the flow and mark the others as
+	// "excluded from aggregation". Otherwise, rates, sums and other aggregated values would be
+	// multiplied by the number of interfaces this flow is observed from.
+	Duplicate bool
+
+	// AgentIP provides information about the source of the flow (the Agent that traced it)
+	AgentIP net.IP
+	// Calculated RTT, which is set when the record is created by calling NewRecord
+	TimeFlowRtt            time.Duration
+	DupList                []map[string]uint8
+	NetworkMonitorEventsMD []config.GenericMap
+}
+
+func NewRecord(
+	key ebpf.BpfFlowId,
+	metrics *BpfFlowContent,
+	currentTime time.Time,
+	monotonicCurrentTime uint64,
+) *Record {
+	startDelta := time.Duration(monotonicCurrentTime - metrics.StartMonoTimeTs)
+	endDelta := time.Duration(monotonicCurrentTime - metrics.EndMonoTimeTs)
+
+	var record = Record{
+		ID:            key,
+		Metrics:       *metrics,
+		TimeFlowStart: currentTime.Add(-startDelta),
+		TimeFlowEnd:   currentTime.Add(-endDelta),
+	}
+	if metrics.AdditionalMetrics != nil {
+		if metrics.AdditionalMetrics.FlowRtt != 0 {
+			record.TimeFlowRtt = time.Duration(metrics.AdditionalMetrics.FlowRtt)
+		}
+		if metrics.AdditionalMetrics.DnsRecord.Latency != 0 {
+			record.DNSLatency = time.Duration(metrics.AdditionalMetrics.DnsRecord.Latency)
+		}
+	}
+	record.DupList = make([]map[string]uint8, 0)
+	record.NetworkMonitorEventsMD = make([]config.GenericMap, 0)
+	return &record
+}
+
+func networkEventsMDExist(events [MaxNetworkEvents][NetworkEventsMaxEventsMD]uint8, md [NetworkEventsMaxEventsMD]uint8) bool {
+	for _, e := range events {
+		if reflect.DeepEqual(e, md) {
+			return true
+		}
+	}
+	return false
+}
+
+// IP returns the net.IP equivalent object
+func IP(ia IPAddr) net.IP {
+	return ia[:]
+}
+
+// IntEncodeV4 encodes an IPv4 address as an integer (in network encoding, big endian).
+// It assumes that the passed IP is already IPv4. Otherwise, it would just encode the
+// last 4 bytes of an IPv6 address
+func IntEncodeV4(ia [net.IPv6len]uint8) uint32 {
+	return binary.BigEndian.Uint32(ia[net.IPv6len-net.IPv4len : net.IPv6len])
+}
+
+// IPAddrFromNetIP returns IPAddr from net.IP
+func IPAddrFromNetIP(netIP net.IP) IPAddr {
+	var arr [net.IPv6len]uint8
+	copy(arr[:], (netIP)[0:net.IPv6len])
+	return arr
+}
+
+func (ia *IPAddr) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + IP(*ia).String() + `"`), nil
+}
+
+func (m *MacAddr) String() string {
+	return fmt.Sprintf("%02X:%02X:%02X:%02X:%02X:%02X", m[0], m[1], m[2], m[3], m[4], m[5])
+}
+
+func (m *MacAddr) MarshalJSON() ([]byte, error) {
+	return []byte("\"" + m.String() + "\""), nil
+}
+
+// ReadFrom reads a Record from a binary source, in LittleEndian order
+func ReadFrom(reader io.Reader) (*RawRecord, error) {
+	var fr RawRecord
+	err := binary.Read(reader, binary.LittleEndian, &fr)
+	return &fr, err
+}
+
+func AllZerosMetaData(s [NetworkEventsMaxEventsMD]uint8) bool {
+	for _, v := range s {
+		if v != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func AllZeroIP(ip net.IP) bool {
+	return ip.Equal(net.IPv4zero) || ip.Equal(net.IPv6zero)
+}
diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go
new file mode 100644
index 000000000..c1ce04bb8
--- /dev/null
+++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow.pb.go
@@ -0,0 +1,1103 @@
+// Code generated by protoc-gen-go.
DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v3.19.4 +// source: proto/flow.proto + +package pbflow + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// as defined by field 61 in +// https://www.iana.org/assignments/ipfix/ipfix.xhtml +type Direction int32 + +const ( + Direction_INGRESS Direction = 0 + Direction_EGRESS Direction = 1 +) + +// Enum value maps for Direction. +var ( + Direction_name = map[int32]string{ + 0: "INGRESS", + 1: "EGRESS", + } + Direction_value = map[string]int32{ + "INGRESS": 0, + "EGRESS": 1, + } +) + +func (x Direction) Enum() *Direction { + p := new(Direction) + *p = x + return p +} + +func (x Direction) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Direction) Descriptor() protoreflect.EnumDescriptor { + return file_proto_flow_proto_enumTypes[0].Descriptor() +} + +func (Direction) Type() protoreflect.EnumType { + return &file_proto_flow_proto_enumTypes[0] +} + +func (x Direction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Direction.Descriptor instead. +func (Direction) EnumDescriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{0} +} + +// intentionally empty +type CollectorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CollectorReply) Reset() { + *x = CollectorReply{} + mi := &file_proto_flow_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CollectorReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectorReply) ProtoMessage() {} + +func (x *CollectorReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectorReply.ProtoReflect.Descriptor instead. 
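+
+// Illustrative sketch, not part of the generated file: the Collector service
+// acknowledges each Records batch with this intentionally empty CollectorReply.
+// A client-side call (using the CollectorClient defined in flow_grpc.pb.go;
+// client, ctx and entries are hypothetical) would look roughly like:
+//
+//	if _, err := client.Send(ctx, &Records{Entries: entries}); err != nil {
+//		// handle the send error; the reply itself carries no fields
+//	}
+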
+func (*CollectorReply) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{0} +} + +type Records struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*Record `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Records) Reset() { + *x = Records{} + mi := &file_proto_flow_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Records) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Records) ProtoMessage() {} + +func (x *Records) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Records.ProtoReflect.Descriptor instead. +func (*Records) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{1} +} + +func (x *Records) GetEntries() []*Record { + if x != nil { + return x.Entries + } + return nil +} + +type DupMapEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Interface string `protobuf:"bytes,1,opt,name=interface,proto3" json:"interface,omitempty"` + Direction Direction `protobuf:"varint,2,opt,name=direction,proto3,enum=pbflow.Direction" json:"direction,omitempty"` +} + +func (x *DupMapEntry) Reset() { + *x = DupMapEntry{} + mi := &file_proto_flow_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DupMapEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DupMapEntry) ProtoMessage() {} + +func (x *DupMapEntry) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DupMapEntry.ProtoReflect.Descriptor instead. +func (*DupMapEntry) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{2} +} + +func (x *DupMapEntry) GetInterface() string { + if x != nil { + return x.Interface + } + return "" +} + +func (x *DupMapEntry) GetDirection() Direction { + if x != nil { + return x.Direction + } + return Direction_INGRESS +} + +type NetworkEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Events map[string]string `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *NetworkEvent) Reset() { + *x = NetworkEvent{} + mi := &file_proto_flow_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NetworkEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkEvent) ProtoMessage() {} + +func (x *NetworkEvent) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkEvent.ProtoReflect.Descriptor instead. 
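+
+// Illustrative sketch, not part of the generated file: a NetworkEvent carries
+// free-form key/value metadata. FlowToPB (pkg/pbflow/proto.go) fills it with
+// keys such as "Feature", "Action", "Name", "Namespace" and "Direction", e.g.:
+//
+//	evt := &NetworkEvent{Events: map[string]string{"Feature": "acl", "Action": "allow"}} // sample values
+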
+func (*NetworkEvent) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{3} +} + +func (x *NetworkEvent) GetEvents() map[string]string { + if x != nil { + return x.Events + } + return nil +} + +type Record struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // protocol as defined by ETH_P_* in linux/if_ether.h + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/if_ether.h + EthProtocol uint32 `protobuf:"varint,1,opt,name=eth_protocol,json=ethProtocol,proto3" json:"eth_protocol,omitempty"` + Direction Direction `protobuf:"varint,2,opt,name=direction,proto3,enum=pbflow.Direction" json:"direction,omitempty"` + TimeFlowStart *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=time_flow_start,json=timeFlowStart,proto3" json:"time_flow_start,omitempty"` + TimeFlowEnd *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time_flow_end,json=timeFlowEnd,proto3" json:"time_flow_end,omitempty"` + // OSI-layer attributes + DataLink *DataLink `protobuf:"bytes,5,opt,name=data_link,json=dataLink,proto3" json:"data_link,omitempty"` + Network *Network `protobuf:"bytes,6,opt,name=network,proto3" json:"network,omitempty"` + Transport *Transport `protobuf:"bytes,7,opt,name=transport,proto3" json:"transport,omitempty"` + Bytes uint64 `protobuf:"varint,8,opt,name=bytes,proto3" json:"bytes,omitempty"` + Packets uint64 `protobuf:"varint,9,opt,name=packets,proto3" json:"packets,omitempty"` + Interface string `protobuf:"bytes,10,opt,name=interface,proto3" json:"interface,omitempty"` + // if true, the same flow has been recorded via another interface. + // From all the duplicate flows, one will set this value to false and the rest will be true. + Duplicate bool `protobuf:"varint,11,opt,name=duplicate,proto3" json:"duplicate,omitempty"` + // Agent IP address to help identifying the source of the flow + AgentIp *IP `protobuf:"bytes,12,opt,name=agent_ip,json=agentIp,proto3" json:"agent_ip,omitempty"` + Flags uint32 `protobuf:"varint,13,opt,name=flags,proto3" json:"flags,omitempty"` + IcmpType uint32 `protobuf:"varint,14,opt,name=icmp_type,json=icmpType,proto3" json:"icmp_type,omitempty"` + IcmpCode uint32 `protobuf:"varint,15,opt,name=icmp_code,json=icmpCode,proto3" json:"icmp_code,omitempty"` + PktDropBytes uint64 `protobuf:"varint,16,opt,name=pkt_drop_bytes,json=pktDropBytes,proto3" json:"pkt_drop_bytes,omitempty"` + PktDropPackets uint64 `protobuf:"varint,17,opt,name=pkt_drop_packets,json=pktDropPackets,proto3" json:"pkt_drop_packets,omitempty"` + PktDropLatestFlags uint32 `protobuf:"varint,18,opt,name=pkt_drop_latest_flags,json=pktDropLatestFlags,proto3" json:"pkt_drop_latest_flags,omitempty"` + PktDropLatestState uint32 `protobuf:"varint,19,opt,name=pkt_drop_latest_state,json=pktDropLatestState,proto3" json:"pkt_drop_latest_state,omitempty"` + PktDropLatestDropCause uint32 `protobuf:"varint,20,opt,name=pkt_drop_latest_drop_cause,json=pktDropLatestDropCause,proto3" json:"pkt_drop_latest_drop_cause,omitempty"` + DnsId uint32 `protobuf:"varint,21,opt,name=dns_id,json=dnsId,proto3" json:"dns_id,omitempty"` + DnsFlags uint32 `protobuf:"varint,22,opt,name=dns_flags,json=dnsFlags,proto3" json:"dns_flags,omitempty"` + DnsLatency *durationpb.Duration `protobuf:"bytes,23,opt,name=dns_latency,json=dnsLatency,proto3" json:"dns_latency,omitempty"` + TimeFlowRtt *durationpb.Duration `protobuf:"bytes,24,opt,name=time_flow_rtt,json=timeFlowRtt,proto3" json:"time_flow_rtt,omitempty"` + DnsErrno uint32 
`protobuf:"varint,25,opt,name=dns_errno,json=dnsErrno,proto3" json:"dns_errno,omitempty"` + DupList []*DupMapEntry `protobuf:"bytes,26,rep,name=dup_list,json=dupList,proto3" json:"dup_list,omitempty"` + NetworkEventsMetadata []*NetworkEvent `protobuf:"bytes,27,rep,name=network_events_metadata,json=networkEventsMetadata,proto3" json:"network_events_metadata,omitempty"` + Xlat *Xlat `protobuf:"bytes,28,opt,name=xlat,proto3" json:"xlat,omitempty"` + Sampling uint32 `protobuf:"varint,29,opt,name=sampling,proto3" json:"sampling,omitempty"` +} + +func (x *Record) Reset() { + *x = Record{} + mi := &file_proto_flow_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Record) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Record) ProtoMessage() {} + +func (x *Record) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Record.ProtoReflect.Descriptor instead. +func (*Record) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{4} +} + +func (x *Record) GetEthProtocol() uint32 { + if x != nil { + return x.EthProtocol + } + return 0 +} + +func (x *Record) GetDirection() Direction { + if x != nil { + return x.Direction + } + return Direction_INGRESS +} + +func (x *Record) GetTimeFlowStart() *timestamppb.Timestamp { + if x != nil { + return x.TimeFlowStart + } + return nil +} + +func (x *Record) GetTimeFlowEnd() *timestamppb.Timestamp { + if x != nil { + return x.TimeFlowEnd + } + return nil +} + +func (x *Record) GetDataLink() *DataLink { + if x != nil { + return x.DataLink + } + return nil +} + +func (x *Record) GetNetwork() *Network { + if x != nil { + return x.Network + } + return nil +} + +func (x *Record) GetTransport() *Transport { + if x != nil { + return x.Transport + } + return nil +} + +func (x *Record) GetBytes() uint64 { + if x != nil { + return x.Bytes + } + return 0 +} + +func (x *Record) GetPackets() uint64 { + if x != nil { + return x.Packets + } + return 0 +} + +func (x *Record) GetInterface() string { + if x != nil { + return x.Interface + } + return "" +} + +func (x *Record) GetDuplicate() bool { + if x != nil { + return x.Duplicate + } + return false +} + +func (x *Record) GetAgentIp() *IP { + if x != nil { + return x.AgentIp + } + return nil +} + +func (x *Record) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *Record) GetIcmpType() uint32 { + if x != nil { + return x.IcmpType + } + return 0 +} + +func (x *Record) GetIcmpCode() uint32 { + if x != nil { + return x.IcmpCode + } + return 0 +} + +func (x *Record) GetPktDropBytes() uint64 { + if x != nil { + return x.PktDropBytes + } + return 0 +} + +func (x *Record) GetPktDropPackets() uint64 { + if x != nil { + return x.PktDropPackets + } + return 0 +} + +func (x *Record) GetPktDropLatestFlags() uint32 { + if x != nil { + return x.PktDropLatestFlags + } + return 0 +} + +func (x *Record) GetPktDropLatestState() uint32 { + if x != nil { + return x.PktDropLatestState + } + return 0 +} + +func (x *Record) GetPktDropLatestDropCause() uint32 { + if x != nil { + return x.PktDropLatestDropCause + } + return 0 +} + +func (x *Record) GetDnsId() uint32 { + if x != nil { + return x.DnsId + } + return 0 +} + +func (x *Record) GetDnsFlags() uint32 { + if x != nil { + 
return x.DnsFlags + } + return 0 +} + +func (x *Record) GetDnsLatency() *durationpb.Duration { + if x != nil { + return x.DnsLatency + } + return nil +} + +func (x *Record) GetTimeFlowRtt() *durationpb.Duration { + if x != nil { + return x.TimeFlowRtt + } + return nil +} + +func (x *Record) GetDnsErrno() uint32 { + if x != nil { + return x.DnsErrno + } + return 0 +} + +func (x *Record) GetDupList() []*DupMapEntry { + if x != nil { + return x.DupList + } + return nil +} + +func (x *Record) GetNetworkEventsMetadata() []*NetworkEvent { + if x != nil { + return x.NetworkEventsMetadata + } + return nil +} + +func (x *Record) GetXlat() *Xlat { + if x != nil { + return x.Xlat + } + return nil +} + +func (x *Record) GetSampling() uint32 { + if x != nil { + return x.Sampling + } + return 0 +} + +type DataLink struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrcMac uint64 `protobuf:"varint,1,opt,name=src_mac,json=srcMac,proto3" json:"src_mac,omitempty"` + DstMac uint64 `protobuf:"varint,2,opt,name=dst_mac,json=dstMac,proto3" json:"dst_mac,omitempty"` +} + +func (x *DataLink) Reset() { + *x = DataLink{} + mi := &file_proto_flow_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DataLink) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataLink) ProtoMessage() {} + +func (x *DataLink) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataLink.ProtoReflect.Descriptor instead. +func (*DataLink) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{5} +} + +func (x *DataLink) GetSrcMac() uint64 { + if x != nil { + return x.SrcMac + } + return 0 +} + +func (x *DataLink) GetDstMac() uint64 { + if x != nil { + return x.DstMac + } + return 0 +} + +type Network struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrcAddr *IP `protobuf:"bytes,1,opt,name=src_addr,json=srcAddr,proto3" json:"src_addr,omitempty"` + DstAddr *IP `protobuf:"bytes,2,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"` + Dscp uint32 `protobuf:"varint,3,opt,name=dscp,proto3" json:"dscp,omitempty"` +} + +func (x *Network) Reset() { + *x = Network{} + mi := &file_proto_flow_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Network) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Network) ProtoMessage() {} + +func (x *Network) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Network.ProtoReflect.Descriptor instead. 
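+
+// Illustrative sketch, not part of the generated file: DataLink carries MAC
+// addresses packed into a uint64 in array byte order (see macToUint64 in
+// pkg/pbflow/proto.go), so 11:22:33:44:55:66 becomes 0x112233445566, e.g.:
+//
+//	dl := &DataLink{SrcMac: 0x112233445566, DstMac: 0x665544332211} // sample addresses
+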
+func (*Network) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{6} +} + +func (x *Network) GetSrcAddr() *IP { + if x != nil { + return x.SrcAddr + } + return nil +} + +func (x *Network) GetDstAddr() *IP { + if x != nil { + return x.DstAddr + } + return nil +} + +func (x *Network) GetDscp() uint32 { + if x != nil { + return x.Dscp + } + return 0 +} + +type IP struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IpFamily: + // + // *IP_Ipv4 + // *IP_Ipv6 + IpFamily isIP_IpFamily `protobuf_oneof:"ip_family"` +} + +func (x *IP) Reset() { + *x = IP{} + mi := &file_proto_flow_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IP) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IP) ProtoMessage() {} + +func (x *IP) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IP.ProtoReflect.Descriptor instead. +func (*IP) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{7} +} + +func (m *IP) GetIpFamily() isIP_IpFamily { + if m != nil { + return m.IpFamily + } + return nil +} + +func (x *IP) GetIpv4() uint32 { + if x, ok := x.GetIpFamily().(*IP_Ipv4); ok { + return x.Ipv4 + } + return 0 +} + +func (x *IP) GetIpv6() []byte { + if x, ok := x.GetIpFamily().(*IP_Ipv6); ok { + return x.Ipv6 + } + return nil +} + +type isIP_IpFamily interface { + isIP_IpFamily() +} + +type IP_Ipv4 struct { + Ipv4 uint32 `protobuf:"fixed32,1,opt,name=ipv4,proto3,oneof"` +} + +type IP_Ipv6 struct { + Ipv6 []byte `protobuf:"bytes,2,opt,name=ipv6,proto3,oneof"` +} + +func (*IP_Ipv4) isIP_IpFamily() {} + +func (*IP_Ipv6) isIP_IpFamily() {} + +type Transport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrcPort uint32 `protobuf:"varint,1,opt,name=src_port,json=srcPort,proto3" json:"src_port,omitempty"` + DstPort uint32 `protobuf:"varint,2,opt,name=dst_port,json=dstPort,proto3" json:"dst_port,omitempty"` + // protocol as defined by IPPROTO_* in linux/in.h + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/in.h + Protocol uint32 `protobuf:"varint,3,opt,name=protocol,proto3" json:"protocol,omitempty"` +} + +func (x *Transport) Reset() { + *x = Transport{} + mi := &file_proto_flow_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Transport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Transport) ProtoMessage() {} + +func (x *Transport) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Transport.ProtoReflect.Descriptor instead. 
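+
+// Illustrative sketch, not part of the generated file: the ip_family oneof
+// holds either an IPv4 address as a big-endian uint32 or an IPv6 address as
+// its 16 raw bytes (see agentIP in pkg/pbflow/proto.go), e.g.:
+//
+//	v4 := &IP{IpFamily: &IP_Ipv4{Ipv4: 0x0A0A0A0A}}      // 10.10.10.10
+//	v6 := &IP{IpFamily: &IP_Ipv6{Ipv6: someIP.To16()}}   // someIP is a hypothetical net.IP
+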
+func (*Transport) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{8} +} + +func (x *Transport) GetSrcPort() uint32 { + if x != nil { + return x.SrcPort + } + return 0 +} + +func (x *Transport) GetDstPort() uint32 { + if x != nil { + return x.DstPort + } + return 0 +} + +func (x *Transport) GetProtocol() uint32 { + if x != nil { + return x.Protocol + } + return 0 +} + +type Xlat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrcAddr *IP `protobuf:"bytes,1,opt,name=src_addr,json=srcAddr,proto3" json:"src_addr,omitempty"` + DstAddr *IP `protobuf:"bytes,2,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"` + SrcPort uint32 `protobuf:"varint,3,opt,name=src_port,json=srcPort,proto3" json:"src_port,omitempty"` + DstPort uint32 `protobuf:"varint,4,opt,name=dst_port,json=dstPort,proto3" json:"dst_port,omitempty"` + ZoneId uint32 `protobuf:"varint,5,opt,name=zone_id,json=zoneId,proto3" json:"zone_id,omitempty"` + IcmpId uint32 `protobuf:"varint,7,opt,name=icmp_id,json=icmpId,proto3" json:"icmp_id,omitempty"` +} + +func (x *Xlat) Reset() { + *x = Xlat{} + mi := &file_proto_flow_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Xlat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Xlat) ProtoMessage() {} + +func (x *Xlat) ProtoReflect() protoreflect.Message { + mi := &file_proto_flow_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Xlat.ProtoReflect.Descriptor instead. +func (*Xlat) Descriptor() ([]byte, []int) { + return file_proto_flow_proto_rawDescGZIP(), []int{9} +} + +func (x *Xlat) GetSrcAddr() *IP { + if x != nil { + return x.SrcAddr + } + return nil +} + +func (x *Xlat) GetDstAddr() *IP { + if x != nil { + return x.DstAddr + } + return nil +} + +func (x *Xlat) GetSrcPort() uint32 { + if x != nil { + return x.SrcPort + } + return 0 +} + +func (x *Xlat) GetDstPort() uint32 { + if x != nil { + return x.DstPort + } + return 0 +} + +func (x *Xlat) GetZoneId() uint32 { + if x != nil { + return x.ZoneId + } + return 0 +} + +func (x *Xlat) GetIcmpId() uint32 { + if x != nil { + return x.IcmpId + } + return 0 +} + +var File_proto_flow_proto protoreflect.FileDescriptor + +var file_proto_flow_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x06, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x10, 0x0a, 0x0e, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x33, 0x0a, + 0x07, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x22, 0x5c, 0x0a, 0x0b, 0x44, 0x75, 
0x70, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, + 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x83, 0x01, 0x0a, 0x0c, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, 0x09, 0x0a, 0x06, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x65, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x6c, + 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, + 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x46, 0x6c, 0x6f, 0x77, 0x45, 0x6e, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, + 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x08, + 0x64, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x29, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x12, 0x2f, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x09, 0x74, 
0x72, 0x61, 0x6e, 0x73, + 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x61, 0x63, + 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x25, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x69, 0x63, 0x6d, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x63, + 0x6d, 0x70, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, + 0x63, 0x6d, 0x70, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x6b, 0x74, 0x5f, 0x64, + 0x72, 0x6f, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0c, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x6b, 0x74, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, + 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x70, 0x6b, 0x74, 0x5f, 0x64, + 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, 0x4c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x70, 0x6b, + 0x74, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x70, 0x6b, 0x74, 0x44, 0x72, + 0x6f, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3a, 0x0a, + 0x1a, 0x70, 0x6b, 0x74, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x63, 0x61, 0x75, 0x73, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x16, 0x70, 0x6b, 0x74, 0x44, 0x72, 0x6f, 0x70, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x44, 0x72, 0x6f, 0x70, 0x43, 0x61, 0x75, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x64, 0x6e, 0x73, + 0x5f, 0x69, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x64, 0x6e, 0x73, 0x49, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x0b, 0x64, 0x6e, 0x73, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x64, + 
0x6e, 0x73, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x74, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x74, 0x69, 0x6d, + 0x65, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x74, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, + 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x6e, 0x73, + 0x45, 0x72, 0x72, 0x6e, 0x6f, 0x12, 0x2e, 0x0a, 0x08, 0x64, 0x75, 0x70, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x44, 0x75, 0x70, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x75, + 0x70, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4c, 0x0a, 0x17, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x20, 0x0a, 0x04, 0x78, 0x6c, 0x61, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x58, 0x6c, 0x61, 0x74, 0x52, + 0x04, 0x78, 0x6c, 0x61, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x22, 0x3c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x17, 0x0a, + 0x07, 0x73, 0x72, 0x63, 0x5f, 0x6d, 0x61, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, + 0x73, 0x72, 0x63, 0x4d, 0x61, 0x63, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x6d, 0x61, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x64, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x22, + 0x6b, 0x0a, 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72, + 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, + 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x73, 0x72, 0x63, 0x41, 0x64, 0x64, + 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, + 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x73, 0x63, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x64, 0x73, 0x63, 0x70, 0x22, 0x3d, 0x0a, 0x02, + 0x49, 0x50, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, + 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x34, 0x12, 0x14, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x42, 0x0b, + 0x0a, 0x09, 0x69, 0x70, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x22, 0x5d, 0x0a, 0x09, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 
0x0d, 0x52, 0x07, 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbc, 0x01, 0x0a, 0x04, 0x58, + 0x6c, 0x61, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, + 0x50, 0x52, 0x07, 0x73, 0x72, 0x63, 0x41, 0x64, 0x64, 0x72, 0x12, 0x25, 0x0a, 0x08, 0x64, 0x73, + 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, + 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, + 0x72, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x07, 0x73, 0x72, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x64, 0x73, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, + 0x64, 0x73, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x7a, 0x6f, 0x6e, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x7a, 0x6f, 0x6e, 0x65, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x69, 0x63, 0x6d, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x06, 0x69, 0x63, 0x6d, 0x70, 0x49, 0x64, 0x2a, 0x24, 0x0a, 0x09, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, + 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x32, + 0x3e, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, + 0x53, 0x65, 0x6e, 0x64, 0x12, 0x0f, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, + 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x62, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_proto_flow_proto_rawDescOnce sync.Once + file_proto_flow_proto_rawDescData = file_proto_flow_proto_rawDesc +) + +func file_proto_flow_proto_rawDescGZIP() []byte { + file_proto_flow_proto_rawDescOnce.Do(func() { + file_proto_flow_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_flow_proto_rawDescData) + }) + return file_proto_flow_proto_rawDescData +} + +var file_proto_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_proto_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_proto_flow_proto_goTypes = []any{ + (Direction)(0), // 0: pbflow.Direction + (*CollectorReply)(nil), // 1: pbflow.CollectorReply + (*Records)(nil), // 2: pbflow.Records + (*DupMapEntry)(nil), // 3: pbflow.DupMapEntry + (*NetworkEvent)(nil), // 4: pbflow.NetworkEvent + (*Record)(nil), // 5: pbflow.Record + (*DataLink)(nil), // 6: pbflow.DataLink + (*Network)(nil), // 7: pbflow.Network + (*IP)(nil), // 8: pbflow.IP + (*Transport)(nil), // 9: pbflow.Transport + (*Xlat)(nil), // 10: pbflow.Xlat + nil, // 11: pbflow.NetworkEvent.EventsEntry + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 13: google.protobuf.Duration +} +var file_proto_flow_proto_depIdxs = []int32{ + 5, // 0: pbflow.Records.entries:type_name -> pbflow.Record + 0, // 1: pbflow.DupMapEntry.direction:type_name -> pbflow.Direction + 11, // 2: 
pbflow.NetworkEvent.events:type_name -> pbflow.NetworkEvent.EventsEntry + 0, // 3: pbflow.Record.direction:type_name -> pbflow.Direction + 12, // 4: pbflow.Record.time_flow_start:type_name -> google.protobuf.Timestamp + 12, // 5: pbflow.Record.time_flow_end:type_name -> google.protobuf.Timestamp + 6, // 6: pbflow.Record.data_link:type_name -> pbflow.DataLink + 7, // 7: pbflow.Record.network:type_name -> pbflow.Network + 9, // 8: pbflow.Record.transport:type_name -> pbflow.Transport + 8, // 9: pbflow.Record.agent_ip:type_name -> pbflow.IP + 13, // 10: pbflow.Record.dns_latency:type_name -> google.protobuf.Duration + 13, // 11: pbflow.Record.time_flow_rtt:type_name -> google.protobuf.Duration + 3, // 12: pbflow.Record.dup_list:type_name -> pbflow.DupMapEntry + 4, // 13: pbflow.Record.network_events_metadata:type_name -> pbflow.NetworkEvent + 10, // 14: pbflow.Record.xlat:type_name -> pbflow.Xlat + 8, // 15: pbflow.Network.src_addr:type_name -> pbflow.IP + 8, // 16: pbflow.Network.dst_addr:type_name -> pbflow.IP + 8, // 17: pbflow.Xlat.src_addr:type_name -> pbflow.IP + 8, // 18: pbflow.Xlat.dst_addr:type_name -> pbflow.IP + 2, // 19: pbflow.Collector.Send:input_type -> pbflow.Records + 1, // 20: pbflow.Collector.Send:output_type -> pbflow.CollectorReply + 20, // [20:21] is the sub-list for method output_type + 19, // [19:20] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name +} + +func init() { file_proto_flow_proto_init() } +func file_proto_flow_proto_init() { + if File_proto_flow_proto != nil { + return + } + file_proto_flow_proto_msgTypes[7].OneofWrappers = []any{ + (*IP_Ipv4)(nil), + (*IP_Ipv6)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_flow_proto_rawDesc, + NumEnums: 1, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_flow_proto_goTypes, + DependencyIndexes: file_proto_flow_proto_depIdxs, + EnumInfos: file_proto_flow_proto_enumTypes, + MessageInfos: file_proto_flow_proto_msgTypes, + }.Build() + File_proto_flow_proto = out.File + file_proto_flow_proto_rawDesc = nil + file_proto_flow_proto_goTypes = nil + file_proto_flow_proto_depIdxs = nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow_grpc.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow_grpc.pb.go new file mode 100644 index 000000000..ac00dda6d --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/flow_grpc.pb.go @@ -0,0 +1,121 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.19.4 +// source: proto/flow.proto + +package pbflow + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Collector_Send_FullMethodName = "/pbflow.Collector/Send" +) + +// CollectorClient is the client API for Collector service. 
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type CollectorClient interface {
+	Send(ctx context.Context, in *Records, opts ...grpc.CallOption) (*CollectorReply, error)
+}
+
+type collectorClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewCollectorClient(cc grpc.ClientConnInterface) CollectorClient {
+	return &collectorClient{cc}
+}
+
+func (c *collectorClient) Send(ctx context.Context, in *Records, opts ...grpc.CallOption) (*CollectorReply, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(CollectorReply)
+	err := c.cc.Invoke(ctx, Collector_Send_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// CollectorServer is the server API for Collector service.
+// All implementations must embed UnimplementedCollectorServer
+// for forward compatibility.
+type CollectorServer interface {
+	Send(context.Context, *Records) (*CollectorReply, error)
+	mustEmbedUnimplementedCollectorServer()
+}
+
+// UnimplementedCollectorServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedCollectorServer struct{}
+
+func (UnimplementedCollectorServer) Send(context.Context, *Records) (*CollectorReply, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Send not implemented")
+}
+func (UnimplementedCollectorServer) mustEmbedUnimplementedCollectorServer() {}
+func (UnimplementedCollectorServer) testEmbeddedByValue()                   {}
+
+// UnsafeCollectorServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to CollectorServer will
+// result in compilation errors.
+type UnsafeCollectorServer interface {
+	mustEmbedUnimplementedCollectorServer()
+}
+
+func RegisterCollectorServer(s grpc.ServiceRegistrar, srv CollectorServer) {
+	// If the following call panics, it indicates UnimplementedCollectorServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&Collector_ServiceDesc, srv)
+}
+
+func _Collector_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Records)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CollectorServer).Send(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Collector_Send_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CollectorServer).Send(ctx, req.(*Records))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// Collector_ServiceDesc is the grpc.ServiceDesc for Collector service.
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Collector_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pbflow.Collector", + HandlerType: (*CollectorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Send", + Handler: _Collector_Send_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/flow.proto", +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go new file mode 100644 index 000000000..27b7e57c0 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow/proto.go @@ -0,0 +1,285 @@ +package pbflow + +import ( + "encoding/binary" + "net" + + "github.com/netobserv/flowlogs-pipeline/pkg/config" + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + ovnmodel "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model" + ovnobserv "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder" + "github.com/sirupsen/logrus" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var protoLog = logrus.WithField("component", "pbflow") + +// FlowsToPB is an auxiliary function to convert flow records, as returned by the eBPF agent, +// into protobuf-encoded messages ready to be sent to the collector via GRPC +func FlowsToPB(inputRecords []*model.Record, maxLen int, s *ovnobserv.SampleDecoder) []*Records { + entries := make([]*Record, 0, len(inputRecords)) + for _, record := range inputRecords { + entries = append(entries, FlowToPB(record, s)) + } + var records []*Records + for len(entries) > 0 { + end := len(entries) + if end > maxLen { + end = maxLen + } + records = append(records, &Records{Entries: entries[:end]}) + entries = entries[end:] + } + return records +} + +// FlowToPB is an auxiliary function to convert a single flow record, as returned by the eBPF agent, +// into a protobuf-encoded message ready to be sent to the collector via kafka +func FlowToPB(fr *model.Record, s *ovnobserv.SampleDecoder) *Record { + var pbflowRecord = Record{ + EthProtocol: uint32(fr.Metrics.EthProtocol), + Direction: Direction(fr.ID.Direction), + DataLink: &DataLink{ + SrcMac: macToUint64(&fr.Metrics.SrcMac), + DstMac: macToUint64(&fr.Metrics.DstMac), + }, + Network: &Network{ + Dscp: uint32(fr.Metrics.Dscp), + }, + Transport: &Transport{ + Protocol: uint32(fr.ID.TransportProtocol), + SrcPort: uint32(fr.ID.SrcPort), + DstPort: uint32(fr.ID.DstPort), + }, + IcmpType: uint32(fr.ID.IcmpType), + IcmpCode: uint32(fr.ID.IcmpCode), + Bytes: fr.Metrics.Bytes, + TimeFlowStart: ×tamppb.Timestamp{ + Seconds: fr.TimeFlowStart.Unix(), + Nanos: int32(fr.TimeFlowStart.Nanosecond()), + }, + TimeFlowEnd: ×tamppb.Timestamp{ + Seconds: fr.TimeFlowEnd.Unix(), + Nanos: int32(fr.TimeFlowEnd.Nanosecond()), + }, + Packets: uint64(fr.Metrics.Packets), + Duplicate: fr.Duplicate, + AgentIp: agentIP(fr.AgentIP), + Flags: uint32(fr.Metrics.Flags), + Interface: fr.Interface, + TimeFlowRtt: durationpb.New(fr.TimeFlowRtt), + Sampling: fr.Metrics.Sampling, + } + if fr.Metrics.AdditionalMetrics != nil { + pbflowRecord.PktDropBytes = fr.Metrics.AdditionalMetrics.PktDrops.Bytes + pbflowRecord.PktDropPackets = uint64(fr.Metrics.AdditionalMetrics.PktDrops.Packets) + pbflowRecord.PktDropLatestFlags = uint32(fr.Metrics.AdditionalMetrics.PktDrops.LatestFlags) + 
pbflowRecord.PktDropLatestState = uint32(fr.Metrics.AdditionalMetrics.PktDrops.LatestState) + pbflowRecord.PktDropLatestDropCause = fr.Metrics.AdditionalMetrics.PktDrops.LatestDropCause + pbflowRecord.DnsId = uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Id) + pbflowRecord.DnsFlags = uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Flags) + pbflowRecord.DnsErrno = uint32(fr.Metrics.AdditionalMetrics.DnsRecord.Errno) + if fr.Metrics.AdditionalMetrics.DnsRecord.Latency != 0 { + pbflowRecord.DnsLatency = durationpb.New(fr.DNSLatency) + } + pbflowRecord.Xlat = &Xlat{ + SrcPort: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.Sport), + DstPort: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.Dport), + ZoneId: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.ZoneId), + IcmpId: uint32(fr.Metrics.AdditionalMetrics.TranslatedFlow.IcmpId), + } + } + if len(fr.DupList) != 0 { + pbflowRecord.DupList = make([]*DupMapEntry, 0) + for _, m := range fr.DupList { + for key, value := range m { + pbflowRecord.DupList = append(pbflowRecord.DupList, &DupMapEntry{ + Interface: key, + Direction: Direction(value), + }) + } + } + } + if fr.Metrics.EthProtocol == model.IPv6Type { + pbflowRecord.Network.SrcAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.ID.SrcIp[:]}} + pbflowRecord.Network.DstAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.ID.DstIp[:]}} + if fr.Metrics.AdditionalMetrics != nil { + pbflowRecord.Xlat.SrcAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr[:]}} + pbflowRecord.Xlat.DstAddr = &IP{IpFamily: &IP_Ipv6{Ipv6: fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr[:]}} + } + } else { + pbflowRecord.Network.SrcAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.ID.SrcIp)}} + pbflowRecord.Network.DstAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.ID.DstIp)}} + if fr.Metrics.AdditionalMetrics != nil { + pbflowRecord.Xlat.SrcAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.Metrics.AdditionalMetrics.TranslatedFlow.Saddr)}} + pbflowRecord.Xlat.DstAddr = &IP{IpFamily: &IP_Ipv4{Ipv4: model.IntEncodeV4(fr.Metrics.AdditionalMetrics.TranslatedFlow.Daddr)}} + } + } + if s != nil && fr.Metrics.AdditionalMetrics != nil { + seen := make(map[string]bool) + pbflowRecord.NetworkEventsMetadata = make([]*NetworkEvent, 0) + for _, metadata := range fr.Metrics.AdditionalMetrics.NetworkEvents { + var pbEvent NetworkEvent + if !model.AllZerosMetaData(metadata) { + if md, err := s.DecodeCookie8Bytes(metadata); err == nil { + acl, ok := md.(*ovnmodel.ACLEvent) + mdStr := md.String() + protoLog.Debugf("Network Events Metadata %v decoded Cookie: %v decoded string: %s", metadata, md, mdStr) + if !seen[mdStr] { + if ok { + pbEvent = NetworkEvent{ + Events: map[string]string{ + "Action": acl.Action, + "Type": acl.Actor, + "Feature": "acl", + "Name": acl.Name, + "Namespace": acl.Namespace, + "Direction": acl.Direction, + }, + } + } else { + pbEvent = NetworkEvent{ + Events: map[string]string{ + "Message": mdStr, + }, + } + } + pbflowRecord.NetworkEventsMetadata = append(pbflowRecord.NetworkEventsMetadata, &pbEvent) + seen[mdStr] = true + } + } else { + protoLog.Errorf("unable to decode Network events cookie: %v", err) + } + } + } + } + return &pbflowRecord +} + +func PBToFlow(pb *Record) *model.Record { + if pb == nil { + return nil + } + out := model.Record{ + ID: ebpf.BpfFlowId{ + Direction: uint8(pb.Direction), + TransportProtocol: uint8(pb.Transport.Protocol), + SrcIp: ipToIPAddr(pb.Network.GetSrcAddr()), + DstIp: ipToIPAddr(pb.Network.GetDstAddr()), + SrcPort: 
uint16(pb.Transport.SrcPort), + DstPort: uint16(pb.Transport.DstPort), + IcmpType: uint8(pb.IcmpType), + IcmpCode: uint8(pb.IcmpCode), + }, + Metrics: model.BpfFlowContent{ + BpfFlowMetrics: &ebpf.BpfFlowMetrics{ + EthProtocol: uint16(pb.EthProtocol), + SrcMac: macToUint8(pb.DataLink.GetSrcMac()), + DstMac: macToUint8(pb.DataLink.GetDstMac()), + Bytes: pb.Bytes, + Packets: uint32(pb.Packets), + Flags: uint16(pb.Flags), + Dscp: uint8(pb.Network.Dscp), + Sampling: pb.Sampling, + }, + AdditionalMetrics: &ebpf.BpfAdditionalMetrics{ + PktDrops: ebpf.BpfPktDropsT{ + Bytes: pb.PktDropBytes, + Packets: uint32(pb.PktDropPackets), + LatestFlags: uint16(pb.PktDropLatestFlags), + LatestState: uint8(pb.PktDropLatestState), + LatestDropCause: pb.PktDropLatestDropCause, + }, + DnsRecord: ebpf.BpfDnsRecordT{ + Id: uint16(pb.DnsId), + Flags: uint16(pb.DnsFlags), + Errno: uint8(pb.DnsErrno), + Latency: uint64(pb.DnsLatency.AsDuration()), + }, + TranslatedFlow: ebpf.BpfTranslatedFlowT{ + Saddr: ipToIPAddr(pb.Xlat.GetSrcAddr()), + Daddr: ipToIPAddr(pb.Xlat.GetDstAddr()), + Sport: uint16(pb.Xlat.GetSrcPort()), + Dport: uint16(pb.Xlat.GetDstPort()), + ZoneId: uint16(pb.Xlat.GetZoneId()), + IcmpId: uint8(pb.Xlat.GetIcmpId()), + }, + }, + }, + TimeFlowStart: pb.TimeFlowStart.AsTime(), + TimeFlowEnd: pb.TimeFlowEnd.AsTime(), + AgentIP: pbIPToNetIP(pb.AgentIp), + Duplicate: pb.Duplicate, + Interface: pb.Interface, + TimeFlowRtt: pb.TimeFlowRtt.AsDuration(), + DNSLatency: pb.DnsLatency.AsDuration(), + } + + if len(pb.GetDupList()) != 0 { + for _, entry := range pb.GetDupList() { + intf := entry.Interface + dir := uint8(entry.Direction) + out.DupList = append(out.DupList, map[string]uint8{intf: dir}) + } + } + if len(pb.GetNetworkEventsMetadata()) != 0 { + for _, e := range pb.GetNetworkEventsMetadata() { + m := config.GenericMap{} + for k, v := range e.Events { + m[k] = v + } + out.NetworkMonitorEventsMD = append(out.NetworkMonitorEventsMD, m) + } + protoLog.Debugf("decoded Network events monitor metadata: %v", out.NetworkMonitorEventsMD) + } + + return &out +} + +// Mac bytes are encoded in the same order as in the array. This is, a Mac +// like 11:22:33:44:55:66 will be encoded as 0x112233445566 +func macToUint64(m *[model.MacLen]uint8) uint64 { + return uint64(m[5]) | + (uint64(m[4]) << 8) | + (uint64(m[3]) << 16) | + (uint64(m[2]) << 24) | + (uint64(m[1]) << 32) | + (uint64(m[0]) << 40) +} + +func agentIP(nip net.IP) *IP { + if ip := nip.To4(); ip != nil { + return &IP{IpFamily: &IP_Ipv4{Ipv4: binary.BigEndian.Uint32(ip)}} + } + // IPv6 address + return &IP{IpFamily: &IP_Ipv6{Ipv6: nip}} +} + +func pbIPToNetIP(ip *IP) net.IP { + if ip.GetIpv6() != nil { + return net.IP(ip.GetIpv6()) + } + n := ip.GetIpv4() + return net.IPv4( + byte((n>>24)&0xFF), + byte((n>>16)&0xFF), + byte((n>>8)&0xFF), + byte(n&0xFF)) +} + +func ipToIPAddr(ip *IP) model.IPAddr { + return model.IPAddrFromNetIP(pbIPToNetIP(ip)) +} + +func macToUint8(mac uint64) [6]uint8 { + return [6]uint8{ + uint8(mac >> 40), + uint8(mac >> 32), + uint8(mac >> 24), + uint8(mac >> 16), + uint8(mac >> 8), + uint8(mac), + } +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet.pb.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet.pb.go new file mode 100644 index 000000000..30916b7d4 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.35.1 +// protoc v3.19.4 +// source: proto/packet.proto + +package pbpacket + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The request message containing the Packet +type Packet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pcap *anypb.Any `protobuf:"bytes,1,opt,name=pcap,proto3" json:"pcap,omitempty"` +} + +func (x *Packet) Reset() { + *x = Packet{} + mi := &file_proto_packet_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Packet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Packet) ProtoMessage() {} + +func (x *Packet) ProtoReflect() protoreflect.Message { + mi := &file_proto_packet_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Packet.ProtoReflect.Descriptor instead. +func (*Packet) Descriptor() ([]byte, []int) { + return file_proto_packet_proto_rawDescGZIP(), []int{0} +} + +func (x *Packet) GetPcap() *anypb.Any { + if x != nil { + return x.Pcap + } + return nil +} + +// intentionally empty +type CollectorReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CollectorReply) Reset() { + *x = CollectorReply{} + mi := &file_proto_packet_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CollectorReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CollectorReply) ProtoMessage() {} + +func (x *CollectorReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_packet_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CollectorReply.ProtoReflect.Descriptor instead. 
+func (*CollectorReply) Descriptor() ([]byte, []int) { + return file_proto_packet_proto_rawDescGZIP(), []int{1} +} + +var File_proto_packet_proto protoreflect.FileDescriptor + +var file_proto_packet_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x32, 0x0a, 0x06, 0x50, 0x61, 0x63, + 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x70, 0x63, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x70, 0x63, 0x61, 0x70, 0x22, 0x10, 0x0a, + 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, + 0x41, 0x0a, 0x09, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x04, + 0x53, 0x65, 0x6e, 0x64, 0x12, 0x10, 0x2e, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x2e, + 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x70, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_packet_proto_rawDescOnce sync.Once + file_proto_packet_proto_rawDescData = file_proto_packet_proto_rawDesc +) + +func file_proto_packet_proto_rawDescGZIP() []byte { + file_proto_packet_proto_rawDescOnce.Do(func() { + file_proto_packet_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_packet_proto_rawDescData) + }) + return file_proto_packet_proto_rawDescData +} + +var file_proto_packet_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_packet_proto_goTypes = []any{ + (*Packet)(nil), // 0: pbpacket.Packet + (*CollectorReply)(nil), // 1: pbpacket.CollectorReply + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var file_proto_packet_proto_depIdxs = []int32{ + 2, // 0: pbpacket.Packet.pcap:type_name -> google.protobuf.Any + 0, // 1: pbpacket.Collector.Send:input_type -> pbpacket.Packet + 1, // 2: pbpacket.Collector.Send:output_type -> pbpacket.CollectorReply + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_packet_proto_init() } +func file_proto_packet_proto_init() { + if File_proto_packet_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_packet_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_packet_proto_goTypes, + DependencyIndexes: file_proto_packet_proto_depIdxs, + MessageInfos: file_proto_packet_proto_msgTypes, + }.Build() + File_proto_packet_proto = out.File + file_proto_packet_proto_rawDesc = nil + file_proto_packet_proto_goTypes = nil + file_proto_packet_proto_depIdxs = nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet_grpc.pb.go 
b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet_grpc.pb.go new file mode 100644 index 000000000..fce168986 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket/packet_grpc.pb.go @@ -0,0 +1,121 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.19.4 +// source: proto/packet.proto + +package pbpacket + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Collector_Send_FullMethodName = "/pbpacket.Collector/Send" +) + +// CollectorClient is the client API for Collector service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CollectorClient interface { + Send(ctx context.Context, in *Packet, opts ...grpc.CallOption) (*CollectorReply, error) +} + +type collectorClient struct { + cc grpc.ClientConnInterface +} + +func NewCollectorClient(cc grpc.ClientConnInterface) CollectorClient { + return &collectorClient{cc} +} + +func (c *collectorClient) Send(ctx context.Context, in *Packet, opts ...grpc.CallOption) (*CollectorReply, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CollectorReply) + err := c.cc.Invoke(ctx, Collector_Send_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CollectorServer is the server API for Collector service. +// All implementations must embed UnimplementedCollectorServer +// for forward compatibility. +type CollectorServer interface { + Send(context.Context, *Packet) (*CollectorReply, error) + mustEmbedUnimplementedCollectorServer() +} + +// UnimplementedCollectorServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedCollectorServer struct{} + +func (UnimplementedCollectorServer) Send(context.Context, *Packet) (*CollectorReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Send not implemented") +} +func (UnimplementedCollectorServer) mustEmbedUnimplementedCollectorServer() {} +func (UnimplementedCollectorServer) testEmbeddedByValue() {} + +// UnsafeCollectorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CollectorServer will +// result in compilation errors. +type UnsafeCollectorServer interface { + mustEmbedUnimplementedCollectorServer() +} + +func RegisterCollectorServer(s grpc.ServiceRegistrar, srv CollectorServer) { + // If the following call pancis, it indicates UnimplementedCollectorServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Collector_ServiceDesc, srv) +} + +func _Collector_Send_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Packet) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CollectorServer).Send(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Collector_Send_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CollectorServer).Send(ctx, req.(*Packet)) + } + return interceptor(ctx, in, info, handler) +} + +// Collector_ServiceDesc is the grpc.ServiceDesc for Collector service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Collector_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pbpacket.Collector", + HandlerType: (*CollectorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Send", + Handler: _Collector_Send_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/packet.proto", +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus/prom_server.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus/prom_server.go new file mode 100644 index 000000000..c50639a2e --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus/prom_server.go @@ -0,0 +1,103 @@ +package prometheus + +import ( + "crypto/tls" + "fmt" + "net/http" + "time" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + + prom "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/sirupsen/logrus" +) + +var ( + plog = logrus.WithField("component", "prometheus") + maybePanic = plog.Fatalf +) + +// InitializePrometheus starts the global Prometheus server, used for operational metrics and prom-encode stages if they don't override the server settings +func InitializePrometheus(settings *metrics.Settings) *http.Server { + return StartServerAsync(settings, nil) +} + +// StartServerAsync starts, in the background, the HTTP server that serves Prometheus scrape requests +func StartServerAsync(conn *metrics.Settings, registry *prom.Registry) *http.Server { + // create prometheus server for operational metrics + // if the address is empty, it defaults to 0.0.0.0 + port := conn.Port + if port == 0 { + port = 9090 + } + addr := fmt.Sprintf("%s:%v", conn.Address, port) + plog.Infof("StartServerAsync: addr = %s", addr) + + httpServer := &http.Server{ + Addr: addr, + // TLS clients must use TLS 1.2 or higher + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + // The Handler function provides a default handler to expose metrics + // via an HTTP server. "/metrics" is the usual endpoint for that.
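+ // A minimal caller sketch (hypothetical values; Address, Port and TLS are the Settings fields this function reads): + // + //	var s metrics.Settings + //	s.Address, s.Port = "0.0.0.0", 9401 + //	srv := StartServerAsync(&s, nil) // nil registry falls back to the default promhttp handler + //	defer srv.Close()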
+ mux := http.NewServeMux() + if registry == nil { + mux.Handle("/metrics", promhttp.Handler()) + } else { + mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) + } + httpServer.Handler = mux + httpServer = defaultServer(httpServer) + + go func() { + var err error + if conn.TLS != nil { + err = httpServer.ListenAndServeTLS(conn.TLS.CertPath, conn.TLS.KeyPath) + } else { + err = httpServer.ListenAndServe() + } + if err != nil && err != http.ErrServerClosed { + maybePanic("error in http.ListenAndServe: %v", err) + } + }() + + return httpServer +} + +func defaultServer(srv *http.Server) *http.Server { + // defaults taken from https://bruinsslot.jp/post/go-secure-webserver/ can be overridden by the caller + if srv.Handler != nil { + // No more than 2MB body + srv.Handler = http.MaxBytesHandler(srv.Handler, 2<<20) + } else { + plog.Warnf("Handler not yet set on server while securing defaults. Make sure a MaxBytes middleware is used.") + } + if srv.ReadTimeout == 0 { + srv.ReadTimeout = 10 * time.Second + } + if srv.ReadHeaderTimeout == 0 { + srv.ReadHeaderTimeout = 5 * time.Second + } + if srv.WriteTimeout == 0 { + srv.WriteTimeout = 10 * time.Second + } + if srv.IdleTimeout == 0 { + srv.IdleTimeout = 120 * time.Second + } + if srv.MaxHeaderBytes == 0 { + srv.MaxHeaderBytes = 1 << 20 // 1MB + } + if srv.TLSConfig == nil { + srv.TLSConfig = &tls.Config{} + } + if srv.TLSConfig.MinVersion == 0 { + srv.TLSConfig.MinVersion = tls.VersionTLS13 + } + // Disable http/2 + srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0) + + return srv +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/flow_filter.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/flow_filter.go new file mode 100644 index 000000000..b29e2dcfb --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/flow_filter.go @@ -0,0 +1,266 @@ +package tracer + +import ( + "fmt" + "net" + "strconv" + "strings" + "syscall" + + cilium "github.com/cilium/ebpf" + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + "k8s.io/apimachinery/pkg/util/intstr" +) + +type FilterConfig struct { + FilterDirection string + FilterIPCIDR string + FilterProtocol string + FilterSourcePort intstr.IntOrString + FilterDestinationPort intstr.IntOrString + FilterPort intstr.IntOrString + FilterIcmpType int + FilterIcmpCode int + FilterPeerIP string + FilterAction string + FilterTCPFlags string + FilterDrops bool + FilterSample uint32 +} + +type Filter struct { + // eBPF objs to create/update eBPF maps + objects *ebpf.BpfObjects + config []*FilterConfig +} + +func NewFilter(objects *ebpf.BpfObjects, cfg []*FilterConfig) *Filter { + return &Filter{ + objects: objects, + config: cfg, + } +} + +func (f *Filter) ProgramFilter() error { + + for _, config := range f.config { + log.Infof("Flow filter config: %v", config) + key, err := f.getFilterKey(config) + if err != nil { + return fmt.Errorf("failed to get filter key: %w", err) + } + + val, err := f.getFilterValue(config) + if err != nil { + return fmt.Errorf("failed to get filter value: %w", err) + } + + err = f.objects.FilterMap.Update(key, val, cilium.UpdateAny) + if err != nil { + return fmt.Errorf("failed to update filter map: %w", err) + } + + log.Infof("Programmed filter with key: %v, value: %v", key, val) + } + return nil +} + +func (f *Filter) getFilterKey(config *FilterConfig) (ebpf.BpfFilterKeyT, error) { + key := ebpf.BpfFilterKeyT{} + if config.FilterIPCIDR == "" { + config.FilterIPCIDR =
"0.0.0.0/0" + } + ip, ipNet, err := net.ParseCIDR(config.FilterIPCIDR) + if err != nil { + return key, fmt.Errorf("failed to parse FlowFilterIPCIDR: %w", err) + } + if ip.To4() != nil { + copy(key.IpData[:], ip.To4()) + } else { + copy(key.IpData[:], ip.To16()) + } + pfLen, _ := ipNet.Mask.Size() + key.PrefixLen = uint32(pfLen) + + return key, nil +} + +// nolint:cyclop +func (f *Filter) getFilterValue(config *FilterConfig) (ebpf.BpfFilterValueT, error) { + val := ebpf.BpfFilterValueT{} + + switch config.FilterDirection { + case "Ingress": + val.Direction = ebpf.BpfDirectionTINGRESS + case "Egress": + val.Direction = ebpf.BpfDirectionTEGRESS + default: + val.Direction = ebpf.BpfDirectionTMAX_DIRECTION + } + + switch config.FilterAction { + case "Reject": + val.Action = ebpf.BpfFilterActionTREJECT + case "Accept": + val.Action = ebpf.BpfFilterActionTACCEPT + default: + val.Action = ebpf.BpfFilterActionTMAX_FILTER_ACTIONS + } + + switch config.FilterProtocol { + case "TCP": + val.Protocol = syscall.IPPROTO_TCP + case "UDP": + val.Protocol = syscall.IPPROTO_UDP + case "SCTP": + val.Protocol = syscall.IPPROTO_SCTP + case "ICMP": + val.Protocol = syscall.IPPROTO_ICMP + case "ICMPv6": + val.Protocol = syscall.IPPROTO_ICMPV6 + } + + val.DstPortStart, val.DstPortEnd = getDstPortsRange(config) + val.DstPort1, val.DstPort2 = getDstPorts(config) + val.SrcPortStart, val.SrcPortEnd = getSrcPortsRange(config) + val.SrcPort1, val.SrcPort2 = getSrcPorts(config) + val.PortStart, val.PortEnd = getPortsRange(config) + val.Port1, val.Port2 = getPorts(config) + val.IcmpType = uint8(config.FilterIcmpType) + val.IcmpCode = uint8(config.FilterIcmpCode) + + if config.FilterPeerIP != "" { + ip := net.ParseIP(config.FilterPeerIP) + if ip.To4() != nil { + copy(val.Ip[:], ip.To4()) + } else { + copy(val.Ip[:], ip.To16()) + } + } + + switch config.FilterTCPFlags { + case "SYN": + val.TcpFlags = ebpf.BpfTcpFlagsTSYN_FLAG + case "SYN-ACK": + val.TcpFlags = ebpf.BpfTcpFlagsTSYN_ACK_FLAG + case "ACK": + val.TcpFlags = ebpf.BpfTcpFlagsTACK_FLAG + case "FIN": + val.TcpFlags = ebpf.BpfTcpFlagsTFIN_FLAG + case "RST": + val.TcpFlags = ebpf.BpfTcpFlagsTRST_FLAG + case "PUSH": + val.TcpFlags = ebpf.BpfTcpFlagsTPSH_FLAG + case "URG": + val.TcpFlags = ebpf.BpfTcpFlagsTURG_FLAG + case "ECE": + val.TcpFlags = ebpf.BpfTcpFlagsTECE_FLAG + case "CWR": + val.TcpFlags = ebpf.BpfTcpFlagsTCWR_FLAG + case "FIN-ACK": + val.TcpFlags = ebpf.BpfTcpFlagsTFIN_ACK_FLAG + case "RST-ACK": + val.TcpFlags = ebpf.BpfTcpFlagsTRST_ACK_FLAG + } + + if config.FilterDrops { + val.FilterDrops = 1 + } + + if config.FilterSample != 0 { + val.Sample = config.FilterSample + } + return val, nil +} + +func getSrcPortsRange(config *FilterConfig) (uint16, uint16) { + if config.FilterSourcePort.Type == intstr.Int { + return uint16(config.FilterSourcePort.IntVal), 0 + } + start, end, err := getPortsFromString(config.FilterSourcePort.String(), "-") + if err != nil { + return 0, 0 + } + return start, end +} + +func getSrcPorts(config *FilterConfig) (uint16, uint16) { + port1, port2, err := getPortsFromString(config.FilterSourcePort.String(), ",") + if err != nil { + return 0, 0 + } + return port1, port2 +} + +func getDstPortsRange(config *FilterConfig) (uint16, uint16) { + if config.FilterDestinationPort.Type == intstr.Int { + return uint16(config.FilterDestinationPort.IntVal), 0 + } + start, end, err := getPortsFromString(config.FilterDestinationPort.String(), "-") + if err != nil { + return 0, 0 + } + return start, end +} + +func getDstPorts(config *FilterConfig) 
(uint16, uint16) { + port1, port2, err := getPortsFromString(config.FilterDestinationPort.String(), ",") + if err != nil { + return 0, 0 + } + return port1, port2 +} + +func getPortsRange(config *FilterConfig) (uint16, uint16) { + if config.FilterPort.Type == intstr.Int { + return uint16(config.FilterPort.IntVal), 0 + } + start, end, err := getPortsFromString(config.FilterPort.String(), "-") + if err != nil { + return 0, 0 + } + return start, end +} + +func getPorts(config *FilterConfig) (uint16, uint16) { + port1, port2, err := getPortsFromString(config.FilterPort.String(), ",") + if err != nil { + return 0, 0 + } + return port1, port2 +} + +func getPortsFromString(s, sep string) (uint16, uint16, error) { + ps := strings.SplitN(s, sep, 2) + if len(ps) != 2 { + return 0, 0, fmt.Errorf("invalid ports range. Expected two integers separated by %s but found %s", sep, s) + } + startPort, err := strconv.ParseUint(ps[0], 10, 16) + if err != nil { + return 0, 0, fmt.Errorf("invalid start port number %w", err) + } + endPort, err := strconv.ParseUint(ps[1], 10, 16) + if err != nil { + return 0, 0, fmt.Errorf("invalid end port number %w", err) + } + if sep == "-" && startPort > endPort { + return 0, 0, fmt.Errorf("invalid port range. Start port is greater than end port") + } + if startPort == endPort { + return 0, 0, fmt.Errorf("invalid port range. Start and end port are equal. Remove the %s and enter a single port", sep) + } + if startPort == 0 { + return 0, 0, fmt.Errorf("invalid start port 0") + } + return uint16(startPort), uint16(endPort), nil +} + +func ConvertFilterPortsToInstr(intPort int32, rangePorts, ports string) intstr.IntOrString { + if rangePorts != "" { + return intstr.FromString(rangePorts) + } + if ports != "" { + return intstr.FromString(ports) + } + return intstr.FromInt32(intPort) +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer.go new file mode 100644 index 000000000..6b9ab6cc3 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer.go @@ -0,0 +1,1674 @@ +package tracer + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path" + "strings" + "time" + + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + "github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces" + "github.com/netobserv/netobserv-ebpf-agent/pkg/kernel" + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" + "github.com/prometheus/client_golang/prometheus" + + cilium "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/ringbuf" + "github.com/cilium/ebpf/rlimit" + "github.com/gavv/monotime" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" + kerrors "k8s.io/apimachinery/pkg/util/errors" +) + +const ( + qdiscType = "clsact" + // ebpf map names as defined in bpf/maps_definition.h + aggregatedFlowsMap = "aggregated_flows" + additionalFlowMetrics = "additional_flow_metrics" + dnsLatencyMap = "dns_flows" + // constants defined in flows.c as "volatile const" + constSampling = "sampling" + constTraceMessages = "trace_messages" + constEnableRtt = "enable_rtt" + constEnableDNSTracking = "enable_dns_tracking" + constDNSTrackingPort = "dns_port" + dnsDefaultPort = 53 + constEnableFlowFiltering = "enable_flows_filtering" + constEnableNetworkEventsMonitoring = 
"enable_network_events_monitoring" + constNetworkEventsMonitoringGroupID = "network_events_monitoring_groupid" + constEnablePktTranslation = "enable_pkt_translation_tracking" + pktDropHook = "kfree_skb" + constPcaEnable = "enable_pca" + pcaRecordsMap = "packet_record" + tcEgressFilterName = "tc/tc_egress_flow_parse" + tcIngressFilterName = "tc/tc_ingress_flow_parse" + tcpFentryHook = "tcp_rcv_fentry" + tcpRcvKprobe = "tcp_rcv_kprobe" + rhNetworkEventsMonitoringHook = "rh_psample_sample_packet" + networkEventsMonitoringHook = "psample_sample_packet" + defaultNetworkEventsGroupID = 10 +) + +var log = logrus.WithField("component", "ebpf.FlowFetcher") +var plog = logrus.WithField("component", "ebpf.PacketFetcher") + +// FlowFetcher reads and forwards the Flows from the Traffic Control hooks in the eBPF kernel space. +// It provides access both to flows that are aggregated in the kernel space (via PerfCPU hashmap) +// and to flows that are forwarded by the kernel via ringbuffer because could not be aggregated +// in the map +type FlowFetcher struct { + objects *ebpf.BpfObjects + qdiscs map[ifaces.Interface]*netlink.GenericQdisc + egressFilters map[ifaces.Interface]*netlink.BpfFilter + ingressFilters map[ifaces.Interface]*netlink.BpfFilter + ringbufReader *ringbuf.Reader + cacheMaxSize int + enableIngress bool + enableEgress bool + pktDropsTracePoint link.Link + rttFentryLink link.Link + rttKprobeLink link.Link + egressTCXLink map[ifaces.Interface]link.Link + ingressTCXLink map[ifaces.Interface]link.Link + networkEventsMonitoringLink link.Link + nfNatManIPLink link.Link + lookupAndDeleteSupported bool + useEbpfManager bool + pinDir string +} + +type FlowFetcherConfig struct { + EnableIngress bool + EnableEgress bool + Debug bool + Sampling int + CacheMaxSize int + EnablePktDrops bool + EnableDNSTracker bool + DNSTrackerPort uint16 + EnableRTT bool + EnableNetworkEventsMonitoring bool + NetworkEventsMonitoringGroupID int + EnableFlowFilter bool + EnablePCA bool + EnablePktTranslation bool + UseEbpfManager bool + BpfManBpfFSPath string + FilterConfig []*FilterConfig +} + +// nolint:golint,cyclop +func NewFlowFetcher(cfg *FlowFetcherConfig) (*FlowFetcher, error) { + var pktDropsLink, networkEventsMonitoringLink, rttFentryLink, rttKprobeLink link.Link + var nfNatManIPLink link.Link + var err error + objects := ebpf.BpfObjects{} + var pinDir string + + if !cfg.UseEbpfManager { + if err := rlimit.RemoveMemlock(); err != nil { + log.WithError(err). + Warn("can't remove mem lock. 
The agent will not be able to start eBPF programs") + } + spec, err := ebpf.LoadBpf() + if err != nil { + return nil, fmt.Errorf("loading BPF data: %w", err) + } + + // Resize maps according to user-provided configuration + spec.Maps[aggregatedFlowsMap].MaxEntries = uint32(cfg.CacheMaxSize) + if isEBPFFeaturesEnabled(cfg) { + spec.Maps[additionalFlowMetrics].MaxEntries = uint32(cfg.CacheMaxSize) + } else { + spec.Maps[additionalFlowMetrics].MaxEntries = 1 + } + // remove pinning from all maps + maps2Name := []string{"aggregated_flows", "additional_flow_metrics", "direct_flows", "dns_flows", "filter_map", "global_counters", "packet_record"} + for _, m := range maps2Name { + spec.Maps[m].Pinning = 0 + } + + traceMsgs := 0 + if cfg.Debug { + traceMsgs = 1 + } + + enableRtt := 0 + if cfg.EnableRTT { + enableRtt = 1 + } + + enableDNSTracking := 0 + dnsTrackerPort := uint16(dnsDefaultPort) + if cfg.EnableDNSTracker { + enableDNSTracking = 1 + if cfg.DNSTrackerPort != 0 { + dnsTrackerPort = cfg.DNSTrackerPort + } + } + + if enableDNSTracking == 0 { + spec.Maps[dnsLatencyMap].MaxEntries = 1 + } + + enableFlowFiltering := 0 + if cfg.EnableFlowFilter { + enableFlowFiltering = 1 + } + enableNetworkEventsMonitoring := 0 + if cfg.EnableNetworkEventsMonitoring { + enableNetworkEventsMonitoring = 1 + } + networkEventsMonitoringGroupID := defaultNetworkEventsGroupID + if cfg.NetworkEventsMonitoringGroupID > 0 { + networkEventsMonitoringGroupID = cfg.NetworkEventsMonitoringGroupID + } + enablePktTranslation := 0 + if cfg.EnablePktTranslation { + enablePktTranslation = 1 + } + if err := spec.RewriteConstants(map[string]interface{}{ + constSampling: uint32(cfg.Sampling), + constTraceMessages: uint8(traceMsgs), + constEnableRtt: uint8(enableRtt), + constEnableDNSTracking: uint8(enableDNSTracking), + constDNSTrackingPort: dnsTrackerPort, + constEnableFlowFiltering: uint8(enableFlowFiltering), + constEnableNetworkEventsMonitoring: uint8(enableNetworkEventsMonitoring), + constNetworkEventsMonitoringGroupID: uint8(networkEventsMonitoringGroupID), + constEnablePktTranslation: uint8(enablePktTranslation), + }); err != nil { + return nil, fmt.Errorf("rewriting BPF constants definition: %w", err) + } + + oldKernel := kernel.IsKernelOlderThan("5.14.0") + if oldKernel { + log.Infof("kernel older than 5.14.0 detected: not all hooks are supported") + } + rtOldKernel := kernel.IsRealTimeKernel() && kernel.IsKernelOlderThan("5.14.0-292") + if rtOldKernel { + log.Infof("kernel is realtime and older than 5.14.0-292: not all hooks are supported") + } + supportNetworkEvents := !kernel.IsKernelOlderThan("5.14.0-427") + objects, err = kernelSpecificLoadAndAssign(oldKernel, rtOldKernel, supportNetworkEvents, spec, pinDir) + if err != nil { + return nil, err + } + + log.Debugf("Deleting specs for PCA") + // Deleting specs for PCA + // Always set pcaRecordsMap to the minimum in FlowFetcher - PCA and Flow Fetcher are mutually exclusive.
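+ // (a BPF map cannot be created with zero entries, so a single entry is the effective minimum for a map that is kept but never used)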
+ spec.Maps[pcaRecordsMap].MaxEntries = 1 + + objects.TcxEgressPcaParse = nil + objects.TcIngressPcaParse = nil + delete(spec.Programs, constPcaEnable) + + if cfg.EnablePktDrops && !oldKernel && !rtOldKernel { + pktDropsLink, err = link.Tracepoint("skb", pktDropHook, objects.KfreeSkb, nil) + if err != nil { + return nil, fmt.Errorf("failed to attach the BPF program to kfree_skb tracepoint: %w", err) + } + } + + if cfg.EnableNetworkEventsMonitoring { + if supportNetworkEvents { + // Enable the following logic with RHEL9.6 when it's available + if !kernel.IsKernelOlderThan("5.16.0") { + //revive:disable + /* + networkEventsMonitoringLink, err = link.Kprobe(networkEventsMonitoringHook, objects.NetworkEventsMonitoring, nil) + if err != nil { + return nil, fmt.Errorf("failed to attach the BPF program network events monitoring kprobe: %w", err) + } + */ + } else { + log.Infof("kernel older than 5.16.0 detected: use custom network_events_monitoring hook") + networkEventsMonitoringLink, err = link.Kprobe(rhNetworkEventsMonitoringHook, objects.RhNetworkEventsMonitoring, nil) + if err != nil { + return nil, fmt.Errorf("failed to attach the BPF program network events monitoring kprobe: %w", err) + } + } + } else { + log.Infof("kernel older than 5.14.0-427 detected: it does not support network_events_monitoring hook, skipping it") + } + } + + if cfg.EnableRTT { + if !oldKernel { + rttFentryLink, err = link.AttachTracing(link.TracingOptions{ + Program: objects.BpfPrograms.TcpRcvFentry, + }) + if err == nil { + goto next + } + if err != nil { + log.Warningf("failed to attach the BPF program to tcpReceiveFentry: %v, falling back to kprobe", err) + // Fall through to use kprobe + } + } + // try to use kprobe for older kernels + if !rtOldKernel { + rttKprobeLink, err = link.Kprobe("tcp_rcv_established", objects.TcpRcvKprobe, nil) + if err != nil { + log.Warningf("failed to attach the BPF program to kprobe: %v", err) + return nil, fmt.Errorf("failed to attach the BPF program to tcpReceiveKprobe: %w", err) + } + } + } + next: + if cfg.EnablePktTranslation { + nfNatManIPLink, err = link.Kprobe("nf_nat_manip_pkt", objects.TrackNatManipPkt, nil) + if err != nil { + log.Warningf("failed to attach the BPF program to nat_manip kprobe: %v", err) + return nil, fmt.Errorf("failed to attach the BPF program to nat_manip kprobe: %w", err) + } + } + } else { + pinDir = cfg.BpfManBpfFSPath + opts := &cilium.LoadPinOptions{ + ReadOnly: false, + WriteOnly: false, + Flags: 0, + } + + log.Info("BPFManager mode: loading aggregated flows pinned maps") + mPath := path.Join(pinDir, "aggregated_flows") + objects.BpfMaps.AggregatedFlows, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", mPath, err) + } + log.Info("BPFManager mode: loading additional flow metrics pinned maps") + mPath = path.Join(pinDir, "additional_flow_metrics") + objects.BpfMaps.AdditionalFlowMetrics, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", mPath, err) + } + log.Info("BPFManager mode: loading direct flows pinned maps") + mPath = path.Join(pinDir, "direct_flows") + objects.BpfMaps.DirectFlows, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", mPath, err) + } + log.Infof("BPFManager mode: loading DNS flows pinned maps") + mPath = path.Join(pinDir, "dns_flows") + objects.BpfMaps.DnsFlows, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w",
mPath, err) + } + log.Infof("BPFManager mode: loading filter pinned maps") + mPath = path.Join(pinDir, "filter_map") + objects.BpfMaps.FilterMap, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", mPath, err) + } + log.Infof("BPFManager mode: loading global counters pinned maps") + mPath = path.Join(pinDir, "global_counters") + objects.BpfMaps.GlobalCounters, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", mPath, err) + } + log.Infof("BPFManager mode: loading packet record pinned maps") + mPath = path.Join(pinDir, "packet_record") + objects.BpfMaps.PacketRecord, err = cilium.LoadPinnedMap(mPath, opts) + if err != nil { + return nil, fmt.Errorf("failed to load %s: %w", mPath, err) + } + } + + if cfg.EnableFlowFilter { + f := NewFilter(&objects, cfg.FilterConfig) + if err := f.ProgramFilter(); err != nil { + return nil, fmt.Errorf("programming flow filter: %w", err) + } + } + + flows, err := ringbuf.NewReader(objects.BpfMaps.DirectFlows) + if err != nil { + return nil, fmt.Errorf("accessing ringbuffer: %w", err) + } + + return &FlowFetcher{ + objects: &objects, + ringbufReader: flows, + egressFilters: map[ifaces.Interface]*netlink.BpfFilter{}, + ingressFilters: map[ifaces.Interface]*netlink.BpfFilter{}, + qdiscs: map[ifaces.Interface]*netlink.GenericQdisc{}, + cacheMaxSize: cfg.CacheMaxSize, + enableIngress: cfg.EnableIngress, + enableEgress: cfg.EnableEgress, + pktDropsTracePoint: pktDropsLink, + rttFentryLink: rttFentryLink, + rttKprobeLink: rttKprobeLink, + nfNatManIPLink: nfNatManIPLink, + egressTCXLink: map[ifaces.Interface]link.Link{}, + ingressTCXLink: map[ifaces.Interface]link.Link{}, + networkEventsMonitoringLink: networkEventsMonitoringLink, + lookupAndDeleteSupported: true, // this will be turned off later if found to be unsupported + useEbpfManager: cfg.UseEbpfManager, + pinDir: pinDir, + }, nil +} + +func isEBPFFeaturesEnabled(cfg *FlowFetcherConfig) bool { + return cfg.EnableNetworkEventsMonitoring || cfg.EnableRTT || cfg.EnablePktDrops || cfg.EnableDNSTracker || cfg.EnablePktTranslation +} + +func (m *FlowFetcher) AttachTCX(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + if iface.NetNS != netns.None() { + originalNs, err := netns.Get() + if err != nil { + return fmt.Errorf("failed to get current netns: %w", err) + } + defer func() { + if err := netns.Set(originalNs); err != nil { + ilog.WithError(err).Error("failed to set netns back") + } + originalNs.Close() + }() + if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("failed to setns to %s: %w", iface.NetNS, err) + } + } + + if m.enableEgress { + egrLink, err := link.AttachTCX(link.TCXOptions{ + Program: m.objects.BpfPrograms.TcxEgressFlowParse, + Attach: cilium.AttachTCXEgress, + Interface: iface.Index, + }) + if err != nil { + if errors.Is(err, fs.ErrExist) { + // The interface already has a TCX egress hook + log.WithField("iface", iface.Name).Debug("interface already has a TCX egress hook, ignoring") + } else { + return fmt.Errorf("failed to attach TCX egress: %w", err) + } + } + m.egressTCXLink[iface] = egrLink + ilog.WithField("interface", iface.Name).Debug("successfully attached egressTCX hook") + } + + if m.enableIngress { + ingLink, err := link.AttachTCX(link.TCXOptions{ + Program: m.objects.BpfPrograms.TcxIngressFlowParse, + Attach: cilium.AttachTCXIngress, + Interface: iface.Index, + }) + if err != nil { +
if errors.Is(err, fs.ErrExist) { + // The interface already has a TCX ingress hook + log.WithField("iface", iface.Name).Debug("interface already has a TCX ingress hook, ignoring") + } else { + return fmt.Errorf("failed to attach TCX ingress: %w", err) + } + } + m.ingressTCXLink[iface] = ingLink + ilog.WithField("interface", iface.Name).Debug("successfully attached ingressTCX hook") + } + + return nil +} + +func (m *FlowFetcher) DetachTCX(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + if iface.NetNS != netns.None() { + originalNs, err := netns.Get() + if err != nil { + return fmt.Errorf("failed to get current netns: %w", err) + } + defer func() { + if err := netns.Set(originalNs); err != nil { + ilog.WithError(err).Error("failed to set netns back") + } + originalNs.Close() + }() + if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("failed to setns to %s: %w", iface.NetNS, err) + } + } + if m.enableEgress { + if l := m.egressTCXLink[iface]; l != nil { + if err := l.Close(); err != nil { + return fmt.Errorf("TCX: failed to close egress link: %w", err) + } + ilog.WithField("interface", iface.Name).Debug("successfully detached egressTCX hook") + } else { + return fmt.Errorf("egress link does not have a TCX egress hook") + } + } + + if m.enableIngress { + if l := m.ingressTCXLink[iface]; l != nil { + if err := l.Close(); err != nil { + return fmt.Errorf("TCX: failed to close ingress link: %w", err) + } + ilog.WithField("interface", iface.Name).Debug("successfully detached ingressTCX hook") + } else { + return fmt.Errorf("ingress link does not have a TCX ingress hook") + } + } + + return nil +} + +func removeTCFilters(ifName string, tcDir uint32) error { + link, err := netlink.LinkByName(ifName) + if err != nil { + return err + } + + filters, err := netlink.FilterList(link, tcDir) + if err != nil { + return err + } + var errs []error + for _, f := range filters { + if err := netlink.FilterDel(f); err != nil { + errs = append(errs, err) + } + } + + return kerrors.NewAggregate(errs) +} + +func unregister(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + ilog.Debugf("looking for previously installed TC filters on %s", iface.Name) + links, err := netlink.LinkList() + if err != nil { + return fmt.Errorf("retrieving all netlink devices: %w", err) + } + + egressDevs := []netlink.Link{} + ingressDevs := []netlink.Link{} + for _, l := range links { + if l.Attrs().Name != iface.Name { + continue + } + ingressFilters, err := netlink.FilterList(l, netlink.HANDLE_MIN_INGRESS) + if err != nil { + return fmt.Errorf("listing ingress filters: %w", err) + } + for _, filter := range ingressFilters { + if bpfFilter, ok := filter.(*netlink.BpfFilter); ok { + if strings.HasPrefix(bpfFilter.Name, tcIngressFilterName) { + ingressDevs = append(ingressDevs, l) + } + } + } + + egressFilters, err := netlink.FilterList(l, netlink.HANDLE_MIN_EGRESS) + if err != nil { + return fmt.Errorf("listing egress filters: %w", err) + } + for _, filter := range egressFilters { + if bpfFilter, ok := filter.(*netlink.BpfFilter); ok { + if strings.HasPrefix(bpfFilter.Name, tcEgressFilterName) { + egressDevs = append(egressDevs, l) + } + } + } + } + + for _, dev := range ingressDevs { + ilog.Debugf("removing ingress stale tc filters from %s", dev.Attrs().Name) + err = removeTCFilters(dev.Attrs().Name, netlink.HANDLE_MIN_INGRESS) + if err != nil { + ilog.WithError(err).Errorf("couldn't remove ingress tc filters from %s", dev.Attrs().Name) + } + } + + for _, dev
:= range egressDevs { + ilog.Debugf("removing egress stale tc filters from %s", dev.Attrs().Name) + err = removeTCFilters(dev.Attrs().Name, netlink.HANDLE_MIN_EGRESS) + if err != nil { + ilog.WithError(err).Errorf("couldn't remove egress tc filters from %s", dev.Attrs().Name) + } + } + + return nil +} + +func (m *FlowFetcher) UnRegister(iface ifaces.Interface) error { + // qdiscs, ingress and egress filters are automatically deleted so we don't need to + // specifically detach them from the ebpfFetcher + return unregister(iface) +} + +// Register attaches and links the eBPF fetcher into the system. The program should invoke UnRegister +// before exiting. +func (m *FlowFetcher) Register(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + handle, err := netlink.NewHandleAt(iface.NetNS) + if err != nil { + return fmt.Errorf("failed to create handle for netns (%s): %w", iface.NetNS.String(), err) + } + defer handle.Close() + + // Load pre-compiled programs and maps into the kernel, and rewrite the configuration + ipvlan, err := handle.LinkByIndex(iface.Index) + if err != nil { + return fmt.Errorf("failed to lookup ipvlan device %d (%s): %w", iface.Index, iface.Name, err) + } + qdiscAttrs := netlink.QdiscAttrs{ + LinkIndex: ipvlan.Attrs().Index, + Handle: netlink.MakeHandle(0xffff, 0), + Parent: netlink.HANDLE_CLSACT, + } + qdisc := &netlink.GenericQdisc{ + QdiscAttrs: qdiscAttrs, + QdiscType: qdiscType, + } + if err := handle.QdiscDel(qdisc); err == nil { + ilog.Warn("qdisc clsact already existed. Deleted it") + } + if err := handle.QdiscAdd(qdisc); err != nil { + if errors.Is(err, fs.ErrExist) { + ilog.WithError(err).Warn("qdisc clsact already exists. Ignoring") + } else { + return fmt.Errorf("failed to create clsact qdisc on %d (%s): %w", iface.Index, iface.Name, err) + } + } + m.qdiscs[iface] = qdisc + + // Remove previously installed filters + if err := unregister(iface); err != nil { + return fmt.Errorf("failed to remove previous filters: %w", err) + } + + if err := m.registerEgress(iface, ipvlan, handle); err != nil { + return err + } + + return m.registerIngress(iface, ipvlan, handle) +} + +func (m *FlowFetcher) registerEgress(iface ifaces.Interface, ipvlan netlink.Link, handle *netlink.Handle) error { + ilog := log.WithField("iface", iface) + if !m.enableEgress { + ilog.Debug("ignoring egress traffic, according to user configuration") + return nil + } + // Fetch events on egress + egressAttrs := netlink.FilterAttrs{ + LinkIndex: ipvlan.Attrs().Index, + Parent: netlink.HANDLE_MIN_EGRESS, + Handle: netlink.MakeHandle(0, 1), + Protocol: 3, + Priority: 1, + } + egressFilter := &netlink.BpfFilter{ + FilterAttrs: egressAttrs, + Fd: m.objects.TcEgressFlowParse.FD(), + Name: tcEgressFilterName, + DirectAction: true, + } + if err := handle.FilterDel(egressFilter); err == nil { + ilog.Warn("egress filter already existed. Deleted it") + } + if err := handle.FilterAdd(egressFilter); err != nil { + if errors.Is(err, fs.ErrExist) { + ilog.WithError(err).Warn("egress filter already exists.
Ignoring") + } else { + return fmt.Errorf("failed to create egress filter: %w", err) + } + } + m.egressFilters[iface] = egressFilter + return nil +} + +func (m *FlowFetcher) registerIngress(iface ifaces.Interface, ipvlan netlink.Link, handle *netlink.Handle) error { + ilog := log.WithField("iface", iface) + if !m.enableIngress { + ilog.Debug("ignoring ingress traffic, according to user configuration") + return nil + } + // Fetch events on ingress + ingressAttrs := netlink.FilterAttrs{ + LinkIndex: ipvlan.Attrs().Index, + Parent: netlink.HANDLE_MIN_INGRESS, + Handle: netlink.MakeHandle(0, 1), + Protocol: unix.ETH_P_ALL, + Priority: 1, + } + ingressFilter := &netlink.BpfFilter{ + FilterAttrs: ingressAttrs, + Fd: m.objects.TcIngressFlowParse.FD(), + Name: tcIngressFilterName, + DirectAction: true, + } + if err := handle.FilterDel(ingressFilter); err == nil { + ilog.Warn("ingress filter already existed. Deleted it") + } + if err := handle.FilterAdd(ingressFilter); err != nil { + if errors.Is(err, fs.ErrExist) { + ilog.WithError(err).Warn("ingress filter already exists. Ignoring") + } else { + return fmt.Errorf("failed to create ingress filter: %w", err) + } + } + m.ingressFilters[iface] = ingressFilter + return nil +} + +// Close the eBPF fetcher from the system. +// We don't need a "Close(iface)" method because the filters and qdiscs +// are automatically removed when the interface is down +// nolint:cyclop +func (m *FlowFetcher) Close() error { + log.Debug("unregistering eBPF objects") + + var errs []error + + if m.pktDropsTracePoint != nil { + if err := m.pktDropsTracePoint.Close(); err != nil { + errs = append(errs, err) + } + } + if m.rttFentryLink != nil { + if err := m.rttFentryLink.Close(); err != nil { + errs = append(errs, err) + } + } + if m.rttKprobeLink != nil { + if err := m.rttKprobeLink.Close(); err != nil { + errs = append(errs, err) + } + } + if m.networkEventsMonitoringLink != nil { + if err := m.networkEventsMonitoringLink.Close(); err != nil { + errs = append(errs, err) + } + } + if m.nfNatManIPLink != nil { + if err := m.nfNatManIPLink.Close(); err != nil { + errs = append(errs, err) + } + } + // m.ringbufReader.Read is a blocking operation, so we need to close the ring buffer + // from another goroutine to avoid the system not being able to exit if there + // isn't traffic in a given interface + if m.ringbufReader != nil { + if err := m.ringbufReader.Close(); err != nil { + errs = append(errs, err) + } + } + if m.objects != nil { + if err := m.objects.TcEgressFlowParse.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.TcIngressFlowParse.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.TcxEgressFlowParse.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.TcxIngressFlowParse.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.AggregatedFlows.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.DirectFlows.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.DnsFlows.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.GlobalCounters.Close(); err != nil { + errs = append(errs, err) + } + if err := m.objects.FilterMap.Close(); err != nil { + errs = append(errs, err) + } + if len(errs) == 0 { + m.objects = nil + } + } + + for iface, ef := range m.egressFilters { + log := log.WithField("interface", iface) + log.Debug("deleting egress filter") + if err := doIgnoreNoDev(netlink.FilterDel, netlink.Filter(ef), log); err 
!= nil { + errs = append(errs, fmt.Errorf("deleting egress filter: %w", err)) + } + } + m.egressFilters = map[ifaces.Interface]*netlink.BpfFilter{} + for iface, igf := range m.ingressFilters { + log := log.WithField("interface", iface) + log.Debug("deleting ingress filter") + if err := doIgnoreNoDev(netlink.FilterDel, netlink.Filter(igf), log); err != nil { + errs = append(errs, fmt.Errorf("deleting ingress filter: %w", err)) + } + } + m.ingressFilters = map[ifaces.Interface]*netlink.BpfFilter{} + for iface, qd := range m.qdiscs { + log := log.WithField("interface", iface) + log.Debug("deleting Qdisc") + if err := doIgnoreNoDev(netlink.QdiscDel, netlink.Qdisc(qd), log); err != nil { + errs = append(errs, fmt.Errorf("deleting qdisc: %w", err)) + } + } + m.qdiscs = map[ifaces.Interface]*netlink.GenericQdisc{} + + for iface, l := range m.egressTCXLink { + log := log.WithField("interface", iface) + log.Debug("detaching egress TCX hook") + l.Close() + } + m.egressTCXLink = map[ifaces.Interface]link.Link{} + for iface, l := range m.ingressTCXLink { + log := log.WithField("interface", iface) + log.Debug("detaching ingress TCX hook") + l.Close() + } + m.ingressTCXLink = map[ifaces.Interface]link.Link{} + + if !m.useEbpfManager { + if err := m.removeAllPins(); err != nil { + errs = append(errs, err) + } + } + + if len(errs) == 0 { + return nil + } + + var errStrings []string + for _, err := range errs { + errStrings = append(errStrings, err.Error()) + } + return errors.New(`errors: "` + strings.Join(errStrings, `", "`) + `"`) +} + +// removeAllPins removes all pins. +func (m *FlowFetcher) removeAllPins() error { + files, err := os.ReadDir(m.pinDir) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + for _, file := range files { + if err := os.Remove(path.Join(m.pinDir, file.Name())); err != nil && !os.IsNotExist(err) { + return err + } + } + if err := os.Remove(m.pinDir); err != nil { + return err + } + return nil +} + +// doIgnoreNoDev runs the provided syscall over the provided device and ignores the error +// if the cause is a non-existing device (it just logs the error). +// If the agent is deployed as part of the Network Observability pipeline, normally +// undeploying the FlowCollector could cause the agent to try to remove resources +// from Pods that have been removed immediately before (e.g. flowlogs-pipeline or the +// console plugin), so we avoid logging some errors that would unnecessarily raise the +// user's attention. +// This function uses generics because the set of provided functions accept different argument +// types. +func doIgnoreNoDev[T any](sysCall func(T) error, dev T, log *logrus.Entry) error { + if err := sysCall(dev); err != nil { + if errors.Is(err, unix.ENODEV) { + log.WithError(err).Error("can't delete. Ignore this error if other pods or interfaces " + + " are also being deleted at this moment. For example, if you are undeploying " + + " a FlowCollector or a Deployment that this agent is part of") + } else { + return err + } + } + return nil +} + +func (m *FlowFetcher) ReadRingBuf() (ringbuf.Record, error) { + return m.ringbufReader.Read() +} + +// LookupAndDeleteMap reads all the entries from the eBPF map and removes them from it.
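+// It collects all the flow ids first, then removes each id with an atomic LookupAndDelete, so entries inserted while the scrape is running are picked up on the next iteration rather than lost.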
+// TODO: detect whether BatchLookupAndDelete is supported (Kernel>=5.6) and use it selectively +// Supported Lookup/Delete operations by kernel: https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md +func (m *FlowFetcher) LookupAndDeleteMap(met *metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent { + if !m.lookupAndDeleteSupported { + return m.legacyLookupAndDeleteMap(met) + } + + flowMap := m.objects.AggregatedFlows + + iterator := flowMap.Iterate() + var flows = make(map[ebpf.BpfFlowId]model.BpfFlowContent, m.cacheMaxSize) + var ids []ebpf.BpfFlowId + var id ebpf.BpfFlowId + var baseMetrics ebpf.BpfFlowMetrics + var additionalMetrics []ebpf.BpfAdditionalMetrics + + // First, get all ids and don't care about metrics (we need lookup+delete to be atomic) + for iterator.Next(&id, &baseMetrics) { + ids = append(ids, id) + } + + count := 0 + // Run the atomic Lookup+Delete; if new ids have been inserted in the meantime, they'll be fetched next time + for i, id := range ids { + count++ + if err := flowMap.LookupAndDelete(&id, &baseMetrics); err != nil { + if i == 0 && errors.Is(err, cilium.ErrNotSupported) { + log.WithError(err).Warnf("switching to legacy mode") + m.lookupAndDeleteSupported = false + return m.legacyLookupAndDeleteMap(met) + } + log.WithError(err).WithField("flowId", id).Warnf("couldn't lookup/delete flow entry") + met.Errors.WithErrorName("flow-fetcher", "CannotDeleteFlows").Inc() + continue + } + flowPayload := model.BpfFlowContent{BpfFlowMetrics: &ebpf.BpfFlowMetrics{}} + flowPayload.AccumulateBase(&baseMetrics) + + // Fetch additional metrics; ids are without direction and interface + shorterID := id + shorterID.Direction = 0 + shorterID.IfIndex = 0 + if err := m.objects.AdditionalFlowMetrics.LookupAndDelete(&shorterID, &additionalMetrics); err != nil { + if !errors.Is(err, cilium.ErrKeyNotExist) { + log.WithError(err).WithField("flowId", id).Warnf("couldn't lookup/delete additional metrics entry") + met.Errors.WithErrorName("flow-fetcher", "CannotDeleteAdditionalMetric").Inc() + } + } else { + for i := range additionalMetrics { + flowPayload.AccumulateAdditional(&additionalMetrics[i]) + } + } + flows[id] = flowPayload + } + met.BufferSizeGauge.WithBufferName("hashmap-total").Set(float64(count)) + met.BufferSizeGauge.WithBufferName("hashmap-unique").Set(float64(len(flows))) + + m.ReadGlobalCounter(met) + return flows +} + +// ReadGlobalCounter reads the global counter and updates drop flows counter metrics +func (m *FlowFetcher) ReadGlobalCounter(met *metrics.Metrics) { + var allCPUValue []uint32 + globalCounters := map[ebpf.BpfGlobalCountersKeyT]prometheus.Counter{ + ebpf.BpfGlobalCountersKeyTHASHMAP_FLOWS_DROPPED: met.DroppedFlowsCounter.WithSourceAndReason("flow-fetcher", "CannotUpdateFlowsHashMap"), + ebpf.BpfGlobalCountersKeyTHASHMAP_FAIL_UPDATE_DNS: met.DroppedFlowsCounter.WithSourceAndReason("flow-fetcher", "CannotUpdateDNSHashMap"), + ebpf.BpfGlobalCountersKeyTFILTER_REJECT: met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", "FilterReject"), + ebpf.BpfGlobalCountersKeyTFILTER_ACCEPT: met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", "FilterAccept"), + ebpf.BpfGlobalCountersKeyTFILTER_NOMATCH: met.FilteredFlowsCounter.WithSourceAndReason("flow-filtering", "FilterNoMatch"), + ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_ERR: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsErrors"), + ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_GROUPID_MISMATCH: 
met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsErrorsGroupIDMismatch"), + ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_ERR_UPDATE_MAP_FLOWS: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsErrorsFlowMapUpdate"), + ebpf.BpfGlobalCountersKeyTNETWORK_EVENTS_GOOD: met.NetworkEventsCounter.WithSourceAndReason("network-events", "NetworkEventsGoodEvent"), + } + zeroCounters := make([]uint32, cilium.MustPossibleCPU()) + for key := ebpf.BpfGlobalCountersKeyT(0); key < ebpf.BpfGlobalCountersKeyTMAX_COUNTERS; key++ { + if err := m.objects.GlobalCounters.Lookup(key, &allCPUValue); err != nil { + log.WithError(err).Warnf("couldn't read global counter") + return + } + metric := globalCounters[key] + if metric != nil { + // aggregate all the counters + for _, counter := range allCPUValue { + metric.Add(float64(counter)) + } + } + // reset the global counter-map entry + if err := m.objects.GlobalCounters.Put(key, zeroCounters); err != nil { + log.WithError(err).Warnf("couldn't reset global counter") + return + } + } +} + +// DeleteMapsStaleEntries looks for any stale entries in the features maps and deletes them +func (m *FlowFetcher) DeleteMapsStaleEntries(timeOut time.Duration) { + m.lookupAndDeleteDNSMap(timeOut) +} + +// lookupAndDeleteDNSMap iterates over the DNS queries map and deletes any stale DNS request +// entries that never got a response. +func (m *FlowFetcher) lookupAndDeleteDNSMap(timeOut time.Duration) { + monotonicTimeNow := monotime.Now() + dnsMap := m.objects.DnsFlows + var dnsKey ebpf.BpfDnsFlowId + var keysToDelete []ebpf.BpfDnsFlowId + var dnsVal uint64 + + if dnsMap != nil { + // Ideally the Lookup + Delete should be atomic, however we cannot use LookupAndDelete since the deletion is conditional + // Do not delete while iterating, as it causes severe performance degradation + iterator := dnsMap.Iterate() + for iterator.Next(&dnsKey, &dnsVal) { + if time.Duration(uint64(monotonicTimeNow)-dnsVal) >= timeOut { + keysToDelete = append(keysToDelete, dnsKey) + } + } + for _, dnsKey = range keysToDelete { + if err := dnsMap.Delete(dnsKey); err != nil { + log.WithError(err).WithField("dnsKey", dnsKey).Warnf("couldn't delete DNS record entry") + } + } + } +} + +// kernelSpecificLoadAndAssign loads only the eBPF hooks supported by the given kernel version +func kernelSpecificLoadAndAssign(oldKernel, rtKernel, supportNetworkEvents bool, spec *cilium.CollectionSpec, pinDir string) (ebpf.BpfObjects, error) { + objects := ebpf.BpfObjects{} + + // Helper to remove common hooks + removeCommonHooks := func() { + delete(spec.Programs, pktDropHook) + delete(spec.Programs, rhNetworkEventsMonitoringHook) + } + + // Helper to load and assign BPF objects + loadAndAssign := func(objects interface{}) error { + if err := spec.LoadAndAssign(objects, &cilium.CollectionOptions{Maps: cilium.MapOptions{PinPath: pinDir}}); err != nil { + var ve *cilium.VerifierError + if errors.As(err, &ve) { + log.Infof("Verifier error: %+v", ve) + } + return fmt.Errorf("loading and assigning BPF objects: %w", err) + } + return nil + } + + // Configure BPF programs based on the kernel type + switch { + case oldKernel && rtKernel: + type newBpfPrograms struct { + TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"` + TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"` + TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"` + TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"` + TcEgressPcaParse *cilium.Program
`ebpf:"tc_egress_pca_parse"` + TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"` + TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"` + TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"` + } + type newBpfObjects struct { + newBpfPrograms + ebpf.BpfMaps + } + var newObjects newBpfObjects + removeCommonHooks() + delete(spec.Programs, tcpRcvKprobe) + delete(spec.Programs, tcpFentryHook) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = ebpf.BpfObjects{ + BpfPrograms: ebpf.BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TrackNatManipPkt: newObjects.TrackNatManipPkt, + TcpRcvKprobe: nil, + TcpRcvFentry: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: ebpf.BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + case oldKernel: + type newBpfPrograms struct { + TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"` + TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"` + TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"` + TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"` + TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"` + TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"` + TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"` + TCPRcvKprobe *cilium.Program `ebpf:"tcp_rcv_kprobe"` + TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"` + } + type newBpfObjects struct { + newBpfPrograms + ebpf.BpfMaps + } + var newObjects newBpfObjects + removeCommonHooks() + delete(spec.Programs, tcpFentryHook) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = ebpf.BpfObjects{ + BpfPrograms: ebpf.BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvKprobe: newObjects.TCPRcvKprobe, + TrackNatManipPkt: newObjects.TrackNatManipPkt, + TcpRcvFentry: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: ebpf.BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + case rtKernel: + type newBpfPrograms struct { + TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"` + TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"` + TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"` + TcxIngressFlowParse *cilium.Program 
`ebpf:"tcx_ingress_flow_parse"` + TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"` + TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"` + TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"` + TCPRcvFentry *cilium.Program `ebpf:"tcp_rcv_fentry"` + TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"` + } + type newBpfObjects struct { + newBpfPrograms + ebpf.BpfMaps + } + var newObjects newBpfObjects + removeCommonHooks() + delete(spec.Programs, tcpRcvKprobe) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = ebpf.BpfObjects{ + BpfPrograms: ebpf.BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvFentry: newObjects.TCPRcvFentry, + TrackNatManipPkt: newObjects.TrackNatManipPkt, + TcpRcvKprobe: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: ebpf.BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + case !supportNetworkEvents: + type newBpfPrograms struct { + TcEgressFlowParse *cilium.Program `ebpf:"tc_egress_flow_parse"` + TcIngressFlowParse *cilium.Program `ebpf:"tc_ingress_flow_parse"` + TcxEgressFlowParse *cilium.Program `ebpf:"tcx_egress_flow_parse"` + TcxIngressFlowParse *cilium.Program `ebpf:"tcx_ingress_flow_parse"` + TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"` + TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"` + TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"` + TCPRcvFentry *cilium.Program `ebpf:"tcp_rcv_fentry"` + TCPRcvKprobe *cilium.Program `ebpf:"tcp_rcv_kprobe"` + KfreeSkb *cilium.Program `ebpf:"kfree_skb"` + TrackNatManipPkt *cilium.Program `ebpf:"track_nat_manip_pkt"` + } + type newBpfObjects struct { + newBpfPrograms + ebpf.BpfMaps + } + var newObjects newBpfObjects + delete(spec.Programs, rhNetworkEventsMonitoringHook) + + if err := loadAndAssign(&newObjects); err != nil { + return objects, err + } + + objects = ebpf.BpfObjects{ + BpfPrograms: ebpf.BpfPrograms{ + TcEgressFlowParse: newObjects.TcEgressFlowParse, + TcIngressFlowParse: newObjects.TcIngressFlowParse, + TcxEgressFlowParse: newObjects.TcxEgressFlowParse, + TcxIngressFlowParse: newObjects.TcxIngressFlowParse, + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcpRcvFentry: newObjects.TCPRcvFentry, + TcpRcvKprobe: newObjects.TCPRcvKprobe, + KfreeSkb: newObjects.KfreeSkb, + TrackNatManipPkt: newObjects.TrackNatManipPkt, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: ebpf.BpfMaps{ + DirectFlows: newObjects.DirectFlows, + AggregatedFlows: newObjects.AggregatedFlows, + DnsFlows: newObjects.DnsFlows, + FilterMap: newObjects.FilterMap, + GlobalCounters: newObjects.GlobalCounters, + }, + } + + default: + if err := 
loadAndAssign(&objects); err != nil { + return objects, err + } + } + + // Release cached kernel BTF memory + btf.FlushKernelSpec() + + return objects, nil +} + +// PacketFetcher provides access to packets from kernel space (via a per-CPU perf event array) +type PacketFetcher struct { + objects *ebpf.BpfObjects + qdiscs map[ifaces.Interface]*netlink.GenericQdisc + egressFilters map[ifaces.Interface]*netlink.BpfFilter + ingressFilters map[ifaces.Interface]*netlink.BpfFilter + perfReader *perf.Reader + cacheMaxSize int + enableIngress bool + enableEgress bool + egressTCXLink map[ifaces.Interface]link.Link + ingressTCXLink map[ifaces.Interface]link.Link + lookupAndDeleteSupported bool +} + +func NewPacketFetcher(cfg *FlowFetcherConfig) (*PacketFetcher, error) { + if err := rlimit.RemoveMemlock(); err != nil { + log.WithError(err). + Warn("can't remove memory lock. The agent might not be able to start eBPF programs") + } + + objects := ebpf.BpfObjects{} + spec, err := ebpf.LoadBpf() + if err != nil { + return nil, err + } + pcaEnable := 0 + if cfg.EnablePCA { + pcaEnable = 1 + } + + if err := spec.RewriteConstants(map[string]interface{}{ + constSampling: uint32(cfg.Sampling), + constPcaEnable: uint8(pcaEnable), + }); err != nil { + return nil, fmt.Errorf("rewriting BPF constants definition: %w", err) + } + + type pcaBpfPrograms struct { + TcEgressPcaParse *cilium.Program `ebpf:"tc_egress_pca_parse"` + TcIngressPcaParse *cilium.Program `ebpf:"tc_ingress_pca_parse"` + TcxEgressPcaParse *cilium.Program `ebpf:"tcx_egress_pca_parse"` + TcxIngressPcaParse *cilium.Program `ebpf:"tcx_ingress_pca_parse"` + } + type newBpfObjects struct { + pcaBpfPrograms + ebpf.BpfMaps + } + var newObjects newBpfObjects + delete(spec.Programs, pktDropHook) + delete(spec.Programs, rhNetworkEventsMonitoringHook) + delete(spec.Programs, tcpRcvKprobe) + delete(spec.Programs, tcpFentryHook) + delete(spec.Programs, aggregatedFlowsMap) + delete(spec.Programs, additionalFlowMetrics) + delete(spec.Programs, constSampling) + delete(spec.Programs, constTraceMessages) + delete(spec.Programs, constEnableDNSTracking) + delete(spec.Programs, constDNSTrackingPort) + delete(spec.Programs, constEnableRtt) + delete(spec.Programs, constEnableFlowFiltering) + delete(spec.Programs, constEnableNetworkEventsMonitoring) + delete(spec.Programs, constNetworkEventsMonitoringGroupID) + + if err := spec.LoadAndAssign(&newObjects, nil); err != nil { + var ve *cilium.VerifierError + if errors.As(err, &ve) { + // Using %+v will print the whole verifier error, not just the last + // few lines.
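+ // A hedged illustration of the difference (ve is the *cilium.VerifierError + // unwrapped above; the exact truncation length is a cilium/ebpf detail): + // fmt.Sprintf("%v", ve) // message plus a truncated tail of the verifier log + // fmt.Sprintf("%+v", ve) // message plus every verifier log line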
+ plog.Infof("Verifier error: %+v", ve) + } + return nil, fmt.Errorf("loading and assigning BPF objects: %w", err) + } + + objects = ebpf.BpfObjects{ + BpfPrograms: ebpf.BpfPrograms{ + TcEgressPcaParse: newObjects.TcEgressPcaParse, + TcIngressPcaParse: newObjects.TcIngressPcaParse, + TcxEgressPcaParse: newObjects.TcxEgressPcaParse, + TcxIngressPcaParse: newObjects.TcxIngressPcaParse, + TcEgressFlowParse: nil, + TcIngressFlowParse: nil, + TcxEgressFlowParse: nil, + TcxIngressFlowParse: nil, + TcpRcvFentry: nil, + TcpRcvKprobe: nil, + KfreeSkb: nil, + RhNetworkEventsMonitoring: nil, + }, + BpfMaps: ebpf.BpfMaps{ + PacketRecord: newObjects.PacketRecord, + FilterMap: newObjects.FilterMap, + }, + } + + f := NewFilter(&objects, cfg.FilterConfig) + if err := f.ProgramFilter(); err != nil { + return nil, fmt.Errorf("programming flow filter: %w", err) + } + + // read packets from igress+egress perf array + packets, err := perf.NewReader(objects.PacketRecord, os.Getpagesize()) + if err != nil { + return nil, fmt.Errorf("accessing to perf: %w", err) + } + + return &PacketFetcher{ + objects: &objects, + perfReader: packets, + egressFilters: map[ifaces.Interface]*netlink.BpfFilter{}, + ingressFilters: map[ifaces.Interface]*netlink.BpfFilter{}, + qdiscs: map[ifaces.Interface]*netlink.GenericQdisc{}, + cacheMaxSize: cfg.CacheMaxSize, + enableIngress: cfg.EnableIngress, + enableEgress: cfg.EnableEgress, + egressTCXLink: map[ifaces.Interface]link.Link{}, + ingressTCXLink: map[ifaces.Interface]link.Link{}, + lookupAndDeleteSupported: true, // this will be turned off later if found to be not supported + }, nil +} + +func registerInterface(iface ifaces.Interface) (*netlink.GenericQdisc, netlink.Link, error) { + ilog := plog.WithField("iface", iface) + handle, err := netlink.NewHandleAt(iface.NetNS) + if err != nil { + return nil, nil, fmt.Errorf("failed to create handle for netns (%s): %w", iface.NetNS.String(), err) + } + defer handle.Close() + + // Load pre-compiled programs and maps into the kernel, and rewrites the configuration + ipvlan, err := handle.LinkByIndex(iface.Index) + if err != nil { + return nil, nil, fmt.Errorf("failed to lookup ipvlan device %d (%s): %w", iface.Index, iface.Name, err) + } + qdiscAttrs := netlink.QdiscAttrs{ + LinkIndex: ipvlan.Attrs().Index, + Handle: netlink.MakeHandle(0xffff, 0), + Parent: netlink.HANDLE_CLSACT, + } + qdisc := &netlink.GenericQdisc{ + QdiscAttrs: qdiscAttrs, + QdiscType: qdiscType, + } + if err := handle.QdiscDel(qdisc); err == nil { + ilog.Warn("qdisc clsact already existed. Deleted it") + } + if err := handle.QdiscAdd(qdisc); err != nil { + if errors.Is(err, fs.ErrExist) { + ilog.WithError(err).Warn("qdisc clsact already exists. 
Ignoring") + } else { + return nil, nil, fmt.Errorf("failed to create clsact qdisc on %d (%s): %w", iface.Index, iface.Name, err) + } + } + return qdisc, ipvlan, nil +} + +func (p *PacketFetcher) UnRegister(iface ifaces.Interface) error { + // qdiscs, ingress and egress filters are automatically deleted so we don't need to + // specifically detach them from the ebpfFetcher + return unregister(iface) +} + +func (p *PacketFetcher) Register(iface ifaces.Interface) error { + qdisc, ipvlan, err := registerInterface(iface) + if err != nil { + return err + } + p.qdiscs[iface] = qdisc + + if err := p.registerEgress(iface, ipvlan); err != nil { + return err + } + return p.registerIngress(iface, ipvlan) +} + +func (p *PacketFetcher) DetachTCX(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + if iface.NetNS != netns.None() { + originalNs, err := netns.Get() + if err != nil { + return fmt.Errorf("PCA failed to get current netns: %w", err) + } + defer func() { + if err := netns.Set(originalNs); err != nil { + ilog.WithError(err).Error("PCA failed to set netns back") + } + originalNs.Close() + }() + if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("PCA failed to setns to %s: %w", iface.NetNS, err) + } + } + if p.enableEgress { + if l := p.egressTCXLink[iface]; l != nil { + if err := l.Close(); err != nil { + return fmt.Errorf("TCX: failed to close egress link: %w", err) + } + ilog.WithField("interface", iface.Name).Debug("successfully detach egressTCX hook") + } else { + return fmt.Errorf("egress link does not support TCX hook") + } + } + + if p.enableIngress { + if l := p.ingressTCXLink[iface]; l != nil { + if err := l.Close(); err != nil { + return fmt.Errorf("TCX: failed to close ingress link: %w", err) + } + ilog.WithField("interface", iface.Name).Debug("successfully detach ingressTCX hook") + } else { + return fmt.Errorf("ingress link does not support TCX hook") + } + } + return nil +} + +func (p *PacketFetcher) AttachTCX(iface ifaces.Interface) error { + ilog := log.WithField("iface", iface) + if iface.NetNS != netns.None() { + originalNs, err := netns.Get() + if err != nil { + return fmt.Errorf("PCA failed to get current netns: %w", err) + } + defer func() { + if err := netns.Set(originalNs); err != nil { + ilog.WithError(err).Error("PCA failed to set netns back") + } + originalNs.Close() + }() + if err := unix.Setns(int(iface.NetNS), unix.CLONE_NEWNET); err != nil { + return fmt.Errorf("PCA failed to setns to %s: %w", iface.NetNS, err) + } + } + + if p.enableEgress { + egrLink, err := link.AttachTCX(link.TCXOptions{ + Program: p.objects.BpfPrograms.TcxEgressPcaParse, + Attach: cilium.AttachTCXEgress, + Interface: iface.Index, + }) + if err != nil { + if errors.Is(err, fs.ErrExist) { + // The interface already has a TCX egress hook + log.WithField("iface", iface.Name).Debug("interface already has a TCX PCA egress hook ignore") + } else { + return fmt.Errorf("failed to attach PCA TCX egress: %w", err) + } + } + p.egressTCXLink[iface] = egrLink + ilog.WithField("interface", iface.Name).Debug("successfully attach PCA egressTCX hook") + } + + if p.enableIngress { + ingLink, err := link.AttachTCX(link.TCXOptions{ + Program: p.objects.BpfPrograms.TcxIngressPcaParse, + Attach: cilium.AttachTCXIngress, + Interface: iface.Index, + }) + if err != nil { + if errors.Is(err, fs.ErrExist) { + // The interface already has a TCX ingress hook + log.WithField("iface", iface.Name).Debug("interface already has a TCX PCA ingress hook ignore") + } else 
{ + return fmt.Errorf("failed to attach PCA TCX ingress: %w", err) + } + } + p.ingressTCXLink[iface] = ingLink + ilog.WithField("interface", iface.Name).Debug("successfully attach PCA ingressTCX hook") + } + + return nil +} + +func fetchEgressEvents(iface ifaces.Interface, ipvlan netlink.Link, parser *cilium.Program, name string) (*netlink.BpfFilter, error) { + ilog := plog.WithField("iface", iface) + egressAttrs := netlink.FilterAttrs{ + LinkIndex: ipvlan.Attrs().Index, + Parent: netlink.HANDLE_MIN_EGRESS, + Handle: netlink.MakeHandle(0, 1), + Protocol: 3, + Priority: 1, + } + egressFilter := &netlink.BpfFilter{ + FilterAttrs: egressAttrs, + Fd: parser.FD(), + Name: "tc/" + name, + DirectAction: true, + } + if err := netlink.FilterDel(egressFilter); err == nil { + ilog.Warn("egress filter already existed. Deleted it") + } + if err := netlink.FilterAdd(egressFilter); err != nil { + if errors.Is(err, fs.ErrExist) { + ilog.WithError(err).Warn("egress filter already exists. Ignoring") + } else { + return nil, fmt.Errorf("failed to create egress filter: %w", err) + } + } + return egressFilter, nil + +} + +func (p *PacketFetcher) registerEgress(iface ifaces.Interface, ipvlan netlink.Link) error { + egressFilter, err := fetchEgressEvents(iface, ipvlan, p.objects.TcEgressPcaParse, "tc_egress_pca_parse") + if err != nil { + return err + } + + p.egressFilters[iface] = egressFilter + return nil +} + +func fetchIngressEvents(iface ifaces.Interface, ipvlan netlink.Link, parser *cilium.Program, name string) (*netlink.BpfFilter, error) { + ilog := plog.WithField("iface", iface) + ingressAttrs := netlink.FilterAttrs{ + LinkIndex: ipvlan.Attrs().Index, + Parent: netlink.HANDLE_MIN_INGRESS, + Handle: netlink.MakeHandle(0, 1), + Protocol: 3, + Priority: 1, + } + ingressFilter := &netlink.BpfFilter{ + FilterAttrs: ingressAttrs, + Fd: parser.FD(), + Name: "tc/" + name, + DirectAction: true, + } + if err := netlink.FilterDel(ingressFilter); err == nil { + ilog.Warn("egress filter already existed. Deleted it") + } + if err := netlink.FilterAdd(ingressFilter); err != nil { + if errors.Is(err, fs.ErrExist) { + ilog.WithError(err).Warn("ingress filter already exists. Ignoring") + } else { + return nil, fmt.Errorf("failed to create egress filter: %w", err) + } + } + return ingressFilter, nil + +} + +func (p *PacketFetcher) registerIngress(iface ifaces.Interface, ipvlan netlink.Link) error { + ingressFilter, err := fetchIngressEvents(iface, ipvlan, p.objects.TcIngressPcaParse, "tc_ingress_pca_parse") + if err != nil { + return err + } + + p.ingressFilters[iface] = ingressFilter + return nil +} + +// Close the eBPF fetcher from the system. 
+// We don't need an "Close(iface)" method because the filters and qdiscs +// are automatically removed when the interface is down +func (p *PacketFetcher) Close() error { + log.Debug("unregistering eBPF objects") + + var errs []error + if p.perfReader != nil { + if err := p.perfReader.Close(); err != nil { + errs = append(errs, err) + } + } + if p.objects != nil { + if err := p.objects.TcEgressPcaParse.Close(); err != nil { + errs = append(errs, err) + } + if err := p.objects.TcIngressPcaParse.Close(); err != nil { + errs = append(errs, err) + } + if err := p.objects.TcxEgressPcaParse.Close(); err != nil { + errs = append(errs, err) + } + if err := p.objects.TcxIngressPcaParse.Close(); err != nil { + errs = append(errs, err) + } + if err := p.objects.PacketRecord.Close(); err != nil { + errs = append(errs, err) + } + p.objects = nil + } + for iface, ef := range p.egressFilters { + log.WithField("interface", iface).Debug("deleting egress filter") + if err := netlink.FilterDel(ef); err != nil { + errs = append(errs, fmt.Errorf("deleting egress filter: %w", err)) + } + } + p.egressFilters = map[ifaces.Interface]*netlink.BpfFilter{} + for iface, igf := range p.ingressFilters { + log.WithField("interface", iface).Debug("deleting ingress filter") + if err := netlink.FilterDel(igf); err != nil { + errs = append(errs, fmt.Errorf("deleting ingress filter: %w", err)) + } + } + p.ingressFilters = map[ifaces.Interface]*netlink.BpfFilter{} + for iface, qd := range p.qdiscs { + log.WithField("interface", iface).Debug("deleting Qdisc") + if err := netlink.QdiscDel(qd); err != nil { + errs = append(errs, fmt.Errorf("deleting qdisc: %w", err)) + } + } + p.qdiscs = map[ifaces.Interface]*netlink.GenericQdisc{} + if len(errs) == 0 { + return nil + } + + for iface, l := range p.egressTCXLink { + log := log.WithField("interface", iface) + log.Debug("detach egress TCX hook") + l.Close() + + } + p.egressTCXLink = map[ifaces.Interface]link.Link{} + for iface, l := range p.ingressTCXLink { + log := log.WithField("interface", iface) + log.Debug("detach ingress TCX hook") + l.Close() + } + p.ingressTCXLink = map[ifaces.Interface]link.Link{} + + var errStrings []string + for _, err := range errs { + errStrings = append(errStrings, err.Error()) + } + return errors.New(`errors: "` + strings.Join(errStrings, `", "`) + `"`) +} + +func (p *PacketFetcher) ReadPerf() (perf.Record, error) { + return p.perfReader.Read() +} + +func (p *PacketFetcher) LookupAndDeleteMap(met *metrics.Metrics) map[int][]*byte { + if !p.lookupAndDeleteSupported { + return p.legacyLookupAndDeleteMap(met) + } + + packetMap := p.objects.PacketRecord + iterator := packetMap.Iterate() + packets := make(map[int][]*byte, p.cacheMaxSize) + var id int + var ids []int + var packet []*byte + + // First, get all ids and ignore content (we need lookup+delete to be atomic) + for iterator.Next(&id, &packet) { + ids = append(ids, id) + } + + // Run the atomic Lookup+Delete; if new ids have been inserted in the meantime, they'll be fetched next time + for i, id := range ids { + if err := packetMap.LookupAndDelete(&id, &packet); err != nil { + if i == 0 && errors.Is(err, cilium.ErrNotSupported) { + log.WithError(err).Warnf("switching to legacy mode") + p.lookupAndDeleteSupported = false + return p.legacyLookupAndDeleteMap(met) + } + log.WithError(err).WithField("packetID", id).Warnf("couldn't delete entry") + met.Errors.WithErrorName("pkt-fetcher", "CannotDeleteEntry").Inc() + } + packets[id] = packet + } + + return packets +} diff --git 
a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer_legacy.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer_legacy.go new file mode 100644 index 000000000..d573146a5 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/tracer/tracer_legacy.go @@ -0,0 +1,58 @@ +package tracer + +import ( + "github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf" + "github.com/netobserv/netobserv-ebpf-agent/pkg/metrics" + "github.com/netobserv/netobserv-ebpf-agent/pkg/model" +) + +// This file contains legacy implementations kept for old kernels + +func (m *FlowFetcher) legacyLookupAndDeleteMap(met *metrics.Metrics) map[ebpf.BpfFlowId]model.BpfFlowContent { + flowMap := m.objects.AggregatedFlows + + iterator := flowMap.Iterate() + var flows = make(map[ebpf.BpfFlowId]model.BpfFlowContent, m.cacheMaxSize) + var id ebpf.BpfFlowId + var metrics []ebpf.BpfFlowMetrics + count := 0 + + // Deleting while iterating is really bad for performance (like, really!) as it causes the same key to be seen multiple times + // This is solved in >=4.20 kernels with LookupAndDelete + for iterator.Next(&id, &metrics) { + count++ + if err := flowMap.Delete(id); err != nil { + log.WithError(err).WithField("flowId", id).Warnf("couldn't delete flow entry") + met.Errors.WithErrorName("flow-fetcher-legacy", "CannotDeleteFlows").Inc() + } + // We observed that the eBPF PerCPU map might insert the same key multiple times + // (probably due to race conditions), so we need to re-join the metrics in userspace + aggr := model.BpfFlowContent{} + for i := range metrics { + aggr.AccumulateBase(&metrics[i]) + } + flows[id] = aggr + } + met.BufferSizeGauge.WithBufferName("hashmap-legacy-total").Set(float64(count)) + met.BufferSizeGauge.WithBufferName("hashmap-legacy-unique").Set(float64(len(flows))) + + m.ReadGlobalCounter(met) + return flows +} + +func (p *PacketFetcher) legacyLookupAndDeleteMap(met *metrics.Metrics) map[int][]*byte { + packetMap := p.objects.PacketRecord + iterator := packetMap.Iterate() + packets := make(map[int][]*byte, p.cacheMaxSize) + + var id int + var packet []*byte + for iterator.Next(&id, &packet) { + if err := packetMap.Delete(id); err != nil { + log.WithError(err).WithField("packetID", id).Warnf("couldn't delete entry") + met.Errors.WithErrorName("pkt-fetcher-legacy", "CannotDeleteEntry").Inc() + } + packets[id] = append(packets[id], packet...) + } + return packets +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets/packets.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets/packets.go new file mode 100644 index 000000000..af3f4d2f6 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets/packets.go @@ -0,0 +1,61 @@ +package packets + +import ( + "encoding/binary" + "fmt" + "time" + + "github.com/gopacket/gopacket" + "github.com/gopacket/gopacket/layers" +) + +// PCAP Magic number is fixed for each endianness.
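+// For reference, the classic libpcap layout assumed by the helpers below: +// - global header, 24 bytes: magic(4) major(2) minor(2) thiszone(4) sigfigs(4) snaplen(4) linktype(4), +// where thiszone and sigfigs are left as zero here; +// - per-packet record header, 16 bytes: ts_sec(4) ts_usec(4) incl_len(4) orig_len(4).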
+const pcapMagicNumber = 0xA1B2C3D4 +const versionMajor = 2 +const versionMinor = 4 +const nanosPerMicro = 1000 + +func GetPCAPFileHeader(snaplen uint32, linktype layers.LinkType) []byte { + var buf [24]byte + binary.LittleEndian.PutUint32(buf[0:4], pcapMagicNumber) + binary.LittleEndian.PutUint16(buf[4:6], versionMajor) + binary.LittleEndian.PutUint16(buf[6:8], versionMinor) + binary.LittleEndian.PutUint32(buf[16:20], snaplen) + binary.LittleEndian.PutUint32(buf[20:24], uint32(linktype)) + return buf[:] +} + +func GetPacketHeader(ci gopacket.CaptureInfo) ([]byte, error) { + var buf [16]byte + t := ci.Timestamp + if t.IsZero() { + return nil, fmt.Errorf("incoming packet does not have a timestamp. Ignoring packet") + } + secs := t.Unix() + usecs := t.Nanosecond() / nanosPerMicro + binary.LittleEndian.PutUint32(buf[0:4], uint32(secs)) + binary.LittleEndian.PutUint32(buf[4:8], uint32(usecs)) + binary.LittleEndian.PutUint32(buf[8:12], uint32(ci.CaptureLength)) + binary.LittleEndian.PutUint32(buf[12:16], uint32(ci.Length)) + return buf[:], nil +} + +func GetPacketBytesWithHeader(time time.Time, data []byte) ([]byte, error) { + ci := gopacket.CaptureInfo{ + Timestamp: time, + CaptureLength: len(data), + Length: len(data), + } + if ci.CaptureLength != len(data) { + return nil, fmt.Errorf("capture length %d does not match data length %d", ci.CaptureLength, len(data)) + } + if ci.CaptureLength > ci.Length { + return nil, fmt.Errorf("invalid capture info %+v: capture length > length", ci) + } + b, err := GetPacketHeader(ci) + if err != nil { + return nil, fmt.Errorf("error writing packet header: %w", err) + } + // append 16 byte packet header & data all at once + return append(b, data...), nil +} diff --git a/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/utils.go b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/utils.go new file mode 100644 index 000000000..f0cb22cf3 --- /dev/null +++ b/vendor/github.com/netobserv/netobserv-ebpf-agent/pkg/utils/utils.go @@ -0,0 +1,16 @@ +package utils + +import ( + "fmt" + "net" +) + +// GetSocket returns socket string in the correct format based on address family +func GetSocket(hostIP string, hostPort int) string { + socket := fmt.Sprintf("%s:%d", hostIP, hostPort) + ipAddr := net.ParseIP(hostIP) + if ipAddr != nil && ipAddr.To4() == nil { + socket = fmt.Sprintf("[%s]:%d", hostIP, hostPort) + } + return socket +} diff --git a/vendor/github.com/netsampler/goflow2/LICENSE b/vendor/github.com/netsampler/goflow2/LICENSE new file mode 100644 index 000000000..a2fee077c --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2021, NetSampler +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/netsampler/goflow2/decoders/decoder.go b/vendor/github.com/netsampler/goflow2/decoders/decoder.go new file mode 100644 index 000000000..8eebaf3bd --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/decoder.go @@ -0,0 +1,115 @@ +package decoder + +import ( + "time" +) + +type Message interface{} +type MessageDecoded interface{} + +type DecoderFunc func(Message interface{}) error +type DoneCallback func(string, int, time.Time, time.Time) +type ErrorCallback func(string, int, time.Time, time.Time, error) + +// Worker structure +type Worker struct { + Id int + DecoderParams DecoderParams + WorkerPool chan chan Message + Name string + InMsg chan Message + Quit chan bool +} + +// Create a worker and add it to the pool. +func CreateWorker(workerPool chan chan Message, decoderParams DecoderParams, id int, name string) Worker { + return Worker{ + Id: id, + DecoderParams: decoderParams, + WorkerPool: workerPool, + Name: name, + InMsg: make(chan Message), + Quit: make(chan bool), + } +} + +// Start the worker. Launches a goroutine to process NFv9 messages. +// The worker will add its input channel of NFv9 messages to decode to the pool. +func (w Worker) Start() { + go func() { + //log.Debugf("Worker %v started", w.Id) + for { + select { + case <-w.Quit: + break + case w.WorkerPool <- w.InMsg: + msg := <-w.InMsg + timeTrackStart := time.Now() + err := w.DecoderParams.DecoderFunc(msg) + timeTrackStop := time.Now() + + if err != nil && w.DecoderParams.ErrorCallback != nil { + w.DecoderParams.ErrorCallback(w.Name, w.Id, timeTrackStart, timeTrackStop, err) + } else if err == nil && w.DecoderParams.DoneCallback != nil { + w.DecoderParams.DoneCallback(w.Name, w.Id, timeTrackStart, timeTrackStop) + } + } + } + //log.Debugf("Worker %v done", w.Id) + }() +} + +// Stop the worker. +func (w Worker) Stop() { + //log.Debugf("Stopping worker %v", w.Id) + w.Quit <- true +} + +// Processor structure +type Processor struct { + workerpool chan chan Message + workerlist []Worker + DecoderParams DecoderParams + Name string +} + +// Decoder structure. Define the function to call and the config specific to the type of packets. +type DecoderParams struct { + DecoderFunc DecoderFunc + DoneCallback DoneCallback + ErrorCallback ErrorCallback +} + +// Create a message processor which is going to create all the workers and set-up the pool. 
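+// A hedged usage sketch (decodeNetFlow and payload are illustrative names, +// not part of this file): +// p := CreateProcessor(8, DecoderParams{DecoderFunc: decodeNetFlow}, "NetFlow") +// p.Start() +// p.ProcessMessage(payload) // blocks until a worker is ready to receive +// p.Stop()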
+func CreateProcessor(numWorkers int, decoderParams DecoderParams, name string) Processor { + processor := Processor{ + workerpool: make(chan chan Message), + workerlist: make([]Worker, numWorkers), + DecoderParams: decoderParams, + Name: name, + } + for i := 0; i < numWorkers; i++ { + worker := CreateWorker(processor.workerpool, decoderParams, i, name) + processor.workerlist[i] = worker + } + return processor +} + +// Start message processor +func (p Processor) Start() { + for _, worker := range p.workerlist { + worker.Start() + } +} + +func (p Processor) Stop() { + for _, worker := range p.workerlist { + worker.Stop() + } +} + +// Send a message to be decoded to the pool. +func (p Processor) ProcessMessage(msg Message) { + sendChannel := <-p.workerpool + sendChannel <- msg +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/ipfix.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/ipfix.go new file mode 100644 index 000000000..954b7d38c --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/ipfix.go @@ -0,0 +1,989 @@ +package netflow + +import ( + "fmt" + "time" +) + +const ( + IPFIX_FIELD_Reserved = 0 + IPFIX_FIELD_octetDeltaCount = 1 + IPFIX_FIELD_packetDeltaCount = 2 + IPFIX_FIELD_deltaFlowCount = 3 + IPFIX_FIELD_protocolIdentifier = 4 + IPFIX_FIELD_ipClassOfService = 5 + IPFIX_FIELD_tcpControlBits = 6 + IPFIX_FIELD_sourceTransportPort = 7 + IPFIX_FIELD_sourceIPv4Address = 8 + IPFIX_FIELD_sourceIPv4PrefixLength = 9 + IPFIX_FIELD_ingressInterface = 10 + IPFIX_FIELD_destinationTransportPort = 11 + IPFIX_FIELD_destinationIPv4Address = 12 + IPFIX_FIELD_destinationIPv4PrefixLength = 13 + IPFIX_FIELD_egressInterface = 14 + IPFIX_FIELD_ipNextHopIPv4Address = 15 + IPFIX_FIELD_bgpSourceAsNumber = 16 + IPFIX_FIELD_bgpDestinationAsNumber = 17 + IPFIX_FIELD_bgpNextHopIPv4Address = 18 + IPFIX_FIELD_postMCastPacketDeltaCount = 19 + IPFIX_FIELD_postMCastOctetDeltaCount = 20 + IPFIX_FIELD_flowEndSysUpTime = 21 + IPFIX_FIELD_flowStartSysUpTime = 22 + IPFIX_FIELD_postOctetDeltaCount = 23 + IPFIX_FIELD_postPacketDeltaCount = 24 + IPFIX_FIELD_minimumIpTotalLength = 25 + IPFIX_FIELD_maximumIpTotalLength = 26 + IPFIX_FIELD_sourceIPv6Address = 27 + IPFIX_FIELD_destinationIPv6Address = 28 + IPFIX_FIELD_sourceIPv6PrefixLength = 29 + IPFIX_FIELD_destinationIPv6PrefixLength = 30 + IPFIX_FIELD_flowLabelIPv6 = 31 + IPFIX_FIELD_icmpTypeCodeIPv4 = 32 + IPFIX_FIELD_igmpType = 33 + IPFIX_FIELD_samplingInterval = 34 + IPFIX_FIELD_samplingAlgorithm = 35 + IPFIX_FIELD_flowActiveTimeout = 36 + IPFIX_FIELD_flowIdleTimeout = 37 + IPFIX_FIELD_engineType = 38 + IPFIX_FIELD_engineId = 39 + IPFIX_FIELD_exportedOctetTotalCount = 40 + IPFIX_FIELD_exportedMessageTotalCount = 41 + IPFIX_FIELD_exportedFlowRecordTotalCount = 42 + IPFIX_FIELD_ipv4RouterSc = 43 + IPFIX_FIELD_sourceIPv4Prefix = 44 + IPFIX_FIELD_destinationIPv4Prefix = 45 + IPFIX_FIELD_mplsTopLabelType = 46 + IPFIX_FIELD_mplsTopLabelIPv4Address = 47 + IPFIX_FIELD_samplerId = 48 + IPFIX_FIELD_samplerMode = 49 + IPFIX_FIELD_samplerRandomInterval = 50 + IPFIX_FIELD_classId = 51 + IPFIX_FIELD_minimumTTL = 52 + IPFIX_FIELD_maximumTTL = 53 + IPFIX_FIELD_fragmentIdentification = 54 + IPFIX_FIELD_postIpClassOfService = 55 + IPFIX_FIELD_sourceMacAddress = 56 + IPFIX_FIELD_postDestinationMacAddress = 57 + IPFIX_FIELD_vlanId = 58 + IPFIX_FIELD_postVlanId = 59 + IPFIX_FIELD_ipVersion = 60 + IPFIX_FIELD_flowDirection = 61 + IPFIX_FIELD_ipNextHopIPv6Address = 62 + IPFIX_FIELD_bgpNextHopIPv6Address = 63 + IPFIX_FIELD_ipv6ExtensionHeaders = 
64 + IPFIX_FIELD_mplsTopLabelStackSection = 70 + IPFIX_FIELD_mplsLabelStackSection2 = 71 + IPFIX_FIELD_mplsLabelStackSection3 = 72 + IPFIX_FIELD_mplsLabelStackSection4 = 73 + IPFIX_FIELD_mplsLabelStackSection5 = 74 + IPFIX_FIELD_mplsLabelStackSection6 = 75 + IPFIX_FIELD_mplsLabelStackSection7 = 76 + IPFIX_FIELD_mplsLabelStackSection8 = 77 + IPFIX_FIELD_mplsLabelStackSection9 = 78 + IPFIX_FIELD_mplsLabelStackSection10 = 79 + IPFIX_FIELD_destinationMacAddress = 80 + IPFIX_FIELD_postSourceMacAddress = 81 + IPFIX_FIELD_interfaceName = 82 + IPFIX_FIELD_interfaceDescription = 83 + IPFIX_FIELD_samplerName = 84 + IPFIX_FIELD_octetTotalCount = 85 + IPFIX_FIELD_packetTotalCount = 86 + IPFIX_FIELD_flagsAndSamplerId = 87 + IPFIX_FIELD_fragmentOffset = 88 + IPFIX_FIELD_forwardingStatus = 89 + IPFIX_FIELD_mplsVpnRouteDistinguisher = 90 + IPFIX_FIELD_mplsTopLabelPrefixLength = 91 + IPFIX_FIELD_srcTrafficIndex = 92 + IPFIX_FIELD_dstTrafficIndex = 93 + IPFIX_FIELD_applicationDescription = 94 + IPFIX_FIELD_applicationId = 95 + IPFIX_FIELD_applicationName = 96 + IPFIX_FIELD_postIpDiffServCodePoint = 98 + IPFIX_FIELD_multicastReplicationFactor = 99 + IPFIX_FIELD_className = 100 + IPFIX_FIELD_classificationEngineId = 101 + IPFIX_FIELD_layer2packetSectionOffset = 102 + IPFIX_FIELD_layer2packetSectionSize = 103 + IPFIX_FIELD_layer2packetSectionData = 104 + IPFIX_FIELD_bgpNextAdjacentAsNumber = 128 + IPFIX_FIELD_bgpPrevAdjacentAsNumber = 129 + IPFIX_FIELD_exporterIPv4Address = 130 + IPFIX_FIELD_exporterIPv6Address = 131 + IPFIX_FIELD_droppedOctetDeltaCount = 132 + IPFIX_FIELD_droppedPacketDeltaCount = 133 + IPFIX_FIELD_droppedOctetTotalCount = 134 + IPFIX_FIELD_droppedPacketTotalCount = 135 + IPFIX_FIELD_flowEndReason = 136 + IPFIX_FIELD_commonPropertiesId = 137 + IPFIX_FIELD_observationPointId = 138 + IPFIX_FIELD_icmpTypeCodeIPv6 = 139 + IPFIX_FIELD_mplsTopLabelIPv6Address = 140 + IPFIX_FIELD_lineCardId = 141 + IPFIX_FIELD_portId = 142 + IPFIX_FIELD_meteringProcessId = 143 + IPFIX_FIELD_exportingProcessId = 144 + IPFIX_FIELD_templateId = 145 + IPFIX_FIELD_wlanChannelId = 146 + IPFIX_FIELD_wlanSSID = 147 + IPFIX_FIELD_flowId = 148 + IPFIX_FIELD_observationDomainId = 149 + IPFIX_FIELD_flowStartSeconds = 150 + IPFIX_FIELD_flowEndSeconds = 151 + IPFIX_FIELD_flowStartMilliseconds = 152 + IPFIX_FIELD_flowEndMilliseconds = 153 + IPFIX_FIELD_flowStartMicroseconds = 154 + IPFIX_FIELD_flowEndMicroseconds = 155 + IPFIX_FIELD_flowStartNanoseconds = 156 + IPFIX_FIELD_flowEndNanoseconds = 157 + IPFIX_FIELD_flowStartDeltaMicroseconds = 158 + IPFIX_FIELD_flowEndDeltaMicroseconds = 159 + IPFIX_FIELD_systemInitTimeMilliseconds = 160 + IPFIX_FIELD_flowDurationMilliseconds = 161 + IPFIX_FIELD_flowDurationMicroseconds = 162 + IPFIX_FIELD_observedFlowTotalCount = 163 + IPFIX_FIELD_ignoredPacketTotalCount = 164 + IPFIX_FIELD_ignoredOctetTotalCount = 165 + IPFIX_FIELD_notSentFlowTotalCount = 166 + IPFIX_FIELD_notSentPacketTotalCount = 167 + IPFIX_FIELD_notSentOctetTotalCount = 168 + IPFIX_FIELD_destinationIPv6Prefix = 169 + IPFIX_FIELD_sourceIPv6Prefix = 170 + IPFIX_FIELD_postOctetTotalCount = 171 + IPFIX_FIELD_postPacketTotalCount = 172 + IPFIX_FIELD_flowKeyIndicator = 173 + IPFIX_FIELD_postMCastPacketTotalCount = 174 + IPFIX_FIELD_postMCastOctetTotalCount = 175 + IPFIX_FIELD_icmpTypeIPv4 = 176 + IPFIX_FIELD_icmpCodeIPv4 = 177 + IPFIX_FIELD_icmpTypeIPv6 = 178 + IPFIX_FIELD_icmpCodeIPv6 = 179 + IPFIX_FIELD_udpSourcePort = 180 + IPFIX_FIELD_udpDestinationPort = 181 + IPFIX_FIELD_tcpSourcePort = 182 + IPFIX_FIELD_tcpDestinationPort = 183 
+ IPFIX_FIELD_tcpSequenceNumber = 184 + IPFIX_FIELD_tcpAcknowledgementNumber = 185 + IPFIX_FIELD_tcpWindowSize = 186 + IPFIX_FIELD_tcpUrgentPointer = 187 + IPFIX_FIELD_tcpHeaderLength = 188 + IPFIX_FIELD_ipHeaderLength = 189 + IPFIX_FIELD_totalLengthIPv4 = 190 + IPFIX_FIELD_payloadLengthIPv6 = 191 + IPFIX_FIELD_ipTTL = 192 + IPFIX_FIELD_nextHeaderIPv6 = 193 + IPFIX_FIELD_mplsPayloadLength = 194 + IPFIX_FIELD_ipDiffServCodePoint = 195 + IPFIX_FIELD_ipPrecedence = 196 + IPFIX_FIELD_fragmentFlags = 197 + IPFIX_FIELD_octetDeltaSumOfSquares = 198 + IPFIX_FIELD_octetTotalSumOfSquares = 199 + IPFIX_FIELD_mplsTopLabelTTL = 200 + IPFIX_FIELD_mplsLabelStackLength = 201 + IPFIX_FIELD_mplsLabelStackDepth = 202 + IPFIX_FIELD_mplsTopLabelExp = 203 + IPFIX_FIELD_ipPayloadLength = 204 + IPFIX_FIELD_udpMessageLength = 205 + IPFIX_FIELD_isMulticast = 206 + IPFIX_FIELD_ipv4IHL = 207 + IPFIX_FIELD_ipv4Options = 208 + IPFIX_FIELD_tcpOptions = 209 + IPFIX_FIELD_paddingOctets = 210 + IPFIX_FIELD_collectorIPv4Address = 211 + IPFIX_FIELD_collectorIPv6Address = 212 + IPFIX_FIELD_exportInterface = 213 + IPFIX_FIELD_exportProtocolVersion = 214 + IPFIX_FIELD_exportTransportProtocol = 215 + IPFIX_FIELD_collectorTransportPort = 216 + IPFIX_FIELD_exporterTransportPort = 217 + IPFIX_FIELD_tcpSynTotalCount = 218 + IPFIX_FIELD_tcpFinTotalCount = 219 + IPFIX_FIELD_tcpRstTotalCount = 220 + IPFIX_FIELD_tcpPshTotalCount = 221 + IPFIX_FIELD_tcpAckTotalCount = 222 + IPFIX_FIELD_tcpUrgTotalCount = 223 + IPFIX_FIELD_ipTotalLength = 224 + IPFIX_FIELD_postNATSourceIPv4Address = 225 + IPFIX_FIELD_postNATDestinationIPv4Address = 226 + IPFIX_FIELD_postNAPTSourceTransportPort = 227 + IPFIX_FIELD_postNAPTDestinationTransportPort = 228 + IPFIX_FIELD_natOriginatingAddressRealm = 229 + IPFIX_FIELD_natEvent = 230 + IPFIX_FIELD_initiatorOctets = 231 + IPFIX_FIELD_responderOctets = 232 + IPFIX_FIELD_firewallEvent = 233 + IPFIX_FIELD_ingressVRFID = 234 + IPFIX_FIELD_egressVRFID = 235 + IPFIX_FIELD_VRFname = 236 + IPFIX_FIELD_postMplsTopLabelExp = 237 + IPFIX_FIELD_tcpWindowScale = 238 + IPFIX_FIELD_biflowDirection = 239 + IPFIX_FIELD_ethernetHeaderLength = 240 + IPFIX_FIELD_ethernetPayloadLength = 241 + IPFIX_FIELD_ethernetTotalLength = 242 + IPFIX_FIELD_dot1qVlanId = 243 + IPFIX_FIELD_dot1qPriority = 244 + IPFIX_FIELD_dot1qCustomerVlanId = 245 + IPFIX_FIELD_dot1qCustomerPriority = 246 + IPFIX_FIELD_metroEvcId = 247 + IPFIX_FIELD_metroEvcType = 248 + IPFIX_FIELD_pseudoWireId = 249 + IPFIX_FIELD_pseudoWireType = 250 + IPFIX_FIELD_pseudoWireControlWord = 251 + IPFIX_FIELD_ingressPhysicalInterface = 252 + IPFIX_FIELD_egressPhysicalInterface = 253 + IPFIX_FIELD_postDot1qVlanId = 254 + IPFIX_FIELD_postDot1qCustomerVlanId = 255 + IPFIX_FIELD_ethernetType = 256 + IPFIX_FIELD_postIpPrecedence = 257 + IPFIX_FIELD_collectionTimeMilliseconds = 258 + IPFIX_FIELD_exportSctpStreamId = 259 + IPFIX_FIELD_maxExportSeconds = 260 + IPFIX_FIELD_maxFlowEndSeconds = 261 + IPFIX_FIELD_messageMD5Checksum = 262 + IPFIX_FIELD_messageScope = 263 + IPFIX_FIELD_minExportSeconds = 264 + IPFIX_FIELD_minFlowStartSeconds = 265 + IPFIX_FIELD_opaqueOctets = 266 + IPFIX_FIELD_sessionScope = 267 + IPFIX_FIELD_maxFlowEndMicroseconds = 268 + IPFIX_FIELD_maxFlowEndMilliseconds = 269 + IPFIX_FIELD_maxFlowEndNanoseconds = 270 + IPFIX_FIELD_minFlowStartMicroseconds = 271 + IPFIX_FIELD_minFlowStartMilliseconds = 272 + IPFIX_FIELD_minFlowStartNanoseconds = 273 + IPFIX_FIELD_collectorCertificate = 274 + IPFIX_FIELD_exporterCertificate = 275 + IPFIX_FIELD_dataRecordsReliability = 276 + 
IPFIX_FIELD_observationPointType = 277 + IPFIX_FIELD_newConnectionDeltaCount = 278 + IPFIX_FIELD_connectionSumDurationSeconds = 279 + IPFIX_FIELD_connectionTransactionId = 280 + IPFIX_FIELD_postNATSourceIPv6Address = 281 + IPFIX_FIELD_postNATDestinationIPv6Address = 282 + IPFIX_FIELD_natPoolId = 283 + IPFIX_FIELD_natPoolName = 284 + IPFIX_FIELD_anonymizationFlags = 285 + IPFIX_FIELD_anonymizationTechnique = 286 + IPFIX_FIELD_informationElementIndex = 287 + IPFIX_FIELD_p2pTechnology = 288 + IPFIX_FIELD_tunnelTechnology = 289 + IPFIX_FIELD_encryptedTechnology = 290 + IPFIX_FIELD_basicList = 291 + IPFIX_FIELD_subTemplateList = 292 + IPFIX_FIELD_subTemplateMultiList = 293 + IPFIX_FIELD_bgpValidityState = 294 + IPFIX_FIELD_IPSecSPI = 295 + IPFIX_FIELD_greKey = 296 + IPFIX_FIELD_natType = 297 + IPFIX_FIELD_initiatorPackets = 298 + IPFIX_FIELD_responderPackets = 299 + IPFIX_FIELD_observationDomainName = 300 + IPFIX_FIELD_selectionSequenceId = 301 + IPFIX_FIELD_selectorId = 302 + IPFIX_FIELD_informationElementId = 303 + IPFIX_FIELD_selectorAlgorithm = 304 + IPFIX_FIELD_samplingPacketInterval = 305 + IPFIX_FIELD_samplingPacketSpace = 306 + IPFIX_FIELD_samplingTimeInterval = 307 + IPFIX_FIELD_samplingTimeSpace = 308 + IPFIX_FIELD_samplingSize = 309 + IPFIX_FIELD_samplingPopulation = 310 + IPFIX_FIELD_samplingProbability = 311 + IPFIX_FIELD_dataLinkFrameSize = 312 + IPFIX_FIELD_ipHeaderPacketSection = 313 + IPFIX_FIELD_ipPayloadPacketSection = 314 + IPFIX_FIELD_dataLinkFrameSection = 315 + IPFIX_FIELD_mplsLabelStackSection = 316 + IPFIX_FIELD_mplsPayloadPacketSection = 317 + IPFIX_FIELD_selectorIdTotalPktsObserved = 318 + IPFIX_FIELD_selectorIdTotalPktsSelected = 319 + IPFIX_FIELD_absoluteError = 320 + IPFIX_FIELD_relativeError = 321 + IPFIX_FIELD_observationTimeSeconds = 322 + IPFIX_FIELD_observationTimeMilliseconds = 323 + IPFIX_FIELD_observationTimeMicroseconds = 324 + IPFIX_FIELD_observationTimeNanoseconds = 325 + IPFIX_FIELD_digestHashValue = 326 + IPFIX_FIELD_hashIPPayloadOffset = 327 + IPFIX_FIELD_hashIPPayloadSize = 328 + IPFIX_FIELD_hashOutputRangeMin = 329 + IPFIX_FIELD_hashOutputRangeMax = 330 + IPFIX_FIELD_hashSelectedRangeMin = 331 + IPFIX_FIELD_hashSelectedRangeMax = 332 + IPFIX_FIELD_hashDigestOutput = 333 + IPFIX_FIELD_hashInitialiserValue = 334 + IPFIX_FIELD_selectorName = 335 + IPFIX_FIELD_upperCILimit = 336 + IPFIX_FIELD_lowerCILimit = 337 + IPFIX_FIELD_confidenceLevel = 338 + IPFIX_FIELD_informationElementDataType = 339 + IPFIX_FIELD_informationElementDescription = 340 + IPFIX_FIELD_informationElementName = 341 + IPFIX_FIELD_informationElementRangeBegin = 342 + IPFIX_FIELD_informationElementRangeEnd = 343 + IPFIX_FIELD_informationElementSemantics = 344 + IPFIX_FIELD_informationElementUnits = 345 + IPFIX_FIELD_privateEnterpriseNumber = 346 + IPFIX_FIELD_virtualStationInterfaceId = 347 + IPFIX_FIELD_virtualStationInterfaceName = 348 + IPFIX_FIELD_virtualStationUUID = 349 + IPFIX_FIELD_virtualStationName = 350 + IPFIX_FIELD_layer2SegmentId = 351 + IPFIX_FIELD_layer2OctetDeltaCount = 352 + IPFIX_FIELD_layer2OctetTotalCount = 353 + IPFIX_FIELD_ingressUnicastPacketTotalCount = 354 + IPFIX_FIELD_ingressMulticastPacketTotalCount = 355 + IPFIX_FIELD_ingressBroadcastPacketTotalCount = 356 + IPFIX_FIELD_egressUnicastPacketTotalCount = 357 + IPFIX_FIELD_egressBroadcastPacketTotalCount = 358 + IPFIX_FIELD_monitoringIntervalStartMilliSeconds = 359 + IPFIX_FIELD_monitoringIntervalEndMilliSeconds = 360 + IPFIX_FIELD_portRangeStart = 361 + IPFIX_FIELD_portRangeEnd = 362 + 
IPFIX_FIELD_portRangeStepSize = 363 + IPFIX_FIELD_portRangeNumPorts = 364 + IPFIX_FIELD_staMacAddress = 365 + IPFIX_FIELD_staIPv4Address = 366 + IPFIX_FIELD_wtpMacAddress = 367 + IPFIX_FIELD_ingressInterfaceType = 368 + IPFIX_FIELD_egressInterfaceType = 369 + IPFIX_FIELD_rtpSequenceNumber = 370 + IPFIX_FIELD_userName = 371 + IPFIX_FIELD_applicationCategoryName = 372 + IPFIX_FIELD_applicationSubCategoryName = 373 + IPFIX_FIELD_applicationGroupName = 374 + IPFIX_FIELD_originalFlowsPresent = 375 + IPFIX_FIELD_originalFlowsInitiated = 376 + IPFIX_FIELD_originalFlowsCompleted = 377 + IPFIX_FIELD_distinctCountOfSourceIPAddress = 378 + IPFIX_FIELD_distinctCountOfDestinationIPAddress = 379 + IPFIX_FIELD_distinctCountOfSourceIPv4Address = 380 + IPFIX_FIELD_distinctCountOfDestinationIPv4Address = 381 + IPFIX_FIELD_distinctCountOfSourceIPv6Address = 382 + IPFIX_FIELD_distinctCountOfDestinationIPv6Address = 383 + IPFIX_FIELD_valueDistributionMethod = 384 + IPFIX_FIELD_rfc3550JitterMilliseconds = 385 + IPFIX_FIELD_rfc3550JitterMicroseconds = 386 + IPFIX_FIELD_rfc3550JitterNanoseconds = 387 + IPFIX_FIELD_dot1qDEI = 388 + IPFIX_FIELD_dot1qCustomerDEI = 389 + IPFIX_FIELD_flowSelectorAlgorithm = 390 + IPFIX_FIELD_flowSelectedOctetDeltaCount = 391 + IPFIX_FIELD_flowSelectedPacketDeltaCount = 392 + IPFIX_FIELD_flowSelectedFlowDeltaCount = 393 + IPFIX_FIELD_selectorIDTotalFlowsObserved = 394 + IPFIX_FIELD_selectorIDTotalFlowsSelected = 395 + IPFIX_FIELD_samplingFlowInterval = 396 + IPFIX_FIELD_samplingFlowSpacing = 397 + IPFIX_FIELD_flowSamplingTimeInterval = 398 + IPFIX_FIELD_flowSamplingTimeSpacing = 399 + IPFIX_FIELD_hashFlowDomain = 400 + IPFIX_FIELD_transportOctetDeltaCount = 401 + IPFIX_FIELD_transportPacketDeltaCount = 402 + IPFIX_FIELD_originalExporterIPv4Address = 403 + IPFIX_FIELD_originalExporterIPv6Address = 404 + IPFIX_FIELD_originalObservationDomainId = 405 + IPFIX_FIELD_intermediateProcessId = 406 + IPFIX_FIELD_ignoredDataRecordTotalCount = 407 + IPFIX_FIELD_dataLinkFrameType = 408 + IPFIX_FIELD_sectionOffset = 409 + IPFIX_FIELD_sectionExportedOctets = 410 + IPFIX_FIELD_dot1qServiceInstanceTag = 411 + IPFIX_FIELD_dot1qServiceInstanceId = 412 + IPFIX_FIELD_dot1qServiceInstancePriority = 413 + IPFIX_FIELD_dot1qCustomerSourceMacAddress = 414 + IPFIX_FIELD_dot1qCustomerDestinationMacAddress = 415 + IPFIX_FIELD_postLayer2OctetDeltaCount = 417 + IPFIX_FIELD_postMCastLayer2OctetDeltaCount = 418 + IPFIX_FIELD_postLayer2OctetTotalCount = 420 + IPFIX_FIELD_postMCastLayer2OctetTotalCount = 421 + IPFIX_FIELD_minimumLayer2TotalLength = 422 + IPFIX_FIELD_maximumLayer2TotalLength = 423 + IPFIX_FIELD_droppedLayer2OctetDeltaCount = 424 + IPFIX_FIELD_droppedLayer2OctetTotalCount = 425 + IPFIX_FIELD_ignoredLayer2OctetTotalCount = 426 + IPFIX_FIELD_notSentLayer2OctetTotalCount = 427 + IPFIX_FIELD_layer2OctetDeltaSumOfSquares = 428 + IPFIX_FIELD_layer2OctetTotalSumOfSquares = 429 + IPFIX_FIELD_layer2FrameDeltaCount = 430 + IPFIX_FIELD_layer2FrameTotalCount = 431 + IPFIX_FIELD_pseudoWireDestinationIPv4Address = 432 + IPFIX_FIELD_ignoredLayer2FrameTotalCount = 433 + IPFIX_FIELD_mibObjectValueInteger = 434 + IPFIX_FIELD_mibObjectValueOctetString = 435 + IPFIX_FIELD_mibObjectValueOID = 436 + IPFIX_FIELD_mibObjectValueBits = 437 + IPFIX_FIELD_mibObjectValueIPAddress = 438 + IPFIX_FIELD_mibObjectValueCounter = 439 + IPFIX_FIELD_mibObjectValueGauge = 440 + IPFIX_FIELD_mibObjectValueTimeTicks = 441 + IPFIX_FIELD_mibObjectValueUnsigned = 442 + IPFIX_FIELD_mibObjectValueTable = 443 + IPFIX_FIELD_mibObjectValueRow = 444 + 
IPFIX_FIELD_mibObjectIdentifier = 445 + IPFIX_FIELD_mibSubIdentifier = 446 + IPFIX_FIELD_mibIndexIndicator = 447 + IPFIX_FIELD_mibCaptureTimeSemantics = 448 + IPFIX_FIELD_mibContextEngineID = 449 + IPFIX_FIELD_mibContextName = 450 + IPFIX_FIELD_mibObjectName = 451 + IPFIX_FIELD_mibObjectDescription = 452 + IPFIX_FIELD_mibObjectSyntax = 453 + IPFIX_FIELD_mibModuleName = 454 + IPFIX_FIELD_mobileIMSI = 455 + IPFIX_FIELD_mobileMSISDN = 456 + IPFIX_FIELD_httpStatusCode = 457 + IPFIX_FIELD_sourceTransportPortsLimit = 458 + IPFIX_FIELD_httpRequestMethod = 459 + IPFIX_FIELD_httpRequestHost = 460 + IPFIX_FIELD_httpRequestTarget = 461 + IPFIX_FIELD_httpMessageVersion = 462 + IPFIX_FIELD_natInstanceID = 463 + IPFIX_FIELD_internalAddressRealm = 464 + IPFIX_FIELD_externalAddressRealm = 465 + IPFIX_FIELD_natQuotaExceededEvent = 466 + IPFIX_FIELD_natThresholdEvent = 467 +) + +type IPFIXPacket struct { + Version uint16 + Length uint16 + ExportTime uint32 + SequenceNumber uint32 + ObservationDomainId uint32 + FlowSets []interface{} +} + +type IPFIXOptionsTemplateFlowSet struct { + FlowSetHeader + Records []IPFIXOptionsTemplateRecord +} + +type IPFIXOptionsTemplateRecord struct { + TemplateId uint16 + FieldCount uint16 + ScopeFieldCount uint16 + Options []Field + Scopes []Field +} + +func IPFIXTypeToString(typeId uint16) string { + + nameList := map[uint16]string{ + 0: "Reserved", + 1: "octetDeltaCount", + 2: "packetDeltaCount", + 3: "deltaFlowCount", + 4: "protocolIdentifier", + 5: "ipClassOfService", + 6: "tcpControlBits", + 7: "sourceTransportPort", + 8: "sourceIPv4Address", + 9: "sourceIPv4PrefixLength", + 10: "ingressInterface", + 11: "destinationTransportPort", + 12: "destinationIPv4Address", + 13: "destinationIPv4PrefixLength", + 14: "egressInterface", + 15: "ipNextHopIPv4Address", + 16: "bgpSourceAsNumber", + 17: "bgpDestinationAsNumber", + 18: "bgpNextHopIPv4Address", + 19: "postMCastPacketDeltaCount", + 20: "postMCastOctetDeltaCount", + 21: "flowEndSysUpTime", + 22: "flowStartSysUpTime", + 23: "postOctetDeltaCount", + 24: "postPacketDeltaCount", + 25: "minimumIpTotalLength", + 26: "maximumIpTotalLength", + 27: "sourceIPv6Address", + 28: "destinationIPv6Address", + 29: "sourceIPv6PrefixLength", + 30: "destinationIPv6PrefixLength", + 31: "flowLabelIPv6", + 32: "icmpTypeCodeIPv4", + 33: "igmpType", + 34: "samplingInterval", + 35: "samplingAlgorithm", + 36: "flowActiveTimeout", + 37: "flowIdleTimeout", + 38: "engineType", + 39: "engineId", + 40: "exportedOctetTotalCount", + 41: "exportedMessageTotalCount", + 42: "exportedFlowRecordTotalCount", + 43: "ipv4RouterSc", + 44: "sourceIPv4Prefix", + 45: "destinationIPv4Prefix", + 46: "mplsTopLabelType", + 47: "mplsTopLabelIPv4Address", + 48: "samplerId", + 49: "samplerMode", + 50: "samplerRandomInterval", + 51: "classId", + 52: "minimumTTL", + 53: "maximumTTL", + 54: "fragmentIdentification", + 55: "postIpClassOfService", + 56: "sourceMacAddress", + 57: "postDestinationMacAddress", + 58: "vlanId", + 59: "postVlanId", + 60: "ipVersion", + 61: "flowDirection", + 62: "ipNextHopIPv6Address", + 63: "bgpNextHopIPv6Address", + 64: "ipv6ExtensionHeaders", + 65: "Assigned for NetFlow v9 compatibility", + 66: "Assigned for NetFlow v9 compatibility", + 67: "Assigned for NetFlow v9 compatibility", + 68: "Assigned for NetFlow v9 compatibility", + 69: "Assigned for NetFlow v9 compatibility", + 70: "mplsTopLabelStackSection", + 71: "mplsLabelStackSection2", + 72: "mplsLabelStackSection3", + 73: "mplsLabelStackSection4", + 74: "mplsLabelStackSection5", + 75: 
"mplsLabelStackSection6", + 76: "mplsLabelStackSection7", + 77: "mplsLabelStackSection8", + 78: "mplsLabelStackSection9", + 79: "mplsLabelStackSection10", + 80: "destinationMacAddress", + 81: "postSourceMacAddress", + 82: "interfaceName", + 83: "interfaceDescription", + 84: "samplerName", + 85: "octetTotalCount", + 86: "packetTotalCount", + 87: "flagsAndSamplerId", + 88: "fragmentOffset", + 89: "forwardingStatus", + 90: "mplsVpnRouteDistinguisher", + 91: "mplsTopLabelPrefixLength", + 92: "srcTrafficIndex", + 93: "dstTrafficIndex", + 94: "applicationDescription", + 95: "applicationId", + 96: "applicationName", + 97: "Assigned for NetFlow v9 compatibility", + 98: "postIpDiffServCodePoint", + 99: "multicastReplicationFactor", + 100: "className", + 101: "classificationEngineId", + 102: "layer2packetSectionOffset", + 103: "layer2packetSectionSize", + 104: "layer2packetSectionData", + 128: "bgpNextAdjacentAsNumber", + 129: "bgpPrevAdjacentAsNumber", + 130: "exporterIPv4Address", + 131: "exporterIPv6Address", + 132: "droppedOctetDeltaCount", + 133: "droppedPacketDeltaCount", + 134: "droppedOctetTotalCount", + 135: "droppedPacketTotalCount", + 136: "flowEndReason", + 137: "commonPropertiesId", + 138: "observationPointId", + 139: "icmpTypeCodeIPv6", + 140: "mplsTopLabelIPv6Address", + 141: "lineCardId", + 142: "portId", + 143: "meteringProcessId", + 144: "exportingProcessId", + 145: "templateId", + 146: "wlanChannelId", + 147: "wlanSSID", + 148: "flowId", + 149: "observationDomainId", + 150: "flowStartSeconds", + 151: "flowEndSeconds", + 152: "flowStartMilliseconds", + 153: "flowEndMilliseconds", + 154: "flowStartMicroseconds", + 155: "flowEndMicroseconds", + 156: "flowStartNanoseconds", + 157: "flowEndNanoseconds", + 158: "flowStartDeltaMicroseconds", + 159: "flowEndDeltaMicroseconds", + 160: "systemInitTimeMilliseconds", + 161: "flowDurationMilliseconds", + 162: "flowDurationMicroseconds", + 163: "observedFlowTotalCount", + 164: "ignoredPacketTotalCount", + 165: "ignoredOctetTotalCount", + 166: "notSentFlowTotalCount", + 167: "notSentPacketTotalCount", + 168: "notSentOctetTotalCount", + 169: "destinationIPv6Prefix", + 170: "sourceIPv6Prefix", + 171: "postOctetTotalCount", + 172: "postPacketTotalCount", + 173: "flowKeyIndicator", + 174: "postMCastPacketTotalCount", + 175: "postMCastOctetTotalCount", + 176: "icmpTypeIPv4", + 177: "icmpCodeIPv4", + 178: "icmpTypeIPv6", + 179: "icmpCodeIPv6", + 180: "udpSourcePort", + 181: "udpDestinationPort", + 182: "tcpSourcePort", + 183: "tcpDestinationPort", + 184: "tcpSequenceNumber", + 185: "tcpAcknowledgementNumber", + 186: "tcpWindowSize", + 187: "tcpUrgentPointer", + 188: "tcpHeaderLength", + 189: "ipHeaderLength", + 190: "totalLengthIPv4", + 191: "payloadLengthIPv6", + 192: "ipTTL", + 193: "nextHeaderIPv6", + 194: "mplsPayloadLength", + 195: "ipDiffServCodePoint", + 196: "ipPrecedence", + 197: "fragmentFlags", + 198: "octetDeltaSumOfSquares", + 199: "octetTotalSumOfSquares", + 200: "mplsTopLabelTTL", + 201: "mplsLabelStackLength", + 202: "mplsLabelStackDepth", + 203: "mplsTopLabelExp", + 204: "ipPayloadLength", + 205: "udpMessageLength", + 206: "isMulticast", + 207: "ipv4IHL", + 208: "ipv4Options", + 209: "tcpOptions", + 210: "paddingOctets", + 211: "collectorIPv4Address", + 212: "collectorIPv6Address", + 213: "exportInterface", + 214: "exportProtocolVersion", + 215: "exportTransportProtocol", + 216: "collectorTransportPort", + 217: "exporterTransportPort", + 218: "tcpSynTotalCount", + 219: "tcpFinTotalCount", + 220: "tcpRstTotalCount", + 221: 
"tcpPshTotalCount", + 222: "tcpAckTotalCount", + 223: "tcpUrgTotalCount", + 224: "ipTotalLength", + 225: "postNATSourceIPv4Address", + 226: "postNATDestinationIPv4Address", + 227: "postNAPTSourceTransportPort", + 228: "postNAPTDestinationTransportPort", + 229: "natOriginatingAddressRealm", + 230: "natEvent", + 231: "initiatorOctets", + 232: "responderOctets", + 233: "firewallEvent", + 234: "ingressVRFID", + 235: "egressVRFID", + 236: "VRFname", + 237: "postMplsTopLabelExp", + 238: "tcpWindowScale", + 239: "biflowDirection", + 240: "ethernetHeaderLength", + 241: "ethernetPayloadLength", + 242: "ethernetTotalLength", + 243: "dot1qVlanId", + 244: "dot1qPriority", + 245: "dot1qCustomerVlanId", + 246: "dot1qCustomerPriority", + 247: "metroEvcId", + 248: "metroEvcType", + 249: "pseudoWireId", + 250: "pseudoWireType", + 251: "pseudoWireControlWord", + 252: "ingressPhysicalInterface", + 253: "egressPhysicalInterface", + 254: "postDot1qVlanId", + 255: "postDot1qCustomerVlanId", + 256: "ethernetType", + 257: "postIpPrecedence", + 258: "collectionTimeMilliseconds", + 259: "exportSctpStreamId", + 260: "maxExportSeconds", + 261: "maxFlowEndSeconds", + 262: "messageMD5Checksum", + 263: "messageScope", + 264: "minExportSeconds", + 265: "minFlowStartSeconds", + 266: "opaqueOctets", + 267: "sessionScope", + 268: "maxFlowEndMicroseconds", + 269: "maxFlowEndMilliseconds", + 270: "maxFlowEndNanoseconds", + 271: "minFlowStartMicroseconds", + 272: "minFlowStartMilliseconds", + 273: "minFlowStartNanoseconds", + 274: "collectorCertificate", + 275: "exporterCertificate", + 276: "dataRecordsReliability", + 277: "observationPointType", + 278: "newConnectionDeltaCount", + 279: "connectionSumDurationSeconds", + 280: "connectionTransactionId", + 281: "postNATSourceIPv6Address", + 282: "postNATDestinationIPv6Address", + 283: "natPoolId", + 284: "natPoolName", + 285: "anonymizationFlags", + 286: "anonymizationTechnique", + 287: "informationElementIndex", + 288: "p2pTechnology", + 289: "tunnelTechnology", + 290: "encryptedTechnology", + 291: "basicList", + 292: "subTemplateList", + 293: "subTemplateMultiList", + 294: "bgpValidityState", + 295: "IPSecSPI", + 296: "greKey", + 297: "natType", + 298: "initiatorPackets", + 299: "responderPackets", + 300: "observationDomainName", + 301: "selectionSequenceId", + 302: "selectorId", + 303: "informationElementId", + 304: "selectorAlgorithm", + 305: "samplingPacketInterval", + 306: "samplingPacketSpace", + 307: "samplingTimeInterval", + 308: "samplingTimeSpace", + 309: "samplingSize", + 310: "samplingPopulation", + 311: "samplingProbability", + 312: "dataLinkFrameSize", + 313: "ipHeaderPacketSection", + 314: "ipPayloadPacketSection", + 315: "dataLinkFrameSection", + 316: "mplsLabelStackSection", + 317: "mplsPayloadPacketSection", + 318: "selectorIdTotalPktsObserved", + 319: "selectorIdTotalPktsSelected", + 320: "absoluteError", + 321: "relativeError", + 322: "observationTimeSeconds", + 323: "observationTimeMilliseconds", + 324: "observationTimeMicroseconds", + 325: "observationTimeNanoseconds", + 326: "digestHashValue", + 327: "hashIPPayloadOffset", + 328: "hashIPPayloadSize", + 329: "hashOutputRangeMin", + 330: "hashOutputRangeMax", + 331: "hashSelectedRangeMin", + 332: "hashSelectedRangeMax", + 333: "hashDigestOutput", + 334: "hashInitialiserValue", + 335: "selectorName", + 336: "upperCILimit", + 337: "lowerCILimit", + 338: "confidenceLevel", + 339: "informationElementDataType", + 340: "informationElementDescription", + 341: "informationElementName", + 342: 
"informationElementRangeBegin", + 343: "informationElementRangeEnd", + 344: "informationElementSemantics", + 345: "informationElementUnits", + 346: "privateEnterpriseNumber", + 347: "virtualStationInterfaceId", + 348: "virtualStationInterfaceName", + 349: "virtualStationUUID", + 350: "virtualStationName", + 351: "layer2SegmentId", + 352: "layer2OctetDeltaCount", + 353: "layer2OctetTotalCount", + 354: "ingressUnicastPacketTotalCount", + 355: "ingressMulticastPacketTotalCount", + 356: "ingressBroadcastPacketTotalCount", + 357: "egressUnicastPacketTotalCount", + 358: "egressBroadcastPacketTotalCount", + 359: "monitoringIntervalStartMilliSeconds", + 360: "monitoringIntervalEndMilliSeconds", + 361: "portRangeStart", + 362: "portRangeEnd", + 363: "portRangeStepSize", + 364: "portRangeNumPorts", + 365: "staMacAddress", + 366: "staIPv4Address", + 367: "wtpMacAddress", + 368: "ingressInterfaceType", + 369: "egressInterfaceType", + 370: "rtpSequenceNumber", + 371: "userName", + 372: "applicationCategoryName", + 373: "applicationSubCategoryName", + 374: "applicationGroupName", + 375: "originalFlowsPresent", + 376: "originalFlowsInitiated", + 377: "originalFlowsCompleted", + 378: "distinctCountOfSourceIPAddress", + 379: "distinctCountOfDestinationIPAddress", + 380: "distinctCountOfSourceIPv4Address", + 381: "distinctCountOfDestinationIPv4Address", + 382: "distinctCountOfSourceIPv6Address", + 383: "distinctCountOfDestinationIPv6Address", + 384: "valueDistributionMethod", + 385: "rfc3550JitterMilliseconds", + 386: "rfc3550JitterMicroseconds", + 387: "rfc3550JitterNanoseconds", + 388: "dot1qDEI", + 389: "dot1qCustomerDEI", + 390: "flowSelectorAlgorithm", + 391: "flowSelectedOctetDeltaCount", + 392: "flowSelectedPacketDeltaCount", + 393: "flowSelectedFlowDeltaCount", + 394: "selectorIDTotalFlowsObserved", + 395: "selectorIDTotalFlowsSelected", + 396: "samplingFlowInterval", + 397: "samplingFlowSpacing", + 398: "flowSamplingTimeInterval", + 399: "flowSamplingTimeSpacing", + 400: "hashFlowDomain", + 401: "transportOctetDeltaCount", + 402: "transportPacketDeltaCount", + 403: "originalExporterIPv4Address", + 404: "originalExporterIPv6Address", + 405: "originalObservationDomainId", + 406: "intermediateProcessId", + 407: "ignoredDataRecordTotalCount", + 408: "dataLinkFrameType", + 409: "sectionOffset", + 410: "sectionExportedOctets", + 411: "dot1qServiceInstanceTag", + 412: "dot1qServiceInstanceId", + 413: "dot1qServiceInstancePriority", + 414: "dot1qCustomerSourceMacAddress", + 415: "dot1qCustomerDestinationMacAddress", + 416: "", + 417: "postLayer2OctetDeltaCount", + 418: "postMCastLayer2OctetDeltaCount", + 419: "", + 420: "postLayer2OctetTotalCount", + 421: "postMCastLayer2OctetTotalCount", + 422: "minimumLayer2TotalLength", + 423: "maximumLayer2TotalLength", + 424: "droppedLayer2OctetDeltaCount", + 425: "droppedLayer2OctetTotalCount", + 426: "ignoredLayer2OctetTotalCount", + 427: "notSentLayer2OctetTotalCount", + 428: "layer2OctetDeltaSumOfSquares", + 429: "layer2OctetTotalSumOfSquares", + 430: "layer2FrameDeltaCount", + 431: "layer2FrameTotalCount", + 432: "pseudoWireDestinationIPv4Address", + 433: "ignoredLayer2FrameTotalCount", + 434: "mibObjectValueInteger", + 435: "mibObjectValueOctetString", + 436: "mibObjectValueOID", + 437: "mibObjectValueBits", + 438: "mibObjectValueIPAddress", + 439: "mibObjectValueCounter", + 440: "mibObjectValueGauge", + 441: "mibObjectValueTimeTicks", + 442: "mibObjectValueUnsigned", + 443: "mibObjectValueTable", + 444: "mibObjectValueRow", + 445: "mibObjectIdentifier", + 446: 
"mibSubIdentifier", + 447: "mibIndexIndicator", + 448: "mibCaptureTimeSemantics", + 449: "mibContextEngineID", + 450: "mibContextName", + 451: "mibObjectName", + 452: "mibObjectDescription", + 453: "mibObjectSyntax", + 454: "mibModuleName", + 455: "mobileIMSI", + 456: "mobileMSISDN", + 457: "httpStatusCode", + 458: "sourceTransportPortsLimit", + 459: "httpRequestMethod", + 460: "httpRequestHost", + 461: "httpRequestTarget", + 462: "httpMessageVersion", + 463: "natInstanceID", + 464: "internalAddressRealm", + 465: "externalAddressRealm", + 466: "natQuotaExceededEvent", + 467: "natThresholdEvent", + } + + if typeId >= 105 && typeId <= 127 { + return "Assigned for NetFlow v9 compatibility" + } else if typeId >= 468 && typeId <= 32767 { + return "Unassigned" + } else { + return nameList[typeId] + } +} + +func (flowSet IPFIXOptionsTemplateFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) + str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount) + str += fmt.Sprintf(" ScopeFieldCount: %v\n", record.ScopeFieldCount) + + str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes)) + + for k, field := range record.Scopes { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + + str += fmt.Sprintf(" Options (%v):\n", len(record.Options)) + + for k, field := range record.Options { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + + } + + return str +} + +func (p IPFIXPacket) String() string { + str := "Flow Packet\n" + str += "------------\n" + str += fmt.Sprintf(" Version: %v\n", p.Version) + str += fmt.Sprintf(" Length: %v\n", p.Length) + + exportTime := time.Unix(int64(p.ExportTime), 0) + str += fmt.Sprintf(" ExportTime: %v\n", exportTime.String()) + str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber) + str += fmt.Sprintf(" ObservationDomainId: %v\n", p.ObservationDomainId) + str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets)) + + for i, flowSet := range p.FlowSets { + switch flowSet := flowSet.(type) { + case TemplateFlowSet: + str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString) + case IPFIXOptionsTemplateFlowSet: + str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString) + case DataFlowSet: + str += fmt.Sprintf(" - DataFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString) + case OptionsDataFlowSet: + str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) + str += flowSet.String(IPFIXTypeToString, IPFIXTypeToString) + default: + str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet) + } + } + + return str +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go new file mode 100644 index 000000000..8191b632f --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/netflow.go @@ -0,0 +1,537 @@ +package netflow + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "sync" + + "github.com/netsampler/goflow2/decoders/netflow/templates" + "github.com/netsampler/goflow2/decoders/utils" +) + +type FlowBaseTemplateSet 
map[uint16]map[uint32]map[uint16]interface{} + +type NetFlowTemplateSystem interface { + GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) + AddTemplate(version uint16, obsDomainId uint32, template interface{}) +} + +// Transition structure to ease the conversion with the new template systems +type TemplateWrapper struct { + Ctx context.Context + Key string + Inner templates.TemplateInterface +} + +func (w *TemplateWrapper) getTemplateId(template interface{}) (templateId uint16) { + switch templateIdConv := template.(type) { + case IPFIXOptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case NFv9OptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case TemplateRecord: + templateId = templateIdConv.TemplateId + } + return templateId +} + +func (w TemplateWrapper) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { + return w.Inner.GetTemplate(w.Ctx, &templates.TemplateKey{w.Key, version, obsDomainId, templateId}) +} + +func (w TemplateWrapper) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { + w.Inner.AddTemplate(w.Ctx, &templates.TemplateKey{w.Key, version, obsDomainId, w.getTemplateId(template)}, template) +} + +func DecodeNFv9OptionsTemplateSet(payload *bytes.Buffer) ([]NFv9OptionsTemplateRecord, error) { + var records []NFv9OptionsTemplateRecord + var err error + for payload.Len() >= 4 { + optsTemplateRecord := NFv9OptionsTemplateRecord{} + err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.ScopeLength, &optsTemplateRecord.OptionLength) + if err != nil { + return records, err + } + + sizeScope := int(optsTemplateRecord.ScopeLength) / 4 + sizeOptions := int(optsTemplateRecord.OptionLength) / 4 + if sizeScope < 0 || sizeOptions < 0 { + return records, fmt.Errorf("Error decoding OptionsTemplateSet: negative length.") + } + + fields := make([]Field, sizeScope) + for i := 0; i < sizeScope; i++ { + field := Field{} + if err := DecodeField(payload, &field, false); err != nil { + return records, err + } + fields[i] = field + } + optsTemplateRecord.Scopes = fields + + fields = make([]Field, sizeOptions) + for i := 0; i < sizeOptions; i++ { + field := Field{} + if err := DecodeField(payload, &field, false); err != nil { + return records, err + } + fields[i] = field + } + optsTemplateRecord.Options = fields + + records = append(records, optsTemplateRecord) + } + + return records, err +} + +func DecodeField(payload *bytes.Buffer, field *Field, pen bool) error { + err := utils.BinaryDecoder(payload, &field.Type, &field.Length) + if pen && err == nil && field.Type&0x8000 != 0 { + field.PenProvided = true + err = utils.BinaryDecoder(payload, &field.Pen) + } + return err +} + +func DecodeIPFIXOptionsTemplateSet(payload *bytes.Buffer) ([]IPFIXOptionsTemplateRecord, error) { + var records []IPFIXOptionsTemplateRecord + var err error + for payload.Len() >= 4 { + optsTemplateRecord := IPFIXOptionsTemplateRecord{} + err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.FieldCount, &optsTemplateRecord.ScopeFieldCount) + if err != nil { + return records, err + } + + fields := make([]Field, int(optsTemplateRecord.ScopeFieldCount)) + for i := 0; i < int(optsTemplateRecord.ScopeFieldCount); i++ { + field := Field{} + if err := DecodeField(payload, &field, true); err != nil { + return records, err + } + fields[i] = field + } + optsTemplateRecord.Scopes = fields + + optionsSize := int(optsTemplateRecord.FieldCount) - 
int(optsTemplateRecord.ScopeFieldCount) + if optionsSize < 0 { + return records, fmt.Errorf("Error decoding OptionsTemplateSet: negative length.") + } + fields = make([]Field, optionsSize) + for i := 0; i < optionsSize; i++ { + field := Field{} + if err := DecodeField(payload, &field, true); err != nil { + return records, err + } + fields[i] = field + } + optsTemplateRecord.Options = fields + + records = append(records, optsTemplateRecord) + } + + return records, nil +} + +func DecodeTemplateSet(version uint16, payload *bytes.Buffer) ([]TemplateRecord, error) { + var records []TemplateRecord + var err error + for payload.Len() >= 4 { + templateRecord := TemplateRecord{} + err = utils.BinaryDecoder(payload, &templateRecord.TemplateId, &templateRecord.FieldCount) + if err != nil { + return records, err + } + + if templateRecord.FieldCount == 0 { + return records, fmt.Errorf("Error decoding TemplateSet: zero count.") + } + + fields := make([]Field, int(templateRecord.FieldCount)) + for i := 0; i < int(templateRecord.FieldCount); i++ { + field := Field{} + err := utils.BinaryDecoder(payload, &field.Type, &field.Length) + if err == nil && version == 10 && field.Type&0x8000 != 0 { + field.PenProvided = true + field.Type = field.Type ^ 0x8000 + err = utils.BinaryDecoder(payload, &field.Pen) + } + if err != nil { + return records, err + } + fields[i] = field + } + templateRecord.Fields = fields + records = append(records, templateRecord) + } + + return records, nil +} + +func GetTemplateSize(version uint16, template []Field) int { + sum := 0 + for _, templateField := range template { + if templateField.Length == 0xffff { + continue + } + + sum += int(templateField.Length) + } + return sum +} + +func DecodeDataSetUsingFields(version uint16, payload *bytes.Buffer, listFields []Field) []DataField { + for payload.Len() >= GetTemplateSize(version, listFields) { + + dataFields := make([]DataField, len(listFields)) + for i, templateField := range listFields { + + finalLength := int(templateField.Length) + if templateField.Length == 0xffff { + var variableLen8 byte + var variableLen16 uint16 + err := utils.BinaryDecoder(payload, &variableLen8) + if err != nil { + return []DataField{} + } + if variableLen8 == 0xff { + err := utils.BinaryDecoder(payload, &variableLen16) + if err != nil { + return []DataField{} + } + finalLength = int(variableLen16) + } else { + finalLength = int(variableLen8) + } + } + + value := payload.Next(finalLength) + nfvalue := DataField{ + Type: templateField.Type, + PenProvided: templateField.PenProvided, + Pen: templateField.Pen, + Value: value, + } + dataFields[i] = nfvalue + } + return dataFields + } + return []DataField{} +} + +type ErrorTemplateNotFound struct { + version uint16 + obsDomainId uint32 + templateId uint16 + typeTemplate string +} + +func NewErrorTemplateNotFound(version uint16, obsDomainId uint32, templateId uint16, typeTemplate string) *ErrorTemplateNotFound { + return &ErrorTemplateNotFound{ + version: version, + obsDomainId: obsDomainId, + templateId: templateId, + typeTemplate: typeTemplate, + } +} + +func (e *ErrorTemplateNotFound) Error() string { + return fmt.Sprintf("No %v template %v found for domain id %v", e.typeTemplate, e.templateId, e.obsDomainId) +} + +func DecodeOptionsDataSet(version uint16, payload *bytes.Buffer, listFieldsScopes, listFieldsOption []Field) ([]OptionsDataRecord, error) { + var records []OptionsDataRecord + + listFieldsScopesSize := GetTemplateSize(version, listFieldsScopes) + listFieldsOptionSize :=
GetTemplateSize(version, listFieldsOption) + + for payload.Len() >= listFieldsScopesSize+listFieldsOptionSize { + scopeValues := DecodeDataSetUsingFields(version, payload, listFieldsScopes) + optionValues := DecodeDataSetUsingFields(version, payload, listFieldsOption) + + record := OptionsDataRecord{ + ScopesValues: scopeValues, + OptionsValues: optionValues, + } + + records = append(records, record) + } + return records, nil +} + +func DecodeDataSet(version uint16, payload *bytes.Buffer, listFields []Field) ([]DataRecord, error) { + var records []DataRecord + + listFieldsSize := GetTemplateSize(version, listFields) + for payload.Len() >= listFieldsSize { + values := DecodeDataSetUsingFields(version, payload, listFields) + + record := DataRecord{ + Values: values, + } + + records = append(records, record) + } + return records, nil +} + +func (ts *BasicTemplateSystem) GetTemplates() map[uint16]map[uint32]map[uint16]interface{} { + ts.templateslock.RLock() + tmp := ts.templates + ts.templateslock.RUnlock() + return tmp +} + +func (ts *BasicTemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { + ts.templateslock.Lock() + defer ts.templateslock.Unlock() + _, exists := ts.templates[version] + if exists != true { + ts.templates[version] = make(map[uint32]map[uint16]interface{}) + } + _, exists = ts.templates[version][obsDomainId] + if exists != true { + ts.templates[version][obsDomainId] = make(map[uint16]interface{}) + } + var templateId uint16 + switch templateIdConv := template.(type) { + case IPFIXOptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case NFv9OptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case TemplateRecord: + templateId = templateIdConv.TemplateId + } + ts.templates[version][obsDomainId][templateId] = template +} + +func (ts *BasicTemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { + ts.templateslock.RLock() + defer ts.templateslock.RUnlock() + templatesVersion, okver := ts.templates[version] + if okver { + templatesObsDom, okobs := templatesVersion[obsDomainId] + if okobs { + template, okid := templatesObsDom[templateId] + if okid { + return template, nil + } + } + } + return nil, NewErrorTemplateNotFound(version, obsDomainId, templateId, "info") +} + +type BasicTemplateSystem struct { + templates FlowBaseTemplateSet + templateslock *sync.RWMutex +} + +func CreateTemplateSystem() *BasicTemplateSystem { + ts := &BasicTemplateSystem{ + templates: make(FlowBaseTemplateSet), + templateslock: &sync.RWMutex{}, + } + return ts +} + +func DecodeMessage(payload *bytes.Buffer, templates NetFlowTemplateSystem) (interface{}, error) { + return DecodeMessageContext(context.Background(), payload, "", templates) +} + +func DecodeMessageContext(ctx context.Context, payload *bytes.Buffer, templateKey string, tpli NetFlowTemplateSystem) (interface{}, error) { + var size uint16 + packetNFv9 := NFv9Packet{} + packetIPFIX := IPFIXPacket{} + var returnItem interface{} + + var version uint16 + var obsDomainId uint32 + if err := utils.BinaryRead(payload, binary.BigEndian, &version); err != nil { + return nil, fmt.Errorf("Error decoding version: %v", err) + } + + if version == 9 { + err := utils.BinaryDecoder(payload, &packetNFv9.Count, &packetNFv9.SystemUptime, &packetNFv9.UnixSeconds, &packetNFv9.SequenceNumber, &packetNFv9.SourceId) + if err != nil { + return nil, fmt.Errorf("Error decoding NetFlow v9 header: %v", err) + } + size = packetNFv9.Count + packetNFv9.Version = 
version + returnItem = *(&packetNFv9) + obsDomainId = packetNFv9.SourceId + } else if version == 10 { + err := utils.BinaryDecoder(payload, &packetIPFIX.Length, &packetIPFIX.ExportTime, &packetIPFIX.SequenceNumber, &packetIPFIX.ObservationDomainId) + if err != nil { + return nil, fmt.Errorf("Error decoding IPFIX header: %v", err) + } + size = packetIPFIX.Length + packetIPFIX.Version = version + returnItem = *(&packetIPFIX) + obsDomainId = packetIPFIX.ObservationDomainId + } else { + return nil, fmt.Errorf("NetFlow/IPFIX version error: %d", version) + } + read := 16 + startSize := payload.Len() + + for i := 0; ((i < int(size) && version == 9) || (uint16(read) < size && version == 10)) && payload.Len() > 0; i++ { + fsheader := FlowSetHeader{} + if err := utils.BinaryDecoder(payload, &fsheader.Id, &fsheader.Length); err != nil { + return returnItem, fmt.Errorf("Error decoding FlowSet header: %v", err) + } + + nextrelpos := int(fsheader.Length) - binary.Size(fsheader) + if nextrelpos < 0 { + return returnItem, fmt.Errorf("Error decoding packet: non-terminated stream") + } + + var flowSet interface{} + + if fsheader.Id == 0 && version == 9 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeTemplateSet(version, templateReader) + if err != nil { + return returnItem, fmt.Errorf("Error decoding FlowSet header: %v", err) + } + templatefs := TemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + + flowSet = templatefs + + if tpli != nil { + for _, record := range records { + tpli.AddTemplate(version, obsDomainId, record) + //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record) + } + } + + } else if fsheader.Id == 1 && version == 9 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeNFv9OptionsTemplateSet(templateReader) + if err != nil { + return returnItem, fmt.Errorf("Error decoding NetFlow OptionsTemplateSet: %v", err) + } + optsTemplatefs := NFv9OptionsTemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = optsTemplatefs + + if tpli != nil { + for _, record := range records { + tpli.AddTemplate(version, obsDomainId, record) + //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record) + } + } + + } else if fsheader.Id == 2 && version == 10 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeTemplateSet(version, templateReader) + if err != nil { + return returnItem, fmt.Errorf("Error decoding IPFIX TemplateSet: %v", err) + } + templatefs := TemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = templatefs + + if tpli != nil { + for _, record := range records { + tpli.AddTemplate(version, obsDomainId, record) + //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, record.TemplateId), record) + } + } + + } else if fsheader.Id == 3 && version == 10 { + templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) + records, err := DecodeIPFIXOptionsTemplateSet(templateReader) + if err != nil { + return returnItem, fmt.Errorf("Error decoding IPFIX OptionsTemplateSet: %v", err) + } + optsTemplatefs := IPFIXOptionsTemplateFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = optsTemplatefs + + if tpli != nil { + for _, record := range records { + tpli.AddTemplate(version, obsDomainId, record) + //tpli.AddTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, 
record.TemplateId), record) + } + } + + } else if fsheader.Id >= 256 { + dataReader := bytes.NewBuffer(payload.Next(nextrelpos)) + + if tpli == nil { + continue + } + + template, err := tpli.GetTemplate(version, obsDomainId, fsheader.Id) + //template, err := tpli.GetTemplate(ctx, templates.NewTemplateKey(templateKey, version, obsDomainId, fsheader.Id)) + + if err == nil { + switch templatec := template.(type) { + case TemplateRecord: + records, err := DecodeDataSet(version, dataReader, templatec.Fields) + if err != nil { + return returnItem, fmt.Errorf("Error decoding DataSet: %v", err) + } + datafs := DataFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = datafs + case IPFIXOptionsTemplateRecord: + records, err := DecodeOptionsDataSet(version, dataReader, templatec.Scopes, templatec.Options) + if err != nil { + return returnItem, fmt.Errorf("Error decoding DataSet: %v", err) + } + + datafs := OptionsDataFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = datafs + case NFv9OptionsTemplateRecord: + records, err := DecodeOptionsDataSet(version, dataReader, templatec.Scopes, templatec.Options) + if err != nil { + return returnItem, fmt.Errorf("Error decoding OptionDataSet: %v", err) + } + + datafs := OptionsDataFlowSet{ + FlowSetHeader: fsheader, + Records: records, + } + flowSet = datafs + } + } else { + return returnItem, err + } + } else { + return returnItem, fmt.Errorf("Error with ID %d", fsheader.Id) + } + + if version == 9 && flowSet != nil { + packetNFv9.FlowSets = append(packetNFv9.FlowSets, flowSet) + } else if version == 10 && flowSet != nil { + packetIPFIX.FlowSets = append(packetIPFIX.FlowSets, flowSet) + } + read = startSize - payload.Len() + 16 + } + + if version == 9 { + return packetNFv9, nil + } else if version == 10 { + return packetIPFIX, nil + } else { + return returnItem, fmt.Errorf("Unknown version: %d", version) + } +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/nfv9.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/nfv9.go new file mode 100644 index 000000000..64fe227d8 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/nfv9.go @@ -0,0 +1,317 @@ +package netflow + +import ( + "fmt" + "time" +) + +const ( + NFV9_FIELD_IN_BYTES = 1 + NFV9_FIELD_IN_PKTS = 2 + NFV9_FIELD_FLOWS = 3 + NFV9_FIELD_PROTOCOL = 4 + NFV9_FIELD_SRC_TOS = 5 + NFV9_FIELD_TCP_FLAGS = 6 + NFV9_FIELD_L4_SRC_PORT = 7 + NFV9_FIELD_IPV4_SRC_ADDR = 8 + NFV9_FIELD_SRC_MASK = 9 + NFV9_FIELD_INPUT_SNMP = 10 + NFV9_FIELD_L4_DST_PORT = 11 + NFV9_FIELD_IPV4_DST_ADDR = 12 + NFV9_FIELD_DST_MASK = 13 + NFV9_FIELD_OUTPUT_SNMP = 14 + NFV9_FIELD_IPV4_NEXT_HOP = 15 + NFV9_FIELD_SRC_AS = 16 + NFV9_FIELD_DST_AS = 17 + NFV9_FIELD_BGP_IPV4_NEXT_HOP = 18 + NFV9_FIELD_MUL_DST_PKTS = 19 + NFV9_FIELD_MUL_DST_BYTES = 20 + NFV9_FIELD_LAST_SWITCHED = 21 + NFV9_FIELD_FIRST_SWITCHED = 22 + NFV9_FIELD_OUT_BYTES = 23 + NFV9_FIELD_OUT_PKTS = 24 + NFV9_FIELD_MIN_PKT_LNGTH = 25 + NFV9_FIELD_MAX_PKT_LNGTH = 26 + NFV9_FIELD_IPV6_SRC_ADDR = 27 + NFV9_FIELD_IPV6_DST_ADDR = 28 + NFV9_FIELD_IPV6_SRC_MASK = 29 + NFV9_FIELD_IPV6_DST_MASK = 30 + NFV9_FIELD_IPV6_FLOW_LABEL = 31 + NFV9_FIELD_ICMP_TYPE = 32 + NFV9_FIELD_MUL_IGMP_TYPE = 33 + NFV9_FIELD_SAMPLING_INTERVAL = 34 + NFV9_FIELD_SAMPLING_ALGORITHM = 35 + NFV9_FIELD_FLOW_ACTIVE_TIMEOUT = 36 + NFV9_FIELD_FLOW_INACTIVE_TIMEOUT = 37 + NFV9_FIELD_ENGINE_TYPE = 38 + NFV9_FIELD_ENGINE_ID = 39 + NFV9_FIELD_TOTAL_BYTES_EXP = 40 + NFV9_FIELD_TOTAL_PKTS_EXP = 41 + NFV9_FIELD_TOTAL_FLOWS_EXP = 42 + 
NFV9_FIELD_IPV4_SRC_PREFIX = 44 + NFV9_FIELD_IPV4_DST_PREFIX = 45 + NFV9_FIELD_MPLS_TOP_LABEL_TYPE = 46 + NFV9_FIELD_MPLS_TOP_LABEL_IP_ADDR = 47 + NFV9_FIELD_FLOW_SAMPLER_ID = 48 + NFV9_FIELD_FLOW_SAMPLER_MODE = 49 + NFV9_FIELD_FLOW_SAMPLER_RANDOM_INTERVAL = 50 + NFV9_FIELD_MIN_TTL = 52 + NFV9_FIELD_MAX_TTL = 53 + NFV9_FIELD_IPV4_IDENT = 54 + NFV9_FIELD_DST_TOS = 55 + NFV9_FIELD_IN_SRC_MAC = 56 + NFV9_FIELD_OUT_DST_MAC = 57 + NFV9_FIELD_SRC_VLAN = 58 + NFV9_FIELD_DST_VLAN = 59 + NFV9_FIELD_IP_PROTOCOL_VERSION = 60 + NFV9_FIELD_DIRECTION = 61 + NFV9_FIELD_IPV6_NEXT_HOP = 62 + NFV9_FIELD_BGP_IPV6_NEXT_HOP = 63 + NFV9_FIELD_IPV6_OPTION_HEADERS = 64 + NFV9_FIELD_MPLS_LABEL_1 = 70 + NFV9_FIELD_MPLS_LABEL_2 = 71 + NFV9_FIELD_MPLS_LABEL_3 = 72 + NFV9_FIELD_MPLS_LABEL_4 = 73 + NFV9_FIELD_MPLS_LABEL_5 = 74 + NFV9_FIELD_MPLS_LABEL_6 = 75 + NFV9_FIELD_MPLS_LABEL_7 = 76 + NFV9_FIELD_MPLS_LABEL_8 = 77 + NFV9_FIELD_MPLS_LABEL_9 = 78 + NFV9_FIELD_MPLS_LABEL_10 = 79 + NFV9_FIELD_IN_DST_MAC = 80 + NFV9_FIELD_OUT_SRC_MAC = 81 + NFV9_FIELD_IF_NAME = 82 + NFV9_FIELD_IF_DESC = 83 + NFV9_FIELD_SAMPLER_NAME = 84 + NFV9_FIELD_IN_PERMANENT_BYTES = 85 + NFV9_FIELD_IN_PERMANENT_PKTS = 86 + NFV9_FIELD_FRAGMENT_OFFSET = 88 + NFV9_FIELD_FORWARDING_STATUS = 89 + NFV9_FIELD_MPLS_PAL_RD = 90 + NFV9_FIELD_MPLS_PREFIX_LEN = 91 + NFV9_FIELD_SRC_TRAFFIC_INDEX = 92 + NFV9_FIELD_DST_TRAFFIC_INDEX = 93 + NFV9_FIELD_APPLICATION_DESCRIPTION = 94 + NFV9_FIELD_APPLICATION_TAG = 95 + NFV9_FIELD_APPLICATION_NAME = 96 + NFV9_FIELD_postipDiffServCodePoint = 98 + NFV9_FIELD_replication_factor = 99 + NFV9_FIELD_layer2packetSectionOffset = 102 + NFV9_FIELD_layer2packetSectionSize = 103 + NFV9_FIELD_layer2packetSectionData = 104 +) + +type NFv9Packet struct { + Version uint16 + Count uint16 + SystemUptime uint32 + UnixSeconds uint32 + SequenceNumber uint32 + SourceId uint32 + FlowSets []interface{} +} + +type NFv9OptionsTemplateFlowSet struct { + FlowSetHeader + Records []NFv9OptionsTemplateRecord +} + +type NFv9OptionsTemplateRecord struct { + TemplateId uint16 + ScopeLength uint16 + OptionLength uint16 + Scopes []Field + Options []Field +} + +func NFv9TypeToString(typeId uint16) string { + + nameList := map[uint16]string{ + 1: "IN_BYTES", + 2: "IN_PKTS", + 3: "FLOWS", + 4: "PROTOCOL", + 5: "SRC_TOS", + 6: "TCP_FLAGS", + 7: "L4_SRC_PORT", + 8: "IPV4_SRC_ADDR", + 9: "SRC_MASK", + 10: "INPUT_SNMP", + 11: "L4_DST_PORT", + 12: "IPV4_DST_ADDR", + 13: "DST_MASK", + 14: "OUTPUT_SNMP", + 15: "IPV4_NEXT_HOP", + 16: "SRC_AS", + 17: "DST_AS", + 18: "BGP_IPV4_NEXT_HOP", + 19: "MUL_DST_PKTS", + 20: "MUL_DST_BYTES", + 21: "LAST_SWITCHED", + 22: "FIRST_SWITCHED", + 23: "OUT_BYTES", + 24: "OUT_PKTS", + 25: "MIN_PKT_LNGTH", + 26: "MAX_PKT_LNGTH", + 27: "IPV6_SRC_ADDR", + 28: "IPV6_DST_ADDR", + 29: "IPV6_SRC_MASK", + 30: "IPV6_DST_MASK", + 31: "IPV6_FLOW_LABEL", + 32: "ICMP_TYPE", + 33: "MUL_IGMP_TYPE", + 34: "SAMPLING_INTERVAL", + 35: "SAMPLING_ALGORITHM", + 36: "FLOW_ACTIVE_TIMEOUT", + 37: "FLOW_INACTIVE_TIMEOUT", + 38: "ENGINE_TYPE", + 39: "ENGINE_ID", + 40: "TOTAL_BYTES_EXP", + 41: "TOTAL_PKTS_EXP", + 42: "TOTAL_FLOWS_EXP", + 43: "*Vendor Proprietary*", + 44: "IPV4_SRC_PREFIX", + 45: "IPV4_DST_PREFIX", + 46: "MPLS_TOP_LABEL_TYPE", + 47: "MPLS_TOP_LABEL_IP_ADDR", + 48: "FLOW_SAMPLER_ID", + 49: "FLOW_SAMPLER_MODE", + 50: "FLOW_SAMPLER_RANDOM_INTERVAL", + 51: "*Vendor Proprietary*", + 52: "MIN_TTL", + 53: "MAX_TTL", + 54: "IPV4_IDENT", + 55: "DST_TOS", + 56: "IN_SRC_MAC", + 57: "OUT_DST_MAC", + 58: "SRC_VLAN", + 59: "DST_VLAN", + 60: "IP_PROTOCOL_VERSION", 
+ 61: "DIRECTION", + 62: "IPV6_NEXT_HOP", + 63: "BPG_IPV6_NEXT_HOP", + 64: "IPV6_OPTION_HEADERS", + 65: "*Vendor Proprietary*", + 66: "*Vendor Proprietary*", + 67: "*Vendor Proprietary*", + 68: "*Vendor Proprietary*", + 69: "*Vendor Proprietary*", + 70: "MPLS_LABEL_1", + 71: "MPLS_LABEL_2", + 72: "MPLS_LABEL_3", + 73: "MPLS_LABEL_4", + 74: "MPLS_LABEL_5", + 75: "MPLS_LABEL_6", + 76: "MPLS_LABEL_7", + 77: "MPLS_LABEL_8", + 78: "MPLS_LABEL_9", + 79: "MPLS_LABEL_10", + 80: "IN_DST_MAC", + 81: "OUT_SRC_MAC", + 82: "IF_NAME", + 83: "IF_DESC", + 84: "SAMPLER_NAME", + 85: "IN_ PERMANENT _BYTES", + 86: "IN_ PERMANENT _PKTS", + 87: "*Vendor Proprietary*", + 88: "FRAGMENT_OFFSET", + 89: "FORWARDING STATUS", + 90: "MPLS PAL RD", + 91: "MPLS PREFIX LEN", + 92: "SRC TRAFFIC INDEX", + 93: "DST TRAFFIC INDEX", + 94: "APPLICATION DESCRIPTION", + 95: "APPLICATION TAG", + 96: "APPLICATION NAME", + 98: "postipDiffServCodePoint", + 99: "replication factor", + 100: "DEPRECATED", + 102: "layer2packetSectionOffset", + 103: "layer2packetSectionSize", + 104: "layer2packetSectionData", + 234: "ingressVRFID", + 235: "egressVRFID", + } + + if typeId > 104 || typeId == 0 { + return "Unassigned" + } else { + return nameList[typeId] + } +} + +func NFv9ScopeToString(scopeId uint16) string { + nameList := map[uint16]string{ + 1: "System", + 2: "Interface", + 3: "Line Card", + 4: "NetFlow Cache", + 5: "Template", + } + + if scopeId >= 1 && scopeId <= 5 { + return nameList[scopeId] + } else { + return "Unassigned" + } +} + +func (flowSet NFv9OptionsTemplateFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) + str += fmt.Sprintf(" ScopeLength: %v\n", record.ScopeLength) + str += fmt.Sprintf(" OptionLength: %v\n", record.OptionLength) + str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes)) + + for k, field := range record.Scopes { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, NFv9ScopeToString(field.Type), field.Type, field.Length) + } + + str += fmt.Sprintf(" Options (%v):\n", len(record.Options)) + + for k, field := range record.Options { + str += fmt.Sprintf(" - %v. 
%v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) + } + } + + return str +} + +func (p NFv9Packet) String() string { + str := "Flow Packet\n" + str += "------------\n" + str += fmt.Sprintf(" Version: %v\n", p.Version) + str += fmt.Sprintf(" Count: %v\n", p.Count) + + unixSeconds := time.Unix(int64(p.UnixSeconds), 0) + str += fmt.Sprintf(" SystemUptime: %v\n", p.SystemUptime) + str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.UTC().String()) + str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber) + str += fmt.Sprintf(" SourceId: %v\n", p.SourceId) + str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets)) + + for i, flowSet := range p.FlowSets { + switch flowSet := flowSet.(type) { + case TemplateFlowSet: + str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString) + case NFv9OptionsTemplateFlowSet: + str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString) + case DataFlowSet: + str += fmt.Sprintf(" - DataFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString) + case OptionsDataFlowSet: + str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) + str += flowSet.String(NFv9TypeToString, NFv9ScopeToString) + default: + str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet) + } + } + return str +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/packet.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/packet.go new file mode 100644 index 000000000..3e3707d64 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/packet.go @@ -0,0 +1,158 @@ +package netflow + +import ( + "fmt" +) + +// FlowSetHeader contains fields shared by all Flow Sets (DataFlowSet, +// TemplateFlowSet, OptionsTemplateFlowSet). +type FlowSetHeader struct { + // FlowSet ID: + // 0 for TemplateFlowSet + // 1 for OptionsTemplateFlowSet + // 256-65535 for DataFlowSet (used as TemplateId) + Id uint16 + + // The total length of this FlowSet in bytes (including padding). + Length uint16 +} + +// TemplateFlowSet is a collection of templates that describe structure of Data +// Records (actual NetFlow data). +type TemplateFlowSet struct { + FlowSetHeader + + // List of Template Records + Records []TemplateRecord +} + +// DataFlowSet is a collection of Data Records (actual NetFlow data) and Options +// Data Records (meta data). +type DataFlowSet struct { + FlowSetHeader + + Records []DataRecord +} + +type OptionsDataFlowSet struct { + FlowSetHeader + + Records []OptionsDataRecord +} + +// TemplateRecord is a single template that describes structure of a Flow Record +// (actual Netflow data). +type TemplateRecord struct { + // Each of the newly generated Template Records is given a unique + // Template ID. This uniqueness is local to the Observation Domain that + // generated the Template ID. Template IDs of Data FlowSets are numbered + // from 256 to 65535. + TemplateId uint16 + + // Number of fields in this Template Record. Because a Template FlowSet + // usually contains multiple Template Records, this field allows the + // Collector to determine the end of the current Template Record and + // the start of the next. + FieldCount uint16 + + // List of fields in this Template Record. + Fields []Field +} + +type DataRecord struct { + Values []DataField +} + +// OptionsDataRecord is meta data sent alongide actual NetFlow data. Combined +// with OptionsTemplateRecord it can be decoded to a single data row. 
+type OptionsDataRecord struct { + // List of Scope values stored in raw format as []byte + ScopesValues []DataField + + // List of Optons values stored in raw format as []byte + OptionsValues []DataField +} + +// Field describes type and length of a single value in a Flow Data Record. +// Field does not contain the record value itself it is just a description of +// what record value will look like. +type Field struct { + // A numeric value that represents the type of field. + PenProvided bool + Type uint16 + + // The length (in bytes) of the field. + Length uint16 + + Pen uint32 +} + +type DataField struct { + // A numeric value that represents the type of field. + PenProvided bool + Type uint16 + Pen uint32 + + // The value (in bytes) of the field. + Value interface{} + //Value []byte +} + +func (flowSet OptionsDataFlowSet) String(TypeToString func(uint16) string, ScopeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" Scopes (%v):\n", len(record.ScopesValues)) + + for k, value := range record.ScopesValues { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, ScopeToString(value.Type), value.Type, value.Value) + } + + str += fmt.Sprintf(" Options (%v):\n", len(record.OptionsValues)) + + for k, value := range record.OptionsValues { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value) + } + } + + return str +} + +func (flowSet DataFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - Record %v:\n", j) + str += fmt.Sprintf(" Values (%v):\n", len(record.Values)) + + for k, value := range record.Values { + str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value) + } + } + + return str +} + +func (flowSet TemplateFlowSet) String(TypeToString func(uint16) string) string { + str := fmt.Sprintf(" Id %v\n", flowSet.Id) + str += fmt.Sprintf(" Length: %v\n", flowSet.Length) + str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) + + for j, record := range flowSet.Records { + str += fmt.Sprintf(" - %v. Record:\n", j) + str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) + str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount) + str += fmt.Sprintf(" Fields (%v):\n", len(record.Fields)) + + for k, field := range record.Fields { + str += fmt.Sprintf(" - %v. 
%v (%v/%v): %v\n", k, TypeToString(field.Type), field.Type, field.PenProvided, field.Length) + } + } + + return str +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/memory/memory.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/memory/memory.go new file mode 100644 index 000000000..0f16b241e --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/memory/memory.go @@ -0,0 +1,73 @@ +package memory + +import ( + "context" + "github.com/netsampler/goflow2/decoders/netflow/templates" + "sync" +) + +var ( + Driver = &MemoryDriver{} +) + +type templateData struct { + key *templates.TemplateKey + data interface{} +} + +type MemoryDriver struct { + lock *sync.RWMutex + templates map[string]templateData +} + +func (d *MemoryDriver) Prepare() error { + // could have an expiry + return nil +} + +func (d *MemoryDriver) Init(context.Context) error { + d.lock = &sync.RWMutex{} + d.templates = make(map[string]templateData) + return nil +} + +func (d *MemoryDriver) Close(context.Context) error { + return nil +} + +func (d *MemoryDriver) ListTemplates(ctx context.Context, ch chan *templates.TemplateKey) error { + d.lock.RLock() + defer d.lock.RUnlock() + for _, v := range d.templates { + select { + case ch <- v.key: + case <-ctx.Done(): + return ctx.Err() + } + } + select { + case ch <- nil: + } + return nil +} + +func (d *MemoryDriver) AddTemplate(ctx context.Context, key *templates.TemplateKey, template interface{}) error { + d.lock.Lock() + defer d.lock.Unlock() + + d.templates[key.String()] = templateData{ + key: key, + data: template, + } + return nil +} + +func (d *MemoryDriver) GetTemplate(ctx context.Context, key *templates.TemplateKey) (interface{}, error) { + d.lock.RLock() + defer d.lock.RUnlock() + return d.templates[key.String()].data, nil +} + +func init() { + templates.RegisterTemplateDriver("memory", Driver) +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/templates.go b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/templates.go new file mode 100644 index 000000000..525e6b10e --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflow/templates/templates.go @@ -0,0 +1,139 @@ +package templates + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" +) + +var ( + templateDrivers = make(map[string]TemplateDriver) // might be better to change into "factory" + lock = &sync.RWMutex{} +) + +type TemplateDriver interface { + TemplateInterface + + Prepare() error // Prepare driver (eg: flag registration) + Init(context.Context) error // Initialize driver (eg: parse keying) + Close(context.Context) error // Close drive (eg: close file) +} + +type TemplateKey struct { + TemplateKey string + Version uint16 + ObsDomainId uint32 + TemplateId uint16 +} + +func NewTemplateKey(templateKey string, version uint16, obsDomainId uint32, templateId uint16) *TemplateKey { + return &TemplateKey{ + TemplateKey: templateKey, + Version: version, + ObsDomainId: obsDomainId, + TemplateId: templateId, + } +} + +func (k *TemplateKey) String() string { + return fmt.Sprintf("%s-%d-%d-%d", k.TemplateKey, k.Version, k.ObsDomainId, k.TemplateId) +} + +func ParseTemplateKey(key string, k *TemplateKey) error { + if k != nil { + return nil + } + var version uint16 + var obsDomainId uint32 + var templateId uint16 + + keySplit := strings.Split(key, "-") + if len(keySplit) != 4 { + return fmt.Errorf("template key format is invalid") + } + templateKey := keySplit[0] + if val, err := 
strconv.ParseUint(keySplit[1], 10, 64); err != nil { + return fmt.Errorf("template key version is invalid") + } else { + version = uint16(val) + } + if val, err := strconv.ParseUint(keySplit[2], 10, 64); err != nil { + fmt.Errorf("template key observation domain I Dis invalid") + } else { + obsDomainId = uint32(val) + } + if val, err := strconv.ParseUint(keySplit[3], 10, 64); err != nil { + fmt.Errorf("template key template ID is invalid") + } else { + templateId = uint16(val) + } + + k.TemplateKey = templateKey + k.Version = version + k.ObsDomainId = obsDomainId + k.TemplateId = templateId + + return nil +} + +type TemplateInterface interface { + ListTemplates(ctx context.Context, ch chan *TemplateKey) error + GetTemplate(ctx context.Context, key *TemplateKey) (interface{}, error) + AddTemplate(ctx context.Context, key *TemplateKey, template interface{}) error // add expiration +} + +type TemplateSystem struct { + driver TemplateDriver +} + +func (t *TemplateSystem) ListTemplates(ctx context.Context, ch chan *TemplateKey) error { + return t.driver.ListTemplates(ctx, ch) +} + +func (t *TemplateSystem) AddTemplate(ctx context.Context, key *TemplateKey, template interface{}) error { + return t.driver.AddTemplate(ctx, key, template) +} + +func (t *TemplateSystem) GetTemplate(ctx context.Context, key *TemplateKey) (interface{}, error) { + return t.driver.GetTemplate(ctx, key) +} + +func (t *TemplateSystem) Close(ctx context.Context) error { + return t.driver.Close(ctx) +} + +func RegisterTemplateDriver(name string, t TemplateDriver) { + lock.Lock() + templateDrivers[name] = t + lock.Unlock() + + if err := t.Prepare(); err != nil { + panic(err) + } +} + +func FindTemplateSystem(ctx context.Context, name string) (*TemplateSystem, error) { + lock.RLock() + t, ok := templateDrivers[name] + lock.RUnlock() + if !ok { + return nil, fmt.Errorf("Template %s not found", name) + } + + err := t.Init(ctx) + return &TemplateSystem{t}, err +} + +func GetTemplates() []string { + lock.RLock() + defer lock.RUnlock() + t := make([]string, len(templateDrivers)) + var i int + for k, _ := range templateDrivers { + t[i] = k + i++ + } + return t +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/netflow.go b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/netflow.go new file mode 100644 index 000000000..955f82cef --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/netflow.go @@ -0,0 +1,90 @@ +package netflowlegacy + +import ( + "bytes" + "fmt" + + "github.com/netsampler/goflow2/decoders/utils" +) + +const ( + MAX_COUNT = 1536 +) + +type ErrorVersion struct { + version uint16 +} + +func NewErrorVersion(version uint16) *ErrorVersion { + return &ErrorVersion{ + version: version, + } +} + +func (e *ErrorVersion) Error() string { + return fmt.Sprintf("Unknown NetFlow version %v (only decodes v5)", e.version) +} + +func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { + var version uint16 + err := utils.BinaryDecoder(payload, &version) + if err != nil { + return nil, err + } + packet := PacketNetFlowV5{} + if version == 5 { + packet.Version = version + + utils.BinaryDecoder(payload, + &(packet.Count), + &(packet.SysUptime), + &(packet.UnixSecs), + &(packet.UnixNSecs), + &(packet.FlowSequence), + &(packet.EngineType), + &(packet.EngineId), + &(packet.SamplingInterval), + ) + + packet.SamplingInterval = packet.SamplingInterval & 0x3FFF + + if packet.Count > MAX_COUNT { + return nil, fmt.Errorf("Too many samples (%d > %d) in packet", packet.Count, 
MAX_COUNT) + } + + packet.Records = make([]RecordsNetFlowV5, int(packet.Count)) + for i := 0; i < int(packet.Count) && payload.Len() >= 48; i++ { + record := RecordsNetFlowV5{} + err := utils.BinaryDecoder(payload, + &record.SrcAddr, + &record.DstAddr, + &record.NextHop, + &record.Input, + &record.Output, + &record.DPkts, + &record.DOctets, + &record.First, + &record.Last, + &record.SrcPort, + &record.DstPort, + &record.Pad1, + &record.TCPFlags, + &record.Proto, + &record.Tos, + &record.SrcAS, + &record.DstAS, + &record.SrcMask, + &record.DstMask, + &record.Pad2, + ) + if err != nil { + return packet, err + } + packet.Records[i] = record + } + + return packet, nil + } else { + return nil, NewErrorVersion(version) + } +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/packet.go b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/packet.go new file mode 100644 index 000000000..078bba4df --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/netflowlegacy/packet.go @@ -0,0 +1,96 @@ +package netflowlegacy + +import ( + "encoding/binary" + "fmt" + "net" + "time" +) + +type PacketNetFlowV5 struct { + Version uint16 + Count uint16 + SysUptime uint32 + UnixSecs uint32 + UnixNSecs uint32 + FlowSequence uint32 + EngineType uint8 + EngineId uint8 + SamplingInterval uint16 + Records []RecordsNetFlowV5 +} + +type RecordsNetFlowV5 struct { + SrcAddr uint32 + DstAddr uint32 + NextHop uint32 + Input uint16 + Output uint16 + DPkts uint32 + DOctets uint32 + First uint32 + Last uint32 + SrcPort uint16 + DstPort uint16 + Pad1 byte + TCPFlags uint8 + Proto uint8 + Tos uint8 + SrcAS uint16 + DstAS uint16 + SrcMask uint8 + DstMask uint8 + Pad2 uint16 +} + +func (p PacketNetFlowV5) String() string { + str := "NetFlow v5 Packet\n" + str += "-----------------\n" + str += fmt.Sprintf(" Version: %v\n", p.Version) + str += fmt.Sprintf(" Count: %v\n", p.Count) + + unixSeconds := time.Unix(int64(p.UnixSecs), int64(p.UnixNSecs)) + str += fmt.Sprintf(" SystemUptime: %v\n", time.Duration(p.SysUptime)*time.Millisecond) + str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String()) + str += fmt.Sprintf(" FlowSequence: %v\n", p.FlowSequence) + str += fmt.Sprintf(" EngineType: %v\n", p.EngineType) + str += fmt.Sprintf(" EngineId: %v\n", p.EngineId) + str += fmt.Sprintf(" SamplingInterval: %v\n", p.SamplingInterval) + str += fmt.Sprintf(" Records (%v):\n", len(p.Records)) + + for i, record := range p.Records { + str += fmt.Sprintf(" Record %v:\n", i) + str += record.String() + } + return str +} + +func (r RecordsNetFlowV5) String() string { + srcaddr := make(net.IP, 4) + binary.BigEndian.PutUint32(srcaddr, r.SrcAddr) + dstaddr := make(net.IP, 4) + binary.BigEndian.PutUint32(dstaddr, r.DstAddr) + nexthop := make(net.IP, 4) + binary.BigEndian.PutUint32(nexthop, r.NextHop) + + str := fmt.Sprintf(" SrcAddr: %v\n", srcaddr.String()) + str += fmt.Sprintf(" DstAddr: %v\n", dstaddr.String()) + str += fmt.Sprintf(" NextHop: %v\n", nexthop.String()) + str += fmt.Sprintf(" Input: %v\n", r.Input) + str += fmt.Sprintf(" Output: %v\n", r.Output) + str += fmt.Sprintf(" DPkts: %v\n", r.DPkts) + str += fmt.Sprintf(" DOctets: %v\n", r.DOctets) + str += fmt.Sprintf(" First: %v\n", time.Duration(r.First)*time.Millisecond) + str += fmt.Sprintf(" Last: %v\n", time.Duration(r.Last)*time.Millisecond) + str += fmt.Sprintf(" SrcPort: %v\n", r.SrcPort) + str += fmt.Sprintf(" DstPort: %v\n", r.DstPort) + str += fmt.Sprintf(" TCPFlags: %v\n", r.TCPFlags) + str += fmt.Sprintf(" Proto: %v\n", r.Proto) + str += 
fmt.Sprintf(" Tos: %v\n", r.Tos) + str += fmt.Sprintf(" SrcAS: %v\n", r.SrcAS) + str += fmt.Sprintf(" DstAS: %v\n", r.DstAS) + str += fmt.Sprintf(" SrcMask: %v\n", r.SrcMask) + str += fmt.Sprintf(" DstMask: %v\n", r.DstMask) + + return str +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/sflow/datastructure.go b/vendor/github.com/netsampler/goflow2/decoders/sflow/datastructure.go new file mode 100644 index 000000000..670652a2c --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/sflow/datastructure.go @@ -0,0 +1,103 @@ +package sflow + +type SampledHeader struct { + Protocol uint32 + FrameLength uint32 + Stripped uint32 + OriginalLength uint32 + HeaderData []byte +} + +type SampledEthernet struct { + Length uint32 + SrcMac []byte + DstMac []byte + EthType uint32 +} + +type SampledIP_Base struct { + Length uint32 + Protocol uint32 + SrcIP []byte + DstIP []byte + SrcPort uint32 + DstPort uint32 + TcpFlags uint32 +} + +type SampledIPv4 struct { + Base SampledIP_Base + Tos uint32 +} + +type SampledIPv6 struct { + Base SampledIP_Base + Priority uint32 +} + +type ExtendedSwitch struct { + SrcVlan uint32 + SrcPriority uint32 + DstVlan uint32 + DstPriority uint32 +} + +type ExtendedRouter struct { + NextHopIPVersion uint32 + NextHop []byte + SrcMaskLen uint32 + DstMaskLen uint32 +} + +type ExtendedGateway struct { + NextHopIPVersion uint32 + NextHop []byte + AS uint32 + SrcAS uint32 + SrcPeerAS uint32 + ASDestinations uint32 + ASPathType uint32 + ASPathLength uint32 + ASPath []uint32 + CommunitiesLength uint32 + Communities []uint32 + LocalPref uint32 +} + +type IfCounters struct { + IfIndex uint32 + IfType uint32 + IfSpeed uint64 + IfDirection uint32 + IfStatus uint32 + IfInOctets uint64 + IfInUcastPkts uint32 + IfInMulticastPkts uint32 + IfInBroadcastPkts uint32 + IfInDiscards uint32 + IfInErrors uint32 + IfInUnknownProtos uint32 + IfOutOctets uint64 + IfOutUcastPkts uint32 + IfOutMulticastPkts uint32 + IfOutBroadcastPkts uint32 + IfOutDiscards uint32 + IfOutErrors uint32 + IfPromiscuousMode uint32 +} + +type EthernetCounters struct { + Dot3StatsAlignmentErrors uint32 + Dot3StatsFCSErrors uint32 + Dot3StatsSingleCollisionFrames uint32 + Dot3StatsMultipleCollisionFrames uint32 + Dot3StatsSQETestErrors uint32 + Dot3StatsDeferredTransmissions uint32 + Dot3StatsLateCollisions uint32 + Dot3StatsExcessiveCollisions uint32 + Dot3StatsInternalMacTransmitErrors uint32 + Dot3StatsCarrierSenseErrors uint32 + Dot3StatsFrameTooLongs uint32 + Dot3StatsInternalMacReceiveErrors uint32 + Dot3StatsSymbolErrors uint32 +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/sflow/packet.go b/vendor/github.com/netsampler/goflow2/decoders/sflow/packet.go new file mode 100644 index 000000000..647f83db3 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/sflow/packet.go @@ -0,0 +1,73 @@ +package sflow + +type Packet struct { + Version uint32 + IPVersion uint32 + AgentIP []byte + SubAgentId uint32 + SequenceNumber uint32 + Uptime uint32 + SamplesCount uint32 + Samples []interface{} +} + +type SampleHeader struct { + Format uint32 + Length uint32 + + SampleSequenceNumber uint32 + SourceIdType uint32 + SourceIdValue uint32 +} + +type FlowSample struct { + Header SampleHeader + + SamplingRate uint32 + SamplePool uint32 + Drops uint32 + Input uint32 + Output uint32 + FlowRecordsCount uint32 + Records []FlowRecord +} + +type CounterSample struct { + Header SampleHeader + + CounterRecordsCount uint32 + Records []CounterRecord +} + +type ExpandedFlowSample struct { + Header 
SampleHeader + + SamplingRate uint32 + SamplePool uint32 + Drops uint32 + InputIfFormat uint32 + InputIfValue uint32 + OutputIfFormat uint32 + OutputIfValue uint32 + FlowRecordsCount uint32 + Records []FlowRecord +} + +type RecordHeader struct { + DataFormat uint32 + Length uint32 +} + +type FlowRecord struct { + Header RecordHeader + Data interface{} +} + +type FlowRecordRaw struct { + Data []byte +} + +type CounterRecord struct { + Header RecordHeader + Data interface{} +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/sflow/sflow.go b/vendor/github.com/netsampler/goflow2/decoders/sflow/sflow.go new file mode 100644 index 000000000..309e965aa --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/sflow/sflow.go @@ -0,0 +1,482 @@ +package sflow + +import ( + "bytes" + "errors" + "fmt" + + "github.com/netsampler/goflow2/decoders/utils" +) + +const ( + FORMAT_EXT_SWITCH = 1001 + FORMAT_EXT_ROUTER = 1002 + FORMAT_EXT_GATEWAY = 1003 + FORMAT_RAW_PKT = 1 + FORMAT_ETH = 2 + FORMAT_IPV4 = 3 + FORMAT_IPV6 = 4 + + MAX_SAMPLES = 512 + MAX_RECORDS = 8192 + MAX_ATTRS = 16383 +) + +type ErrorDecodingSFlow struct { + msg string +} + +func NewErrorDecodingSFlow(msg string) *ErrorDecodingSFlow { + return &ErrorDecodingSFlow{ + msg: msg, + } +} + +func (e *ErrorDecodingSFlow) Error() string { + return fmt.Sprintf("Error decoding sFlow: %v", e.msg) +} + +type ErrorDataFormat struct { + dataformat uint32 +} + +func NewErrorDataFormat(dataformat uint32) *ErrorDataFormat { + return &ErrorDataFormat{ + dataformat: dataformat, + } +} + +func (e *ErrorDataFormat) Error() string { + return fmt.Sprintf("Unknown data format %v", e.dataformat) +} + +type ErrorIPVersion struct { + version uint32 +} + +func NewErrorIPVersion(version uint32) *ErrorIPVersion { + return &ErrorIPVersion{ + version: version, + } +} + +func (e *ErrorIPVersion) Error() string { + return fmt.Sprintf("Unknown IP version: %v", e.version) +} + +type ErrorVersion struct { + version uint32 +} + +func NewErrorVersion(version uint32) *ErrorVersion { + return &ErrorVersion{ + version: version, + } +} + +func (e *ErrorVersion) Error() string { + return fmt.Sprintf("Unknown sFlow version %v (supported v5)", e.version) +} + +func DecodeCounterRecord(header *RecordHeader, payload *bytes.Buffer) (CounterRecord, error) { + counterRecord := CounterRecord{ + Header: *header, + } + switch (*header).DataFormat { + case 1: + ifCounters := IfCounters{} + err := utils.BinaryDecoder(payload, + &ifCounters.IfIndex, + &ifCounters.IfType, + &ifCounters.IfSpeed, + &ifCounters.IfDirection, + &ifCounters.IfStatus, + &ifCounters.IfInOctets, + &ifCounters.IfInUcastPkts, + &ifCounters.IfInMulticastPkts, + &ifCounters.IfInBroadcastPkts, + &ifCounters.IfInDiscards, + &ifCounters.IfInErrors, + &ifCounters.IfInUnknownProtos, + &ifCounters.IfOutOctets, + &ifCounters.IfOutUcastPkts, + &ifCounters.IfOutMulticastPkts, + &ifCounters.IfOutBroadcastPkts, + &ifCounters.IfOutDiscards, + &ifCounters.IfOutErrors, + &ifCounters.IfPromiscuousMode, + ) + if err != nil { + return counterRecord, err + } + counterRecord.Data = ifCounters + case 2: + ethernetCounters := EthernetCounters{} + err := utils.BinaryDecoder(payload, + ðernetCounters.Dot3StatsAlignmentErrors, + ðernetCounters.Dot3StatsFCSErrors, + ðernetCounters.Dot3StatsSingleCollisionFrames, + ðernetCounters.Dot3StatsMultipleCollisionFrames, + ðernetCounters.Dot3StatsSQETestErrors, + ðernetCounters.Dot3StatsDeferredTransmissions, + ðernetCounters.Dot3StatsLateCollisions, + 
ðernetCounters.Dot3StatsExcessiveCollisions, + ðernetCounters.Dot3StatsInternalMacTransmitErrors, + ðernetCounters.Dot3StatsCarrierSenseErrors, + ðernetCounters.Dot3StatsFrameTooLongs, + ðernetCounters.Dot3StatsInternalMacReceiveErrors, + ðernetCounters.Dot3StatsSymbolErrors, + ) + if err != nil { + return counterRecord, err + } + counterRecord.Data = ethernetCounters + default: + counterRecord.Data = &FlowRecordRaw{ + Data: payload.Next(int(header.Length)), + } + } + + return counterRecord, nil +} + +func DecodeIP(payload *bytes.Buffer) (uint32, []byte, error) { + var ipVersion uint32 + err := utils.BinaryDecoder(payload, &ipVersion) + if err != nil { + return 0, nil, err + } + var ip []byte + if ipVersion == 1 { + ip = make([]byte, 4) + } else if ipVersion == 2 { + ip = make([]byte, 16) + } else { + return ipVersion, ip, NewErrorIPVersion(ipVersion) + } + if payload.Len() >= len(ip) { + err := utils.BinaryDecoder(payload, ip) + if err != nil { + return 0, nil, err + } + } else { + return ipVersion, ip, NewErrorDecodingSFlow(fmt.Sprintf("Not enough data: %v, needs %v.", payload.Len(), len(ip))) + } + return ipVersion, ip, nil +} + +func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, error) { + flowRecord := FlowRecord{ + Header: *header, + } + switch (*header).DataFormat { + case FORMAT_EXT_SWITCH: + extendedSwitch := ExtendedSwitch{} + err := utils.BinaryDecoder(payload, &extendedSwitch.SrcVlan, &extendedSwitch.SrcPriority, &extendedSwitch.DstVlan, &extendedSwitch.DstPriority) + if err != nil { + return flowRecord, err + } + flowRecord.Data = extendedSwitch + case FORMAT_RAW_PKT: + sampledHeader := SampledHeader{} + err := utils.BinaryDecoder(payload, &sampledHeader.Protocol, &sampledHeader.FrameLength, &sampledHeader.Stripped, &sampledHeader.OriginalLength) + if err != nil { + return flowRecord, err + } + sampledHeader.HeaderData = payload.Bytes() + flowRecord.Data = sampledHeader + case FORMAT_IPV4: + sampledIPBase := SampledIP_Base{ + SrcIP: make([]byte, 4), + DstIP: make([]byte, 4), + } + err := utils.BinaryDecoder(payload, &sampledIPBase.Length, &sampledIPBase.Protocol, sampledIPBase.SrcIP, sampledIPBase.DstIP, &sampledIPBase.SrcPort, &sampledIPBase.DstPort, &sampledIPBase.TcpFlags) + if err != nil { + return flowRecord, err + } + sampledIPv4 := SampledIPv4{ + Base: sampledIPBase, + } + err = utils.BinaryDecoder(payload, &(sampledIPv4.Tos)) + if err != nil { + return flowRecord, err + } + flowRecord.Data = sampledIPv4 + case FORMAT_IPV6: + sampledIPBase := SampledIP_Base{ + SrcIP: make([]byte, 16), + DstIP: make([]byte, 16), + } + err := utils.BinaryDecoder(payload, &sampledIPBase.Length, &sampledIPBase.Protocol, sampledIPBase.SrcIP, sampledIPBase.DstIP, &sampledIPBase.SrcPort, &sampledIPBase.DstPort, &sampledIPBase.TcpFlags) + if err != nil { + return flowRecord, err + } + sampledIPv6 := SampledIPv6{ + Base: sampledIPBase, + } + err = utils.BinaryDecoder(payload, &sampledIPv6.Priority) + if err != nil { + return flowRecord, err + } + flowRecord.Data = sampledIPv6 + case FORMAT_EXT_ROUTER: + extendedRouter := ExtendedRouter{} + + ipVersion, ip, err := DecodeIP(payload) + if err != nil { + return flowRecord, err + } + extendedRouter.NextHopIPVersion = ipVersion + extendedRouter.NextHop = ip + err = utils.BinaryDecoder(payload, &extendedRouter.SrcMaskLen, &extendedRouter.DstMaskLen) + if err != nil { + return flowRecord, err + } + flowRecord.Data = extendedRouter + case FORMAT_EXT_GATEWAY: + extendedGateway := ExtendedGateway{} + ipVersion, ip, err := 
DecodeIP(payload) + if err != nil { + return flowRecord, err + } + extendedGateway.NextHopIPVersion = ipVersion + extendedGateway.NextHop = ip + err = utils.BinaryDecoder(payload, &extendedGateway.AS, &extendedGateway.SrcAS, &extendedGateway.SrcPeerAS, + &extendedGateway.ASDestinations) + if err != nil { + return flowRecord, err + } + var asPath []uint32 + if extendedGateway.ASDestinations != 0 { + err := utils.BinaryDecoder(payload, &extendedGateway.ASPathType, &extendedGateway.ASPathLength) + if err != nil { + return flowRecord, err + } + if int(extendedGateway.ASPathLength) > payload.Len()-4 { + return flowRecord, errors.New(fmt.Sprintf("Invalid AS path length: %v.", extendedGateway.ASPathLength)) + } + + if extendedGateway.ASPathLength > MAX_ATTRS { + return flowRecord, fmt.Errorf("AS path too large (%d > %d) in record", extendedGateway.ASPathLength, MAX_ATTRS) + } + + asPath = make([]uint32, extendedGateway.ASPathLength) + if len(asPath) > 0 { + err = utils.BinaryDecoder(payload, asPath) + if err != nil { + return flowRecord, err + } + } + } + extendedGateway.ASPath = asPath + + err = utils.BinaryDecoder(payload, &extendedGateway.CommunitiesLength) + if err != nil { + return flowRecord, err + } + if extendedGateway.CommunitiesLength > MAX_ATTRS { + return flowRecord, fmt.Errorf("Communities list too large (%d > %d) in record", extendedGateway.CommunitiesLength, MAX_ATTRS) + } + + if int(extendedGateway.CommunitiesLength) > payload.Len()-4 { + return flowRecord, errors.New(fmt.Sprintf("Invalid Communities length: %v.", extendedGateway.ASPathLength)) + } + communities := make([]uint32, extendedGateway.CommunitiesLength) + if len(communities) > 0 { + err = utils.BinaryDecoder(payload, communities) + if err != nil { + return flowRecord, err + } + } + err = utils.BinaryDecoder(payload, &extendedGateway.LocalPref) + if err != nil { + return flowRecord, err + } + extendedGateway.Communities = communities + + flowRecord.Data = extendedGateway + default: + //return flowRecord, errors.New(fmt.Sprintf("Unknown data format %v.", (*header).DataFormat)) + flowRecord.Data = &FlowRecordRaw{ + Data: payload.Next(int(header.Length)), + } + } + return flowRecord, nil +} + +func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, error) { + format := header.Format + var sample interface{} + + err := utils.BinaryDecoder(payload, &header.SampleSequenceNumber) + if err != nil { + return sample, err + } + if format == FORMAT_RAW_PKT || format == FORMAT_ETH { + var sourceId uint32 + err = utils.BinaryDecoder(payload, &sourceId) + if err != nil { + return sample, err + } + + (*header).SourceIdType = sourceId >> 24 + (*header).SourceIdValue = sourceId & 0x00ffffff + } else if format == FORMAT_IPV4 || format == FORMAT_IPV6 { + err = utils.BinaryDecoder(payload, &header.SourceIdType, &header.SourceIdValue) + if err != nil { + return sample, err + } + } else { + return nil, NewErrorDataFormat(format) + } + + var recordsCount uint32 + var flowSample FlowSample + var counterSample CounterSample + var expandedFlowSample ExpandedFlowSample + if format == FORMAT_RAW_PKT { + flowSample = FlowSample{ + Header: *header, + } + err = utils.BinaryDecoder(payload, &flowSample.SamplingRate, &flowSample.SamplePool, + &flowSample.Drops, &flowSample.Input, &flowSample.Output, &flowSample.FlowRecordsCount) + if err != nil { + return sample, err + } + recordsCount = flowSample.FlowRecordsCount + + if recordsCount > MAX_RECORDS { + return flowSample, fmt.Errorf("Too many records (%d > %d) in packet", 
recordsCount, MAX_RECORDS) + } + + flowSample.Records = make([]FlowRecord, recordsCount) + sample = flowSample + } else if format == FORMAT_ETH || format == FORMAT_IPV6 { + err = utils.BinaryDecoder(payload, &recordsCount) + if err != nil { + return sample, err + } + counterSample = CounterSample{ + Header: *header, + CounterRecordsCount: recordsCount, + } + + if recordsCount > MAX_RECORDS { + return flowSample, fmt.Errorf("Too many records (%d > %d) in packet", recordsCount, MAX_RECORDS) + } + counterSample.Records = make([]CounterRecord, recordsCount) + sample = counterSample + } else if format == FORMAT_IPV4 { + expandedFlowSample = ExpandedFlowSample{ + Header: *header, + } + err = utils.BinaryDecoder(payload, &expandedFlowSample.SamplingRate, &expandedFlowSample.SamplePool, + &expandedFlowSample.Drops, &expandedFlowSample.InputIfFormat, &expandedFlowSample.InputIfValue, + &expandedFlowSample.OutputIfFormat, &expandedFlowSample.OutputIfValue, &expandedFlowSample.FlowRecordsCount) + if err != nil { + return sample, err + } + recordsCount = expandedFlowSample.FlowRecordsCount + + if recordsCount > MAX_RECORDS { + return flowSample, fmt.Errorf("Too many records (%d > %d) in packet", recordsCount, MAX_RECORDS) + } + expandedFlowSample.Records = make([]FlowRecord, recordsCount) + sample = expandedFlowSample + } + for i := 0; i < int(recordsCount) && payload.Len() >= 8; i++ { + recordHeader := RecordHeader{} + err = utils.BinaryDecoder(payload, &recordHeader.DataFormat, &recordHeader.Length) + if err != nil { + return sample, err + } + if int(recordHeader.Length) > payload.Len() { + break + } + recordReader := bytes.NewBuffer(payload.Next(int(recordHeader.Length))) + if format == FORMAT_RAW_PKT || format == FORMAT_IPV4 { + record, err := DecodeFlowRecord(&recordHeader, recordReader) + if err != nil { + continue + } + if format == FORMAT_RAW_PKT { + flowSample.Records[i] = record + } else if format == FORMAT_IPV4 { + expandedFlowSample.Records[i] = record + } + } else if format == FORMAT_ETH || format == FORMAT_IPV6 { + record, err := DecodeCounterRecord(&recordHeader, recordReader) + if err != nil { + continue + } + counterSample.Records[i] = record + } + } + return sample, nil +} + +func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { + var version uint32 + err := utils.BinaryDecoder(payload, &version) + if err != nil { + return nil, err + } + packetV5 := Packet{} + if version == 5 { + packetV5.Version = version + err = utils.BinaryDecoder(payload, &(packetV5.IPVersion)) + if err != nil { + return packetV5, err + } + var ip []byte + if packetV5.IPVersion == 1 { + ip = make([]byte, 4) + err = utils.BinaryDecoder(payload, ip) + if err != nil { + return packetV5, err + } + } else if packetV5.IPVersion == 2 { + ip = make([]byte, 16) + err = utils.BinaryDecoder(payload, ip) + if err != nil { + return packetV5, err + } + } else { + return nil, NewErrorIPVersion(packetV5.IPVersion) + } + + packetV5.AgentIP = ip + err = utils.BinaryDecoder(payload, &packetV5.SubAgentId, &packetV5.SequenceNumber, &packetV5.Uptime, &packetV5.SamplesCount) + if err != nil { + return packetV5, err + } + + if packetV5.SamplesCount > MAX_SAMPLES { + return nil, fmt.Errorf("Too many samples (%d > %d) in packet", packetV5.SamplesCount, MAX_SAMPLES) + } + + packetV5.Samples = make([]interface{}, int(packetV5.SamplesCount)) + for i := 0; i < int(packetV5.SamplesCount) && payload.Len() >= 8; i++ { + header := SampleHeader{} + err = utils.BinaryDecoder(payload, &(header.Format), &(header.Length)) + if err != nil { 
+ return packetV5, err + } + if int(header.Length) > payload.Len() { + break + } + sampleReader := bytes.NewBuffer(payload.Next(int(header.Length))) + + sample, err := DecodeSample(&header, sampleReader) + if err != nil { + continue + } else { + packetV5.Samples[i] = sample + } + } + + return packetV5, nil + } else { + return nil, NewErrorVersion(version) + } +} diff --git a/vendor/github.com/netsampler/goflow2/decoders/utils/utils.go b/vendor/github.com/netsampler/goflow2/decoders/utils/utils.go new file mode 100644 index 000000000..9c8e597f5 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/decoders/utils/utils.go @@ -0,0 +1,128 @@ +package utils + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "reflect" +) + +type BytesBuffer interface { + io.Reader + Next(int) []byte +} + +func BinaryDecoder(payload *bytes.Buffer, dests ...interface{}) error { + for _, dest := range dests { + err := BinaryRead(payload, binary.BigEndian, dest) + if err != nil { + return err + } + } + return nil +} +func BinaryRead(payload BytesBuffer, order binary.ByteOrder, data any) error { + // Fast path for basic types and slices. + if n := intDataSize(data); n != 0 { + bs := payload.Next(n) + if len(bs) < n { + return io.ErrUnexpectedEOF + } + switch data := data.(type) { + case *bool: + *data = bs[0] != 0 + case *int8: + *data = int8(bs[0]) + case *uint8: + *data = bs[0] + case *int16: + *data = int16(order.Uint16(bs)) + case *uint16: + *data = order.Uint16(bs) + case *int32: + *data = int32(order.Uint32(bs)) + case *uint32: + *data = order.Uint32(bs) + case *int64: + *data = int64(order.Uint64(bs)) + case *uint64: + *data = order.Uint64(bs) + case []bool: + for i, x := range bs { // Easier to loop over the input for 8-bit values. + data[i] = x != 0 + } + case []int8: + for i, x := range bs { + data[i] = int8(x) + } + case []uint8: + copy(data, bs) + case []int16: + for i := range data { + data[i] = int16(order.Uint16(bs[2*i:])) + } + case []uint16: + for i := range data { + data[i] = order.Uint16(bs[2*i:]) + } + case []int32: + for i := range data { + data[i] = int32(order.Uint32(bs[4*i:])) + } + case []uint32: + for i := range data { + data[i] = order.Uint32(bs[4*i:]) + } + case []int64: + for i := range data { + data[i] = int64(order.Uint64(bs[8*i:])) + } + case []uint64: + for i := range data { + data[i] = order.Uint64(bs[8*i:]) + } + default: + n = 0 // fast path doesn't apply + } + if n != 0 { + return nil + } + } + + return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) +} + +// intDataSize returns the size of the data required to represent the data when encoded. +// It returns zero if the type cannot be implemented by the fast path in Read or Write. 
+func intDataSize(data any) int { + switch data := data.(type) { + case bool, int8, uint8, *bool, *int8, *uint8: + return 1 + case []bool: + return len(data) + case []int8: + return len(data) + case []uint8: + return len(data) + case int16, uint16, *int16, *uint16: + return 2 + case []int16: + return 2 * len(data) + case []uint16: + return 2 * len(data) + case int32, uint32, *int32, *uint32: + return 4 + case []int32: + return 4 * len(data) + case []uint32: + return 4 * len(data) + case int64, uint64, *int64, *uint64: + return 8 + case []int64: + return 8 * len(data) + case []uint64: + return 8 * len(data) + } + return 0 +} diff --git a/vendor/github.com/netsampler/goflow2/format/common/hash.go b/vendor/github.com/netsampler/goflow2/format/common/hash.go new file mode 100644 index 000000000..1d9018603 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/format/common/hash.go @@ -0,0 +1,56 @@ +package common + +import ( + "flag" + "fmt" + "reflect" + "strings" + "sync" +) + +var ( + fieldsVar string + fields []string // Hashing fields + + hashDeclared bool + hashDeclaredLock = &sync.Mutex{} +) + +func HashFlag() { + hashDeclaredLock.Lock() + defer hashDeclaredLock.Unlock() + + if hashDeclared { + return + } + hashDeclared = true + flag.StringVar(&fieldsVar, "format.hash", "SamplerAddress", "List of fields to do hashing, separated by commas") + +} + +func ManualHashInit() error { + fields = strings.Split(fieldsVar, ",") + return nil +} + +func HashProtoLocal(msg interface{}) string { + return HashProto(fields, msg) +} + +func HashProto(fields []string, msg interface{}) string { + var keyStr string + + if msg != nil { + vfm := reflect.ValueOf(msg) + vfm = reflect.Indirect(vfm) + + for _, kf := range fields { + fieldValue := vfm.FieldByName(kf) + if fieldValue.IsValid() { + keyStr += fmt.Sprintf("%v-", fieldValue) + } + } + } + + return keyStr +} diff --git a/vendor/github.com/netsampler/goflow2/format/common/selector.go b/vendor/github.com/netsampler/goflow2/format/common/selector.go new file mode 100644 index 000000000..531c716fe --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/format/common/selector.go @@ -0,0 +1,36 @@ +package common + +import ( + "flag" + "strings" + "sync" +) + +var ( + selectorVar string + selector []string // Selector fields + selectorTag string // Selector tag + + selectorDeclared bool + selectorDeclaredLock = &sync.Mutex{} +) + +func SelectorFlag() { + selectorDeclaredLock.Lock() + defer selectorDeclaredLock.Unlock() + + if selectorDeclared { + return + } + selectorDeclared = true + flag.StringVar(&selectorVar, "format.selector", "", "List of fields to keep in output") + flag.StringVar(&selectorTag, "format.tag", "", "Use format tag") +} + +func ManualSelectorInit() error { + if selectorVar == "" { + return nil + } + selector = strings.Split(selectorVar, ",") + return nil +} diff --git a/vendor/github.com/netsampler/goflow2/format/common/text.go b/vendor/github.com/netsampler/goflow2/format/common/text.go new file mode 100644 index 000000000..6e009c583 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/format/common/text.go @@ -0,0 +1,247 @@ +package common + +import ( + "encoding/binary" + "fmt" + "net" + "reflect" + "strings" +) + +const ( + FORMAT_TYPE_UNKNOWN = iota + FORMAT_TYPE_STRING_FUNC + FORMAT_TYPE_STRING + FORMAT_TYPE_INTEGER + FORMAT_TYPE_IP + FORMAT_TYPE_MAC + FORMAT_TYPE_BYTES +) + +var ( + EtypeName = map[uint32]string{ + 0x806: "ARP", + 0x800: "IPv4", + 0x86dd: "IPv6", + } + ProtoName = map[uint32]string{ + 1: "ICMP", + 6: "TCP", + 17: "UDP", + 58: "ICMPv6", + 132: "SCTP", + } + IcmpTypeName = map[uint32]string{ + 0: "EchoReply", + 3: "DestinationUnreachable", + 8: "Echo", + 9: "RouterAdvertisement", + 10: "RouterSolicitation", + 11: "TimeExceeded", + } + Icmp6TypeName = map[uint32]string{ + 1: "DestinationUnreachable", + 2: "PacketTooBig", + 3: "TimeExceeded", + 128: "EchoRequest", + 129: "EchoReply", + 133: "RouterSolicitation", + 134: "RouterAdvertisement", + } + + TextFields = map[string]int{ + "Type": FORMAT_TYPE_STRING_FUNC, + "SamplerAddress": FORMAT_TYPE_IP, + "SrcAddr": FORMAT_TYPE_IP, + "DstAddr": FORMAT_TYPE_IP, + "SrcMac": FORMAT_TYPE_MAC, + "DstMac": FORMAT_TYPE_MAC, + "NextHop": FORMAT_TYPE_IP, + "MPLSLabelIP": FORMAT_TYPE_IP, + } + + RenderExtras = map[string]RenderExtraFunction{ + "EtypeName": RenderExtraFunctionEtypeName, + "ProtoName": RenderExtraFunctionProtoName, + "IcmpName": RenderExtraFunctionIcmpName, + } +) + +/* +func AddTextField(name string, jtype int) { + TextFields = append(TextFields, name) + TextFieldsTypes = append(TextFieldsTypes, jtype) +}*/ + +type RenderExtraFunction func(interface{}) string + +func RenderExtraFetchNumbers(msg interface{}, fields []string) []uint64 { + vfm := reflect.ValueOf(msg) + vfm = reflect.Indirect(vfm) + + values := make([]uint64, len(fields)) + for i, kf := range fields { + fieldValue := vfm.FieldByName(kf) + if fieldValue.IsValid() { + values[i] = fieldValue.Uint() + } + } + + return values +} + +func RenderExtraFunctionEtypeName(msg interface{}) string { + num := RenderExtraFetchNumbers(msg, []string{"Etype"}) + return EtypeName[uint32(num[0])] +} + +func RenderExtraFunctionProtoName(msg interface{}) string { + num := RenderExtraFetchNumbers(msg, []string{"Proto"}) + return ProtoName[uint32(num[0])] +} +func RenderExtraFunctionIcmpName(msg interface{}) string { + num := RenderExtraFetchNumbers(msg, []string{"Proto", "IcmpCode", "IcmpType"}) + return IcmpCodeType(uint32(num[0]), uint32(num[1]), uint32(num[2])) +} + +func IcmpCodeType(proto, icmpCode, icmpType uint32) string { + if proto == 1 { + return IcmpTypeName[icmpType] + } else if proto == 58 { + return Icmp6TypeName[icmpType] + } + return "" +} + +func RenderIP(addr []byte) string { + if addr == nil || (len(addr) != 4 && len(addr) != 16) { + return "" + } + + return net.IP(addr).String() +} + +func FormatMessageReflectText(msg interface{}, ext string) string { + return FormatMessageReflectCustom(msg, ext, "", " ", "=", false) +} + +func FormatMessageReflectJSON(msg interface{}, ext string) string { + return fmt.Sprintf("{%s}", FormatMessageReflectCustom(msg, ext, "\"", ",", ":", true)) +} + +func ExtractTag(name, original string, tag reflect.StructTag) string { + lookup, ok := tag.Lookup(name) + if !ok { + return original + } + before, _, _ := strings.Cut(lookup, ",") + return before +} + +func FormatMessageReflectCustom(msg interface{}, ext, quotes, sep, sign string, null bool) string { + customSelector := selector + reMap := make(map[string]string) + + vfm := reflect.ValueOf(msg) + vfm = reflect.Indirect(vfm) + vft := vfm.Type() + + if len(customSelector) == 0 || selectorTag != "" { + /* + // we would need proto v2 + msgR := msg.ProtoReflect() + customSelector = make([]string, msgR.Fields().Len()) + for i := 0; i < … [extraction gap: the rest of format/common/text.go and most of the generated vendor/github.com/netsampler/goflow2/pb/flow.pb.go did not survive extraction; the fragment below is the tail of file_pb_flow_proto_depIdxs] … flowpb.FlowMessage.FlowType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field 
type_name +} + +func init() { file_pb_flow_proto_init() } +func file_pb_flow_proto_init() { + if File_pb_flow_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_flow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FlowMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_flow_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_flow_proto_goTypes, + DependencyIndexes: file_pb_flow_proto_depIdxs, + EnumInfos: file_pb_flow_proto_enumTypes, + MessageInfos: file_pb_flow_proto_msgTypes, + }.Build() + File_pb_flow_proto = out.File + file_pb_flow_proto_rawDesc = nil + file_pb_flow_proto_goTypes = nil + file_pb_flow_proto_depIdxs = nil +} diff --git a/vendor/github.com/netsampler/goflow2/pb/flow.proto b/vendor/github.com/netsampler/goflow2/pb/flow.proto new file mode 100644 index 000000000..1cbaae8d5 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/pb/flow.proto @@ -0,0 +1,131 @@ +syntax = "proto3"; +package flowpb; +option go_package = "github.com/netsampler/goflow2/pb;flowpb"; + +message FlowMessage { + + enum FlowType { + FLOWUNKNOWN = 0; + SFLOW_5 = 1; + NETFLOW_V5 = 2; + NETFLOW_V9 = 3; + IPFIX = 4; + } + FlowType type = 1; + + uint64 time_received = 2; + uint32 sequence_num = 4; + uint64 sampling_rate = 3; + + uint32 flow_direction = 42; + + // Sampler information + bytes sampler_address = 11; + + // Found inside packet + uint64 time_flow_start = 38; + uint64 time_flow_end = 5; + uint64 time_flow_start_ms = 63; + uint64 time_flow_end_ms = 64; + + // Size of the sampled packet + uint64 bytes = 9; + uint64 packets = 10; + + // Source/destination addresses + bytes src_addr = 6; + bytes dst_addr = 7; + + // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...) 
+ uint32 etype = 30; + + // Layer 4 protocol + uint32 proto = 20; + + // Ports for UDP and TCP + uint32 src_port = 21; + uint32 dst_port = 22; + + // Interfaces + uint32 in_if = 18; + uint32 out_if = 19; + + // Ethernet information + uint64 src_mac = 27; + uint64 dst_mac = 28; + + // Vlan + uint32 src_vlan = 33; + uint32 dst_vlan = 34; + // 802.1q VLAN in sampled packet + uint32 vlan_id = 29; + + // VRF + uint32 ingress_vrf_id = 39; + uint32 egress_vrf_id = 40; + + // IP and TCP special flags + uint32 ip_tos = 23; + uint32 forwarding_status = 24; + uint32 ip_ttl = 25; + uint32 tcp_flags = 26; + uint32 icmp_type = 31; + uint32 icmp_code = 32; + uint32 ipv6_flow_label = 37; + // Fragments (IPv4/IPv6) + uint32 fragment_id = 35; + uint32 fragment_offset = 36; + uint32 bi_flow_direction = 41; + + // Autonomous system information + uint32 src_as = 14; + uint32 dst_as = 15; + + bytes next_hop = 12; + uint32 next_hop_as = 13; + + // Prefix size + uint32 src_net = 16; + uint32 dst_net = 17; + + // BGP information + bytes bgp_next_hop = 100; + repeated uint32 bgp_communities = 101; + repeated uint32 as_path = 102; + + // MPLS information + bool has_mpls = 53; + uint32 mpls_count = 54; + uint32 mpls_1_ttl = 55; // First TTL + uint32 mpls_1_label = 56; // First Label + uint32 mpls_2_ttl = 57; // Second TTL + uint32 mpls_2_label = 58; // Second Label + uint32 mpls_3_ttl = 59; // Third TTL + uint32 mpls_3_label = 60; // Third Label + uint32 mpls_last_ttl = 61; // Last TTL + uint32 mpls_last_label = 62; // Last Label + bytes mpls_label_ip = 65; // MPLS TOP Label IP + + uint32 observation_domain_id = 70; + uint32 observation_point_id = 71; + + // Custom fields: start after ID 1000: + // uint32 my_custom_field = 1000; + + + // Custom allocations + uint64 custom_integer_1 = 1001; + uint64 custom_integer_2 = 1002; + uint64 custom_integer_3 = 1003; + uint64 custom_integer_4 = 1004; + uint64 custom_integer_5 = 1005; + + bytes custom_bytes_1 = 1011; + bytes custom_bytes_2 = 1012; + bytes custom_bytes_3 = 1013; + bytes custom_bytes_4 = 1014; + bytes custom_bytes_5 = 1015; + + repeated uint32 custom_list_1 = 1021; + +} diff --git a/vendor/github.com/netsampler/goflow2/producer/producer_nf.go b/vendor/github.com/netsampler/goflow2/producer/producer_nf.go new file mode 100644 index 000000000..d14936553 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/producer/producer_nf.go @@ -0,0 +1,683 @@ +package producer + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/netsampler/goflow2/decoders/netflow" + "github.com/netsampler/goflow2/decoders/utils" + flowmessage "github.com/netsampler/goflow2/pb" +) + +type SamplingRateSystem interface { + GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) + AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) +} + +type basicSamplingRateSystem struct { + sampling map[uint16]map[uint32]uint32 + samplinglock *sync.RWMutex +} + +func CreateSamplingSystem() SamplingRateSystem { + ts := &basicSamplingRateSystem{ + sampling: make(map[uint16]map[uint32]uint32), + samplinglock: &sync.RWMutex{}, + } + return ts +} + +func (s *basicSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) { + s.samplinglock.Lock() + defer s.samplinglock.Unlock() + _, exists := s.sampling[version] + if exists != true { + s.sampling[version] = make(map[uint32]uint32) + } + s.sampling[version][obsDomainId] = samplingRate +} + +func (s *basicSamplingRateSystem) GetSamplingRate(version 
uint16, obsDomainId uint32) (uint32, error) { + s.samplinglock.RLock() + defer s.samplinglock.RUnlock() + samplingVersion, okver := s.sampling[version] + if okver { + samplingRate, okid := samplingVersion[obsDomainId] + if okid { + return samplingRate, nil + } + return 0, errors.New("") // TBC + } + return 0, errors.New("") // TBC +} + +type SingleSamplingRateSystem struct { + Sampling uint32 +} + +func (s *SingleSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) { +} + +func (s *SingleSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) { + return s.Sampling, nil +} + +func NetFlowLookFor(dataFields []netflow.DataField, typeId uint16) (bool, interface{}) { + for _, dataField := range dataFields { + if dataField.Type == typeId { + return true, dataField.Value + } + } + return false, nil +} + +func NetFlowPopulate(dataFields []netflow.DataField, typeId uint16, addr interface{}) bool { + exists, value := NetFlowLookFor(dataFields, typeId) + if exists && value != nil { + valueBytes, ok := value.([]byte) + valueReader := bytes.NewBuffer(valueBytes) + if ok { + switch addrt := addr.(type) { + case *(net.IP): + *addrt = valueBytes + case *(time.Time): + t := uint64(0) + utils.BinaryRead(valueReader, binary.BigEndian, &t) + t64 := int64(t / 1000) + *addrt = time.Unix(t64, 0) + default: + utils.BinaryRead(valueReader, binary.BigEndian, addr) + } + } + } + return exists +} + +func WriteUDecoded(o uint64, out interface{}) error { + switch t := out.(type) { + case *byte: + *t = byte(o) + case *uint16: + *t = uint16(o) + case *uint32: + *t = uint32(o) + case *uint64: + *t = o + default: + return errors.New("The parameter is not a pointer to a byte/uint16/uint32/uint64 structure") + } + return nil +} + +func WriteDecoded(o int64, out interface{}) error { + switch t := out.(type) { + case *int8: + *t = int8(o) + case *int16: + *t = int16(o) + case *int32: + *t = int32(o) + case *int64: + *t = o + default: + return errors.New("The parameter is not a pointer to a int8/int16/int32/int64 structure") + } + return nil +} + +func DecodeUNumber(b []byte, out interface{}) error { + var o uint64 + l := len(b) + switch l { + case 1: + o = uint64(b[0]) + case 2: + o = uint64(binary.BigEndian.Uint16(b)) + case 4: + o = uint64(binary.BigEndian.Uint32(b)) + case 8: + o = binary.BigEndian.Uint64(b) + default: + if l < 8 { + var iter uint + for i := range b { + o |= uint64(b[i]) << uint(8*(uint(l)-iter-1)) + iter++ + } + } else { + return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l)) + } + } + return WriteUDecoded(o, out) +} + +func DecodeUNumberLE(b []byte, out interface{}) error { + var o uint64 + l := len(b) + switch l { + case 1: + o = uint64(b[0]) + case 2: + o = uint64(binary.LittleEndian.Uint16(b)) + case 4: + o = uint64(binary.LittleEndian.Uint32(b)) + case 8: + o = binary.LittleEndian.Uint64(b) + default: + if l < 8 { + var iter uint + for i := range b { + o |= uint64(b[i]) << uint(8*(iter)) + iter++ + } + } else { + return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l)) + } + } + return WriteUDecoded(o, out) +} + +func DecodeNumber(b []byte, out interface{}) error { + var o int64 + l := len(b) + switch l { + case 1: + o = int64(int8(b[0])) + case 2: + o = int64(int16(binary.BigEndian.Uint16(b))) + case 4: + o = int64(int32(binary.BigEndian.Uint32(b))) + case 8: + o = int64(binary.BigEndian.Uint64(b)) + default: + if l < 8 { + var iter int + for i := range b { + o |= int64(b[i]) 
<< int(8*(int(l)-iter-1)) + iter++ + } + } else { + return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l)) + } + } + return WriteDecoded(o, out) +} + +func DecodeNumberLE(b []byte, out interface{}) error { + var o int64 + l := len(b) + switch l { + case 1: + o = int64(int8(b[0])) + case 2: + o = int64(int16(binary.LittleEndian.Uint16(b))) + case 4: + o = int64(int32(binary.LittleEndian.Uint32(b))) + case 8: + o = int64(binary.LittleEndian.Uint64(b)) + default: + if l < 8 { + var iter int + for i := range b { + o |= int64(b[i]) << int(8*(iter)) + iter++ + } + } else { + return errors.New(fmt.Sprintf("Non-regular number of bytes for a number: %v", l)) + } + } + return WriteDecoded(o, out) +} + +func allZeroes(v []byte) bool { + for _, b := range v { + if b != 0 { + return false + } + } + return true +} + +func addrReplaceCheck(dstAddr *[]byte, v []byte, eType *uint32, ipv6 bool) { + if (len(*dstAddr) == 0 && len(v) > 0) || + (len(*dstAddr) != 0 && len(v) > 0 && !allZeroes(v)) { + *dstAddr = v + + if ipv6 { + *eType = 0x86dd + } else { + *eType = 0x800 + } + + } +} + +func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, record []netflow.DataField, mapperNetFlow *NetFlowMapper, mapperSFlow *SFlowMapper) *flowmessage.FlowMessage { + flowMessage := &flowmessage.FlowMessage{} + var time uint64 + + if version == 9 { + flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V9 + } else if version == 10 { + flowMessage.Type = flowmessage.FlowMessage_IPFIX + } + + for i := range record { + df := record[i] + + v, ok := df.Value.([]byte) + if !ok { + continue + } + + MapCustomNetFlow(flowMessage, df, mapperNetFlow) + + if df.PenProvided { + continue + } + + switch df.Type { + + case netflow.IPFIX_FIELD_observationPointId: + DecodeUNumber(v, &(flowMessage.ObservationPointId)) + + // Statistics + case netflow.NFV9_FIELD_IN_BYTES: + DecodeUNumber(v, &(flowMessage.Bytes)) + case netflow.NFV9_FIELD_IN_PKTS: + DecodeUNumber(v, &(flowMessage.Packets)) + case netflow.NFV9_FIELD_OUT_BYTES: + DecodeUNumber(v, &(flowMessage.Bytes)) + case netflow.NFV9_FIELD_OUT_PKTS: + DecodeUNumber(v, &(flowMessage.Packets)) + + // L4 + case netflow.NFV9_FIELD_L4_SRC_PORT: + DecodeUNumber(v, &(flowMessage.SrcPort)) + case netflow.NFV9_FIELD_L4_DST_PORT: + DecodeUNumber(v, &(flowMessage.DstPort)) + case netflow.NFV9_FIELD_PROTOCOL: + DecodeUNumber(v, &(flowMessage.Proto)) + + // Network + case netflow.NFV9_FIELD_SRC_AS: + DecodeUNumber(v, &(flowMessage.SrcAs)) + case netflow.NFV9_FIELD_DST_AS: + DecodeUNumber(v, &(flowMessage.DstAs)) + + // Interfaces + case netflow.NFV9_FIELD_INPUT_SNMP: + DecodeUNumber(v, &(flowMessage.InIf)) + case netflow.NFV9_FIELD_OUTPUT_SNMP: + DecodeUNumber(v, &(flowMessage.OutIf)) + + case netflow.NFV9_FIELD_FORWARDING_STATUS: + DecodeUNumber(v, &(flowMessage.ForwardingStatus)) + case netflow.NFV9_FIELD_SRC_TOS: + DecodeUNumber(v, &(flowMessage.IpTos)) + case netflow.NFV9_FIELD_TCP_FLAGS: + DecodeUNumber(v, &(flowMessage.TcpFlags)) + case netflow.NFV9_FIELD_MIN_TTL: + DecodeUNumber(v, &(flowMessage.IpTtl)) + + // IP + case netflow.NFV9_FIELD_IP_PROTOCOL_VERSION: + if len(v) > 0 { + if v[0] == 4 { + flowMessage.Etype = 0x800 + } else if v[0] == 6 { + flowMessage.Etype = 0x86dd + } + } + + case netflow.NFV9_FIELD_IPV4_SRC_ADDR: + addrReplaceCheck(&(flowMessage.SrcAddr), v, &(flowMessage.Etype), false) + + case netflow.NFV9_FIELD_IPV4_DST_ADDR: + addrReplaceCheck(&(flowMessage.DstAddr), v, &(flowMessage.Etype), false) + + case netflow.NFV9_FIELD_SRC_MASK: + 
DecodeUNumber(v, &(flowMessage.SrcNet)) + case netflow.NFV9_FIELD_DST_MASK: + DecodeUNumber(v, &(flowMessage.DstNet)) + + case netflow.NFV9_FIELD_IPV6_SRC_ADDR: + addrReplaceCheck(&(flowMessage.SrcAddr), v, &(flowMessage.Etype), true) + + case netflow.NFV9_FIELD_IPV6_DST_ADDR: + addrReplaceCheck(&(flowMessage.DstAddr), v, &(flowMessage.Etype), true) + + case netflow.NFV9_FIELD_IPV6_SRC_MASK: + DecodeUNumber(v, &(flowMessage.SrcNet)) + case netflow.NFV9_FIELD_IPV6_DST_MASK: + DecodeUNumber(v, &(flowMessage.DstNet)) + + case netflow.NFV9_FIELD_IPV4_NEXT_HOP: + flowMessage.NextHop = v + case netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP: + flowMessage.BgpNextHop = v + + case netflow.NFV9_FIELD_IPV6_NEXT_HOP: + flowMessage.NextHop = v + case netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP: + flowMessage.BgpNextHop = v + + // ICMP + case netflow.NFV9_FIELD_ICMP_TYPE: + var icmpTypeCode uint16 + DecodeUNumber(v, &icmpTypeCode) + flowMessage.IcmpType = uint32(icmpTypeCode >> 8) + flowMessage.IcmpCode = uint32(icmpTypeCode & 0xff) + case netflow.IPFIX_FIELD_icmpTypeCodeIPv6: + var icmpTypeCode uint16 + DecodeUNumber(v, &icmpTypeCode) + flowMessage.IcmpType = uint32(icmpTypeCode >> 8) + flowMessage.IcmpCode = uint32(icmpTypeCode & 0xff) + case netflow.IPFIX_FIELD_icmpTypeIPv4: + DecodeUNumber(v, &(flowMessage.IcmpType)) + case netflow.IPFIX_FIELD_icmpTypeIPv6: + DecodeUNumber(v, &(flowMessage.IcmpType)) + case netflow.IPFIX_FIELD_icmpCodeIPv4: + DecodeUNumber(v, &(flowMessage.IcmpCode)) + case netflow.IPFIX_FIELD_icmpCodeIPv6: + DecodeUNumber(v, &(flowMessage.IcmpCode)) + + // Mac + case netflow.NFV9_FIELD_IN_SRC_MAC: + DecodeUNumber(v, &(flowMessage.SrcMac)) + case netflow.NFV9_FIELD_IN_DST_MAC: + DecodeUNumber(v, &(flowMessage.DstMac)) + case netflow.NFV9_FIELD_OUT_SRC_MAC: + DecodeUNumber(v, &(flowMessage.SrcMac)) + case netflow.NFV9_FIELD_OUT_DST_MAC: + DecodeUNumber(v, &(flowMessage.DstMac)) + + case netflow.NFV9_FIELD_SRC_VLAN: + DecodeUNumber(v, &(flowMessage.VlanId)) + DecodeUNumber(v, &(flowMessage.SrcVlan)) + case netflow.NFV9_FIELD_DST_VLAN: + DecodeUNumber(v, &(flowMessage.DstVlan)) + + case netflow.IPFIX_FIELD_ingressVRFID: + DecodeUNumber(v, &(flowMessage.IngressVrfId)) + case netflow.IPFIX_FIELD_egressVRFID: + DecodeUNumber(v, &(flowMessage.EgressVrfId)) + + case netflow.NFV9_FIELD_IPV4_IDENT: + DecodeUNumber(v, &(flowMessage.FragmentId)) + case netflow.NFV9_FIELD_FRAGMENT_OFFSET: + var fragOffset uint32 + DecodeUNumber(v, &fragOffset) + flowMessage.FragmentOffset |= fragOffset + case netflow.IPFIX_FIELD_fragmentFlags: + var ipFlags uint32 + DecodeUNumber(v, &ipFlags) + flowMessage.FragmentOffset |= ipFlags + case netflow.NFV9_FIELD_IPV6_FLOW_LABEL: + DecodeUNumber(v, &(flowMessage.Ipv6FlowLabel)) + + case netflow.IPFIX_FIELD_biflowDirection: + DecodeUNumber(v, &(flowMessage.BiFlowDirection)) + + case netflow.NFV9_FIELD_DIRECTION: + DecodeUNumber(v, &(flowMessage.FlowDirection)) + + // MPLS + case netflow.IPFIX_FIELD_mplsTopLabelStackSection: + var mplsLabel uint32 + DecodeUNumber(v, &mplsLabel) + flowMessage.Mpls_1Label = uint32(mplsLabel >> 4) + flowMessage.HasMpls = true + case netflow.IPFIX_FIELD_mplsLabelStackSection2: + var mplsLabel uint32 + DecodeUNumber(v, &mplsLabel) + flowMessage.Mpls_2Label = uint32(mplsLabel >> 4) + case netflow.IPFIX_FIELD_mplsLabelStackSection3: + var mplsLabel uint32 + DecodeUNumber(v, &mplsLabel) + flowMessage.Mpls_3Label = uint32(mplsLabel >> 4) + case netflow.IPFIX_FIELD_mplsTopLabelIPv4Address: + flowMessage.MplsLabelIp = v + case 
netflow.IPFIX_FIELD_mplsTopLabelIPv6Address: + flowMessage.MplsLabelIp = v + + default: + if version == 9 { + // NetFlow v9 time works with a differential based on router's uptime + switch df.Type { + case netflow.NFV9_FIELD_FIRST_SWITCHED: + var timeFirstSwitched uint32 + DecodeUNumber(v, &timeFirstSwitched) + timeDiff := (uptime - timeFirstSwitched) + flowMessage.TimeFlowStart = uint64(baseTime - timeDiff/1000) + flowMessage.TimeFlowStartMs = uint64(baseTime)*1000 - uint64(timeDiff) + case netflow.NFV9_FIELD_LAST_SWITCHED: + var timeLastSwitched uint32 + DecodeUNumber(v, &timeLastSwitched) + timeDiff := (uptime - timeLastSwitched) + flowMessage.TimeFlowEnd = uint64(baseTime - timeDiff/1000) + flowMessage.TimeFlowEndMs = uint64(baseTime)*1000 - uint64(timeDiff) + } + } else if version == 10 { + switch df.Type { + case netflow.IPFIX_FIELD_flowStartSeconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowStart = time + flowMessage.TimeFlowStartMs = time * 1000 + case netflow.IPFIX_FIELD_flowStartMilliseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowStart = time / 1000 + flowMessage.TimeFlowStartMs = time + case netflow.IPFIX_FIELD_flowStartMicroseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowStart = time / 1000000 + flowMessage.TimeFlowStartMs = time / 1000 + case netflow.IPFIX_FIELD_flowStartNanoseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowStart = time / 1000000000 + flowMessage.TimeFlowStartMs = time / 1000000 + case netflow.IPFIX_FIELD_flowEndSeconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowEnd = time + flowMessage.TimeFlowEndMs = time * 1000 + case netflow.IPFIX_FIELD_flowEndMilliseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowEnd = time / 1000 + flowMessage.TimeFlowEndMs = time + case netflow.IPFIX_FIELD_flowEndMicroseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowEnd = time / 1000000 + flowMessage.TimeFlowEndMs = time / 1000 + case netflow.IPFIX_FIELD_flowEndNanoseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowEnd = time / 1000000000 + flowMessage.TimeFlowEndMs = time / 1000000 + case netflow.IPFIX_FIELD_flowStartDeltaMicroseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowStart = uint64(baseTime) - time/1000000 + flowMessage.TimeFlowStartMs = uint64(baseTime)*1000 - time/1000 + case netflow.IPFIX_FIELD_flowEndDeltaMicroseconds: + DecodeUNumber(v, &time) + flowMessage.TimeFlowEnd = uint64(baseTime) - time/1000000 + flowMessage.TimeFlowEndMs = uint64(baseTime)*1000 - time/1000 + // RFC7133 + case netflow.IPFIX_FIELD_dataLinkFrameSize: + DecodeUNumber(v, &(flowMessage.Bytes)) + flowMessage.Packets = 1 + case netflow.IPFIX_FIELD_dataLinkFrameSection: + ParseEthernetHeader(flowMessage, v, mapperSFlow) + flowMessage.Packets = 1 + if flowMessage.Bytes == 0 { + flowMessage.Bytes = uint64(len(v)) + } + } + } + } + + } + + return flowMessage +} + +func SearchNetFlowDataSetsRecords(version uint16, baseTime uint32, uptime uint32, dataRecords []netflow.DataRecord, mapperNetFlow *NetFlowMapper, mapperSFlow *SFlowMapper) []*flowmessage.FlowMessage { + var flowMessageSet []*flowmessage.FlowMessage + for _, record := range dataRecords { + fmsg := ConvertNetFlowDataSet(version, baseTime, uptime, record.Values, mapperNetFlow, mapperSFlow) + if fmsg != nil { + flowMessageSet = append(flowMessageSet, fmsg) + } + } + return flowMessageSet +} + +func SearchNetFlowDataSets(version uint16, baseTime uint32, uptime uint32, dataFlowSet []netflow.DataFlowSet, mapperNetFlow *NetFlowMapper, mapperSFlow *SFlowMapper) []*flowmessage.FlowMessage { + 
var flowMessageSet []*flowmessage.FlowMessage + for _, dataFlowSetItem := range dataFlowSet { + fmsg := SearchNetFlowDataSetsRecords(version, baseTime, uptime, dataFlowSetItem.Records, mapperNetFlow, mapperSFlow) + if fmsg != nil { + flowMessageSet = append(flowMessageSet, fmsg...) + } + } + return flowMessageSet +} + +func SearchNetFlowOptionDataSets(dataFlowSet []netflow.OptionsDataFlowSet) (uint32, bool) { + var samplingRate uint32 + var found bool + for _, dataFlowSetItem := range dataFlowSet { + for _, record := range dataFlowSetItem.Records { + b := NetFlowPopulate(record.OptionsValues, 305, &samplingRate) + if b { + return samplingRate, b + } + b = NetFlowPopulate(record.OptionsValues, 50, &samplingRate) + if b { + return samplingRate, b + } + b = NetFlowPopulate(record.OptionsValues, 34, &samplingRate) + if b { + return samplingRate, b + } + } + } + return samplingRate, found +} + +func SplitNetFlowSets(packetNFv9 netflow.NFv9Packet) ([]netflow.DataFlowSet, []netflow.TemplateFlowSet, []netflow.NFv9OptionsTemplateFlowSet, []netflow.OptionsDataFlowSet) { + var dataFlowSet []netflow.DataFlowSet + var templatesFlowSet []netflow.TemplateFlowSet + var optionsTemplatesFlowSet []netflow.NFv9OptionsTemplateFlowSet + var optionsDataFlowSet []netflow.OptionsDataFlowSet + for _, flowSet := range packetNFv9.FlowSets { + switch tFlowSet := flowSet.(type) { + case netflow.TemplateFlowSet: + templatesFlowSet = append(templatesFlowSet, tFlowSet) + case netflow.NFv9OptionsTemplateFlowSet: + optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, tFlowSet) + case netflow.DataFlowSet: + dataFlowSet = append(dataFlowSet, tFlowSet) + case netflow.OptionsDataFlowSet: + optionsDataFlowSet = append(optionsDataFlowSet, tFlowSet) + } + } + return dataFlowSet, templatesFlowSet, optionsTemplatesFlowSet, optionsDataFlowSet +} + +func SplitIPFIXSets(packetIPFIX netflow.IPFIXPacket) ([]netflow.DataFlowSet, []netflow.TemplateFlowSet, []netflow.IPFIXOptionsTemplateFlowSet, []netflow.OptionsDataFlowSet) { + var dataFlowSet []netflow.DataFlowSet + var templatesFlowSet []netflow.TemplateFlowSet + var optionsTemplatesFlowSet []netflow.IPFIXOptionsTemplateFlowSet + var optionsDataFlowSet []netflow.OptionsDataFlowSet + for _, flowSet := range packetIPFIX.FlowSets { + switch tFlowSet := flowSet.(type) { + case netflow.TemplateFlowSet: + templatesFlowSet = append(templatesFlowSet, tFlowSet) + case netflow.IPFIXOptionsTemplateFlowSet: + optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, tFlowSet) + case netflow.DataFlowSet: + dataFlowSet = append(dataFlowSet, tFlowSet) + case netflow.OptionsDataFlowSet: + optionsDataFlowSet = append(optionsDataFlowSet, tFlowSet) + } + } + return dataFlowSet, templatesFlowSet, optionsTemplatesFlowSet, optionsDataFlowSet +} + +func ProcessMessageNetFlow(msgDec interface{}, samplingRateSys SamplingRateSystem) ([]*flowmessage.FlowMessage, error) { + return ProcessMessageNetFlowConfig(msgDec, samplingRateSys, nil) +} + +// Convert a NetFlow datastructure to a FlowMessage protobuf +// Does not put sampling rate +func ProcessMessageNetFlowConfig(msgDec interface{}, samplingRateSys SamplingRateSystem, config *ProducerConfigMapped) ([]*flowmessage.FlowMessage, error) { + seqnum := uint32(0) + var baseTime uint32 + var uptime uint32 + + var flowMessageSet []*flowmessage.FlowMessage + + switch msgDecConv := msgDec.(type) { + case netflow.NFv9Packet: + dataFlowSet, _, _, optionDataFlowSet := SplitNetFlowSets(msgDecConv) + + seqnum = msgDecConv.SequenceNumber + baseTime = msgDecConv.UnixSeconds 
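The NFV9_FIELD_FIRST_SWITCHED/LAST_SWITCHED handling in ConvertNetFlowDataSet above relies on the uptime differential described in its comment: those fields carry milliseconds of router uptime, so the absolute flow time is the export time minus (uptime - switched). A minimal standalone sketch of that arithmetic, with made-up values:

    package main

    import "fmt"

    func main() {
        var baseTime uint32 = 1700000000   // v9 header UnixSeconds (hypothetical)
        var uptime uint32 = 3600000        // router SysUptime at export, in ms (hypothetical)
        var firstSwitched uint32 = 3540000 // FIRST_SWITCHED, in ms of uptime (hypothetical)

        timeDiff := uptime - firstSwitched // flow began 60000 ms before export
        fmt.Println(uint64(baseTime - timeDiff/1000))         // 1699999940 (seconds)
        fmt.Println(uint64(baseTime)*1000 - uint64(timeDiff)) // 1699999940000 (ms)
    }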
+ uptime = msgDecConv.SystemUptime + obsDomainId := msgDecConv.SourceId + + var cfg *NetFlowMapper + if config != nil { + cfg = config.NetFlowV9 + } + flowMessageSet = SearchNetFlowDataSets(9, baseTime, uptime, dataFlowSet, cfg, nil) + samplingRate, found := SearchNetFlowOptionDataSets(optionDataFlowSet) + if samplingRateSys != nil { + if found { + samplingRateSys.AddSamplingRate(9, obsDomainId, samplingRate) + } else { + samplingRate, _ = samplingRateSys.GetSamplingRate(9, obsDomainId) + } + } + for _, fmsg := range flowMessageSet { + fmsg.SequenceNum = seqnum + fmsg.SamplingRate = uint64(samplingRate) + } + case netflow.IPFIXPacket: + dataFlowSet, _, _, optionDataFlowSet := SplitIPFIXSets(msgDecConv) + + seqnum = msgDecConv.SequenceNumber + baseTime = msgDecConv.ExportTime + obsDomainId := msgDecConv.ObservationDomainId + + var cfgIpfix *NetFlowMapper + var cfgSflow *SFlowMapper + if config != nil { + cfgIpfix = config.IPFIX + cfgSflow = config.SFlow + } + flowMessageSet = SearchNetFlowDataSets(10, baseTime, uptime, dataFlowSet, cfgIpfix, cfgSflow) + + samplingRate, found := SearchNetFlowOptionDataSets(optionDataFlowSet) + if samplingRateSys != nil { + if found { + samplingRateSys.AddSamplingRate(10, obsDomainId, samplingRate) + } else { + samplingRate, _ = samplingRateSys.GetSamplingRate(10, obsDomainId) + } + } + for _, fmsg := range flowMessageSet { + fmsg.SequenceNum = seqnum + fmsg.SamplingRate = uint64(samplingRate) + fmsg.ObservationDomainId = obsDomainId + } + default: + return flowMessageSet, errors.New("Bad NetFlow/IPFIX version") + } + + return flowMessageSet, nil +} diff --git a/vendor/github.com/netsampler/goflow2/producer/producer_nflegacy.go b/vendor/github.com/netsampler/goflow2/producer/producer_nflegacy.go new file mode 100644 index 000000000..8acb3eeaa --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/producer/producer_nflegacy.go @@ -0,0 +1,81 @@ +package producer + +import ( + "encoding/binary" + "errors" + "net" + + "github.com/netsampler/goflow2/decoders/netflowlegacy" + flowmessage "github.com/netsampler/goflow2/pb" +) + +func ConvertNetFlowLegacyRecord(baseTime uint32, uptime uint32, record netflowlegacy.RecordsNetFlowV5) *flowmessage.FlowMessage { + flowMessage := &flowmessage.FlowMessage{} + + flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V5 + + timeDiffFirst := (uptime - record.First) + timeDiffLast := (uptime - record.Last) + flowMessage.TimeFlowStart = uint64(baseTime - timeDiffFirst/1000) + flowMessage.TimeFlowStartMs = uint64(baseTime)*1000 - uint64(timeDiffFirst) + flowMessage.TimeFlowEnd = uint64(baseTime - timeDiffLast/1000) + flowMessage.TimeFlowEndMs = uint64(baseTime)*1000 - uint64(timeDiffLast) + + v := make(net.IP, 4) + binary.BigEndian.PutUint32(v, record.NextHop) + flowMessage.NextHop = v + v = make(net.IP, 4) + binary.BigEndian.PutUint32(v, record.SrcAddr) + flowMessage.SrcAddr = v + v = make(net.IP, 4) + binary.BigEndian.PutUint32(v, record.DstAddr) + flowMessage.DstAddr = v + + flowMessage.Etype = 0x800 + flowMessage.SrcAs = uint32(record.SrcAS) + flowMessage.DstAs = uint32(record.DstAS) + flowMessage.SrcNet = uint32(record.SrcMask) + flowMessage.DstNet = uint32(record.DstMask) + flowMessage.Proto = uint32(record.Proto) + flowMessage.TcpFlags = uint32(record.TCPFlags) + flowMessage.IpTos = uint32(record.Tos) + flowMessage.InIf = uint32(record.Input) + flowMessage.OutIf = uint32(record.Output) + flowMessage.SrcPort = uint32(record.SrcPort) + flowMessage.DstPort = uint32(record.DstPort) + flowMessage.Packets = uint64(record.DPkts) + 
flowMessage.Bytes = uint64(record.DOctets) + + return flowMessage +} + +func SearchNetFlowLegacyRecords(baseTime uint32, uptime uint32, dataRecords []netflowlegacy.RecordsNetFlowV5) []*flowmessage.FlowMessage { + var flowMessageSet []*flowmessage.FlowMessage + for _, record := range dataRecords { + fmsg := ConvertNetFlowLegacyRecord(baseTime, uptime, record) + if fmsg != nil { + flowMessageSet = append(flowMessageSet, fmsg) + } + } + return flowMessageSet +} + +func ProcessMessageNetFlowLegacy(msgDec interface{}) ([]*flowmessage.FlowMessage, error) { + switch packet := msgDec.(type) { + case netflowlegacy.PacketNetFlowV5: + seqnum := packet.FlowSequence + samplingRate := packet.SamplingInterval + baseTime := packet.UnixSecs + uptime := packet.SysUptime + + flowMessageSet := SearchNetFlowLegacyRecords(baseTime, uptime, packet.Records) + for _, fmsg := range flowMessageSet { + fmsg.SequenceNum = seqnum + fmsg.SamplingRate = uint64(samplingRate) + } + + return flowMessageSet, nil + default: + return []*flowmessage.FlowMessage{}, errors.New("Bad NetFlow v5 version") + } +} diff --git a/vendor/github.com/netsampler/goflow2/producer/producer_sf.go b/vendor/github.com/netsampler/goflow2/producer/producer_sf.go new file mode 100644 index 000000000..ab949144b --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/producer/producer_sf.go @@ -0,0 +1,349 @@ +package producer + +import ( + "encoding/binary" + "errors" + "net" + + "github.com/netsampler/goflow2/decoders/sflow" + flowmessage "github.com/netsampler/goflow2/pb" +) + +func GetSFlowFlowSamples(packet *sflow.Packet) []interface{} { + var flowSamples []interface{} + for _, sample := range packet.Samples { + switch sample.(type) { + case sflow.FlowSample: + flowSamples = append(flowSamples, sample) + case sflow.ExpandedFlowSample: + flowSamples = append(flowSamples, sample) + } + } + return flowSamples +} + +func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader) error { + return ParseSampledHeaderConfig(flowMessage, sampledHeader, nil) +} + +func ParseEthernetHeader(flowMessage *flowmessage.FlowMessage, data []byte, config *SFlowMapper) { + var hasMpls bool + var countMpls uint32 + var firstLabelMpls uint32 + var firstTtlMpls uint8 + var secondLabelMpls uint32 + var secondTtlMpls uint8 + var thirdLabelMpls uint32 + var thirdTtlMpls uint8 + var lastLabelMpls uint32 + var lastTtlMpls uint8 + + var nextHeader byte + var tcpflags byte + srcIP := net.IP{} + dstIP := net.IP{} + offset := 14 + + var srcMac uint64 + var dstMac uint64 + + var tos byte + var ttl byte + var identification uint16 + var fragOffset uint16 + var flowLabel uint32 + + var srcPort uint16 + var dstPort uint16 + + for _, configLayer := range GetSFlowConfigLayer(config, 0) { + extracted := GetBytes(data, configLayer.Offset, configLayer.Length) + MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian) + } + + etherType := data[12:14] + + dstMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[0:6]...)) + srcMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[6:12]...)) + (*flowMessage).SrcMac = srcMac + (*flowMessage).DstMac = dstMac + + encap := true + iterations := 0 + for encap && iterations <= 1 { + encap = false + + if etherType[0] == 0x81 && etherType[1] == 0x0 { // VLAN 802.1Q + (*flowMessage).VlanId = uint32(binary.BigEndian.Uint16(data[14:16])) + offset += 4 + etherType = data[16:18] + } + + if etherType[0] == 0x88 && etherType[1] == 0x47 { // MPLS + iterateMpls := true + hasMpls = true + for 
iterateMpls { + if len(data) < offset+5 { + iterateMpls = false + break + } + label := binary.BigEndian.Uint32(append([]byte{0}, data[offset:offset+3]...)) >> 4 + //exp := data[offset+2] > 1 + bottom := data[offset+2] & 1 + mplsTtl := data[offset+3] + offset += 4 + + if bottom == 1 || label <= 15 || offset > len(data) { + if data[offset]&0xf0>>4 == 4 { + etherType = []byte{0x8, 0x0} + } else if data[offset]&0xf0>>4 == 6 { + etherType = []byte{0x86, 0xdd} + } + iterateMpls = false + } + + if countMpls == 0 { + firstLabelMpls = label + firstTtlMpls = mplsTtl + } else if countMpls == 1 { + secondLabelMpls = label + secondTtlMpls = mplsTtl + } else if countMpls == 2 { + thirdLabelMpls = label + thirdTtlMpls = mplsTtl + } else { + lastLabelMpls = label + lastTtlMpls = mplsTtl + } + countMpls++ + } + } + + for _, configLayer := range GetSFlowConfigLayer(config, 3) { + extracted := GetBytes(data, offset*8+configLayer.Offset, configLayer.Length) + MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian) + } + + if etherType[0] == 0x8 && etherType[1] == 0x0 { // IPv4 + if len(data) >= offset+20 { + nextHeader = data[offset+9] + srcIP = data[offset+12 : offset+16] + dstIP = data[offset+16 : offset+20] + tos = data[offset+1] + ttl = data[offset+8] + + identification = binary.BigEndian.Uint16(data[offset+4 : offset+6]) + fragOffset = binary.BigEndian.Uint16(data[offset+6:offset+8]) & 8191 + + offset += 20 + } + } else if etherType[0] == 0x86 && etherType[1] == 0xdd { // IPv6 + if len(data) >= offset+40 { + nextHeader = data[offset+6] + srcIP = data[offset+8 : offset+24] + dstIP = data[offset+24 : offset+40] + + tostmp := uint32(binary.BigEndian.Uint16(data[offset : offset+2])) + tos = uint8(tostmp & 0x0ff0 >> 4) + ttl = data[offset+7] + + flowLabel = binary.BigEndian.Uint32(data[offset : offset+4]) + + offset += 40 + + } + } else if etherType[0] == 0x8 && etherType[1] == 0x6 { // ARP + } /*else { + return errors.New(fmt.Sprintf("Unknown EtherType: %v\n", etherType)) + } */ + + for _, configLayer := range GetSFlowConfigLayer(config, 4) { + extracted := GetBytes(data, offset*8+configLayer.Offset, configLayer.Length) + MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian) + } + + appOffset := 0 + if len(data) >= offset+4 && (nextHeader == 17 || nextHeader == 6) && fragOffset&8191 == 0 { + srcPort = binary.BigEndian.Uint16(data[offset+0 : offset+2]) + dstPort = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + } + + if nextHeader == 17 { + appOffset = 8 + } + + if len(data) > offset+13 && nextHeader == 6 { + tcpflags = data[offset+13] + + appOffset = int(data[13]>>4) * 4 + } + + // ICMP and ICMPv6 + if len(data) >= offset+2 && (nextHeader == 1 || nextHeader == 58) { + (*flowMessage).IcmpType = uint32(data[offset+0]) + (*flowMessage).IcmpCode = uint32(data[offset+1]) + } + + if appOffset > 0 { + for _, configLayer := range GetSFlowConfigLayer(config, 7) { + extracted := GetBytes(data, (offset+appOffset)*8+configLayer.Offset, configLayer.Length) + MapCustom(flowMessage, extracted, configLayer.Destination, configLayer.Endian) + } + } + + iterations++ + } + + (*flowMessage).HasMpls = hasMpls + (*flowMessage).MplsCount = countMpls + (*flowMessage).Mpls_1Label = firstLabelMpls + (*flowMessage).Mpls_1Ttl = uint32(firstTtlMpls) + (*flowMessage).Mpls_2Label = secondLabelMpls + (*flowMessage).Mpls_2Ttl = uint32(secondTtlMpls) + (*flowMessage).Mpls_3Label = thirdLabelMpls + (*flowMessage).Mpls_3Ttl = uint32(thirdTtlMpls) + (*flowMessage).MplsLastLabel = lastLabelMpls 
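The MPLS loop in ParseEthernetHeader above walks 4-byte label-stack entries until the bottom-of-stack bit is set; each entry packs label (20 bits), EXP (3), S (1), and TTL (8). A self-contained sketch of decoding one shim with the same bit arithmetic, on a made-up entry:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        entry := []byte{0x00, 0x01, 0x41, 0x3f} // hypothetical stack entry

        label := binary.BigEndian.Uint32(append([]byte{0}, entry[0:3]...)) >> 4
        bottom := entry[2] & 1 // 1 means last label before the IP payload
        ttl := entry[3]

        fmt.Printf("label=%d bottom=%d ttl=%d\n", label, bottom, ttl) // label=20 bottom=1 ttl=63
    }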
+ (*flowMessage).MplsLastTtl = uint32(lastTtlMpls) + + (*flowMessage).Etype = uint32(binary.BigEndian.Uint16(etherType[0:2])) + (*flowMessage).Ipv6FlowLabel = flowLabel & 0xFFFFF + + (*flowMessage).SrcPort = uint32(srcPort) + (*flowMessage).DstPort = uint32(dstPort) + + (*flowMessage).SrcAddr = srcIP + (*flowMessage).DstAddr = dstIP + (*flowMessage).Proto = uint32(nextHeader) + (*flowMessage).IpTos = uint32(tos) + (*flowMessage).IpTtl = uint32(ttl) + (*flowMessage).TcpFlags = uint32(tcpflags) + + (*flowMessage).FragmentId = uint32(identification) + (*flowMessage).FragmentOffset = uint32(fragOffset) +} + +func ParseSampledHeaderConfig(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader, config *SFlowMapper) error { + data := (*sampledHeader).HeaderData + switch (*sampledHeader).Protocol { + case 1: // Ethernet + ParseEthernetHeader(flowMessage, data, config) + } + return nil +} + +func SearchSFlowSamples(samples []interface{}) []*flowmessage.FlowMessage { + return SearchSFlowSamplesConfig(samples, nil) +} + +func SearchSFlowSamplesConfig(samples []interface{}, config *SFlowMapper) []*flowmessage.FlowMessage { + var flowMessageSet []*flowmessage.FlowMessage + + for _, flowSample := range samples { + var records []sflow.FlowRecord + + flowMessage := &flowmessage.FlowMessage{} + flowMessage.Type = flowmessage.FlowMessage_SFLOW_5 + + switch flowSample := flowSample.(type) { + case sflow.FlowSample: + records = flowSample.Records + flowMessage.SamplingRate = uint64(flowSample.SamplingRate) + flowMessage.InIf = flowSample.Input + flowMessage.OutIf = flowSample.Output + case sflow.ExpandedFlowSample: + records = flowSample.Records + flowMessage.SamplingRate = uint64(flowSample.SamplingRate) + flowMessage.InIf = flowSample.InputIfValue + flowMessage.OutIf = flowSample.OutputIfValue + } + + ipNh := net.IP{} + ipSrc := net.IP{} + ipDst := net.IP{} + flowMessage.Packets = 1 + for _, record := range records { + switch recordData := record.Data.(type) { + case sflow.SampledHeader: + flowMessage.Bytes = uint64(recordData.FrameLength) + ParseSampledHeaderConfig(flowMessage, &recordData, config) + case sflow.SampledIPv4: + ipSrc = recordData.Base.SrcIP + ipDst = recordData.Base.DstIP + flowMessage.SrcAddr = ipSrc + flowMessage.DstAddr = ipDst + flowMessage.Bytes = uint64(recordData.Base.Length) + flowMessage.Proto = recordData.Base.Protocol + flowMessage.SrcPort = recordData.Base.SrcPort + flowMessage.DstPort = recordData.Base.DstPort + flowMessage.IpTos = recordData.Tos + flowMessage.Etype = 0x800 + case sflow.SampledIPv6: + ipSrc = recordData.Base.SrcIP + ipDst = recordData.Base.DstIP + flowMessage.SrcAddr = ipSrc + flowMessage.DstAddr = ipDst + flowMessage.Bytes = uint64(recordData.Base.Length) + flowMessage.Proto = recordData.Base.Protocol + flowMessage.SrcPort = recordData.Base.SrcPort + flowMessage.DstPort = recordData.Base.DstPort + flowMessage.IpTos = recordData.Priority + flowMessage.Etype = 0x86dd + case sflow.ExtendedRouter: + ipNh = recordData.NextHop + flowMessage.NextHop = ipNh + flowMessage.SrcNet = recordData.SrcMaskLen + flowMessage.DstNet = recordData.DstMaskLen + case sflow.ExtendedGateway: + ipNh = recordData.NextHop + flowMessage.BgpNextHop = ipNh + flowMessage.BgpCommunities = recordData.Communities + flowMessage.AsPath = recordData.ASPath + if len(recordData.ASPath) > 0 { + flowMessage.DstAs = recordData.ASPath[len(recordData.ASPath)-1] + flowMessage.NextHopAs = recordData.ASPath[0] + } else { + flowMessage.DstAs = recordData.AS + } + if recordData.SrcAS > 0 { + 
flowMessage.SrcAs = recordData.SrcAS + } else { + flowMessage.SrcAs = recordData.AS + } + case sflow.ExtendedSwitch: + flowMessage.SrcVlan = recordData.SrcVlan + flowMessage.DstVlan = recordData.DstVlan + } + } + flowMessageSet = append(flowMessageSet, flowMessage) + } + return flowMessageSet +} + +func ProcessMessageSFlow(msgDec interface{}) ([]*flowmessage.FlowMessage, error) { + return ProcessMessageSFlowConfig(msgDec, nil) +} + +func ProcessMessageSFlowConfig(msgDec interface{}, config *ProducerConfigMapped) ([]*flowmessage.FlowMessage, error) { + switch packet := msgDec.(type) { + case sflow.Packet: + seqnum := packet.SequenceNumber + var agent net.IP + agent = packet.AgentIP + + var cfg *SFlowMapper + if config != nil { + cfg = config.SFlow + } + + flowSamples := GetSFlowFlowSamples(&packet) + flowMessageSet := SearchSFlowSamplesConfig(flowSamples, cfg) + for _, fmsg := range flowMessageSet { + fmsg.SamplerAddress = agent + fmsg.SequenceNum = seqnum + } + + return flowMessageSet, nil + default: + return []*flowmessage.FlowMessage{}, errors.New("Bad sFlow version") + } +} diff --git a/vendor/github.com/netsampler/goflow2/producer/reflect.go b/vendor/github.com/netsampler/goflow2/producer/reflect.go new file mode 100644 index 000000000..91a2a415c --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/producer/reflect.go @@ -0,0 +1,233 @@ +package producer + +import ( + "fmt" + "reflect" + + "github.com/netsampler/goflow2/decoders/netflow" + flowmessage "github.com/netsampler/goflow2/pb" +) + +type EndianType string + +var ( + BigEndian EndianType = "big" + LittleEndian EndianType = "little" +) + +func GetBytes(d []byte, offset int, length int) []byte { + if length == 0 { + return nil + } + leftBytes := offset / 8 + rightBytes := (offset + length) / 8 + if (offset+length)%8 != 0 { + rightBytes += 1 + } + if leftBytes >= len(d) { + return nil + } + if rightBytes > len(d) { + rightBytes = len(d) + } + chunk := make([]byte, rightBytes-leftBytes) + + offsetMod8 := (offset % 8) + shiftAnd := byte(0xff >> (8 - offsetMod8)) + + var shifted byte + for i := range chunk { + j := len(chunk) - 1 - i + cur := d[j+leftBytes] + chunk[j] = (cur << offsetMod8) | shifted + shifted = shiftAnd & cur + } + last := len(chunk) - 1 + shiftAndLast := byte(0xff << ((8 - ((offset + length) % 8)) % 8)) + chunk[last] = chunk[last] & shiftAndLast + return chunk +} + +func IsUInt(k reflect.Kind) bool { + return k == reflect.Uint8 || k == reflect.Uint16 || k == reflect.Uint32 || k == reflect.Uint64 +} + +func IsInt(k reflect.Kind) bool { + return k == reflect.Int8 || k == reflect.Int16 || k == reflect.Int32 || k == reflect.Int64 +} + +func MapCustomNetFlow(flowMessage *flowmessage.FlowMessage, df netflow.DataField, mapper *NetFlowMapper) { + if mapper == nil { + return + } + mapped, ok := mapper.Map(df) + if ok { + v := df.Value.([]byte) + MapCustom(flowMessage, v, mapped.Destination, mapped.Endian) + } +} + +func MapCustom(flowMessage *flowmessage.FlowMessage, v []byte, destination string, endianness EndianType) { + vfm := reflect.ValueOf(flowMessage) + vfm = reflect.Indirect(vfm) + + fieldValue := vfm.FieldByName(destination) + + if fieldValue.IsValid() { + typeDest := fieldValue.Type() + fieldValueAddr := fieldValue.Addr() + + if typeDest.Kind() == reflect.Slice { + + if typeDest.Elem().Kind() == reflect.Uint8 { + fieldValue.SetBytes(v) + } else { + item := reflect.New(typeDest.Elem()) + + if IsUInt(typeDest.Elem().Kind()) { + if endianness == LittleEndian { + DecodeUNumberLE(v, item.Interface()) + } else { + 
DecodeUNumber(v, item.Interface()) + } + } else if IsUInt(typeDest.Elem().Kind()) { + if endianness == LittleEndian { + DecodeUNumberLE(v, item.Interface()) + } else { + DecodeUNumber(v, item.Interface()) + } + } + + itemi := reflect.Indirect(item) + tmpFieldValue := reflect.Append(fieldValue, itemi) + fieldValue.Set(tmpFieldValue) + } + + } else if fieldValueAddr.IsValid() && IsUInt(typeDest.Kind()) { + if endianness == LittleEndian { + DecodeUNumberLE(v, fieldValueAddr.Interface()) + } else { + DecodeUNumber(v, fieldValueAddr.Interface()) + } + } else if fieldValueAddr.IsValid() && IsInt(typeDest.Kind()) { + if endianness == LittleEndian { + DecodeUNumberLE(v, fieldValueAddr.Interface()) + } else { + DecodeUNumber(v, fieldValueAddr.Interface()) + } + } + } +} + +type NetFlowMapField struct { + PenProvided bool `json:"penprovided" yaml:"penprovided"` + Type uint16 `json:"field" yaml:"field"` + Pen uint32 `json:"pen" yaml:"pen"` + + Destination string `json:"destination" yaml:"destination"` + Endian EndianType `json:"endianness" yaml:"endianness"` + //DestinationLength uint8 `json:"dlen"` // could be used if populating a slice of uint16 that aren't in protobuf +} + +type IPFIXProducerConfig struct { + Mapping []NetFlowMapField `json:"mapping"` + //PacketMapping []SFlowMapField `json:"packet-mapping"` // for embedded frames: use sFlow configuration +} + +type NetFlowV9ProducerConfig struct { + Mapping []NetFlowMapField `json:"mapping"` +} + +type SFlowMapField struct { + Layer int `json:"layer"` + Offset int `json:"offset"` // offset in bits + Length int `json:"length"` // length in bits + + Destination string `json:"destination" yaml:"destination"` + Endian EndianType `json:"endianness" yaml:"endianness"` + //DestinationLength uint8 `json:"dlen"` +} + +type SFlowProducerConfig struct { + Mapping []SFlowMapField `json:"mapping"` +} + +type ProducerConfig struct { + IPFIX IPFIXProducerConfig `json:"ipfix"` + NetFlowV9 NetFlowV9ProducerConfig `json:"netflowv9"` + SFlow SFlowProducerConfig `json:"sflow"` // also used for IPFIX data frames + + // should do a rename map list for when printing +} + +type DataMap struct { + Destination string + Endian EndianType +} + +type NetFlowMapper struct { + data map[string]DataMap // maps field to destination +} + +func (m *NetFlowMapper) Map(field netflow.DataField) (DataMap, bool) { + mapped, found := m.data[fmt.Sprintf("%v-%d-%d", field.PenProvided, field.Pen, field.Type)] + return mapped, found +} + +func MapFieldsNetFlow(fields []NetFlowMapField) *NetFlowMapper { + ret := make(map[string]DataMap) + for _, field := range fields { + ret[fmt.Sprintf("%v-%d-%d", field.PenProvided, field.Pen, field.Type)] = DataMap{Destination: field.Destination, Endian: field.Endian} + } + return &NetFlowMapper{ret} +} + +type DataMapLayer struct { + Offset int + Length int + Destination string + Endian EndianType +} + +type SFlowMapper struct { + data map[int][]DataMapLayer // map layer to list of offsets +} + +func GetSFlowConfigLayer(m *SFlowMapper, layer int) []DataMapLayer { + if m == nil { + return nil + } + return m.data[layer] +} + +func MapFieldsSFlow(fields []SFlowMapField) *SFlowMapper { + ret := make(map[int][]DataMapLayer) + for _, field := range fields { + retLayerEntry := DataMapLayer{ + Offset: field.Offset, + Length: field.Length, + Destination: field.Destination, + Endian: field.Endian, + } + retLayer := ret[field.Layer] + retLayer = append(retLayer, retLayerEntry) + ret[field.Layer] = retLayer + } + return &SFlowMapper{ret} +} + +type 
ProducerConfigMapped struct { + IPFIX *NetFlowMapper `json:"ipfix"` + NetFlowV9 *NetFlowMapper `json:"netflowv9"` + SFlow *SFlowMapper `json:"sflow"` +} + +func NewProducerConfigMapped(config *ProducerConfig) *ProducerConfigMapped { + newCfg := &ProducerConfigMapped{} + if config != nil { + newCfg.IPFIX = MapFieldsNetFlow(config.IPFIX.Mapping) + newCfg.NetFlowV9 = MapFieldsNetFlow(config.NetFlowV9.Mapping) + newCfg.SFlow = MapFieldsSFlow(config.SFlow.Mapping) + } + return newCfg +} diff --git a/vendor/github.com/netsampler/goflow2/transport/transport.go b/vendor/github.com/netsampler/goflow2/transport/transport.go new file mode 100644 index 000000000..11e9c6786 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/transport/transport.go @@ -0,0 +1,68 @@ +package transport + +import ( + "context" + "fmt" + "sync" +) + +var ( + transportDrivers = make(map[string]TransportDriver) + lock = &sync.RWMutex{} +) + +type TransportDriver interface { + Prepare() error // Prepare driver (eg: flag registration) + Init(context.Context) error // Initialize driver (eg: start connections, open files...) + Close(context.Context) error // Close driver (eg: close connections and files...) + Send(key, data []byte) error // Send a formatted message +} + +type TransportInterface interface { + Send(key, data []byte) error +} + +type Transport struct { + driver TransportDriver +} + +func (t *Transport) Close(ctx context.Context) { + t.driver.Close(ctx) +} +func (t *Transport) Send(key, data []byte) error { + return t.driver.Send(key, data) +} + +func RegisterTransportDriver(name string, t TransportDriver) { + lock.Lock() + transportDrivers[name] = t + lock.Unlock() + + if err := t.Prepare(); err != nil { + panic(err) + } +} + +func FindTransport(ctx context.Context, name string) (*Transport, error) { + lock.RLock() + t, ok := transportDrivers[name] + lock.RUnlock() + if !ok { + return nil, fmt.Errorf("Transport %s not found", name) + } + + err := t.Init(ctx) + return &Transport{t}, err +} + +func GetTransports() []string { + lock.RLock() + defer lock.RUnlock() + t := make([]string, len(transportDrivers)) + var i int + for k, _ := range transportDrivers { + t[i] = k + i++ + } + return t +} diff --git a/vendor/github.com/netsampler/goflow2/utils/metrics.go b/vendor/github.com/netsampler/goflow2/utils/metrics.go new file mode 100644 index 000000000..eb3f23158 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/utils/metrics.go @@ -0,0 +1,171 @@ +package utils + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + MetricTrafficBytes = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_traffic_bytes", + Help: "Bytes received by the application.", + }, + []string{"remote_ip", "local_ip", "local_port", "type"}, + ) + MetricTrafficPackets = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_traffic_packets", + Help: "Packets received by the application.", + }, + []string{"remote_ip", "local_ip", "local_port", "type"}, + ) + MetricPacketSizeSum = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_traffic_summary_size_bytes", + Help: "Summary of packet size.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"remote_ip", "local_ip", "local_port", "type"}, + ) + DecoderStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_decoder_count", + Help: "Decoder processed count.", + }, + []string{"worker", "name"}, + ) + DecoderErrors = prometheus.NewCounterVec( + 
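+ // Counts messages whose decoding failed, labeled by worker and decoder name.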
prometheus.CounterOpts{ + Name: "flow_decoder_error_count", + Help: "Decoder processed error count.", + }, + []string{"worker", "name"}, + ) + DecoderTime = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_summary_decoding_time_us", + Help: "Decoding time summary.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"name"}, + ) + DecoderProcessTime = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_summary_processing_time_us", + Help: "Processing time summary.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"name"}, + ) + NetFlowStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_count", + Help: "NetFlows processed.", + }, + []string{"router", "version"}, + ) + NetFlowErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_errors_count", + Help: "NetFlows processed errors.", + }, + []string{"router", "error"}, + ) + NetFlowSetRecordsStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_flowset_records_sum", + Help: "NetFlows FlowSets sum of records.", + }, + []string{"router", "version", "type"}, // data-template, data, opts... + ) + NetFlowSetStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_flowset_sum", + Help: "NetFlows FlowSets sum.", + }, + []string{"router", "version", "type"}, // data-template, data, opts... + ) + NetFlowTimeStatsSum = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "flow_process_nf_delay_summary_seconds", + Help: "NetFlows time difference between time of flow and processing.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"router", "version"}, + ) + NetFlowTemplatesStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_nf_templates_count", + Help: "NetFlows Template count.", + }, + []string{"router", "version", "obs_domain_id", "template_id", "type"}, // options/template + ) + SFlowStats = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_count", + Help: "sFlows processed.", + }, + []string{"router", "agent", "version"}, + ) + SFlowErrors = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_errors_count", + Help: "sFlows processed errors.", + }, + []string{"router", "error"}, + ) + SFlowSampleStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_samples_sum", + Help: "SFlows samples sum.", + }, + []string{"router", "agent", "version", "type"}, // counter, flow, expanded... + ) + SFlowSampleRecordsStatsSum = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "flow_process_sf_samples_records_sum", + Help: "SFlows samples sum of records.", + }, + []string{"router", "agent", "version", "type"}, // data-template, data, opts... 
+ ) +) + +func init() { + prometheus.MustRegister(MetricTrafficBytes) + prometheus.MustRegister(MetricTrafficPackets) + prometheus.MustRegister(MetricPacketSizeSum) + + prometheus.MustRegister(DecoderStats) + prometheus.MustRegister(DecoderErrors) + prometheus.MustRegister(DecoderTime) + prometheus.MustRegister(DecoderProcessTime) + + prometheus.MustRegister(NetFlowStats) + prometheus.MustRegister(NetFlowErrors) + prometheus.MustRegister(NetFlowSetRecordsStatsSum) + prometheus.MustRegister(NetFlowSetStatsSum) + prometheus.MustRegister(NetFlowTimeStatsSum) + prometheus.MustRegister(NetFlowTemplatesStats) + + prometheus.MustRegister(SFlowStats) + prometheus.MustRegister(SFlowErrors) + prometheus.MustRegister(SFlowSampleStatsSum) + prometheus.MustRegister(SFlowSampleRecordsStatsSum) +} + +func DefaultAccountCallback(name string, id int, start, end time.Time) { + DecoderProcessTime.With( + prometheus.Labels{ + "name": name, + }). + Observe(float64((end.Sub(start)).Nanoseconds()) / 1000) + DecoderStats.With( + prometheus.Labels{ + "worker": strconv.Itoa(id), + "name": name, + }). + Inc() +} diff --git a/vendor/github.com/netsampler/goflow2/utils/netflow.go b/vendor/github.com/netsampler/goflow2/utils/netflow.go new file mode 100644 index 000000000..0923ee3b7 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/utils/netflow.go @@ -0,0 +1,377 @@ +package utils + +import ( + "bytes" + "context" + "sync" + "time" + + "github.com/netsampler/goflow2/decoders/netflow" + "github.com/netsampler/goflow2/decoders/netflow/templates" + "github.com/netsampler/goflow2/format" + flowmessage "github.com/netsampler/goflow2/pb" + "github.com/netsampler/goflow2/producer" + "github.com/netsampler/goflow2/transport" + "github.com/prometheus/client_golang/prometheus" +) + +/* +type TemplateSystem struct { + key string + templates *netflow.BasicTemplateSystem +} + +func (s *TemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { + s.templates.AddTemplate(version, obsDomainId, template) + + typeStr := "options_template" + var templateId uint16 + switch templateIdConv := template.(type) { + case netflow.IPFIXOptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case netflow.NFv9OptionsTemplateRecord: + templateId = templateIdConv.TemplateId + case netflow.TemplateRecord: + templateId = templateIdConv.TemplateId + typeStr = "template" + } + NetFlowTemplatesStats.With( + prometheus.Labels{ + "router": s.key, + "version": strconv.Itoa(int(version)), + "obs_domain_id": strconv.Itoa(int(obsDomainId)), + "template_id": strconv.Itoa(int(templateId)), + "type": typeStr, + }). 
+ Inc() +} + +func (s *TemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { + return s.templates.GetTemplate(version, obsDomainId, templateId) +} +*/ + +type StateNetFlow struct { + stopper + + Format format.FormatInterface + Transport transport.TransportInterface + Logger Logger + /*templateslock *sync.RWMutex + templates map[string]*TemplateSystem*/ + + samplinglock *sync.RWMutex + sampling map[string]producer.SamplingRateSystem + + Config *producer.ProducerConfig + configMapped *producer.ProducerConfigMapped + + TemplateSystem templates.TemplateInterface + + ctx context.Context +} + +func NewStateNetFlow() *StateNetFlow { + return &StateNetFlow{ + ctx: context.Background(), + samplinglock: &sync.RWMutex{}, + sampling: make(map[string]producer.SamplingRateSystem), + } +} + +func (s *StateNetFlow) DecodeFlow(msg interface{}) error { + pkt := msg.(BaseMessage) + buf := bytes.NewBuffer(pkt.Payload) + + key := pkt.Src.String() + samplerAddress := pkt.Src + if samplerAddress.To4() != nil { + samplerAddress = samplerAddress.To4() + } + + s.samplinglock.RLock() + sampling, ok := s.sampling[key] + s.samplinglock.RUnlock() + if !ok { + sampling = producer.CreateSamplingSystem() + s.samplinglock.Lock() + s.sampling[key] = sampling + s.samplinglock.Unlock() + } + + ts := uint64(time.Now().UTC().Unix()) + if pkt.SetTime { + ts = uint64(pkt.RecvTime.UTC().Unix()) + } + + timeTrackStart := time.Now() + msgDec, err := netflow.DecodeMessageContext(s.ctx, buf, key, netflow.TemplateWrapper{s.ctx, key, s.TemplateSystem}) + if err != nil { + switch err.(type) { + case *netflow.ErrorTemplateNotFound: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "template_not_found", + }). + Inc() + default: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_decoding", + }). + Inc() + } + return err + } + + var flowMessageSet []*flowmessage.FlowMessage + + switch msgDecConv := msgDec.(type) { + case netflow.NFv9Packet: + NetFlowStats.With( + prometheus.Labels{ + "router": key, + "version": "9", + }). + Inc() + + for _, fs := range msgDecConv.FlowSets { + switch fsConv := fs.(type) { + case netflow.TemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "TemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "TemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.NFv9OptionsTemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsTemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsTemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.OptionsDataFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsDataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "OptionsDataFlowSet", + }). + Add(float64(len(fsConv.Records))) + case netflow.DataFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "DataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + "type": "DataFlowSet", + }). 
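+ // Add one count per record carried by this data FlowSet.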
+ Add(float64(len(fsConv.Records))) + } + } + flowMessageSet, err = producer.ProcessMessageNetFlowConfig(msgDecConv, sampling, s.configMapped) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = ts + fmsg.SamplerAddress = samplerAddress + timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd + NetFlowTimeStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "9", + }). + Observe(float64(timeDiff)) + } + case netflow.IPFIXPacket: + NetFlowStats.With( + prometheus.Labels{ + "router": key, + "version": "10", + }). + Inc() + + for _, fs := range msgDecConv.FlowSets { + switch fsConv := fs.(type) { + case netflow.TemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "TemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "TemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.IPFIXOptionsTemplateFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsTemplateFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsTemplateFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.OptionsDataFlowSet: + + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsDataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "OptionsDataFlowSet", + }). + Add(float64(len(fsConv.Records))) + + case netflow.DataFlowSet: + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "DataFlowSet", + }). + Inc() + + NetFlowSetRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + "type": "DataFlowSet", + }). + Add(float64(len(fsConv.Records))) + } + } + flowMessageSet, err = producer.ProcessMessageNetFlowConfig(msgDecConv, sampling, s.configMapped) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = ts + fmsg.SamplerAddress = samplerAddress + timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd + NetFlowTimeStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "10", + }). + Observe(float64(timeDiff)) + } + } + + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": "NetFlow", + }). 
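+ // The observed value is the decode duration in microseconds, matching
+ // the _us suffix of the metric name.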
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + for _, fmsg := range flowMessageSet { + if s.Format != nil { + key, data, err := s.Format.Format(fmsg) + + if err != nil && s.Logger != nil { + s.Logger.Error(err) + } + if err == nil && s.Transport != nil { + err = s.Transport.Send(key, data) + if err != nil { + s.Logger.Error(err) + } + } + } + } + + return nil +} + +/* +func (s *StateNetFlow) ServeHTTPTemplates(w http.ResponseWriter, r *http.Request) { + tmp := make(map[string]map[uint16]map[uint32]map[uint16]interface{}) + s.templateslock.RLock() + for key, templatesrouterstr := range s.templates { + templatesrouter := templatesrouterstr.templates.GetTemplates() + tmp[key] = templatesrouter + } + s.templateslock.RUnlock() + enc := json.NewEncoder(w) + enc.Encode(tmp) +} + +func (s *StateNetFlow) InitTemplates() { + s.templates = make(map[string]*TemplateSystem) + s.templateslock = &sync.RWMutex{} + s.sampling = make(map[string]producer.SamplingRateSystem) + s.samplinglock = &sync.RWMutex{} +}*/ + +func (s *StateNetFlow) initConfig() { + s.configMapped = producer.NewProducerConfigMapped(s.Config) +} + +func (s *StateNetFlow) FlowRoutine(workers int, addr string, port int, reuseport bool) error { + if err := s.start(); err != nil { + return err + } + //s.InitTemplates() + s.initConfig() + return UDPStoppableRoutine(s.stopCh, "NetFlow", s.DecodeFlow, workers, addr, port, reuseport, s.Logger) +} + +// FlowRoutineCtx? diff --git a/vendor/github.com/netsampler/goflow2/utils/nflegacy.go b/vendor/github.com/netsampler/goflow2/utils/nflegacy.go new file mode 100644 index 000000000..dcfc36dbe --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/utils/nflegacy.go @@ -0,0 +1,111 @@ +package utils + +import ( + "bytes" + "time" + + "github.com/netsampler/goflow2/decoders/netflowlegacy" + "github.com/netsampler/goflow2/format" + flowmessage "github.com/netsampler/goflow2/pb" + "github.com/netsampler/goflow2/producer" + "github.com/netsampler/goflow2/transport" + "github.com/prometheus/client_golang/prometheus" +) + +type StateNFLegacy struct { + stopper + + Format format.FormatInterface + Transport transport.TransportInterface + Logger Logger +} + +func NewStateNFLegacy() *StateNFLegacy { + return &StateNFLegacy{} +} + +func (s *StateNFLegacy) DecodeFlow(msg interface{}) error { + pkt := msg.(BaseMessage) + buf := bytes.NewBuffer(pkt.Payload) + key := pkt.Src.String() + samplerAddress := pkt.Src + if samplerAddress.To4() != nil { + samplerAddress = samplerAddress.To4() + } + + ts := uint64(time.Now().UTC().Unix()) + if pkt.SetTime { + ts = uint64(pkt.RecvTime.UTC().Unix()) + } + + timeTrackStart := time.Now() + msgDec, err := netflowlegacy.DecodeMessage(buf) + + if err != nil { + switch err.(type) { + case *netflowlegacy.ErrorVersion: + NetFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_version", + }). + Inc() + } + return err + } + + switch msgDecConv := msgDec.(type) { + case netflowlegacy.PacketNetFlowV5: + NetFlowStats.With( + prometheus.Labels{ + "router": key, + "version": "5", + }). + Inc() + NetFlowSetStatsSum.With( + prometheus.Labels{ + "router": key, + "version": "5", + "type": "DataFlowSet", + }). + Add(float64(msgDecConv.Count)) + } + + var flowMessageSet []*flowmessage.FlowMessage + flowMessageSet, err = producer.ProcessMessageNetFlowLegacy(msgDec) + + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": "NetFlowV5", + }). 
+ Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = ts + fmsg.SamplerAddress = samplerAddress + + if s.Format != nil { + key, data, err := s.Format.Format(fmsg) + + if err != nil && s.Logger != nil { + s.Logger.Error(err) + } + if err == nil && s.Transport != nil { + err = s.Transport.Send(key, data) + if err != nil { + s.Logger.Error(err) + } + } + } + } + + return nil +} + +func (s *StateNFLegacy) FlowRoutine(workers int, addr string, port int, reuseport bool) error { + if err := s.start(); err != nil { + return err + } + return UDPStoppableRoutine(s.stopCh, "NetFlowV5", s.DecodeFlow, workers, addr, port, reuseport, s.Logger) +} diff --git a/vendor/github.com/netsampler/goflow2/utils/sflow.go b/vendor/github.com/netsampler/goflow2/utils/sflow.go new file mode 100644 index 000000000..27223bcc0 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/utils/sflow.go @@ -0,0 +1,170 @@ +package utils + +import ( + "bytes" + "net" + "time" + + "github.com/netsampler/goflow2/decoders/sflow" + "github.com/netsampler/goflow2/format" + flowmessage "github.com/netsampler/goflow2/pb" + "github.com/netsampler/goflow2/producer" + "github.com/netsampler/goflow2/transport" + "github.com/prometheus/client_golang/prometheus" +) + +type StateSFlow struct { + stopper + + Format format.FormatInterface + Transport transport.TransportInterface + Logger Logger + + Config *producer.ProducerConfig + configMapped *producer.ProducerConfigMapped +} + +func NewStateSFlow() *StateSFlow { + return &StateSFlow{} +} + +func (s *StateSFlow) DecodeFlow(msg interface{}) error { + pkt := msg.(BaseMessage) + buf := bytes.NewBuffer(pkt.Payload) + key := pkt.Src.String() + + ts := uint64(time.Now().UTC().Unix()) + if pkt.SetTime { + ts = uint64(pkt.RecvTime.UTC().Unix()) + } + + timeTrackStart := time.Now() + msgDec, err := sflow.DecodeMessage(buf) + + if err != nil { + switch err.(type) { + case *sflow.ErrorVersion: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_version", + }). + Inc() + case *sflow.ErrorIPVersion: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_ip_version", + }). + Inc() + case *sflow.ErrorDataFormat: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_data_format", + }). + Inc() + default: + SFlowErrors.With( + prometheus.Labels{ + "router": key, + "error": "error_decoding", + }). + Inc() + } + return err + } + + switch msgDecConv := msgDec.(type) { + case sflow.Packet: + agentStr := net.IP(msgDecConv.AgentIP).String() + SFlowStats.With( + prometheus.Labels{ + "router": key, + "agent": agentStr, + "version": "5", + }). + Inc() + + for _, samples := range msgDecConv.Samples { + typeStr := "unknown" + countRec := 0 + switch samplesConv := samples.(type) { + case sflow.FlowSample: + typeStr = "FlowSample" + countRec = len(samplesConv.Records) + case sflow.CounterSample: + typeStr = "CounterSample" + if samplesConv.Header.Format == 4 { + typeStr = "Expanded" + typeStr + } + countRec = len(samplesConv.Records) + case sflow.ExpandedFlowSample: + typeStr = "ExpandedFlowSample" + countRec = len(samplesConv.Records) + } + SFlowSampleStatsSum.With( + prometheus.Labels{ + "router": key, + "agent": agentStr, + "version": "5", + "type": typeStr, + }). + Inc() + + SFlowSampleRecordsStatsSum.With( + prometheus.Labels{ + "router": key, + "agent": agentStr, + "version": "5", + "type": typeStr, + }). 
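+ // countRec is the number of records carried by this sample (0 when the
+ // sample type is unknown).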
+ Add(float64(countRec)) + } + + } + + var flowMessageSet []*flowmessage.FlowMessage + flowMessageSet, err = producer.ProcessMessageSFlowConfig(msgDec, s.configMapped) + + timeTrackStop := time.Now() + DecoderTime.With( + prometheus.Labels{ + "name": "sFlow", + }). + Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) + + for _, fmsg := range flowMessageSet { + fmsg.TimeReceived = ts + fmsg.TimeFlowStart = ts + fmsg.TimeFlowEnd = ts + + if s.Format != nil { + key, data, err := s.Format.Format(fmsg) + + if err != nil && s.Logger != nil { + s.Logger.Error(err) + } + if err == nil && s.Transport != nil { + err = s.Transport.Send(key, data) + if err != nil { + s.Logger.Error(err) + } + } + } + } + + return nil +} + +func (s *StateSFlow) initConfig() { + s.configMapped = producer.NewProducerConfigMapped(s.Config) +} + +func (s *StateSFlow) FlowRoutine(workers int, addr string, port int, reuseport bool) error { + if err := s.start(); err != nil { + return err + } + s.initConfig() + return UDPStoppableRoutine(s.stopCh, "sFlow", s.DecodeFlow, workers, addr, port, reuseport, s.Logger) +} diff --git a/vendor/github.com/netsampler/goflow2/utils/stopper.go b/vendor/github.com/netsampler/goflow2/utils/stopper.go new file mode 100644 index 000000000..153b1bd17 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/utils/stopper.go @@ -0,0 +1,33 @@ +package utils + +import ( + "errors" +) + +// ErrAlreadyStarted error happens when you try to start twice a flow routine +var ErrAlreadyStarted = errors.New("the routine is already started") + +// stopper mechanism, common for all the flow routines +type stopper struct { + stopCh chan struct{} +} + +func (s *stopper) start() error { + if s.stopCh != nil { + return ErrAlreadyStarted + } + s.stopCh = make(chan struct{}) + return nil +} + +func (s *stopper) Shutdown() { + if s.stopCh != nil { + select { + case <-s.stopCh: + default: + close(s.stopCh) + } + + s.stopCh = nil + } +} diff --git a/vendor/github.com/netsampler/goflow2/utils/utils.go b/vendor/github.com/netsampler/goflow2/utils/utils.go new file mode 100644 index 000000000..2aab45a10 --- /dev/null +++ b/vendor/github.com/netsampler/goflow2/utils/utils.go @@ -0,0 +1,234 @@ +package utils + +import ( + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" + + reuseport "github.com/libp2p/go-reuseport" + decoder "github.com/netsampler/goflow2/decoders" + "github.com/netsampler/goflow2/decoders/netflow" + flowmessage "github.com/netsampler/goflow2/pb" + "github.com/netsampler/goflow2/producer" + "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v2" +) + +type ProducerConfig *producer.ProducerConfig + +func LoadMapping(f io.Reader) (ProducerConfig, error) { + config := &producer.ProducerConfig{} + dec := yaml.NewDecoder(f) + err := dec.Decode(config) + return config, err +} + +func GetServiceAddresses(srv string) (addrs []string, err error) { + _, srvs, err := net.LookupSRV("", "", srv) + if err != nil { + return nil, errors.New(fmt.Sprintf("Service discovery: %v\n", err)) + } + for _, srv := range srvs { + addrs = append(addrs, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port)))) + } + return addrs, nil +} + +type Logger interface { + Printf(string, ...interface{}) + Errorf(string, ...interface{}) + Warnf(string, ...interface{}) + Warn(...interface{}) + Error(...interface{}) + Debug(...interface{}) + Debugf(string, ...interface{}) + Infof(string, ...interface{}) + Fatalf(string, ...interface{}) +} + +type BaseMessage struct { + Src net.IP + Port int + 
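+ // Payload holds the raw UDP datagram body, copied per packet by the
+ // reader loop in UDPStoppableRoutine.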
Payload []byte + + SetTime bool + RecvTime time.Time +} + +type Transport interface { + Send([]*flowmessage.FlowMessage) +} + +type Formatter interface { + Format([]*flowmessage.FlowMessage) +} + +/* +type DefaultLogTransport struct { +} + + func (s *DefaultLogTransport) Publish(msgs []*flowmessage.FlowMessage) { + for _, msg := range msgs { + fmt.Printf("%v\n", FlowMessageToString(msg)) + } + } + +type DefaultJSONTransport struct { +} + + func (s *DefaultJSONTransport) Publish(msgs []*flowmessage.FlowMessage) { + for _, msg := range msgs { + fmt.Printf("%v\n", FlowMessageToJSON(msg)) + } + } +*/ +type DefaultErrorCallback struct { + Logger Logger +} + +func (cb *DefaultErrorCallback) Callback(name string, id int, start, end time.Time, err error) { + if _, ok := err.(*netflow.ErrorTemplateNotFound); ok { + return + } + if cb.Logger != nil { + cb.Logger.Errorf("Error from: %v (%v) duration: %v. %v", name, id, end.Sub(start), err) + } +} + +func UDPRoutine(name string, decodeFunc decoder.DecoderFunc, workers int, addr string, port int, sockReuse bool, logger Logger) error { + return UDPStoppableRoutine(make(chan struct{}), name, decodeFunc, workers, addr, port, sockReuse, logger) +} + +// UDPStoppableRoutine runs a UDPRoutine that can be stopped by closing the stopCh passed as argument +func UDPStoppableRoutine(stopCh <-chan struct{}, name string, decodeFunc decoder.DecoderFunc, workers int, addr string, port int, sockReuse bool, logger Logger) error { + ecb := DefaultErrorCallback{ + Logger: logger, + } + + decoderParams := decoder.DecoderParams{ + DecoderFunc: decodeFunc, + DoneCallback: DefaultAccountCallback, + ErrorCallback: ecb.Callback, + } + + processor := decoder.CreateProcessor(workers, decoderParams, name) + processor.Start() + + addrUDP := net.UDPAddr{ + IP: net.ParseIP(addr), + Port: port, + } + + var udpconn *net.UDPConn + var err error + + if sockReuse { + pconn, err := reuseport.ListenPacket("udp", addrUDP.String()) + if err != nil { + return err + } + defer pconn.Close() + var ok bool + udpconn, ok = pconn.(*net.UDPConn) + if !ok { + return err + } + } else { + udpconn, err = net.ListenUDP("udp", &addrUDP) + if err != nil { + return err + } + defer udpconn.Close() + } + + payload := make([]byte, 9000) + + localIP := addrUDP.IP.String() + if addrUDP.IP == nil { + localIP = "" + } + + type udpData struct { + size int + pktAddr *net.UDPAddr + payload []byte + } + + udpDataCh := make(chan udpData) + defer close(udpDataCh) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for { + u := udpData{} + u.size, u.pktAddr, _ = udpconn.ReadFromUDP(payload) + if u.size == 0 { // Ignore 0 byte packets. + continue + } + u.payload = make([]byte, u.size) + copy(u.payload, payload[0:u.size]) + select { + case <-stopCh: + return + default: + udpDataCh <- u + } + } + }() + func() { + for { + select { + case u := <-udpDataCh: + process(u.size, u.payload, u.pktAddr, processor, localIP, addrUDP, name) + case <-stopCh: + return + } + } + }() + + for _ = range udpDataCh { + // drain + } + wg.Wait() + return nil +} + +func process(size int, payload []byte, pktAddr *net.UDPAddr, processor decoder.Processor, localIP string, addrUDP net.UDPAddr, name string) { + baseMessage := BaseMessage{ + Src: pktAddr.IP, + Port: pktAddr.Port, + Payload: payload, + } + processor.ProcessMessage(baseMessage) + + MetricTrafficBytes.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "local_ip": localIP, + "local_port": strconv.Itoa(addrUDP.Port), + "type": name, + }). 
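+ // size is the number of payload bytes read from the socket for this packet.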
+ Add(float64(size)) + MetricTrafficPackets.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "local_ip": localIP, + "local_port": strconv.Itoa(addrUDP.Port), + "type": name, + }). + Inc() + MetricPacketSizeSum.With( + prometheus.Labels{ + "remote_ip": pktAddr.IP.String(), + "local_ip": localIP, + "local_port": strconv.Itoa(addrUDP.Port), + "type": name, + }). + Observe(float64(size)) +} diff --git a/vendor/github.com/ovn-org/libovsdb/LICENSE b/vendor/github.com/ovn-org/libovsdb/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/ovn-org/libovsdb/NOTICE b/vendor/github.com/ovn-org/libovsdb/NOTICE new file mode 100644 index 000000000..156dcf39f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/NOTICE @@ -0,0 +1,13 @@ +libovsdb + +Copyright 2014-2015 Socketplane Inc. +Copyright 2015-2018 Docker Inc. + +This software consists of voluntary contributions made by many individuals. For +exact contribution history, see the commit history. + +Modifications Copyright 2018-2019 eBay Inc. + +This software contains modifications developed by eBay Inc. and voluntary contributions +from other individuals in a fork maintained at https://github.com/eBay/libovsdb +For details on these contributions, please consult the git history. diff --git a/vendor/github.com/ovn-org/libovsdb/cache/cache.go b/vendor/github.com/ovn-org/libovsdb/cache/cache.go new file mode 100644 index 000000000..0b1e09e72 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/cache/cache.go @@ -0,0 +1,1284 @@ +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/gob" + "encoding/hex" + "fmt" + "log" + "os" + "reflect" + "sort" + "strings" + "sync" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/libovsdb/updates" +) + +const ( + updateEvent = "update" + addEvent = "add" + deleteEvent = "delete" + bufferSize = 65536 + columnDelimiter = "," + keyDelimiter = "|" +) + +// ErrCacheInconsistent is an error that can occur when an operation +// would cause the cache to be inconsistent +type ErrCacheInconsistent struct { + details string +} + +// Error implements the error interface +func (e *ErrCacheInconsistent) Error() string { + msg := "cache inconsistent" + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +func NewErrCacheInconsistent(details string) *ErrCacheInconsistent { + return &ErrCacheInconsistent{ + details: details, + } +} + +// ErrIndexExists is returned when an item in the database cannot be inserted due to existing indexes +type ErrIndexExists struct { + Table string + Value interface{} + Index string + New string + Existing []string +} + +func (e *ErrIndexExists) Error() string { + return fmt.Sprintf("cannot insert %s in the %s table. item %s has identical indexes. 
index: %s, value: %v", e.New, e.Table, e.Existing, e.Index, e.Value)
+}
+
+func NewIndexExistsError(table string, value interface{}, index string, new string, existing []string) *ErrIndexExists {
+ return &ErrIndexExists{
+ table, value, index, new, existing,
+ }
+}
+
+// map of unique values to uuids
+type valueToUUIDs map[interface{}]uuidset
+
+// map of column name(s) to unique values, to UUIDs
+type columnToValue map[index]valueToUUIDs
+
+// index is the type used to implement multiple cache indexes
+type index string
+
+// indexType is the type of index
+type indexType uint
+
+const (
+ schemaIndexType indexType = iota
+ clientIndexType
+)
+
+// indexSpec contains details about an index
+type indexSpec struct {
+ index index
+ columns []model.ColumnKey
+ indexType indexType
+}
+
+func (s indexSpec) isClientIndex() bool {
+ return s.indexType == clientIndexType
+}
+
+func (s indexSpec) isSchemaIndex() bool {
+ return s.indexType == schemaIndexType
+}
+
+// newIndexFromColumns builds an index from a list of columns
+func newIndexFromColumns(columns ...string) index {
+ sort.Strings(columns)
+ return index(strings.Join(columns, columnDelimiter))
+}
+
+// newIndexFromColumnKeys builds an index from a list of column keys
+func newIndexFromColumnKeys(columnsKeys ...model.ColumnKey) index {
+ // RFC 7047 says that Indexes is a [<column-set>] and "Each <column-set> is a set of
+ // columns whose values, taken together within any given row, must be
+ // unique within the table". We'll store the column names, separated by comma
+ // as we'll assume (RFC is not clear), that comma isn't valid in a <column>
+ columns := make([]string, 0, len(columnsKeys))
+ columnsMap := map[string]struct{}{}
+ for _, columnKey := range columnsKeys {
+ var column string
+ if columnKey.Key != nil {
+ column = fmt.Sprintf("%s%s%v", columnKey.Column, keyDelimiter, columnKey.Key)
+ } else {
+ column = columnKey.Column
+ }
+ if _, found := columnsMap[column]; !found {
+ columns = append(columns, column)
+ columnsMap[column] = struct{}{}
+ }
+ }
+ return newIndexFromColumns(columns...)
+}
+
+// newColumnKeysFromColumns builds a list of column keys from a list of columns
+func newColumnKeysFromColumns(columns ...string) []model.ColumnKey {
+ columnKeys := make([]model.ColumnKey, len(columns))
+ for i, column := range columns {
+ columnKeys[i] = model.ColumnKey{Column: column}
+ }
+ return columnKeys
+}
+
+// RowCache is a collection of Models hashed by UUID
+type RowCache struct {
+ name string
+ dbModel model.DatabaseModel
+ dataType reflect.Type
+ cache map[string]model.Model
+ indexSpecs []indexSpec
+ indexes columnToValue
+ mutex sync.RWMutex
+}
+
+// rowByUUID returns one model from the cache by UUID. Caller must hold the row
+// cache lock.
+func (r *RowCache) rowByUUID(uuid string) model.Model {
+ if row, ok := r.cache[uuid]; ok {
+ return model.Clone(row)
+ }
+ return nil
+}
+
+// Row returns one model from the cache by UUID
+func (r *RowCache) Row(uuid string) model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return r.rowByUUID(uuid)
+}
+
+func (r *RowCache) HasRow(uuid string) bool {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ _, found := r.cache[uuid]
+ return found
+}
+
+// rowsByModels searches the cache to find all rows matching any of the provided
+// models, either by UUID or indexes. An error is returned if the model schema
+// has no UUID field, or if the provided models are not all the same type. 
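+// A model whose `_uuid` field resolves to a cached row is returned directly;
+// the index search only runs when the UUID is empty or not in the cache.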
+func (r *RowCache) rowsByModels(models []model.Model, useClientIndexes bool) (map[string]model.Model, error) { + r.mutex.RLock() + defer r.mutex.RUnlock() + + results := make(map[string]model.Model, len(models)) + for _, m := range models { + if reflect.TypeOf(m) != r.dataType { + return nil, fmt.Errorf("model type %s didn't match expected row type %s", reflect.TypeOf(m), r.dataType) + } + info, _ := r.dbModel.NewModelInfo(m) + field, err := info.FieldByColumn("_uuid") + if err != nil { + return nil, err + } + if uuid := field.(string); uuid != "" { + if _, ok := results[uuid]; !ok { + if row := r.rowByUUID(uuid); row != nil { + results[uuid] = row + continue + } + } + } + + // indexSpecs are ordered, schema indexes go first, then client indexes + for _, indexSpec := range r.indexSpecs { + if indexSpec.isClientIndex() && !useClientIndexes { + // Given the ordered indexSpecs, we can break here if we reach the + // first client index + break + } + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + continue + } + vals := r.indexes[indexSpec.index] + if uuids, ok := vals[val]; ok { + for uuid := range uuids { + if _, ok := results[uuid]; !ok { + results[uuid] = r.rowByUUID(uuid) + } + } + // Break after handling the first found index + // to ensure we preserve index order preference + break + } + } + } + if len(results) == 0 { + return nil, nil + } + return results, nil +} + +// RowByModel searches the cache by UUID and schema indexes. UUID search is +// performed first. Then schema indexes are evaluated in turn by the same order +// with which they are defined in the schema. The model for the first matching +// index is returned along with its UUID. An empty string and nil is returned if +// no Model is found. +func (r *RowCache) RowByModel(m model.Model) (string, model.Model, error) { + models, err := r.rowsByModels([]model.Model{m}, false) + if err != nil { + return "", nil, err + } + for uuid, model := range models { + return uuid, model, nil + } + return "", nil, nil +} + +// RowsByModels searches the cache by UUID, schema indexes and client indexes. +// UUID search is performed first. Schema indexes are evaluated next in turn by +// the same order with which they are defined in the schema. Finally, client +// indexes are evaluated in turn by the same order with which they are defined +// in the client DB model. The models for the first matching index are returned, +// which might be more than 1 if they were found through a client index since in +// that case uniqueness is not enforced. Nil is returned if no Model is found. 
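+//
+// A minimal usage sketch (illustrative: `tableCache` and the `Bridge` model
+// with a schema-indexed "name" column are assumptions, not part of this
+// package):
+//
+//	lookups := []model.Model{&Bridge{Name: "br-int"}, &Bridge{Name: "br-ex"}}
+//	rows, err := tableCache.RowsByModels(lookups)
+//	if err == nil {
+//		for uuid, m := range rows {
+//			fmt.Printf("%s -> %v\n", uuid, m)
+//		}
+//	}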
+func (r *RowCache) RowsByModels(models []model.Model) (map[string]model.Model, error) { + return r.rowsByModels(models, true) +} + +// Create writes the provided content to the cache +func (r *RowCache) Create(uuid string, m model.Model, checkIndexes bool) error { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; ok { + return NewErrCacheInconsistent(fmt.Sprintf("cannot create row %s as it already exists", uuid)) + } + if reflect.TypeOf(m) != r.dataType { + return fmt.Errorf("expected data of type %s, but got %s", r.dataType.String(), reflect.TypeOf(m).String()) + } + info, err := r.dbModel.NewModelInfo(m) + if err != nil { + return err + } + addIndexes := r.newIndexes() + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + val, err := valueFromIndex(info, indexSpec.columns) + if err != nil { + return err + } + + uuidset := newUUIDSet(uuid) + + vals := r.indexes[index] + existing := vals[val] + if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { + return NewIndexExistsError(r.name, val, string(index), uuid, existing.list()) + } + + addIndexes[index][val] = uuidset + } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range addIndexes[index] { + if indexSpec.isSchemaIndex() { + r.indexes[index][k] = v + } else { + r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) + } + } + } + + r.cache[uuid] = model.Clone(m) + return nil +} + +// Update updates the content in the cache and returns the original (pre-update) model +func (r *RowCache) Update(uuid string, m model.Model, checkIndexes bool) (model.Model, error) { + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.cache[uuid]; !ok { + return nil, NewErrCacheInconsistent(fmt.Sprintf("cannot update row %s as it does not exist in the cache", uuid)) + } + oldRow := model.Clone(r.cache[uuid]) + oldInfo, err := r.dbModel.NewModelInfo(oldRow) + if err != nil { + return nil, err + } + newInfo, err := r.dbModel.NewModelInfo(m) + if err != nil { + return nil, err + } + + addIndexes := r.newIndexes() + removeIndexes := r.newIndexes() + var errs []error + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + var err error + oldVal, err := valueFromIndex(oldInfo, indexSpec.columns) + if err != nil { + return nil, err + } + newVal, err := valueFromIndex(newInfo, indexSpec.columns) + if err != nil { + return nil, err + } + + // if old and new values are the same, don't worry + if oldVal == newVal { + continue + } + // old and new values are NOT the same + + uuidset := newUUIDSet(uuid) + + // check that there are no conflicts + vals := r.indexes[index] + existing := vals[newVal] + if checkIndexes && indexSpec.isSchemaIndex() && !existing.empty() && !existing.equals(uuidset) { + errs = append(errs, NewIndexExistsError( + r.name, + newVal, + string(index), + uuid, + existing.list(), + )) + } + + addIndexes[index][newVal] = uuidset + removeIndexes[index][oldVal] = uuidset + } + if len(errs) > 0 { + return nil, fmt.Errorf("%+v", errs) + } + + // write indexes + for _, indexSpec := range r.indexSpecs { + index := indexSpec.index + for k, v := range addIndexes[index] { + if indexSpec.isSchemaIndex() { + r.indexes[index][k] = v + } else { + r.indexes[index][k] = addUUIDSet(r.indexes[index][k], v) + } + } + for k, v := range removeIndexes[index] { + if indexSpec.isSchemaIndex() || substractUUIDSet(r.indexes[index][k], v).empty() { + delete(r.indexes[index], k) + } + } + } + + r.cache[uuid] = 
model.Clone(m)
+ return oldRow, nil
+}
+
+// IndexExists checks if any of the schema indexes of the provided model is
+// already in the cache under a different UUID.
+func (r *RowCache) IndexExists(row model.Model) error {
+ info, err := r.dbModel.NewModelInfo(row)
+ if err != nil {
+ return err
+ }
+ field, err := info.FieldByColumn("_uuid")
+ if err != nil {
+ return nil
+ }
+ uuid := field.(string)
+ for _, indexSpec := range r.indexSpecs {
+ if !indexSpec.isSchemaIndex() {
+ // Given the ordered indexSpecs, we can break here if we reach the
+ // first non-schema index
+ break
+ }
+ index := indexSpec.index
+ val, err := valueFromIndex(info, indexSpec.columns)
+ if err != nil {
+ continue
+ }
+ vals := r.indexes[index]
+ existing := vals[val]
+ if !existing.empty() && !existing.equals(newUUIDSet(uuid)) {
+ return NewIndexExistsError(
+ r.name,
+ val,
+ string(index),
+ uuid,
+ existing.list(),
+ )
+ }
+ }
+ return nil
+}
+
+// Delete deletes a row from the cache
+func (r *RowCache) Delete(uuid string) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.cache[uuid]; !ok {
+ return NewErrCacheInconsistent(fmt.Sprintf("cannot delete row %s as it does not exist in the cache", uuid))
+ }
+ oldRow := r.cache[uuid]
+ oldInfo, err := r.dbModel.NewModelInfo(oldRow)
+ if err != nil {
+ return err
+ }
+
+ removeIndexes := r.newIndexes()
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ oldVal, err := valueFromIndex(oldInfo, indexSpec.columns)
+ if err != nil {
+ return err
+ }
+
+ removeIndexes[index][oldVal] = newUUIDSet(uuid)
+ }
+
+ // write indexes
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ for k, v := range removeIndexes[index] {
+ // only remove the index if it is pointing to this uuid
+ // otherwise we can cause a consistency issue if we've processed
+ // updates out of order
+ if substractUUIDSet(r.indexes[index][k], v).empty() {
+ delete(r.indexes[index], k)
+ }
+ }
+ }
+
+ delete(r.cache, uuid)
+ return nil
+}
+
+// Rows returns a copy of all Rows in the Cache
+func (r *RowCache) Rows() map[string]model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ result := make(map[string]model.Model)
+ for k, v := range r.cache {
+ result[k] = model.Clone(v)
+ }
+ return result
+}
+
+// RowsShallow returns a cloned list of all Rows in the cache, but does not
+// clone the underlying objects. Therefore, the objects returned are READ ONLY.
+// This is, however, thread safe, as the cached objects are cloned before being updated
+// when modifications come in.
+func (r *RowCache) RowsShallow() map[string]model.Model {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+
+ result := make(map[string]model.Model, len(r.cache))
+ for k, v := range r.cache {
+ result[k] = v
+ }
+ return result
+}
+
+// uuidsByConditionsAsIndexes checks possible indexes that can be built with a
+// subset of the provided conditions and returns the uuids for the models that
+// match that subset of conditions. If no conditions could be used as indexes,
+// returns nil. Note that this method does not necessarily match all the
+// provided conditions. Thus the caller is required to evaluate all the
+// conditions against the returned candidates. This is only useful to obtain, as
+// quickly as possible, via indexes, a reduced list of candidate models that might
+// match all conditions, which should be better than just evaluating all
+// conditions against all rows of a table. 
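+//
+// For example, with a schema index on "name", the conditions
+// [name == "br-int", datapath_type == "netdev"] contain the subset {name},
+// which is an index: only the UUIDs stored under that index value become
+// candidates, and the caller then checks the remaining condition against
+// them. (Illustrative column names; any indexed column works the same way.)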
+// +//nolint:gocyclo // warns overall function is complex but ignores inner functions +func (r *RowCache) uuidsByConditionsAsIndexes(conditions []ovsdb.Condition, nativeValues []interface{}) (uuidset, error) { + type indexableCondition struct { + column string + keys []interface{} + nativeValue interface{} + } + + // build an indexable condition, more appropriate for our processing, from + // an ovsdb condition. Only equality based conditions can be used as indexes + // (or `includes` conditions on map values). + toIndexableCondition := func(condition ovsdb.Condition, nativeValue interface{}) *indexableCondition { + if condition.Column == "_uuid" { + return nil + } + if condition.Function != ovsdb.ConditionEqual && condition.Function != ovsdb.ConditionIncludes { + return nil + } + v := reflect.ValueOf(nativeValue) + if !v.IsValid() { + return nil + } + isSet := v.Kind() == reflect.Slice || v.Kind() == reflect.Array + if condition.Function == ovsdb.ConditionIncludes && isSet { + return nil + } + keys := []interface{}{} + if v.Kind() == reflect.Map && condition.Function == ovsdb.ConditionIncludes { + for _, key := range v.MapKeys() { + keys = append(keys, key.Interface()) + } + } + return &indexableCondition{ + column: condition.Column, + keys: keys, + nativeValue: nativeValue, + } + } + + // for any given set of conditions, we need to check if an index uses the + // same fields as the conditions + indexMatchesConditions := func(spec indexSpec, conditions []*indexableCondition) bool { + columnKeys := []model.ColumnKey{} + for _, condition := range conditions { + if len(condition.keys) == 0 { + columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column}) + continue + } + for _, key := range condition.keys { + columnKeys = append(columnKeys, model.ColumnKey{Column: condition.column, Key: key}) + } + } + index := newIndexFromColumnKeys(columnKeys...) + return index == spec.index + } + + // for a specific set of conditions, check if an index can be built from + // them and return the associated UUIDs + evaluateConditionSetAsIndex := func(conditions []*indexableCondition) (uuidset, error) { + // build a model with the values from the conditions + m, err := r.dbModel.NewModel(r.name) + if err != nil { + return nil, err + } + info, err := r.dbModel.NewModelInfo(m) + if err != nil { + return nil, err + } + for _, conditions := range conditions { + err := info.SetField(conditions.column, conditions.nativeValue) + if err != nil { + return nil, err + } + } + for _, spec := range r.indexSpecs { + if !indexMatchesConditions(spec, conditions) { + continue + } + // if we have an index for those conditions, calculate the index + // value. The models mapped to that value match the conditions. 
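+ // valueFromIndex computes the same key that Create and Update used to
+ // populate r.indexes, so a hit in this map is an exact index match.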
+ v, err := valueFromIndex(info, spec.columns) + if err != nil { + return nil, err + } + if v != nil { + uuids := r.indexes[spec.index][v] + if uuids == nil { + // this set of conditions was represented by an index but + // had no matches, return an empty set + uuids = uuidset{} + } + return uuids, nil + } + } + return nil, nil + } + + // set of uuids that match the conditions as we evaluate them + var matching uuidset + + // attempt to evaluate a set of conditions via indexes and intersect the + // results against matches of previous sets + intersectUUIDsFromConditionSet := func(indexableConditions []*indexableCondition) (bool, error) { + uuids, err := evaluateConditionSetAsIndex(indexableConditions) + if err != nil { + return true, err + } + if matching == nil { + matching = uuids + } else if uuids != nil { + matching = intersectUUIDSets(matching, uuids) + } + if matching != nil && len(matching) <= 1 { + // if we had no matches or a single match, no point in continuing + // searching for additional indexes. If we had a single match, it's + // cheaper to just evaluate all conditions on it. + return true, nil + } + return false, nil + } + + // First, filter out conditions that cannot be matched against indexes. With + // the remaining conditions build all possible subsets (the power set of all + // conditions) and for any subset that is an index, intersect the obtained + // uuids with the ones obtained from previous subsets + matchUUIDsFromConditionsPowerSet := func() error { + ps := [][]*indexableCondition{} + // prime the power set with a first empty subset + ps = append(ps, []*indexableCondition{}) + for i, condition := range conditions { + nativeValue := nativeValues[i] + iCondition := toIndexableCondition(condition, nativeValue) + // this is not a condition we can use as an index, skip it + if iCondition == nil { + continue + } + // the power set is built appending the subsets that result from + // adding each item to each of the previous subsets + ss := make([][]*indexableCondition, len(ps)) + for j := range ss { + ss[j] = make([]*indexableCondition, len(ps[j]), len(ps[j])+1) + copy(ss[j], ps[j]) + ss[j] = append(ss[j], iCondition) + // as we add them to the power set, attempt to evaluate this + // subset of conditions as indexes + stop, err := intersectUUIDsFromConditionSet(ss[j]) + if stop || err != nil { + return err + } + } + ps = append(ps, ss...) 
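+ // Illustrative growth: with indexable conditions A and B, ps
+ // evolves as {{}} -> {{}, {A}} -> {{}, {A}, {B}, {A,B}}, so each
+ // subset of conditions is considered as a candidate index exactly
+ // once.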
+ }
+ return nil
+ }
+
+ // finally, attempt to reduce the candidates using indexable condition subsets
+ err := matchUUIDsFromConditionsPowerSet()
+ return matching, err
+}
+
+// RowsByCondition searches models in the cache that match all conditions
+func (r *RowCache) RowsByCondition(conditions []ovsdb.Condition) (map[string]model.Model, error) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ results := make(map[string]model.Model)
+ schema := r.dbModel.Schema.Table(r.name)
+
+ // an empty list of conditions matches all rows
+ if len(conditions) == 0 {
+ for uuid := range r.cache {
+ results[uuid] = r.rowByUUID(uuid)
+ }
+ return results, nil
+ }
+
+ // one pass to obtain the native values
+ nativeValues := make([]interface{}, 0, len(conditions))
+ for _, condition := range conditions {
+ tSchema := schema.Column(condition.Column)
+ nativeValue, err := ovsdb.OvsToNative(tSchema, condition.Value)
+ if err != nil {
+ return nil, err
+ }
+ nativeValues = append(nativeValues, nativeValue)
+ }
+
+ // obtain all possible matches using conditions as indexes
+ matching, err := r.uuidsByConditionsAsIndexes(conditions, nativeValues)
+ if err != nil {
+ return nil, err
+ }
+
+ // From the matches obtained with indexes, which might not have used all
+ // conditions, continue trimming down the list by explicitly evaluating the
+ // conditions.
+ for i, condition := range conditions {
+ matchingCondition := uuidset{}
+
+ if condition.Column == "_uuid" && (condition.Function == ovsdb.ConditionEqual || condition.Function == ovsdb.ConditionIncludes) {
+ uuid, ok := nativeValues[i].(string)
+ if !ok {
+ panic(fmt.Sprintf("%+v is not a uuid", nativeValues[i]))
+ }
+ if _, found := r.cache[uuid]; found {
+ matchingCondition.add(uuid)
+ }
+ } else {
+ matchCondition := func(uuid string) error {
+ row := r.cache[uuid]
+ info, err := r.dbModel.NewModelInfo(row)
+ if err != nil {
+ return err
+ }
+ value, err := info.FieldByColumn(condition.Column)
+ if err != nil {
+ return err
+ }
+ ok, err := condition.Function.Evaluate(value, nativeValues[i])
+ if err != nil {
+ return err
+ }
+ if ok {
+ matchingCondition.add(uuid)
+ }
+ return nil
+ }
+ if matching != nil {
+ // we just need to consider rows that matched previous
+ // conditions
+ for uuid := range matching {
+ err = matchCondition(uuid)
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ // If this is the first condition we are able to check, run it
+ // against the whole table
+ for uuid := range r.cache {
+ err = matchCondition(uuid)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+ if matching == nil {
+ matching = matchingCondition
+ } else {
+ matching = intersectUUIDSets(matching, matchingCondition)
+ }
+ if matching.empty() {
+ // no models match the conditions checked up to now, no need to
+ // check remaining conditions
+ break
+ }
+ }
+
+ for uuid := range matching {
+ results[uuid] = r.rowByUUID(uuid)
+ }
+
+ return results, nil
+}
+
+// Len returns the length of the cache
+func (r *RowCache) Len() int {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ return len(r.cache)
+}
+
+func (r *RowCache) Index(columns ...string) (map[interface{}][]string, error) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ spec := newIndexFromColumns(columns...)
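+ // (Illustrative note: newIndexFromColumns normalizes the requested
+ // columns into the same key format used by indexSpecs, so e.g.
+ // Index("name"), for a hypothetical indexed "name" column, looks up the
+ // map backing that index.)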
+ index, ok := r.indexes[spec]
+ if !ok {
+ return nil, fmt.Errorf("%v is not an index", columns)
+ }
+ dbIndex := make(map[interface{}][]string, len(index))
+ for k, v := range index {
+ dbIndex[k] = v.list()
+ }
+ return dbIndex, nil
+}
+
+// EventHandler can handle events when the contents of the cache change
+type EventHandler interface {
+ OnAdd(table string, model model.Model)
+ OnUpdate(table string, old model.Model, new model.Model)
+ OnDelete(table string, model model.Model)
+}
+
+// EventHandlerFuncs is a wrapper for the EventHandler interface
+// It allows a caller to only implement the functions they need
+type EventHandlerFuncs struct {
+ AddFunc func(table string, model model.Model)
+ UpdateFunc func(table string, old model.Model, new model.Model)
+ DeleteFunc func(table string, model model.Model)
+}
+
+// OnAdd calls AddFunc if it is not nil
+func (e *EventHandlerFuncs) OnAdd(table string, model model.Model) {
+ if e.AddFunc != nil {
+ e.AddFunc(table, model)
+ }
+}
+
+// OnUpdate calls UpdateFunc if it is not nil
+func (e *EventHandlerFuncs) OnUpdate(table string, old, new model.Model) {
+ if e.UpdateFunc != nil {
+ e.UpdateFunc(table, old, new)
+ }
+}
+
+// OnDelete calls DeleteFunc if it is not nil
+func (e *EventHandlerFuncs) OnDelete(table string, row model.Model) {
+ if e.DeleteFunc != nil {
+ e.DeleteFunc(table, row)
+ }
+}
+
+// TableCache contains a collection of RowCaches, hashed by name,
+// and an array of EventHandlers that respond to cache updates
+// It implements the ovsdb.NotificationHandler interface so it may
+// handle update notifications
+type TableCache struct {
+ cache map[string]*RowCache
+ eventProcessor *eventProcessor
+ dbModel model.DatabaseModel
+ ovsdb.NotificationHandler
+ mutex sync.RWMutex
+ logger *logr.Logger
+}
+
+// Data is the type for data that can be prepopulated in the cache
+type Data map[string]map[string]model.Model
+
+// NewTableCache creates a new TableCache
+func NewTableCache(dbModel model.DatabaseModel, data Data, logger *logr.Logger) (*TableCache, error) {
+ if !dbModel.Valid() {
+ return nil, fmt.Errorf("tablecache without valid databasemodel cannot be populated")
+ }
+ if logger == nil {
+ l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("cache")
+ logger = &l
+ } else {
+ l := logger.WithName("cache")
+ logger = &l
+ }
+ eventProcessor := newEventProcessor(bufferSize, logger)
+ cache := make(map[string]*RowCache)
+ tableTypes := dbModel.Types()
+ for name := range dbModel.Schema.Tables {
+ cache[name] = newRowCache(name, dbModel, tableTypes[name])
+ }
+ for table, rowData := range data {
+ if _, ok := dbModel.Schema.Tables[table]; !ok {
+ return nil, fmt.Errorf("table %s is not in schema", table)
+ }
+ rowCache := cache[table]
+ for uuid, row := range rowData {
+ if err := rowCache.Create(uuid, row, true); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &TableCache{
+ cache: cache,
+ eventProcessor: eventProcessor,
+ dbModel: dbModel,
+ mutex: sync.RWMutex{},
+ logger: logger,
+ }, nil
+}
+
+// Mapper returns the mapper
+func (t *TableCache) Mapper() mapper.Mapper {
+ return t.dbModel.Mapper
+}
+
+// DatabaseModel returns the DatabaseModel
+func (t *TableCache) DatabaseModel() model.DatabaseModel {
+ return t.dbModel
+}
+
+// Table returns the Table from the cache with the given name
+func (t *TableCache) Table(name string) *RowCache {
+ t.mutex.RLock()
+ defer t.mutex.RUnlock()
+ if table, ok := t.cache[name]; ok {
+ return table
+ }
+ return nil
+}
+
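+// Illustrative sketch (not part of this change) of wiring an event handler,
+// assuming tc is a *TableCache created with NewTableCache:
+//
+//    tc.AddEventHandler(&EventHandlerFuncs{
+//        AddFunc: func(table string, m model.Model) {
+//            log.Printf("row added to %s: %+v", table, m)
+//        },
+//    })
+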
+// Tables returns a list of table names that are in the cache +func (t *TableCache) Tables() []string { + t.mutex.RLock() + defer t.mutex.RUnlock() + var result []string + for k := range t.cache { + result = append(result, k) + } + return result +} + +// Update implements the update method of the NotificationHandler interface +// this populates a channel with updates so they can be processed after the initial +// state has been Populated +func (t *TableCache) Update(context interface{}, tableUpdates ovsdb.TableUpdates) error { + if len(tableUpdates) == 0 { + return nil + } + if err := t.Populate(tableUpdates); err != nil { + t.logger.Error(err, "during libovsdb cache populate") + return err + } + return nil +} + +// Update2 implements the update method of the NotificationHandler interface +// this populates a channel with updates so they can be processed after the initial +// state has been Populated +func (t *TableCache) Update2(context interface{}, tableUpdates ovsdb.TableUpdates2) error { + if len(tableUpdates) == 0 { + return nil + } + if err := t.Populate2(tableUpdates); err != nil { + t.logger.Error(err, "during libovsdb cache populate2") + return err + } + return nil +} + +// Locked implements the locked method of the NotificationHandler interface +func (t *TableCache) Locked([]interface{}) { +} + +// Stolen implements the stolen method of the NotificationHandler interface +func (t *TableCache) Stolen([]interface{}) { +} + +// Echo implements the echo method of the NotificationHandler interface +func (t *TableCache) Echo([]interface{}) { +} + +// Disconnected implements the disconnected method of the NotificationHandler interface +func (t *TableCache) Disconnected() { +} + +// Populate adds data to the cache and places an event on the channel +func (t *TableCache) Populate(tableUpdates ovsdb.TableUpdates) error { + t.mutex.Lock() + defer t.mutex.Unlock() + + for table := range t.dbModel.Types() { + tu, ok := tableUpdates[table] + if !ok { + continue + } + tCache := t.cache[table] + for uuid, row := range tu { + t.logger.V(5).Info("processing update", "table", table, "uuid", uuid) + update := updates.ModelUpdates{} + current := tCache.cache[uuid] + err := update.AddRowUpdate(t.dbModel, table, uuid, current, *row) + if err != nil { + return err + } + err = t.ApplyCacheUpdate(update) + if err != nil { + return err + } + } + } + return nil +} + +// Populate2 adds data to the cache and places an event on the channel +func (t *TableCache) Populate2(tableUpdates ovsdb.TableUpdates2) error { + t.mutex.Lock() + defer t.mutex.Unlock() + for table := range t.dbModel.Types() { + tu, ok := tableUpdates[table] + if !ok { + continue + } + tCache := t.cache[table] + for uuid, row := range tu { + t.logger.V(5).Info("processing update", "table", table, "uuid", uuid) + update := updates.ModelUpdates{} + current := tCache.cache[uuid] + if row.Initial == nil && row.Insert == nil && current == nil { + return NewErrCacheInconsistent(fmt.Sprintf("row with uuid %s does not exist", uuid)) + } + err := update.AddRowUpdate2(t.dbModel, table, uuid, current, *row) + if err != nil { + return err + } + err = t.ApplyCacheUpdate(update) + if err != nil { + return err + } + } + } + return nil +} + +// Purge drops all data in the cache and reinitializes it using the +// provided database model +func (t *TableCache) Purge(dbModel model.DatabaseModel) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.dbModel = dbModel + tableTypes := t.dbModel.Types() + for name := range t.dbModel.Schema.Tables { + t.cache[name] = 
newRowCache(name, t.dbModel, tableTypes[name])
+ }
+}
+
+// AddEventHandler registers the supplied EventHandler to receive cache events
+func (t *TableCache) AddEventHandler(handler EventHandler) {
+ t.eventProcessor.AddEventHandler(handler)
+}
+
+// Run starts the event processing and update processing loops.
+// It blocks until the stop channel is closed.
+// Once closed, it clears the updates/updates2 channels to ensure we don't process stale updates on a new connection
+func (t *TableCache) Run(stopCh <-chan struct{}) {
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ t.eventProcessor.Run(stopCh)
+ }()
+ wg.Wait()
+}
+
+// newRowCache creates a new, empty RowCache for the given table and
+// initializes its schema and client index specs
+func newRowCache(name string, dbModel model.DatabaseModel, dataType reflect.Type) *RowCache {
+ schemaIndexes := dbModel.Schema.Table(name).Indexes
+ clientIndexes := dbModel.Client().Indexes(name)
+
+ r := &RowCache{
+ name: name,
+ dbModel: dbModel,
+ indexSpecs: make([]indexSpec, 0, len(schemaIndexes)+len(clientIndexes)),
+ dataType: dataType,
+ cache: make(map[string]model.Model),
+ mutex: sync.RWMutex{},
+ }
+
+ // respect the order of indexes: add schema indexes first, then client
+ // indexes
+ indexes := map[index]indexSpec{}
+ for _, columns := range schemaIndexes {
+ columnKeys := newColumnKeysFromColumns(columns...)
+ index := newIndexFromColumnKeys(columnKeys...)
+ spec := indexSpec{index: index, columns: columnKeys, indexType: schemaIndexType}
+ r.indexSpecs = append(r.indexSpecs, spec)
+ indexes[index] = spec
+ }
+ for _, clientIndex := range clientIndexes {
+ columnKeys := clientIndex.Columns
+ index := newIndexFromColumnKeys(columnKeys...)
+ // if this is already a DB index, ignore
+ if _, ok := indexes[index]; ok {
+ continue
+ }
+ spec := indexSpec{index: index, columns: columnKeys, indexType: clientIndexType}
+ r.indexSpecs = append(r.indexSpecs, spec)
+ indexes[index] = spec
+ }
+
+ r.indexes = r.newIndexes()
+ return r
+}
+
+func (r *RowCache) newIndexes() columnToValue {
+ c := make(columnToValue)
+ for _, indexSpec := range r.indexSpecs {
+ index := indexSpec.index
+ c[index] = make(valueToUUIDs)
+ }
+ return c
+}
+
+// event encapsulates a cache event
+type event struct {
+ eventType string
+ table string
+ old model.Model
+ new model.Model
+}
+
+// eventProcessor handles the queueing and processing of cache events
+type eventProcessor struct {
+ events chan *event
+ // handlersMutex locks the handlers array when we add a handler or dispatch events
+ // we don't need a RWMutex in this case as we only have one thread reading and the write
+ // volume is very low (i.e. only when AddEventHandler is called)
+ handlersMutex sync.Mutex
+ handlers []EventHandler
+ logger *logr.Logger
+}
+
+func newEventProcessor(capacity int, logger *logr.Logger) *eventProcessor {
+ return &eventProcessor{
+ events: make(chan *event, capacity),
+ handlers: []EventHandler{},
+ logger: logger,
+ }
+}
+
+// AddEventHandler registers the supplied EventHandler with the eventProcessor
+// EventHandlers MUST process events quickly, for example, pushing them to a queue
+// to be processed by the client.
Long-running handler functions adversely affect
+// other handlers and MAY cause loss of data if the channel buffer is full
+func (e *eventProcessor) AddEventHandler(handler EventHandler) {
+ e.handlersMutex.Lock()
+ defer e.handlersMutex.Unlock()
+ e.handlers = append(e.handlers, handler)
+}
+
+// AddEvent writes an event to the channel
+func (e *eventProcessor) AddEvent(eventType string, table string, old model.Model, new model.Model) {
+ // We don't need to check for error here since there
+ // is only a single writer. RPC is run in blocking mode
+ event := event{
+ eventType: eventType,
+ table: table,
+ old: old,
+ new: new,
+ }
+ select {
+ case e.events <- &event:
+ // noop
+ return
+ default:
+ e.logger.V(0).Info("dropping event because event buffer is full")
+ }
+}
+
+// Run runs the eventProcessor loop.
+// It will block until the stopCh has been closed
+// Otherwise it will wait for events to arrive on the event channel
+// Once received, it will dispatch the event to each registered handler
+func (e *eventProcessor) Run(stopCh <-chan struct{}) {
+ for {
+ select {
+ case <-stopCh:
+ return
+ case event := <-e.events:
+ e.handlersMutex.Lock()
+ for _, handler := range e.handlers {
+ switch event.eventType {
+ case addEvent:
+ handler.OnAdd(event.table, event.new)
+ case updateEvent:
+ handler.OnUpdate(event.table, event.old, event.new)
+ case deleteEvent:
+ handler.OnDelete(event.table, event.old)
+ }
+ }
+ e.handlersMutex.Unlock()
+ }
+ }
+}
+
+type cacheUpdate interface {
+ GetUpdatedTables() []string
+ ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error
+}
+
+func (t *TableCache) ApplyCacheUpdate(update cacheUpdate) error {
+ tables := update.GetUpdatedTables()
+ for _, table := range tables {
+ tCache := t.cache[table]
+ err := update.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
+ switch {
+ case old == nil && new != nil:
+ t.logger.V(5).Info("inserting model", "table", table, "uuid", uuid, "model", new)
+ err := tCache.Create(uuid, new, false)
+ if err != nil {
+ return err
+ }
+ t.eventProcessor.AddEvent(addEvent, table, nil, new)
+ case old != nil && new != nil:
+ t.logger.V(5).Info("updating model", "table", table, "uuid", uuid, "old", old, "new", new)
+ _, err := tCache.Update(uuid, new, false)
+ if err != nil {
+ return err
+ }
+ t.eventProcessor.AddEvent(updateEvent, table, old, new)
+ case new == nil:
+ t.logger.V(5).Info("deleting model", "table", table, "uuid", uuid, "model", old)
+ err := tCache.Delete(uuid)
+ if err != nil {
+ return err
+ }
+ t.eventProcessor.AddEvent(deleteEvent, table, old, nil)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func valueFromIndex(info *mapper.Info, columnKeys []model.ColumnKey) (interface{}, error) {
+ if len(columnKeys) > 1 {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ for _, columnKey := range columnKeys {
+ val, err := valueFromColumnKey(info, columnKey)
+ if err != nil {
+ return "", err
+ }
+ // if object is nil don't try to encode it
+ value := reflect.ValueOf(val)
+ if value.Kind() == reflect.Invalid {
+ continue
+ }
+ // if object is a nil pointer don't try to encode it
+ if value.Kind() == reflect.Pointer && value.IsNil() {
+ continue
+ }
+ err = enc.Encode(val)
+ if err != nil {
+ return "", err
+ }
+ }
+ h := sha256.New()
+ val := hex.EncodeToString(h.Sum(buf.Bytes()))
+ return val, nil
+ }
+ val, err := valueFromColumnKey(info, columnKeys[0])
+ if err != nil {
+ return "", err
+ }
+ return val, err
+}
+
+func
valueFromColumnKey(info *mapper.Info, columnKey model.ColumnKey) (interface{}, error) {
+ val, err := info.FieldByColumn(columnKey.Column)
+ if err != nil {
+ return nil, err
+ }
+ if columnKey.Key != nil {
+ val, err = valueFromMap(val, columnKey.Key)
+ if err != nil {
+ return "", fmt.Errorf("can't get key value from map: %v", err)
+ }
+ }
+ // if the value is a non-nil pointer of an optional, dereference
+ v := reflect.ValueOf(val)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ val = v.Elem().Interface()
+ }
+ return val, err
+}
+
+func valueFromMap(aMap interface{}, key interface{}) (interface{}, error) {
+ m := reflect.ValueOf(aMap)
+ if m.Kind() != reflect.Map {
+ return nil, fmt.Errorf("expected map but got %s", m.Kind())
+ }
+ v := m.MapIndex(reflect.ValueOf(key))
+ if !v.IsValid() {
+ // return the zero value for the map value type
+ return reflect.Indirect(reflect.New(m.Type().Elem())).Interface(), nil
+ }
+
+ return v.Interface(), nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/doc.go b/vendor/github.com/ovn-org/libovsdb/cache/doc.go
new file mode 100644
index 000000000..3b176f277
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/doc.go
@@ -0,0 +1,16 @@
+/*
+Package cache provides a cache of model.Model elements that can be used in an OVSDB client or server.
+
+The cache can be accessed using a simple API:
+
+ cache.Table("Open_vSwitch").Row("<uuid>")
+
+It implements the ovsdb.NotificationHandler interface
+such that it can be populated automatically by
+update notifications
+
+It also contains an eventProcessor where callers
+may register functions that will get called on
+every Add/Update/Delete event.
+*/
+package cache
diff --git a/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go
new file mode 100644
index 000000000..f7c139737
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go
@@ -0,0 +1,101 @@
+package cache
+
+type void struct{}
+type uuidset map[string]void
+
+func newUUIDSet(uuids ...string) uuidset {
+ s := uuidset{}
+ for _, uuid := range uuids {
+ s[uuid] = void{}
+ }
+ return s
+}
+
+func (s uuidset) add(uuid string) {
+ s[uuid] = void{}
+}
+
+func (s uuidset) remove(uuid string) {
+ delete(s, uuid)
+}
+
+func (s uuidset) has(uuid string) bool {
+ _, ok := s[uuid]
+ return ok
+}
+
+func (s uuidset) equals(o uuidset) bool {
+ if len(s) != len(o) {
+ return false
+ }
+ for uuid := range s {
+ if !o.has(uuid) {
+ return false
+ }
+ }
+ return true
+}
+
+func (s uuidset) getAny() string {
+ for k := range s {
+ return k
+ }
+ return ""
+}
+
+func (s uuidset) list() []string {
+ uuids := make([]string, 0, len(s))
+ for uuid := range s {
+ uuids = append(uuids, uuid)
+ }
+ return uuids
+}
+
+func (s uuidset) empty() bool {
+ return len(s) == 0
+}
+
+func addUUIDSet(s1, s2 uuidset) uuidset {
+ if len(s2) == 0 {
+ return s1
+ }
+ if s1 == nil {
+ s1 = uuidset{}
+ }
+ for uuid := range s2 {
+ s1.add(uuid)
+ }
+ return s1
+}
+
+func substractUUIDSet(s1, s2 uuidset) uuidset {
+ if len(s1) == 0 || len(s2) == 0 {
+ return s1
+ }
+ for uuid := range s2 {
+ s1.remove(uuid)
+ }
+ return s1
+}
+
+func intersectUUIDSets(s1, s2 uuidset) uuidset {
+ if len(s1) == 0 || len(s2) == 0 {
+ return nil
+ }
+ var big uuidset
+ var small uuidset
+ if len(s1) > len(s2) {
+ big = s1
+ small = s2
+ } else {
+ big = s2
+ small = s1
+ }
+ f := uuidset{}
+ for uuid := range small {
+ if big.has(uuid) {
+ f.add(uuid)
+ }
+ }
+ return f
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/client/api.go
b/vendor/github.com/ovn-org/libovsdb/client/api.go
new file mode 100644
index 000000000..497758944
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/client/api.go
@@ -0,0 +1,593 @@
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/go-logr/logr"
+ "github.com/ovn-org/libovsdb/cache"
+ "github.com/ovn-org/libovsdb/model"
+ "github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// API defines basic operations to interact with the database
+type API interface {
+ // List populates a slice of Models objects based on their type
+ // The function parameter must be a pointer to a slice of Models
+ // Models can be structs or pointers to structs
+ // If the slice is nil, the entire cache will be copied into the slice
+ // If it has a capacity != 0, only 'capacity' elements will be filled in
+ List(ctx context.Context, result interface{}) error
+
+ // Create a Conditional API from a Function that is used to filter cached data
+ // The function must accept a Model implementation and return a boolean. E.g:
+ // ConditionFromFunc(func(l *LogicalSwitch) bool { return l.Enabled })
+ WhereCache(predicate interface{}) ConditionalAPI
+
+ // Create a ConditionalAPI from a Model's index data, where operations
+ // apply to elements that match the values provided in one or more
+ // model.Models according to the indexes. All provided Models must be
+ // the same type or an error will be generated when operations are
+ // performed on the ConditionalAPI.
+ Where(...model.Model) ConditionalAPI
+
+ // WhereAny creates a ConditionalAPI from a list of Conditions where
+ // operations apply to elements that match any (e.g., logical OR) of the
+ // conditions.
+ WhereAny(model.Model, ...model.Condition) ConditionalAPI
+
+ // WhereAll creates a ConditionalAPI from a list of Conditions where
+ // operations apply to elements that match all (e.g., logical AND) of the
+ // conditions.
+ WhereAll(model.Model, ...model.Condition) ConditionalAPI
+
+ // Get retrieves a model from the cache
+ // The way the object will be fetched depends on the data contained in the
+ // provided model and the indexes defined in the associated schema
+ // For more complex ways of searching for elements in the cache, the
+ // preferred way is Where({condition}).List()
+ Get(context.Context, model.Model) error
+
+ // Create returns the operation needed to add the model(s) to the Database
+ // Only fields with non-default values will be added to the transaction. If
+ // the field associated with column "_uuid" has some content other than a
+ // UUID, it will be treated as named-uuid
+ Create(...model.Model) ([]ovsdb.Operation, error)
+}
+
+// ConditionalAPI is an interface used to perform operations that require / use Conditions
+type ConditionalAPI interface {
+ // List uses the condition to search on the cache and populates
+ // the slice of Models objects based on their type
+ List(ctx context.Context, result interface{}) error
+
+ // Mutate returns the operations needed to perform the mutation specified
+ // by the model and the list of Mutation objects
+ // Depending on the Condition, it might return one or many operations
+ Mutate(model.Model, ...model.Mutation) ([]ovsdb.Operation, error)
+
+ // Update returns the operations needed to update any number of rows according
+ // to the data in the given model.
+ // By default, all the non-default values contained in model will be updated.
+ // Optional fields can be passed (pointer to fields in the model) to select
+ // the fields to be updated
+ Update(model.Model, ...interface{}) ([]ovsdb.Operation, error)
+
+ // Delete returns the Operations needed to delete the models selected via the condition
+ Delete() ([]ovsdb.Operation, error)
+
+ // Wait returns the operations needed to perform the wait specified
+ // by the until condition, timeout, row and columns based on provided parameters.
+ Wait(ovsdb.WaitCondition, *int, model.Model, ...interface{}) ([]ovsdb.Operation, error)
+}
+
+// ErrWrongType is used to report that the user provided parameter has the wrong type
+type ErrWrongType struct {
+ inputType reflect.Type
+ reason string
+}
+
+func (e *ErrWrongType) Error() string {
+ return fmt.Sprintf("Wrong parameter type (%s): %s", e.inputType, e.reason)
+}
+
+// ErrNotFound is used to inform that the object or table was not found in the cache
+var ErrNotFound = errors.New("object not found")
+
+// api struct implements both API and ConditionalAPI.
+// Where() can be used to create a ConditionalAPI from it
+type api struct {
+ cache *cache.TableCache
+ cond Conditional
+ logger *logr.Logger
+}
+
+// List populates a slice of Models given as parameter based on the configured Condition
+func (a api) List(ctx context.Context, result interface{}) error {
+ resultPtr := reflect.ValueOf(result)
+ if resultPtr.Type().Kind() != reflect.Ptr {
+ return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"}
+ }
+
+ resultVal := reflect.Indirect(resultPtr)
+ if resultVal.Type().Kind() != reflect.Slice {
+ return &ErrWrongType{resultPtr.Type(), "Expected pointer to slice of valid Models"}
+ }
+
+ // List accepts a slice of Models that can be either structs or pointers to
+ // structs
+ var appendValue func(reflect.Value)
+ var m model.Model
+ if resultVal.Type().Elem().Kind() == reflect.Ptr {
+ m = reflect.New(resultVal.Type().Elem().Elem()).Interface()
+ appendValue = func(v reflect.Value) {
+ resultVal.Set(reflect.Append(resultVal, v))
+ }
+ } else {
+ m = reflect.New(resultVal.Type().Elem()).Interface()
+ appendValue = func(v reflect.Value) {
+ resultVal.Set(reflect.Append(resultVal, reflect.Indirect(v)))
+ }
+ }
+
+ table, err := a.getTableFromModel(m)
+ if err != nil {
+ return err
+ }
+
+ if a.cond != nil && a.cond.Table() != table {
+ return &ErrWrongType{resultPtr.Type(),
+ fmt.Sprintf("Table derived from input type (%s) does not match Table from Condition (%s)", table, a.cond.Table())}
+ }
+
+ tableCache := a.cache.Table(table)
+ if tableCache == nil {
+ return ErrNotFound
+ }
+
+ var rows map[string]model.Model
+ if a.cond != nil {
+ rows, err = a.cond.Matches()
+ if err != nil {
+ return err
+ }
+ } else {
+ rows = tableCache.Rows()
+ }
+ // If given a nil slice, fill it with the whole cache table; if not, just up to
+ // its capacity.
+ if resultVal.IsNil() || resultVal.Cap() == 0 {
+ resultVal.Set(reflect.MakeSlice(resultVal.Type(), 0, len(rows)))
+ }
+ i := resultVal.Len()
+ maxCap := resultVal.Cap()
+
+ for _, row := range rows {
+ if i >= maxCap {
+ break
+ }
+ appendValue(reflect.ValueOf(row))
+ i++
+ }
+
+ return nil
+}
+
+// Where returns a conditionalAPI based on model indexes. All provided models
+// must be the same type.
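+//
+// Illustrative sketch (LogicalSwitch is a hypothetical model type with a
+// schema index on "name", and client is a connected Client):
+//
+//    ls := &LogicalSwitch{Name: "sw0"}
+//    ops, err := client.Where(ls).Delete()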
+func (a api) Where(models ...model.Model) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromModels(models), a.logger)
+}
+
+// WhereAny returns a conditionalAPI based on a Condition list that matches any
+// of the conditions individually
+func (a api) WhereAny(m model.Model, cond ...model.Condition) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(false, m, cond...), a.logger)
+}
+
+// WhereAll returns a conditionalAPI based on a Condition list that matches all
+// of the conditions together
+func (a api) WhereAll(m model.Model, cond ...model.Condition) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromExplicitConditions(true, m, cond...), a.logger)
+}
+
+// WhereCache returns a conditionalAPI based on a Predicate
+func (a api) WhereCache(predicate interface{}) ConditionalAPI {
+ return newConditionalAPI(a.cache, a.conditionFromFunc(predicate), a.logger)
+}
+
+// Conditional interface implementation
+// conditionFromFunc returns a Conditional from a predicate function
+func (a api) conditionFromFunc(predicate interface{}) Conditional {
+ table, err := a.getTableFromFunc(predicate)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+
+ condition, err := newPredicateConditional(table, a.cache, predicate)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+ return condition
+}
+
+// conditionFromModels returns a Conditional from one or more models.
+func (a api) conditionFromModels(models []model.Model) Conditional {
+ if len(models) == 0 {
+ return newErrorConditional(fmt.Errorf("at least one model required"))
+ }
+ tableName, err := a.getTableFromModel(models[0])
+ if tableName == "" {
+ return newErrorConditional(err)
+ }
+ conditional, err := newEqualityConditional(tableName, a.cache, models)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+ return conditional
+}
+
+// conditionFromExplicitConditions returns a Conditional from a model and a set
+// of explicit conditions. If matchAll is true, then models that match all the given
+// conditions are selected by the Conditional. If matchAll is false, then any model
+// that matches one of the conditions is selected.
+func (a api) conditionFromExplicitConditions(matchAll bool, m model.Model, cond ...model.Condition) Conditional {
+ if len(cond) == 0 {
+ return newErrorConditional(fmt.Errorf("at least one condition is required"))
+ }
+ tableName, err := a.getTableFromModel(m)
+ if tableName == "" {
+ return newErrorConditional(err)
+ }
+ conditional, err := newExplicitConditional(tableName, a.cache, matchAll, m, cond...)
+ if err != nil {
+ return newErrorConditional(err)
+ }
+ return conditional
+}
+
+// Get is a generic Get function capable of returning (through a provided pointer)
+// an instance of any row in the cache.
+// 'result' must be a pointer to a Model that exists in the ClientDBModel
+//
+// The way the cache is searched depends on the fields already populated in 'result'
+// Any table index (including _uuid) will be used for comparison
+func (a api) Get(ctx context.Context, m model.Model) error {
+ table, err := a.getTableFromModel(m)
+ if err != nil {
+ return err
+ }
+
+ tableCache := a.cache.Table(table)
+ if tableCache == nil {
+ return ErrNotFound
+ }
+
+ _, found, err := tableCache.RowByModel(m)
+ if err != nil {
+ return err
+ } else if found == nil {
+ return ErrNotFound
+ }
+
+ model.CloneInto(found, m)
+
+ return nil
+}
+
+// Create is a generic function capable of creating any row in the DB
+// A valid Model (pointer to object) must be provided.
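+//
+// Illustrative sketch (LogicalSwitch is a hypothetical model type): a
+// non-UUID value in the _uuid field acts as a named-uuid, letting later
+// operations in the same transaction refer to the inserted row:
+//
+//    ls := &LogicalSwitch{UUID: "mySwitch", Name: "sw0"}
+//    ops, err := client.Create(ls)
+//    // ops[0] is an insert operation with UUIDName set to "mySwitch".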
+func (a api) Create(models ...model.Model) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + + for _, model := range models { + var realUUID, namedUUID string + var err error + + tableName, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + + // Read _uuid field, and use it as named-uuid + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + if uuid, err := info.FieldByColumn("_uuid"); err == nil { + tmpUUID := uuid.(string) + if ovsdb.IsNamedUUID(tmpUUID) { + namedUUID = tmpUUID + } else if ovsdb.IsValidUUID(tmpUUID) { + realUUID = tmpUUID + } + } else { + return nil, err + } + + row, err := a.cache.Mapper().NewRow(info) + if err != nil { + return nil, err + } + // UUID is given in the operation, not the object + delete(row, "_uuid") + + operations = append(operations, ovsdb.Operation{ + Op: ovsdb.OperationInsert, + Table: tableName, + Row: row, + UUID: realUUID, + UUIDName: namedUUID, + }) + } + return operations, nil +} + +// Mutate returns the operations needed to transform the one Model into another one +func (a api) Mutate(model model.Model, mutationObjs ...model.Mutation) ([]ovsdb.Operation, error) { + var mutations []ovsdb.Mutation + var operations []ovsdb.Operation + + if len(mutationObjs) < 1 { + return nil, fmt.Errorf("at least one Mutation must be provided") + } + + tableName := a.cache.DatabaseModel().FindTable(reflect.ValueOf(model).Type()) + if tableName == "" { + return nil, fmt.Errorf("table not found for object") + } + table := a.cache.Mapper().Schema.Table(tableName) + if table == nil { + return nil, fmt.Errorf("schema error: table not found in Database Model for type %s", reflect.TypeOf(model)) + } + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + for _, mobj := range mutationObjs { + col, err := info.ColumnByPtr(mobj.Field) + if err != nil { + return nil, err + } + + mutation, err := a.cache.Mapper().NewMutation(info, col, mobj.Mutator, mobj.Value) + if err != nil { + return nil, err + } + mutations = append(mutations, *mutation) + } + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationMutate, + Table: tableName, + Mutations: mutations, + Where: condition, + }, + ) + } + + return operations, nil +} + +// Update is a generic function capable of updating any mutable field in any row in the database +// Additional fields can be passed (variadic opts) to indicate fields to be updated +// All immutable fields will be ignored +func (a api) Update(model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + table, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + tableSchema := a.cache.Mapper().Schema.Table(table) + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + if len(fields) > 0 { + for _, f := range fields { + colName, err := info.ColumnByPtr(f) + if err != nil { + return nil, err + } + if !tableSchema.Columns[colName].Mutable() { + return nil, fmt.Errorf("unable to update field %s of table %s as it is not mutable", colName, table) + } + } + } + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + row, err := a.cache.Mapper().NewRow(info, fields...) 
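+ // (Note: the row built here contains the requested fields, or all
+ // non-default fields when none were given; immutable columns and _uuid
+ // are stripped below before the update operations are generated.)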
+ if err != nil { + return nil, err + } + + for colName, column := range tableSchema.Columns { + if !column.Mutable() { + a.logger.V(2).Info("removing immutable field", "name", colName) + delete(row, colName) + } + } + delete(row, "_uuid") + + if len(row) == 0 { + return nil, fmt.Errorf("attempted to update using an empty row. please check that all fields you wish to update are mutable") + } + + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationUpdate, + Table: table, + Row: row, + Where: condition, + }, + ) + } + return operations, nil +} + +// Delete returns the Operation needed to delete the selected models from the database +func (a api) Delete() ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + for _, condition := range conditions { + operations = append(operations, + ovsdb.Operation{ + Op: ovsdb.OperationDelete, + Table: a.cond.Table(), + Where: condition, + }, + ) + } + + return operations, nil +} + +func (a api) Wait(untilConFun ovsdb.WaitCondition, timeout *int, model model.Model, fields ...interface{}) ([]ovsdb.Operation, error) { + var operations []ovsdb.Operation + + /* + Ref: https://datatracker.ietf.org/doc/html/rfc7047.txt#section-5.2.6 + + lb := &nbdb.LoadBalancer{} + condition := model.Condition{ + Field: &lb.Name, + Function: ovsdb.ConditionEqual, + Value: "lbName", + } + timeout0 := 0 + client.Where(lb, condition).Wait( + ovsdb.WaitConditionNotEqual, // Until + &timeout0, // Timeout + &lb, // Row (and Table) + &lb.Name, // Cols (aka fields) + ) + */ + + conditions, err := a.cond.Generate() + if err != nil { + return nil, err + } + + table, err := a.getTableFromModel(model) + if err != nil { + return nil, err + } + + info, err := a.cache.DatabaseModel().NewModelInfo(model) + if err != nil { + return nil, err + } + + var columnNames []string + if len(fields) > 0 { + columnNames = make([]string, 0, len(fields)) + for _, f := range fields { + colName, err := info.ColumnByPtr(f) + if err != nil { + return nil, err + } + columnNames = append(columnNames, colName) + } + } + + row, err := a.cache.Mapper().NewRow(info, fields...) 
+ if err != nil { + return nil, err + } + rows := []ovsdb.Row{row} + + for _, condition := range conditions { + operation := ovsdb.Operation{ + Op: ovsdb.OperationWait, + Table: table, + Where: condition, + Until: string(untilConFun), + Columns: columnNames, + Rows: rows, + } + + if timeout != nil { + operation.Timeout = timeout + } + + operations = append(operations, operation) + } + + return operations, nil +} + +// getTableFromModel returns the table name from a Model object after performing +// type verifications on the model +func (a api) getTableFromModel(m interface{}) (string, error) { + if _, ok := m.(model.Model); !ok { + return "", &ErrWrongType{reflect.TypeOf(m), "Type does not implement Model interface"} + } + table := a.cache.DatabaseModel().FindTable(reflect.TypeOf(m)) + if table == "" { + return "", &ErrWrongType{reflect.TypeOf(m), "Model not found in Database Model"} + } + return table, nil +} + +// getTableFromModel returns the table name from a the predicate after performing +// type verifications +func (a api) getTableFromFunc(predicate interface{}) (string, error) { + predType := reflect.TypeOf(predicate) + if predType == nil || predType.Kind() != reflect.Func { + return "", &ErrWrongType{predType, "Expected function"} + } + if predType.NumIn() != 1 || predType.NumOut() != 1 || predType.Out(0).Kind() != reflect.Bool { + return "", &ErrWrongType{predType, "Expected func(Model) bool"} + } + + modelInterface := reflect.TypeOf((*model.Model)(nil)).Elem() + modelType := predType.In(0) + if !modelType.Implements(modelInterface) { + return "", &ErrWrongType{predType, + fmt.Sprintf("Type %s does not implement Model interface", modelType.String())} + } + + table := a.cache.DatabaseModel().FindTable(modelType) + if table == "" { + return "", &ErrWrongType{predType, + fmt.Sprintf("Model %s not found in Database Model", modelType.String())} + } + return table, nil +} + +// newAPI returns a new API to interact with the database +func newAPI(cache *cache.TableCache, logger *logr.Logger) API { + return api{ + cache: cache, + logger: logger, + } +} + +// newConditionalAPI returns a new ConditionalAPI to interact with the database +func newConditionalAPI(cache *cache.TableCache, cond Conditional, logger *logr.Logger) ConditionalAPI { + return api{ + cache: cache, + cond: cond, + logger: logger, + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go new file mode 100644 index 000000000..36ea476e0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go @@ -0,0 +1,167 @@ +package client + +import ( + "encoding/json" + "testing" + + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/stretchr/testify/assert" +) + +var apiTestSchema = []byte(`{ + "name": "OVN_Northbound", + "version": "5.31.0", + "cksum": "2352750632 28701", + "tables": { + "Logical_Switch": { + "columns": { + "name": {"type": "string"}, + "ports": {"type": {"key": {"type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "acls": {"type": {"key": {"type": "uuid", + "refTable": "ACL", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "qos_rules": {"type": {"key": {"type": "uuid", + "refTable": "QoS", + "refType": "strong"}, + "min": 0, + "max": "unlimited"}}, + "load_balancer": {"type": {"key": {"type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak"}, + "min": 0, 
+ "max": "unlimited"}}, + "dns_records": {"type": {"key": {"type": "uuid", + "refTable": "DNS", + "refType": "weak"}, + "min": 0, + "max": "unlimited"}}, + "other_config": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}, + "forwarding_groups": { + "type": {"key": {"type": "uuid", + "refTable": "Forwarding_Group", + "refType": "strong"}, + "min": 0, "max": "unlimited"}}}, + "isRoot": true}, + "Logical_Switch_Port": { + "columns": { + "name": {"type": "string"}, + "type": {"type": "string"}, + "options": { + "type": {"key": "string", + "value": "string", + "min": 0, + "max": "unlimited"}}, + "parent_name": {"type": {"key": "string", "min": 0, "max": 1}}, + "tag_request": { + "type": {"key": {"type": "integer", + "minInteger": 0, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "tag": { + "type": {"key": {"type": "integer", + "minInteger": 1, + "maxInteger": 4095}, + "min": 0, "max": 1}}, + "addresses": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "dynamic_addresses": {"type": {"key": "string", + "min": 0, + "max": 1}}, + "port_security": {"type": {"key": "string", + "min": 0, + "max": "unlimited"}}, + "up": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "enabled": {"type": {"key": "boolean", "min": 0, "max": 1}}, + "dhcpv4_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "dhcpv6_options": {"type": {"key": {"type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak"}, + "min": 0, + "max": 1}}, + "ha_chassis_group": { + "type": {"key": {"type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong"}, + "min": 0, + "max": 1}}, + "external_ids": { + "type": {"key": "string", "value": "string", + "min": 0, "max": "unlimited"}}}, + "indexes": [["name"]], + "isRoot": false} + } + }`) + +type testLogicalSwitch struct { + UUID string `ovsdb:"_uuid"` + Ports []string `ovsdb:"ports"` + ExternalIds map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + QosRules []string `ovsdb:"qos_rules"` + LoadBalancer []string `ovsdb:"load_balancer"` + DNSRecords []string `ovsdb:"dns_records"` + OtherConfig map[string]string `ovsdb:"other_config"` + ForwardingGroups []string `ovsdb:"forwarding_groups"` + Acls []string `ovsdb:"acls"` +} + +// Table returns the table name. It's part of the Model interface +func (*testLogicalSwitch) Table() string { + return "Logical_Switch" +} + +//LogicalSwitchPort struct defines an object in Logical_Switch_Port table +type testLogicalSwitchPort struct { + UUID string `ovsdb:"_uuid"` + Up *bool `ovsdb:"up"` + Dhcpv4Options *string `ovsdb:"dhcpv4_options"` + Name string `ovsdb:"name"` + DynamicAddresses *string `ovsdb:"dynamic_addresses"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + Options map[string]string `ovsdb:"options"` + Enabled *bool `ovsdb:"enabled"` + Addresses []string `ovsdb:"addresses"` + Dhcpv6Options *string `ovsdb:"dhcpv6_options"` + TagRequest *int `ovsdb:"tag_request"` + Tag *int `ovsdb:"tag"` + PortSecurity []string `ovsdb:"port_security"` + ExternalIds map[string]string `ovsdb:"external_ids"` + Type string `ovsdb:"type"` + ParentName *string `ovsdb:"parent_name"` +} + +// Table returns the table name. 
It's part of the Model interface +func (*testLogicalSwitchPort) Table() string { + return "Logical_Switch_Port" +} + +func apiTestCache(t testing.TB, data map[string]map[string]model.Model) *cache.TableCache { + var schema ovsdb.DatabaseSchema + err := json.Unmarshal(apiTestSchema, &schema) + assert.Nil(t, err) + db, err := model.NewClientDBModel("OVN_Northbound", map[string]model.Model{"Logical_Switch": &testLogicalSwitch{}, "Logical_Switch_Port": &testLogicalSwitchPort{}}) + assert.Nil(t, err) + dbModel, errs := model.NewDatabaseModel(schema, db) + assert.Empty(t, errs) + cache, err := cache.NewTableCache(dbModel, data, nil) + assert.Nil(t, err) + return cache +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/client.go b/vendor/github.com/ovn-org/libovsdb/client/client.go new file mode 100644 index 000000000..10ea757ec --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/client.go @@ -0,0 +1,1480 @@ +package client + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "log" + "net" + "net/url" + "os" + "reflect" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/cenkalti/rpc2" + "github.com/cenkalti/rpc2/jsonrpc" + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/libovsdb/ovsdb/serverdb" +) + +// Constants defined for libovsdb +const ( + SSL = "ssl" + TCP = "tcp" + UNIX = "unix" +) + +const serverDB = "_Server" + +// ErrNotConnected is an error returned when the client is not connected +var ErrNotConnected = errors.New("not connected") + +// ErrAlreadyConnected is an error returned when the client is already connected +var ErrAlreadyConnected = errors.New("already connected") + +// ErrUnsupportedRPC is an error returned when an unsupported RPC method is called +var ErrUnsupportedRPC = errors.New("unsupported rpc") + +// Client represents an OVSDB Client Connection +// It provides all the necessary functionality to Connect to a server, +// perform transactions, and build your own replica of the database with +// Monitor or MonitorAll. It also provides a Cache that is populated from OVSDB +// update notifications. +type Client interface { + Connect(context.Context) error + Disconnect() + Close() + Schema() ovsdb.DatabaseSchema + Cache() *cache.TableCache + UpdateEndpoints([]string) + SetOption(Option) error + Connected() bool + DisconnectNotify() chan struct{} + Echo(context.Context) error + Transact(context.Context, ...ovsdb.Operation) ([]ovsdb.OperationResult, error) + Monitor(context.Context, *Monitor) (MonitorCookie, error) + MonitorAll(context.Context) (MonitorCookie, error) + MonitorCancel(ctx context.Context, cookie MonitorCookie) error + NewMonitor(...MonitorOption) *Monitor + CurrentEndpoint() string + API +} + +type bufferedUpdate struct { + updates *ovsdb.TableUpdates + updates2 *ovsdb.TableUpdates2 + lastTxnID string +} + +type epInfo struct { + address string + serverID string +} + +// ovsdbClient is an OVSDB client +type ovsdbClient struct { + options *options + metrics metrics + connected bool + rpcClient *rpc2.Client + rpcMutex sync.RWMutex + // endpoints contains all possible endpoints; the first element is + // the active endpoint if connected=true + endpoints []*epInfo + + // The name of the "primary" database - that is to say, the DB + // that the user expects to interact with. 
+ primaryDBName string + databases map[string]*database + + errorCh chan error + stopCh chan struct{} + disconnect chan struct{} + shutdown bool + shutdownMutex sync.Mutex + + handlerShutdown *sync.WaitGroup + + trafficSeen chan struct{} + + logger *logr.Logger +} + +// database is everything needed to map between go types and an ovsdb Database +type database struct { + // model encapsulates the database schema and model of the database we're connecting to + model model.DatabaseModel + // modelMutex protects model from being replaced (via reconnect) while in use + modelMutex sync.RWMutex + + // cache is used to store the updates for monitored tables + cache *cache.TableCache + // cacheMutex protects cache from being replaced (via reconnect) while in use + cacheMutex sync.RWMutex + + api API + + // any ongoing monitors, so we can re-create them if we disconnect + monitors map[string]*Monitor + monitorsMutex sync.Mutex + + // tracks any outstanding updates while waiting for a monitor response + deferUpdates bool + deferredUpdates []*bufferedUpdate +} + +// NewOVSDBClient creates a new OVSDB Client with the provided +// database model. The client can be configured using one or more Option(s), +// like WithTLSConfig. If no WithEndpoint option is supplied, the default of +// unix:/var/run/openvswitch/ovsdb.sock is used +func NewOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (Client, error) { + return newOVSDBClient(clientDBModel, opts...) +} + +// newOVSDBClient creates a new ovsdbClient +func newOVSDBClient(clientDBModel model.ClientDBModel, opts ...Option) (*ovsdbClient, error) { + ovs := &ovsdbClient{ + primaryDBName: clientDBModel.Name(), + databases: map[string]*database{ + clientDBModel.Name(): { + model: model.NewPartialDatabaseModel(clientDBModel), + monitors: make(map[string]*Monitor), + deferUpdates: true, + deferredUpdates: make([]*bufferedUpdate, 0), + }, + }, + errorCh: make(chan error), + handlerShutdown: &sync.WaitGroup{}, + disconnect: make(chan struct{}), + } + var err error + ovs.options, err = newOptions(opts...) + if err != nil { + return nil, err + } + for _, address := range ovs.options.endpoints { + ovs.endpoints = append(ovs.endpoints, &epInfo{address: address}) + } + + if ovs.options.logger == nil { + // create a new logger to log to stdout + l := stdr.NewWithOptions(log.New(os.Stderr, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}).WithName("libovsdb").WithValues( + "database", ovs.primaryDBName, + ) + stdr.SetVerbosity(5) + ovs.logger = &l + } else { + // add the "database" value to the structured logger + // to make it easier to tell between different DBs (e.g. ovn nbdb vs. sbdb) + l := ovs.options.logger.WithValues( + "database", ovs.primaryDBName, + ) + ovs.logger = &l + } + ovs.metrics.init(clientDBModel.Name(), ovs.options.metricNamespace, ovs.options.metricSubsystem) + ovs.registerMetrics() + + // if we should only connect to the leader, then add the special "_Server" database as well + if ovs.options.leaderOnly { + sm, err := serverdb.FullDatabaseModel() + if err != nil { + return nil, fmt.Errorf("could not initialize model _Server: %w", err) + } + ovs.databases[serverDB] = &database{ + model: model.NewPartialDatabaseModel(sm), + monitors: make(map[string]*Monitor), + } + } + + return ovs, nil +} + +// Connect opens a connection to an OVSDB Server using the +// endpoint provided when the Client was created. 
+// The connection can be configured using one or more Option(s), like WithTLSConfig +// If no WithEndpoint option is supplied, the default of unix:/var/run/openvswitch/ovsdb.sock is used +func (o *ovsdbClient) Connect(ctx context.Context) error { + if err := o.connect(ctx, false); err != nil { + if err == ErrAlreadyConnected { + return nil + } + return err + } + if o.options.leaderOnly { + if err := o.watchForLeaderChange(); err != nil { + return err + } + } + return nil +} + +// moveEndpointFirst makes the endpoint requested by active the first element +// in the endpoints slice, indicating it is the active endpoint +func (o *ovsdbClient) moveEndpointFirst(i int) { + firstEp := o.endpoints[i] + othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) + o.endpoints = append([]*epInfo{firstEp}, othereps...) +} + +// moveEndpointLast moves the requested endpoint to the end of the list +func (o *ovsdbClient) moveEndpointLast(i int) { + lastEp := o.endpoints[i] + othereps := append(o.endpoints[:i], o.endpoints[i+1:]...) + o.endpoints = append(othereps, lastEp) +} + +func (o *ovsdbClient) resetRPCClient() { + if o.rpcClient != nil { + o.rpcClient.Close() + o.rpcClient = nil + } +} + +func (o *ovsdbClient) connect(ctx context.Context, reconnect bool) error { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if o.rpcClient != nil { + return ErrAlreadyConnected + } + + connected := false + connectErrors := []error{} + for i, endpoint := range o.endpoints { + u, err := url.Parse(endpoint.address) + if err != nil { + return err + } + if sid, err := o.tryEndpoint(ctx, u); err != nil { + o.resetRPCClient() + connectErrors = append(connectErrors, + fmt.Errorf("failed to connect to %s: %w", endpoint.address, err)) + continue + } else { + o.logger.V(3).Info("successfully connected", "endpoint", endpoint.address, "sid", sid) + endpoint.serverID = sid + o.moveEndpointFirst(i) + connected = true + break + } + } + + if !connected { + if len(connectErrors) == 1 { + return connectErrors[0] + } + var combined []string + for _, e := range connectErrors { + combined = append(combined, e.Error()) + } + + return fmt.Errorf("unable to connect to any endpoints: %s", strings.Join(combined, ". ")) + } + + // if we're reconnecting, re-start all the monitors + if reconnect { + o.logger.V(3).Info("reconnected - restarting monitors") + for dbName, db := range o.databases { + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + + // Purge entire cache if no monitors exist to update dynamically + if len(db.monitors) == 0 { + db.cache.Purge(db.model) + continue + } + + // Restart all monitors; each monitor will handle purging + // the cache if necessary + for id, request := range db.monitors { + err := o.monitor(ctx, MonitorCookie{DatabaseName: dbName, ID: id}, true, request) + if err != nil { + o.resetRPCClient() + return err + } + } + } + } + + go o.handleDisconnectNotification() + if o.options.inactivityTimeout > 0 { + o.handlerShutdown.Add(1) + go o.handleInactivityProbes() + } + for _, db := range o.databases { + o.handlerShutdown.Add(1) + eventStopChan := make(chan struct{}) + go o.handleClientErrors(eventStopChan) + o.handlerShutdown.Add(1) + go func(db *database) { + defer o.handlerShutdown.Done() + db.cache.Run(o.stopCh) + close(eventStopChan) + }(db) + } + + o.connected = true + return nil +} + +// tryEndpoint connects to a single database endpoint. Returns the +// server ID (if clustered) on success, or an error. 
+func (o *ovsdbClient) tryEndpoint(ctx context.Context, u *url.URL) (string, error) { + o.logger.V(3).Info("trying to connect", "endpoint", fmt.Sprintf("%v", u)) + var dialer net.Dialer + var err error + var c net.Conn + + switch u.Scheme { + case UNIX: + c, err = dialer.DialContext(ctx, u.Scheme, u.Path) + case TCP: + c, err = dialer.DialContext(ctx, u.Scheme, u.Opaque) + case SSL: + dialer := tls.Dialer{ + Config: o.options.tlsConfig, + } + c, err = dialer.DialContext(ctx, "tcp", u.Opaque) + default: + err = fmt.Errorf("unknown network protocol %s", u.Scheme) + } + if err != nil { + return "", fmt.Errorf("failed to open connection: %w", err) + } + + o.createRPC2Client(c) + + serverDBNames, err := o.listDbs(ctx) + if err != nil { + return "", err + } + + // for every requested database, ensure the DB exists in the server and + // that the schema matches what we expect. + for dbName, db := range o.databases { + // check the server has what we want + found := false + for _, name := range serverDBNames { + if name == dbName { + found = true + break + } + } + if !found { + return "", fmt.Errorf("target database %s not found", dbName) + } + + // load and validate the schema + schema, err := o.getSchema(ctx, dbName) + if err != nil { + return "", err + } + + db.modelMutex.Lock() + var errors []error + db.model, errors = model.NewDatabaseModel(schema, db.model.Client()) + db.modelMutex.Unlock() + if len(errors) > 0 { + var combined []string + for _, err := range errors { + combined = append(combined, err.Error()) + } + return "", fmt.Errorf("database %s validation error (%d): %s", + dbName, len(errors), strings.Join(combined, ". ")) + } + + db.cacheMutex.Lock() + if db.cache == nil { + db.cache, err = cache.NewTableCache(db.model, nil, o.logger) + if err != nil { + db.cacheMutex.Unlock() + return "", err + } + db.api = newAPI(db.cache, o.logger) + } + db.cacheMutex.Unlock() + } + + // check that this is the leader + var sid string + if o.options.leaderOnly { + var leader bool + leader, sid, err = o.isEndpointLeader(ctx) + if err != nil { + return "", err + } + if !leader { + return "", fmt.Errorf("endpoint is not leader") + } + } + return sid, nil +} + +// createRPC2Client creates an rpcClient using the provided connection +// It is also responsible for setting up go routines for client-side event handling +// Should only be called when the mutex is held +func (o *ovsdbClient) createRPC2Client(conn net.Conn) { + o.stopCh = make(chan struct{}) + if o.options.inactivityTimeout > 0 { + o.trafficSeen = make(chan struct{}) + } + o.rpcClient = rpc2.NewClientWithCodec(jsonrpc.NewJSONCodec(conn)) + o.rpcClient.SetBlocking(true) + o.rpcClient.Handle("echo", func(_ *rpc2.Client, args []interface{}, reply *[]interface{}) error { + return o.echo(args, reply) + }) + o.rpcClient.Handle("update", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { + return o.update(args, reply) + }) + o.rpcClient.Handle("update2", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { + return o.update2(args, reply) + }) + o.rpcClient.Handle("update3", func(_ *rpc2.Client, args []json.RawMessage, reply *[]interface{}) error { + return o.update3(args, reply) + }) + go o.rpcClient.Run() +} + +// isEndpointLeader returns true if the currently connected endpoint is leader, +// otherwise false or an error. If the currently connected endpoint is the leader +// and the database is clustered, also returns the database's Server ID. +// Assumes rpcMutex is held. 
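+//
+// Conceptually (an illustrative restatement, not part of this change), it
+// issues the following select against the special _Server database and
+// inspects the row whose name matches the primary database:
+//
+//    op := ovsdb.Operation{
+//        Op:      ovsdb.OperationSelect,
+//        Table:   "Database",
+//        Columns: []string{"name", "model", "leader", "sid"},
+//    }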
+func (o *ovsdbClient) isEndpointLeader(ctx context.Context) (bool, string, error) { + op := ovsdb.Operation{ + Op: ovsdb.OperationSelect, + Table: "Database", + Columns: []string{"name", "model", "leader", "sid"}, + } + results, err := o.transact(ctx, serverDB, true, op) + if err != nil { + return false, "", fmt.Errorf("could not check if server was leader: %w", err) + } + // for now, if no rows are returned, just accept this server + if len(results) != 1 { + return true, "", nil + } + result := results[0] + if len(result.Rows) == 0 { + return true, "", nil + } + + for _, row := range result.Rows { + dbName, ok := row["name"].(string) + if !ok { + return false, "", fmt.Errorf("could not parse name") + } + if dbName != o.primaryDBName { + continue + } + + model, ok := row["model"].(string) + if !ok { + return false, "", fmt.Errorf("could not parse model") + } + + // the database reports whether or not it is part of a cluster via the + // "model" column. If it's not clustered, it is by definition leader. + if model != serverdb.DatabaseModelClustered { + return true, "", nil + } + + // Clustered database must have a Server ID + sid, ok := row["sid"].(ovsdb.UUID) + if !ok { + return false, "", fmt.Errorf("could not parse server id") + } + + leader, ok := row["leader"].(bool) + if !ok { + return false, "", fmt.Errorf("could not parse leader") + } + + return leader, sid.GoUUID, nil + } + + // Extremely unlikely: there is no _Server row for the desired DB (which we made sure existed) + // for now, just continue + o.logger.V(3).Info("Couldn't find a row in _Server for our database. Continuing without leader detection", "database", o.primaryDBName) + return true, "", nil +} + +func (o *ovsdbClient) primaryDB() *database { + return o.databases[o.primaryDBName] +} + +// Schema returns the DatabaseSchema that is being used by the client +// it will be nil until a connection has been established +func (o *ovsdbClient) Schema() ovsdb.DatabaseSchema { + db := o.primaryDB() + db.modelMutex.RLock() + defer db.modelMutex.RUnlock() + return db.model.Schema +} + +// Cache returns the TableCache that is populated from +// ovsdb update notifications. It will be nil until a connection +// has been established, and empty unless you call Monitor +func (o *ovsdbClient) Cache() *cache.TableCache { + db := o.primaryDB() + db.cacheMutex.RLock() + defer db.cacheMutex.RUnlock() + return db.cache +} + +// UpdateEndpoints sets client endpoints +// It is intended to be called at runtime +func (o *ovsdbClient) UpdateEndpoints(endpoints []string) { + o.logger.V(3).Info("update endpoints", "endpoints", endpoints) + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if len(endpoints) == 0 { + endpoints = []string{defaultUnixEndpoint} + } + o.options.endpoints = endpoints + originEps := o.endpoints[:] + var newEps []*epInfo + activeIdx := -1 + for i, address := range o.options.endpoints { + var serverID string + for j, origin := range originEps { + if address == origin.address { + if j == 0 { + activeIdx = i + } + serverID = origin.serverID + break + } + } + newEps = append(newEps, &epInfo{address: address, serverID: serverID}) + } + o.endpoints = newEps + if activeIdx > 0 { + o.moveEndpointFirst(activeIdx) + } else if activeIdx == -1 { + o._disconnect() + } +} + +// SetOption sets a new value for an option. 
+// It may only be called when the client is not connected +func (o *ovsdbClient) SetOption(opt Option) error { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient != nil { + return fmt.Errorf("cannot set option when client is connected") + } + return opt(o.options) +} + +// Connected returns whether or not the client is currently connected to the server +func (o *ovsdbClient) Connected() bool { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + return o.connected +} + +func (o *ovsdbClient) CurrentEndpoint() string { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return "" + } + return o.endpoints[0].address +} + +// DisconnectNotify returns a channel which will notify the caller when the +// server has disconnected +func (o *ovsdbClient) DisconnectNotify() chan struct{} { + return o.disconnect +} + +// RFC 7047 : Section 4.1.6 : Echo +func (o *ovsdbClient) echo(args []interface{}, reply *[]interface{}) error { + *reply = args + return nil +} + +// RFC 7047 : Update Notification Section 4.1.6 +// params is an array of length 2: [json-value, table-updates] +// - json-value: the arbitrary json-value passed when creating the Monitor, i.e. the "cookie" +// - table-updates: map of table name to table-update. Table-update is a map of uuid to (old, new) row paris +func (o *ovsdbClient) update(params []json.RawMessage, reply *[]interface{}) error { + cookie := MonitorCookie{} + *reply = []interface{}{} + if len(params) > 2 { + return fmt.Errorf("update requires exactly 2 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var updates ovsdb.TableUpdates + err = json.Unmarshal(params[1], &updates) + if err != nil { + return err + } + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + o.metrics.numUpdates.WithLabelValues(cookie.DatabaseName).Inc() + for tableName := range updates { + o.metrics.numTableUpdates.WithLabelValues(cookie.DatabaseName, tableName).Inc() + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{&updates, nil, ""}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update(cookie.ID, updates) + db.cacheMutex.RUnlock() + + if err != nil { + o.errorCh <- err + } + + return err +} + +// update2 handling from ovsdb-server.7 +func (o *ovsdbClient) update2(params []json.RawMessage, reply *[]interface{}) error { + cookie := MonitorCookie{} + *reply = []interface{}{} + if len(params) > 2 { + return fmt.Errorf("update2 requires exactly 2 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var updates ovsdb.TableUpdates2 + err = json.Unmarshal(params[1], &updates) + if err != nil { + return err + } + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, ""}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update2(cookie, updates) + db.cacheMutex.RUnlock() + + if err != nil { + o.errorCh <- err + } + + return err +} + +// update3 handling from ovsdb-server.7 +func (o 
*ovsdbClient) update3(params []json.RawMessage, reply *[]interface{}) error { + cookie := MonitorCookie{} + *reply = []interface{}{} + if len(params) > 3 { + return fmt.Errorf("update requires exactly 3 args") + } + err := json.Unmarshal(params[0], &cookie) + if err != nil { + return err + } + var lastTransactionID string + err = json.Unmarshal(params[1], &lastTransactionID) + if err != nil { + return err + } + var updates ovsdb.TableUpdates2 + err = json.Unmarshal(params[2], &updates) + if err != nil { + return err + } + + db := o.databases[cookie.DatabaseName] + if db == nil { + return fmt.Errorf("update: invalid database name: %s unknown", cookie.DatabaseName) + } + + db.cacheMutex.Lock() + if db.deferUpdates { + db.deferredUpdates = append(db.deferredUpdates, &bufferedUpdate{nil, &updates, lastTransactionID}) + db.cacheMutex.Unlock() + return nil + } + db.cacheMutex.Unlock() + + // Update the local DB cache with the tableUpdates + db.cacheMutex.RLock() + err = db.cache.Update2(cookie, updates) + db.cacheMutex.RUnlock() + + if err == nil { + db.monitorsMutex.Lock() + mon := db.monitors[cookie.ID] + mon.LastTransactionID = lastTransactionID + db.monitorsMutex.Unlock() + } + + return err +} + +// getSchema returns the schema in use for the provided database name +// RFC 7047 : get_schema +// Should only be called when mutex is held +func (o *ovsdbClient) getSchema(ctx context.Context, dbName string) (ovsdb.DatabaseSchema, error) { + args := ovsdb.NewGetSchemaArgs(dbName) + var reply ovsdb.DatabaseSchema + err := o.rpcClient.CallWithContext(ctx, "get_schema", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ovsdb.DatabaseSchema{}, ErrNotConnected + } + return ovsdb.DatabaseSchema{}, err + } + return reply, err +} + +// listDbs returns the list of databases on the server +// RFC 7047 : list_dbs +// Should only be called when mutex is held +func (o *ovsdbClient) listDbs(ctx context.Context) ([]string, error) { + var dbs []string + err := o.rpcClient.CallWithContext(ctx, "list_dbs", nil, &dbs) + if err != nil { + if err == rpc2.ErrShutdown { + return nil, ErrNotConnected + } + return nil, fmt.Errorf("listdbs failure - %v", err) + } + return dbs, err +} + +// logFromContext returns a Logger from ctx or return the default logger +func (o *ovsdbClient) logFromContext(ctx context.Context) *logr.Logger { + if logger, err := logr.FromContext(ctx); err == nil { + return &logger + } + return o.logger +} + +// Transact performs the provided Operations on the database +// RFC 7047 : transact +func (o *ovsdbClient) Transact(ctx context.Context, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + logger := o.logFromContext(ctx) + o.rpcMutex.RLock() + if o.rpcClient == nil || !o.connected { + o.rpcMutex.RUnlock() + if o.options.reconnect { + logger.V(5).Info("blocking transaction until reconnected", "operations", + fmt.Sprintf("%+v", operation)) + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + ReconnectWaitLoop: + for { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("%w: while awaiting reconnection", ctx.Err()) + case <-ticker.C: + o.rpcMutex.RLock() + if o.rpcClient != nil && o.connected { + break ReconnectWaitLoop + } + o.rpcMutex.RUnlock() + } + } + } else { + return nil, ErrNotConnected + } + } + defer o.rpcMutex.RUnlock() + return o.transact(ctx, o.primaryDBName, false, operation...) 
+} + +func (o *ovsdbClient) transact(ctx context.Context, dbName string, skipChWrite bool, operation ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + logger := o.logFromContext(ctx) + var reply []ovsdb.OperationResult + db := o.databases[dbName] + db.modelMutex.RLock() + schema := o.databases[dbName].model.Schema + db.modelMutex.RUnlock() + if reflect.DeepEqual(schema, ovsdb.DatabaseSchema{}) { + return nil, fmt.Errorf("cannot transact to database %s: schema unknown", dbName) + } + if ok := schema.ValidateOperations(operation...); !ok { + return nil, fmt.Errorf("validation failed for the operation") + } + + args := ovsdb.NewTransactArgs(dbName, operation...) + if o.rpcClient == nil { + return nil, ErrNotConnected + } + dbgLogger := logger.WithValues("database", dbName).V(4) + if dbgLogger.Enabled() { + dbgLogger.Info("transacting operations", "operations", fmt.Sprintf("%+v", operation)) + } + err := o.rpcClient.CallWithContext(ctx, "transact", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return nil, ErrNotConnected + } + return nil, err + } + + if !skipChWrite && o.trafficSeen != nil { + o.trafficSeen <- struct{}{} + } + return reply, nil +} + +// MonitorAll is a convenience method to monitor every table/column +func (o *ovsdbClient) MonitorAll(ctx context.Context) (MonitorCookie, error) { + m := newMonitor() + for name := range o.primaryDB().model.Types() { + m.Tables = append(m.Tables, TableMonitor{Table: name}) + } + return o.Monitor(ctx, m) +} + +// MonitorCancel will request cancel a previously issued monitor request +// RFC 7047 : monitor_cancel +func (o *ovsdbClient) MonitorCancel(ctx context.Context, cookie MonitorCookie) error { + var reply ovsdb.OperationResult + args := ovsdb.NewMonitorCancelArgs(cookie) + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + if o.rpcClient == nil { + return ErrNotConnected + } + err := o.rpcClient.CallWithContext(ctx, "monitor_cancel", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + return err + } + if reply.Error != "" { + return fmt.Errorf("error while executing transaction: %s", reply.Error) + } + o.primaryDB().monitorsMutex.Lock() + defer o.primaryDB().monitorsMutex.Unlock() + delete(o.primaryDB().monitors, cookie.ID) + o.metrics.numMonitors.Dec() + return nil +} + +// Monitor will provide updates for a given table/column +// and populate the cache with them. Subsequent updates will be processed +// by the Update Notifications +// RFC 7047 : monitor +func (o *ovsdbClient) Monitor(ctx context.Context, monitor *Monitor) (MonitorCookie, error) { + cookie := newMonitorCookie(o.primaryDBName) + db := o.databases[o.primaryDBName] + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + return cookie, o.monitor(ctx, cookie, false, monitor) +} + +// If fields is provided, the request will be constrained to the provided columns +// If no fields are provided, all columns will be used +func newMonitorRequest(data *mapper.Info, fields []string, conditions []ovsdb.Condition) (*ovsdb.MonitorRequest, error) { + var columns []string + if len(fields) > 0 { + columns = append(columns, fields...) 
+ } else { + for c := range data.Metadata.TableSchema.Columns { + columns = append(columns, c) + } + } + return &ovsdb.MonitorRequest{Columns: columns, Where: conditions, Select: ovsdb.NewDefaultMonitorSelect()}, nil +} + +// monitor must only be called with a lock on monitorsMutex +// +//gocyclo:ignore +func (o *ovsdbClient) monitor(ctx context.Context, cookie MonitorCookie, reconnecting bool, monitor *Monitor) error { + // if we're reconnecting, we already hold the rpcMutex + if !reconnecting { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + } + if o.rpcClient == nil { + return ErrNotConnected + } + if len(monitor.Errors) != 0 { + var errString []string + for _, err := range monitor.Errors { + errString = append(errString, err.Error()) + } + return fmt.Errorf(strings.Join(errString, ". ")) + } + if len(monitor.Tables) == 0 { + return fmt.Errorf("at least one table should be monitored") + } + dbName := cookie.DatabaseName + db := o.databases[dbName] + db.modelMutex.RLock() + typeMap := db.model.Types() + requests := make(map[string]ovsdb.MonitorRequest) + for _, o := range monitor.Tables { + _, ok := typeMap[o.Table] + if !ok { + return fmt.Errorf("type for table %s does not exist in model", o.Table) + } + model, err := db.model.NewModel(o.Table) + if err != nil { + return err + } + info, err := db.model.NewModelInfo(model) + if err != nil { + return err + } + request, err := newMonitorRequest(info, o.Fields, o.Conditions) + if err != nil { + return err + } + requests[o.Table] = *request + } + db.modelMutex.RUnlock() + + var args []interface{} + if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { + // If we are reconnecting a CondSince monitor that is the only + // monitor, then we can use its LastTransactionID since it is + // valid (because we're reconnecting) and we can safely keep + // the cache intact (because it's the only monitor). 
+ transactionID := emptyUUID + if reconnecting && len(db.monitors) == 1 { + transactionID = monitor.LastTransactionID + } + args = ovsdb.NewMonitorCondSinceArgs(dbName, cookie, requests, transactionID) + } else { + args = ovsdb.NewMonitorArgs(dbName, cookie, requests) + } + var err error + var tableUpdates interface{} + + var lastTransactionFound bool + switch monitor.Method { + case ovsdb.MonitorRPC: + var reply ovsdb.TableUpdates + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + tableUpdates = reply + case ovsdb.ConditionalMonitorRPC: + var reply ovsdb.TableUpdates2 + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + tableUpdates = reply + case ovsdb.ConditionalMonitorSinceRPC: + var reply ovsdb.MonitorCondSinceReply + err = o.rpcClient.CallWithContext(ctx, monitor.Method, args, &reply) + if err == nil && reply.Found { + monitor.LastTransactionID = reply.LastTransactionID + lastTransactionFound = true + } + tableUpdates = reply.Updates + default: + return fmt.Errorf("unsupported monitor method: %v", monitor.Method) + } + + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + if err.Error() == "unknown method" { + if monitor.Method == ovsdb.ConditionalMonitorSinceRPC { + o.logger.V(3).Error(err, "method monitor_cond_since not supported, falling back to monitor_cond") + monitor.Method = ovsdb.ConditionalMonitorRPC + return o.monitor(ctx, cookie, reconnecting, monitor) + } + if monitor.Method == ovsdb.ConditionalMonitorRPC { + o.logger.V(3).Error(err, "method monitor_cond not supported, falling back to monitor") + monitor.Method = ovsdb.MonitorRPC + return o.monitor(ctx, cookie, reconnecting, monitor) + } + } + return err + } + + if !reconnecting { + db.monitors[cookie.ID] = monitor + o.metrics.numMonitors.Inc() + } + + db.cacheMutex.Lock() + defer db.cacheMutex.Unlock() + + // On reconnect, purge the cache _unless_ the only monitor is a + // MonitorCondSince one, whose LastTransactionID was known to the + // server. In this case the reply contains only updates to the existing + // cache data, while otherwise it includes complete DB data so we must + // purge to get rid of old rows. 
+ if reconnecting && (len(db.monitors) > 1 || !lastTransactionFound) { + db.cache.Purge(db.model) + } + + if monitor.Method == ovsdb.MonitorRPC { + u := tableUpdates.(ovsdb.TableUpdates) + err = db.cache.Populate(u) + } else { + u := tableUpdates.(ovsdb.TableUpdates2) + err = db.cache.Populate2(u) + } + + if err != nil { + return err + } + + // populate any deferred updates + db.deferUpdates = false + for _, update := range db.deferredUpdates { + if update.updates != nil { + if err = db.cache.Populate(*update.updates); err != nil { + return err + } + } + + if update.updates2 != nil { + if err = db.cache.Populate2(*update.updates2); err != nil { + return err + } + } + if len(update.lastTxnID) > 0 { + db.monitors[cookie.ID].LastTransactionID = update.lastTxnID + } + } + // clear deferred updates for next time + db.deferredUpdates = make([]*bufferedUpdate, 0) + + return err +} + +// Echo tests the liveness of the OVSDB connetion +func (o *ovsdbClient) Echo(ctx context.Context) error { + args := ovsdb.NewEchoArgs() + var reply []interface{} + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return ErrNotConnected + } + err := o.rpcClient.CallWithContext(ctx, "echo", args, &reply) + if err != nil { + if err == rpc2.ErrShutdown { + return ErrNotConnected + } + } + if !reflect.DeepEqual(args, reply) { + return fmt.Errorf("incorrect server response: %v, %v", args, reply) + } + return nil +} + +// watchForLeaderChange will trigger a reconnect if the connected endpoint +// ever loses leadership +func (o *ovsdbClient) watchForLeaderChange() error { + updates := make(chan model.Model) + o.databases[serverDB].cache.AddEventHandler(&cache.EventHandlerFuncs{ + UpdateFunc: func(table string, _, new model.Model) { + if table == "Database" { + updates <- new + } + }, + }) + + m := newMonitor() + // NOTE: _Server does not support monitor_cond_since + m.Method = ovsdb.ConditionalMonitorRPC + m.Tables = []TableMonitor{{Table: "Database"}} + db := o.databases[serverDB] + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + err := o.monitor(context.Background(), newMonitorCookie(serverDB), false, m) + if err != nil { + return err + } + + go func() { + for m := range updates { + dbInfo, ok := m.(*serverdb.Database) + if !ok { + continue + } + + // Ignore the dbInfo for _Server + if dbInfo.Name != o.primaryDBName { + continue + } + + // Only handle leadership changes for clustered databases + if dbInfo.Model != serverdb.DatabaseModelClustered { + continue + } + + // Clustered database servers must have a valid Server ID + var sid string + if dbInfo.Sid != nil { + sid = *dbInfo.Sid + } + if sid == "" { + o.logger.V(3).Info("clustered database update contained invalid server ID") + continue + } + + o.rpcMutex.Lock() + if !dbInfo.Leader && o.connected { + activeEndpoint := o.endpoints[0] + if sid == activeEndpoint.serverID { + o.logger.V(3).Info("endpoint lost leader, reconnecting", + "endpoint", activeEndpoint.address, "sid", sid) + // don't immediately reconnect to the active endpoint since it's no longer leader + o.moveEndpointLast(0) + o._disconnect() + } else { + o.logger.V(3).Info("endpoint lost leader but had unexpected server ID", + "endpoint", activeEndpoint.address, + "expected", activeEndpoint.serverID, "found", sid) + } + } + o.rpcMutex.Unlock() + } + }() + return nil +} + +func (o *ovsdbClient) handleClientErrors(stopCh <-chan struct{}) { + defer o.handlerShutdown.Done() + var errColumnNotFound *mapper.ErrColumnNotFound + var errCacheInconsistent 
*cache.ErrCacheInconsistent + var errIndexExists *cache.ErrIndexExists + for { + select { + case <-stopCh: + return + case err := <-o.errorCh: + if errors.As(err, &errColumnNotFound) { + o.logger.V(3).Error(err, "error updating cache, DB schema may be newer than client!") + } else if errors.As(err, &errCacheInconsistent) || errors.As(err, &errIndexExists) { + // trigger a reconnect, which will purge the cache + // hopefully a rebuild will fix any inconsistency + o.logger.V(3).Error(err, "triggering reconnect to rebuild cache") + // for rebuilding cache with mon_cond_since (not yet fully supported in libovsdb) we + // need to reset the last txn ID + for _, db := range o.databases { + db.monitorsMutex.Lock() + for _, mon := range db.monitors { + mon.LastTransactionID = emptyUUID + } + db.monitorsMutex.Unlock() + } + o.Disconnect() + } else { + o.logger.V(3).Error(err, "error updating cache") + } + } + } +} + +func (o *ovsdbClient) sendEcho(args []interface{}, reply *[]interface{}) *rpc2.Call { + o.rpcMutex.RLock() + defer o.rpcMutex.RUnlock() + if o.rpcClient == nil { + return nil + } + return o.rpcClient.Go("echo", args, reply, make(chan *rpc2.Call, 1)) +} + +func (o *ovsdbClient) handleInactivityProbes() { + defer o.handlerShutdown.Done() + echoReplied := make(chan string) + var lastEcho string + stopCh := o.stopCh + trafficSeen := o.trafficSeen + for { + select { + case <-stopCh: + return + case <-trafficSeen: + // We got some traffic from the server, restart our timer + case ts := <-echoReplied: + // Got a response from the server, check it against lastEcho; if same clear lastEcho; if not same Disconnect() + if ts != lastEcho { + o.Disconnect() + return + } + lastEcho = "" + case <-time.After(o.options.inactivityTimeout): + // If there's a lastEcho already, then we didn't get a server reply, disconnect + if lastEcho != "" { + o.Disconnect() + return + } + // Otherwise send an echo + thisEcho := fmt.Sprintf("%d", time.Now().UnixMicro()) + args := []interface{}{"libovsdb echo", thisEcho} + var reply []interface{} + // Can't use o.Echo() because it blocks; we need the Call object direct from o.rpcClient.Go() + call := o.sendEcho(args, &reply) + if call == nil { + o.Disconnect() + return + } + lastEcho = thisEcho + go func() { + // Wait for the echo reply + select { + case <-stopCh: + return + case <-call.Done: + if call.Error != nil { + // RPC timeout; disconnect + o.logger.V(3).Error(call.Error, "server echo reply error") + o.Disconnect() + } else if !reflect.DeepEqual(args, reply) { + o.logger.V(3).Info("warning: incorrect server echo reply", + "expected", args, "reply", reply) + o.Disconnect() + } else { + // Otherwise stuff thisEcho into the echoReplied channel + echoReplied <- thisEcho + } + } + }() + } + } +} + +func (o *ovsdbClient) handleDisconnectNotification() { + <-o.rpcClient.DisconnectNotify() + // close the stopCh, which will stop the cache event processor + close(o.stopCh) + if o.trafficSeen != nil { + close(o.trafficSeen) + } + o.metrics.numDisconnects.Inc() + // wait for client related handlers to shutdown + o.handlerShutdown.Wait() + o.rpcMutex.Lock() + if o.options.reconnect && !o.shutdown { + o.rpcClient = nil + o.rpcMutex.Unlock() + suppressionCounter := 1 + connect := func() error { + // need to ensure deferredUpdates is cleared on every reconnect attempt + for _, db := range o.databases { + db.cacheMutex.Lock() + db.deferredUpdates = make([]*bufferedUpdate, 0) + db.deferUpdates = true + db.cacheMutex.Unlock() + } + ctx, cancel := 
context.WithTimeout(context.Background(), o.options.timeout) + defer cancel() + err := o.connect(ctx, true) + if err != nil { + if suppressionCounter < 5 { + o.logger.V(2).Error(err, "failed to reconnect") + } else if suppressionCounter == 5 { + o.logger.V(2).Error(err, "reconnect has failed 5 times, suppressing logging "+ + "for future attempts") + } + } + suppressionCounter++ + return err + } + o.logger.V(3).Info("connection lost, reconnecting", "endpoint", o.endpoints[0].address) + err := backoff.Retry(connect, o.options.backoff) + if err != nil { + // TODO: We should look at passing this back to the + // caller to handle + panic(err) + } + // this goroutine finishes, and is replaced with a new one (from Connect) + return + } + + // clear connection state + o.rpcClient = nil + o.rpcMutex.Unlock() + + for _, db := range o.databases { + db.cacheMutex.Lock() + defer db.cacheMutex.Unlock() + db.cache = nil + // need to defer updates if/when we reconnect and clear any stale updates + db.deferUpdates = true + db.deferredUpdates = make([]*bufferedUpdate, 0) + + db.modelMutex.Lock() + defer db.modelMutex.Unlock() + db.model = model.NewPartialDatabaseModel(db.model.Client()) + + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + db.monitors = make(map[string]*Monitor) + } + o.metrics.numMonitors.Set(0) + + o.shutdownMutex.Lock() + defer o.shutdownMutex.Unlock() + o.shutdown = false + + select { + case o.disconnect <- struct{}{}: + // sent disconnect notification to client + default: + // client is not listening to the channel + } +} + +// _disconnect will close the connection to the OVSDB server +// If the client was created with WithReconnect then the client +// will reconnect afterwards. Assumes rpcMutex is held. +func (o *ovsdbClient) _disconnect() { + o.connected = false + if o.rpcClient == nil { + return + } + o.rpcClient.Close() +} + +// Disconnect will close the connection to the OVSDB server +// If the client was created with WithReconnect then the client +// will reconnect afterwards +func (o *ovsdbClient) Disconnect() { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + o._disconnect() +} + +// Close will close the connection to the OVSDB server +// It will remove all stored state ready for the next connection +// Even If the client was created with WithReconnect it will not reconnect afterwards +func (o *ovsdbClient) Close() { + o.rpcMutex.Lock() + defer o.rpcMutex.Unlock() + o.connected = false + if o.rpcClient == nil { + return + } + o.shutdownMutex.Lock() + defer o.shutdownMutex.Unlock() + o.shutdown = true + o.rpcClient.Close() +} + +// Ensures the cache is consistent by evaluating that the client is connected +// and the monitor is fully setup, with the cache populated. Caller must hold +// the database's cache mutex for reading. +func isCacheConsistent(db *database) bool { + // This works because when a client is disconnected the deferUpdates variable + // will be set to true. deferUpdates is also protected by the db.cacheMutex. + // When the client reconnects and then re-establishes the monitor; the final step + // is to process all deferred updates, set deferUpdates back to false, and unlock cacheMutex + return !db.deferUpdates +} + +// best effort to ensure cache is in a good state for reading. RLocks the +// database's cache before returning; caller must always unlock. 
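+//
+// Usage sketch, mirroring how Get and List below call it:
+//
+//	waitForCacheConsistent(ctx, db, logger, dbName)
+//	defer db.cacheMutex.RUnlock()
+//	// ... read from db.cache ...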
+func waitForCacheConsistent(ctx context.Context, db *database, logger *logr.Logger, dbName string) { + if !hasMonitors(db) { + db.cacheMutex.RLock() + return + } + // Check immediately as a fastpath + db.cacheMutex.RLock() + if isCacheConsistent(db) { + return + } + db.cacheMutex.RUnlock() + + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.V(3).Info("warning: unable to ensure cache consistency for reading", + "database", dbName) + db.cacheMutex.RLock() + return + case <-ticker.C: + db.cacheMutex.RLock() + if isCacheConsistent(db) { + return + } + db.cacheMutex.RUnlock() + } + } +} + +func hasMonitors(db *database) bool { + db.monitorsMutex.Lock() + defer db.monitorsMutex.Unlock() + return len(db.monitors) > 0 +} + +// Client API interface wrapper functions +// We add this wrapper to allow users to access the API directly on the +// client object + +// Get implements the API interface's Get function +func (o *ovsdbClient) Get(ctx context.Context, model model.Model) error { + primaryDB := o.primaryDB() + waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName) + defer primaryDB.cacheMutex.RUnlock() + return primaryDB.api.Get(ctx, model) +} + +// Create implements the API interface's Create function +func (o *ovsdbClient) Create(models ...model.Model) ([]ovsdb.Operation, error) { + return o.primaryDB().api.Create(models...) +} + +// List implements the API interface's List function +func (o *ovsdbClient) List(ctx context.Context, result interface{}) error { + primaryDB := o.primaryDB() + waitForCacheConsistent(ctx, primaryDB, o.logger, o.primaryDBName) + defer primaryDB.cacheMutex.RUnlock() + return primaryDB.api.List(ctx, result) +} + +// Where implements the API interface's Where function +func (o *ovsdbClient) Where(models ...model.Model) ConditionalAPI { + return o.primaryDB().api.Where(models...) +} + +// WhereAny implements the API interface's WhereAny function +func (o *ovsdbClient) WhereAny(m model.Model, conditions ...model.Condition) ConditionalAPI { + return o.primaryDB().api.WhereAny(m, conditions...) +} + +// WhereAll implements the API interface's WhereAll function +func (o *ovsdbClient) WhereAll(m model.Model, conditions ...model.Condition) ConditionalAPI { + return o.primaryDB().api.WhereAll(m, conditions...) 
+} + +// WhereCache implements the API interface's WhereCache function +func (o *ovsdbClient) WhereCache(predicate interface{}) ConditionalAPI { + return o.primaryDB().api.WhereCache(predicate) +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/condition.go b/vendor/github.com/ovn-org/libovsdb/client/condition.go new file mode 100644 index 000000000..1dfabda02 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/condition.go @@ -0,0 +1,248 @@ +package client + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// Conditional is the interface used by the ConditionalAPI to match on cache objects +// and generate ovsdb conditions +type Conditional interface { + // Generate returns a list of lists of conditions to be used in Operations + // Each element in the (outer) list corresponds to an operation + Generate() ([][]ovsdb.Condition, error) + // Returns the models that match the conditions + Matches() (map[string]model.Model, error) + // returns the table that this condition is associated with + Table() string +} + +func generateConditionsFromModels(dbModel model.DatabaseModel, models map[string]model.Model) ([][]ovsdb.Condition, error) { + anyConditions := make([][]ovsdb.Condition, 0, len(models)) + for _, model := range models { + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + allConditions, err := dbModel.Mapper.NewEqualityCondition(info) + if err != nil { + return nil, err + } + anyConditions = append(anyConditions, allConditions) + } + return anyConditions, nil +} + +func generateOvsdbConditionsFromModelConditions(dbModel model.DatabaseModel, info *mapper.Info, conditions []model.Condition, singleOp bool) ([][]ovsdb.Condition, error) { + anyConditions := [][]ovsdb.Condition{} + if singleOp { + anyConditions = append(anyConditions, []ovsdb.Condition{}) + } + for _, condition := range conditions { + ovsdbCond, err := dbModel.Mapper.NewCondition(info, condition.Field, condition.Function, condition.Value) + if err != nil { + return nil, err + } + allConditions := []ovsdb.Condition{*ovsdbCond} + if singleOp { + anyConditions[0] = append(anyConditions[0], allConditions...) + } else { + anyConditions = append(anyConditions, allConditions) + } + } + return anyConditions, nil +} + +// equalityConditional uses the indexes available in a provided model to find a +// matching model in the database. +type equalityConditional struct { + tableName string + models []model.Model + cache *cache.TableCache +} + +func (c *equalityConditional) Table() string { + return c.tableName +} + +// Returns the models that match the indexes available through the provided +// model. +func (c *equalityConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + return tableCache.RowsByModels(c.models) +} + +// Generate conditions based on the equality of the first available index. If +// the index can be matched against a model in the cache, the condition will be +// based on the UUID of the found model. Otherwise, the conditions will be based +// on the index. 
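+//
+// For example, with a hypothetical model indexed by "name": a cache hit for
+// name "br-int" generates [[_uuid == <cached UUID>]], while a cache miss
+// generates [[name == "br-int"]].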
+func (c *equalityConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil && err != ErrNotFound { + return nil, err + } + if len(models) == 0 { + // no cache hits, generate condition from models we were given + modelMap := make(map[string]model.Model, len(c.models)) + for i, m := range c.models { + // generateConditionsFromModels() ignores the map keys + // so just use the range index + modelMap[fmt.Sprintf("%d", i)] = m + } + return generateConditionsFromModels(c.cache.DatabaseModel(), modelMap) + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// NewEqualityCondition creates a new equalityConditional +func newEqualityConditional(table string, cache *cache.TableCache, models []model.Model) (Conditional, error) { + return &equalityConditional{ + tableName: table, + models: models, + cache: cache, + }, nil +} + +// explicitConditional generates conditions based on the provided Condition list +type explicitConditional struct { + tableName string + anyConditions [][]ovsdb.Condition + cache *cache.TableCache +} + +func (c *explicitConditional) Table() string { + return c.tableName +} + +// Returns the models that match the conditions +func (c *explicitConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + found := map[string]model.Model{} + for _, allConditions := range c.anyConditions { + models, err := tableCache.RowsByCondition(allConditions) + if err != nil { + return nil, err + } + for uuid, model := range models { + found[uuid] = model + } + } + return found, nil +} + +// Generate returns conditions based on the provided Condition list +func (c *explicitConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil && err != ErrNotFound { + return nil, err + } + if len(models) == 0 { + // no cache hits, return conditions we were given + return c.anyConditions, nil + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// newExplicitConditional creates a new explicitConditional +func newExplicitConditional(table string, cache *cache.TableCache, matchAll bool, model model.Model, cond ...model.Condition) (Conditional, error) { + dbModel := cache.DatabaseModel() + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + anyConditions, err := generateOvsdbConditionsFromModelConditions(dbModel, info, cond, matchAll) + if err != nil { + return nil, err + } + return &explicitConditional{ + tableName: table, + anyConditions: anyConditions, + cache: cache, + }, nil +} + +// predicateConditional is a Conditional that calls a provided function pointer +// to match on models. 
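+//
+// The predicate is expected to have the shape func(*T) bool for a model type
+// T, for example (hypothetical Bridge model):
+//
+//	func(b *Bridge) bool { return strings.HasPrefix(b.Name, "br-") }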
+type predicateConditional struct { + tableName string + predicate interface{} + cache *cache.TableCache +} + +// matches returns the result of the execution of the predicate +// Type verifications are not performed +// Returns the models that match the conditions +func (c *predicateConditional) Matches() (map[string]model.Model, error) { + tableCache := c.cache.Table(c.tableName) + if tableCache == nil { + return nil, ErrNotFound + } + found := map[string]model.Model{} + // run the predicate on a shallow copy of the models for speed and only + // clone the matches + for u, m := range tableCache.RowsShallow() { + ret := reflect.ValueOf(c.predicate).Call([]reflect.Value{reflect.ValueOf(m)}) + if ret[0].Bool() { + found[u] = model.Clone(m) + } + } + return found, nil +} + +func (c *predicateConditional) Table() string { + return c.tableName +} + +// generate returns a list of conditions that match, by _uuid equality, all the objects that +// match the predicate +func (c *predicateConditional) Generate() ([][]ovsdb.Condition, error) { + models, err := c.Matches() + if err != nil { + return nil, err + } + return generateConditionsFromModels(c.cache.DatabaseModel(), models) +} + +// newPredicateConditional creates a new predicateConditional +func newPredicateConditional(table string, cache *cache.TableCache, predicate interface{}) (Conditional, error) { + return &predicateConditional{ + tableName: table, + predicate: predicate, + cache: cache, + }, nil +} + +// errorConditional is a conditional that encapsulates an error +// It is used to delay the reporting of errors from conditional creation to API method call +type errorConditional struct { + err error +} + +func (e *errorConditional) Matches() (map[string]model.Model, error) { + return nil, e.err +} + +func (e *errorConditional) Table() string { + return "" +} + +func (e *errorConditional) Generate() ([][]ovsdb.Condition, error) { + return nil, e.err +} + +func newErrorConditional(err error) Conditional { + return &errorConditional{ + err: fmt.Errorf("conditionerror: %s", err.Error()), + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/config.go b/vendor/github.com/ovn-org/libovsdb/client/config.go new file mode 100644 index 000000000..a9c00f56a --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/config.go @@ -0,0 +1,27 @@ +/** + * Copyright (c) 2019 eBay Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + **/ + +package client + +import ( + "crypto/tls" +) + +// Config is a structure used in provisioning a connection to ovsdb. +type Config struct { + Addr string + TLSConfig *tls.Config +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/doc.go b/vendor/github.com/ovn-org/libovsdb/client/doc.go new file mode 100644 index 000000000..90e409ee7 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/doc.go @@ -0,0 +1,164 @@ +/* +Package client connects to, monitors and interacts with OVSDB servers (RFC7047). 
+
+This package uses structs that contain the 'ovsdb' field tag to determine which field goes to
+which column in the database. We refer to pointers to these structs as Models. Example:
+
+	type MyLogicalSwitch struct {
+		UUID   string            `ovsdb:"_uuid"` // _uuid tag is mandatory
+		Name   string            `ovsdb:"name"`
+		Ports  []string          `ovsdb:"ports"`
+		Config map[string]string `ovsdb:"other_config"`
+	}
+
+Based on these Models, a Database Model (see ClientDBModel type) is built to represent
+the entire OVSDB:
+
+	clientDBModel, _ := client.NewClientDBModel("OVN_Northbound",
+		map[string]client.Model{
+			"Logical_Switch": &MyLogicalSwitch{},
+		})
+
+The ClientDBModel represents the entire Database (or the part of it we're interested in).
+Using it, the libovsdb.client package is able to properly encode and decode OVSDB messages
+and store them in Model instances.
+A client instance is created by simply specifying the connection information and the database model:
+
+	ovs, _ := client.Connect(context.Background(), clientDBModel)
+
+Main API
+
+After creating an OvsdbClient using the Connect() function, we can use a number of CRUD-like
+functions to interact with the database:
+List(), Get(), Create(), Update(), Mutate(), Delete().
+
+The specific database table that the operation targets is automatically determined based on the type
+of the parameter.
+
+In terms of return values, some of these functions, like Create(), Update(), Mutate() and Delete(),
+interact with the database, so they return a list of ovsdb.Operation objects that can be grouped together
+and passed to client.Transact().
+
+Others, such as List() and Get(), interact with the client's internal cache and are able to
+return Model instances (or a list thereof) directly.
+
+Conditions
+
+Some API functions (Create() and Get()) can be run directly. Others require us to use
+a ConditionalAPI. The ConditionalAPI injects RFC7047 Conditions into ovsdb Operations and also
+uses the Conditions to search the internal cache.
+
+The ConditionalAPI is created using the Where(), WhereCache() and WhereAll() functions.
+
+Where() accepts a Model (pointer to a struct with ovsdb tags) and a number of Condition instances.
+Conditions must refer to fields of the provided Model (via pointers to fields). Example:
+
+	ls := &MyLogicalSwitch{}
+	ovs.Where(ls, client.Condition{
+		Field:    &ls.Ports,
+		Function: ovsdb.ConditionIncludes,
+		Value:    []string{"portUUID"},
+	})
+
+If no client.Condition is provided, the client will use any of the fields that correspond to indexes to
+generate an appropriate condition. Therefore the following two statements are equivalent:
+
+	ls := &MyLogicalSwitch{UUID: "myUUID"}
+
+	ovs.Where(ls)
+
+	ovs.Where(ls, client.Condition{
+		Field:    &ls.UUID,
+		Function: ovsdb.ConditionEqual,
+		Value:    "myUUID",
+	})
+
+Where() accepts multiple Condition instances (through variadic arguments).
+If provided, the client will generate multiple operations, each matching one condition.
+For example, the following operation will delete all the Logical Switches named "foo" OR "bar":
+
+	ops, err := ovs.Where(ls,
+		client.Condition{
+			Field:    &ls.Name,
+			Function: ovsdb.ConditionEqual,
+			Value:    "foo",
+		}, client.Condition{
+			Field:    &ls.Name,
+			Function: ovsdb.ConditionEqual,
+			Value:    "bar",
+		}).Delete()
+
+To create a Condition that matches all of the conditions simultaneously (i.e., AND semantics), use WhereAll(), as sketched below.
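+
+For example, the following (illustrative) call generates a single operation that
+deletes only the switches that are named "foo" AND include "portUUID":
+
+	ops, err := ovs.WhereAll(ls,
+		client.Condition{
+			Field:    &ls.Name,
+			Function: ovsdb.ConditionEqual,
+			Value:    "foo",
+		}, client.Condition{
+			Field:    &ls.Ports,
+			Function: ovsdb.ConditionIncludes,
+			Value:    []string{"portUUID"},
+		}).Delete()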
+
+Where() or WhereAll() evaluate the provided index values or explicit conditions against the cache and generate
+conditions based on the UUIDs of matching models. If no matches are found in the cache, the generated conditions
+will be based on the index or condition fields themselves.
+
+A more flexible mechanism to search the cache is available: WhereCache().
+
+WhereCache() accepts a function that takes any Model as argument and returns a boolean. It is used to
+search the cache, so it is commonly combined with the List() function. For example:
+
+	lsList := &[]LogicalSwitch{}
+	err := ovs.WhereCache(
+		func(ls *LogicalSwitch) bool {
+			return strings.HasPrefix(ls.Name, "ext_")
+		}).List(lsList)
+
+Server side operations can be executed using WhereCache() conditions, but it's not recommended. For each matching
+cache element, an operation will be created matching on the "_uuid" column. The number of operations can be
+quite large depending on the cache size and the provided function. Most likely there is a way to express the
+same condition using Where() or WhereAll(), which will be more efficient.
+
+Get
+
+Get() is a simple operation capable of retrieving one Model based on some of its schema indexes. E.g.:
+
+	ls := &LogicalSwitch{UUID: "myUUID"}
+	err := ovs.Get(ls)
+	fmt.Printf("Name of the switch is: %s", ls.Name)
+
+List
+
+List() searches the cache and populates a slice of Models. It can be used directly or together with WhereCache():
+
+	lsList := &[]LogicalSwitch{}
+	err := ovs.List(lsList) // List all elements
+
+	err := ovs.WhereCache(
+		func(ls *LogicalSwitch) bool {
+			return strings.HasPrefix(ls.Name, "ext_")
+		}).List(lsList)
+
+Create
+
+Create returns a list of operations to create the models provided. E.g.:
+
+	ops, err := ovs.Create(&LogicalSwitch{Name: "foo"}, &LogicalSwitch{Name: "bar"})
+
+Update
+
+Update returns a list of operations to update the matching rows to match the values of the provided model. E.g.:
+
+	ls := &LogicalSwitch{ExternalIDs: map[string]string{"foo": "bar"}}
+	ops, err := ovs.Where(...).Update(ls, &ls.ExternalIDs)
+
+Mutate
+
+Mutate returns a list of operations needed to mutate the matching rows as described by the list of Mutation objects. E.g.:
+
+	ls := &LogicalSwitch{}
+	ops, err := ovs.Where(...).Mutate(ls, client.Mutation{
+		Field:   &ls.Config,
+		Mutator: ovsdb.MutateOperationInsert,
+		Value:   map[string]string{"foo": "bar"},
+	})
+
+Delete
+
+Delete returns a list of operations needed to delete the matching rows.
E.g: + + ops, err := ovs.Where(...).Delete() + +*/ +package client diff --git a/vendor/github.com/ovn-org/libovsdb/client/metrics.go b/vendor/github.com/ovn-org/libovsdb/client/metrics.go new file mode 100644 index 000000000..8c4e5f6f2 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/metrics.go @@ -0,0 +1,88 @@ +package client + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" +) + +const libovsdbName = "libovsdb" + +type metrics struct { + numUpdates *prometheus.CounterVec + numTableUpdates *prometheus.CounterVec + numDisconnects prometheus.Counter + numMonitors prometheus.Gauge + registerOnce sync.Once +} + +func (m *metrics) init(modelName string, namespace, subsystem string) { + // labels that are the same across all metrics + constLabels := prometheus.Labels{"primary_model": modelName} + + if namespace == "" { + namespace = libovsdbName + subsystem = "" + } + + m.numUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "update_messages_total", + Help: "Count of libovsdb monitor update messages processed, partitioned by database", + ConstLabels: constLabels, + }, + []string{"database"}, + ) + + m.numTableUpdates = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "table_updates_total", + Help: "Count of libovsdb monitor update messages per table", + ConstLabels: constLabels, + }, + []string{"database", "table"}, + ) + + m.numDisconnects = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "disconnects_total", + Help: "Count of libovsdb disconnects encountered", + ConstLabels: constLabels, + }, + ) + + m.numMonitors = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "monitors", + Help: "Number of running libovsdb ovsdb monitors", + ConstLabels: constLabels, + }, + ) +} + +func (m *metrics) register(r prometheus.Registerer) { + m.registerOnce.Do(func() { + r.MustRegister( + m.numUpdates, + m.numTableUpdates, + m.numDisconnects, + m.numMonitors, + ) + }) +} + +func (o *ovsdbClient) registerMetrics() { + if !o.options.shouldRegisterMetrics || o.options.registry == nil { + return + } + o.metrics.register(o.options.registry) + o.options.shouldRegisterMetrics = false +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/monitor.go b/vendor/github.com/ovn-org/libovsdb/client/monitor.go new file mode 100644 index 000000000..4a0270a87 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/monitor.go @@ -0,0 +1,136 @@ +package client + +import ( + "fmt" + "reflect" + + "github.com/google/uuid" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +const emptyUUID = "00000000-0000-0000-0000-000000000000" + +// Monitor represents a monitor +type Monitor struct { + Method string + Tables []TableMonitor + Errors []error + LastTransactionID string +} + +// newMonitor creates a new *Monitor with default values +func newMonitor() *Monitor { + return &Monitor{ + Method: ovsdb.ConditionalMonitorSinceRPC, + Errors: make([]error, 0), + LastTransactionID: emptyUUID, + } +} + +// NewMonitor creates a new Monitor with the provided options +func (o *ovsdbClient) NewMonitor(opts ...MonitorOption) *Monitor { + m := newMonitor() + for _, opt := range opts { + err := opt(o, m) + if err != nil { + m.Errors = append(m.Errors, err) + } + } + return m +} + +// MonitorOption adds Tables to a Monitor +type MonitorOption 
func(o *ovsdbClient, m *Monitor) error + +// MonitorCookie is the struct we pass to correlate from updates back to their +// originating Monitor request. +type MonitorCookie struct { + DatabaseName string `json:"databaseName"` + ID string `json:"id"` +} + +func newMonitorCookie(dbName string) MonitorCookie { + return MonitorCookie{ + DatabaseName: dbName, + ID: uuid.NewString(), + } +} + +// TableMonitor is a table to be monitored +type TableMonitor struct { + // Table is the table to be monitored + Table string + // Conditions are the conditions under which the table should be monitored + Conditions []ovsdb.Condition + // Fields are the fields in the model to monitor + // If none are supplied, all fields will be used + Fields []string +} + +func newTableMonitor(o *ovsdbClient, m model.Model, conditions []model.Condition, fields []interface{}) (*TableMonitor, error) { + dbModel := o.primaryDB().model + tableName := dbModel.FindTable(reflect.TypeOf(m)) + if tableName == "" { + return nil, fmt.Errorf("object of type %s is not part of the ClientDBModel", reflect.TypeOf(m)) + } + + var columns []string + var ovsdbConds []ovsdb.Condition + + if len(fields) == 0 && len(conditions) == 0 { + return &TableMonitor{ + Table: tableName, + Conditions: ovsdbConds, + Fields: columns, + }, nil + } + + data, err := dbModel.NewModelInfo(m) + if err != nil { + return nil, fmt.Errorf("unable to obtain info from model %v: %v", m, err) + } + for _, f := range fields { + column, err := data.ColumnByPtr(f) + if err != nil { + return nil, fmt.Errorf("unable to obtain column from model %v: %v", data, err) + } + columns = append(columns, column) + } + db := o.databases[o.primaryDBName] + mmapper := db.model.Mapper + for _, modelCond := range conditions { + ovsdbCond, err := mmapper.NewCondition(data, modelCond.Field, modelCond.Function, modelCond.Value) + if err != nil { + return nil, fmt.Errorf("unable to convert condition %v: %v", modelCond, err) + } + ovsdbConds = append(ovsdbConds, *ovsdbCond) + } + return &TableMonitor{ + Table: tableName, + Conditions: ovsdbConds, + Fields: columns, + }, nil +} + +func WithTable(m model.Model, fields ...interface{}) MonitorOption { + return func(o *ovsdbClient, monitor *Monitor) error { + tableMonitor, err := newTableMonitor(o, m, []model.Condition{}, fields) + if err != nil { + return err + } + monitor.Tables = append(monitor.Tables, *tableMonitor) + return nil + } +} + +func WithConditionalTable(m model.Model, conditions []model.Condition, fields ...interface{}) MonitorOption { + return func(o *ovsdbClient, monitor *Monitor) error { + tableMonitor, err := newTableMonitor(o, m, conditions, fields) + if err != nil { + return err + } + monitor.Tables = append(monitor.Tables, *tableMonitor) + return nil + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/client/options.go b/vendor/github.com/ovn-org/libovsdb/client/options.go new file mode 100644 index 000000000..81ccffe20 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/client/options.go @@ -0,0 +1,164 @@ +package client + +import ( + "crypto/tls" + "net/url" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/go-logr/logr" + "github.com/prometheus/client_golang/prometheus" +) + +const ( + defaultTCPEndpoint = "tcp:127.0.0.1:6640" + defaultSSLEndpoint = "ssl:127.0.0.1:6640" + defaultUnixEndpoint = "unix:/var/run/openvswitch/ovsdb.sock" +) + +type options struct { + endpoints []string + tlsConfig *tls.Config + reconnect bool + leaderOnly bool + timeout time.Duration + backoff backoff.BackOff + logger 
*logr.Logger + registry prometheus.Registerer + shouldRegisterMetrics bool // in case metrics are changed after-the-fact + metricNamespace string // prometheus metric namespace + metricSubsystem string // prometheus metric subsystem + inactivityTimeout time.Duration +} + +type Option func(o *options) error + +func newOptions(opts ...Option) (*options, error) { + o := &options{} + for _, opt := range opts { + if err := opt(o); err != nil { + return nil, err + } + } + // if no endpoints are supplied, use the default unix socket + if len(o.endpoints) == 0 { + o.endpoints = []string{defaultUnixEndpoint} + } + return o, nil +} + +// WithTLSConfig sets the tls.Config for use by the client +func WithTLSConfig(cfg *tls.Config) Option { + return func(o *options) error { + o.tlsConfig = cfg + return nil + } +} + +// WithEndpoint sets the endpoint to be used by the client +// It can be used multiple times, and the first endpoint that +// successfully connects will be used. +// Endpoints are specified in OVSDB Connection Format +// For more details, see the ovsdb(7) man page +func WithEndpoint(endpoint string) Option { + return func(o *options) error { + ep, err := url.Parse(endpoint) + if err != nil { + return err + } + switch ep.Scheme { + case UNIX: + if len(ep.Path) == 0 { + o.endpoints = append(o.endpoints, defaultUnixEndpoint) + return nil + } + case TCP: + if len(ep.Opaque) == 0 { + o.endpoints = append(o.endpoints, defaultTCPEndpoint) + return nil + } + case SSL: + if len(ep.Opaque) == 0 { + o.endpoints = append(o.endpoints, defaultSSLEndpoint) + return nil + } + } + o.endpoints = append(o.endpoints, endpoint) + return nil + } +} + +// WithLeaderOnly tells the client to treat endpoints that are clustered +// and not the leader as down. +func WithLeaderOnly(leaderOnly bool) Option { + return func(o *options) error { + o.leaderOnly = leaderOnly + return nil + } +} + +// WithReconnect tells the client to automatically reconnect when +// disconnected. The timeout is used to construct the context on +// each call to Connect, while backoff dictates the backoff +// algorithm to use. Using WithReconnect implies that +// requested transactions will block until the client has fully reconnected, +// rather than immediately returning an error if there is no connection. +func WithReconnect(timeout time.Duration, backoff backoff.BackOff) Option { + return func(o *options) error { + o.reconnect = true + o.timeout = timeout + o.backoff = backoff + return nil + } +} + +// WithInactivityCheck tells the client to send Echo request to ovsdb server periodically +// upon inactivityTimeout. When Echo request fails, then it attempts to reconnect +// with server. The inactivity check is performed as long as the connection is established. +// The reconnectTimeout argument is used to construct the context on each call to Connect, +// while reconnectBackoff dictates the backoff algorithm to use. +func WithInactivityCheck(inactivityTimeout, reconnectTimeout time.Duration, + reconnectBackoff backoff.BackOff) Option { + return func(o *options) error { + o.reconnect = true + o.timeout = reconnectTimeout + o.backoff = reconnectBackoff + o.inactivityTimeout = inactivityTimeout + return nil + } +} + +// WithLogger allows setting a specific log sink. Otherwise, the default +// go log package is used. +func WithLogger(l *logr.Logger) Option { + return func(o *options) error { + o.logger = l + return nil + } +} + +// WithMetricsRegistry allows the user to specify a Prometheus metrics registry. 
+// If supplied, the metrics as defined in metrics.go will be registered. +func WithMetricsRegistry(r prometheus.Registerer) Option { + return func(o *options) error { + o.registry = r + o.shouldRegisterMetrics = (r != nil) + return nil + } +} + +// WithMetricsRegistryNamespaceSubsystem allows the user to specify a Prometheus metrics registry +// and Prometheus metric namespace and subsystem of the component utilizing libovsdb. +// If supplied, the metrics as defined in metrics.go will be registered. +func WithMetricsRegistryNamespaceSubsystem(r prometheus.Registerer, namespace, subsystem string) Option { + if namespace == "" || subsystem == "" { + panic("libovsdb function WithMetricsRegistryNamespaceSubsystem arguments 'namespace' and 'subsystem' must not be empty") + } + return func(o *options) error { + o.registry = r + o.shouldRegisterMetrics = (r != nil) + o.metricNamespace = namespace + o.metricSubsystem = subsystem + return nil + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/database/database.go b/vendor/github.com/ovn-org/libovsdb/database/database.go new file mode 100644 index 000000000..12f1222f1 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/database/database.go @@ -0,0 +1,33 @@ +package database + +import ( + "github.com/google/uuid" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// Database abstracts a database that a server can use to store and transact data +type Database interface { + CreateDatabase(database string, model ovsdb.DatabaseSchema) error + Exists(database string) bool + NewTransaction(database string) Transaction + Commit(database string, id uuid.UUID, update Update) error + CheckIndexes(database string, table string, m model.Model) error + List(database, table string, conditions ...ovsdb.Condition) (map[string]model.Model, error) + Get(database, table string, uuid string) (model.Model, error) + GetReferences(database, table, row string) (References, error) +} + +// Transaction abstracts a database transaction that can generate database +// updates +type Transaction interface { + Transact(operations ...ovsdb.Operation) ([]*ovsdb.OperationResult, Update) +} + +// Update abstracts an update that can be committed to a database +type Update interface { + GetUpdatedTables() []string + ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error + ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error + ForReferenceUpdates(do func(references References) error) error +} diff --git a/vendor/github.com/ovn-org/libovsdb/database/doc.go b/vendor/github.com/ovn-org/libovsdb/database/doc.go new file mode 100644 index 000000000..c0a858c20 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/database/doc.go @@ -0,0 +1,5 @@ +/* +Package database collects database related types, interfaces and +implementations. +*/ +package database diff --git a/vendor/github.com/ovn-org/libovsdb/database/references.go b/vendor/github.com/ovn-org/libovsdb/database/references.go new file mode 100644 index 000000000..d8181a7a5 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/database/references.go @@ -0,0 +1,71 @@ +package database + +// References tracks the references to rows from other rows at specific +// locations in the schema. +type References map[ReferenceSpec]Reference + +// ReferenceSpec specifies details about where in the schema a reference occurs. 
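+//
+// Illustrative example (hypothetical OVN-style schema): references from the
+// "ports" set of Logical_Switch rows to Logical_Switch_Port rows would be
+// tracked under
+//
+//	ReferenceSpec{ToTable: "Logical_Switch_Port", FromTable: "Logical_Switch", FromColumn: "ports"}
+//
+// with the Reference value mapping each referenced port UUID to the UUIDs of
+// the switches that reference it.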
+type ReferenceSpec struct {
+	// ToTable is the table of the row to which the reference is made
+	ToTable string
+
+	// FromTable is the table of the row from which the reference is made
+	FromTable string
+
+	// FromColumn is the column of the row from which the reference is made
+	FromColumn string
+
+	// FromValue flags if the reference is made on a map key or map value when
+	// the column is a map
+	FromValue bool
+}
+
+// Reference maps the UUIDs of rows to which the reference is made to the
+// rows it is made from
+type Reference map[string][]string
+
+// GetReferences gets references to a row
+func (rs References) GetReferences(table, uuid string) References {
+	refs := References{}
+	for spec, values := range rs {
+		if spec.ToTable != table {
+			continue
+		}
+		if _, ok := values[uuid]; ok {
+			refs[spec] = Reference{uuid: values[uuid]}
+		}
+	}
+	return refs
+}
+
+// UpdateReferences updates the references with the provided ones. Dangling
+// references, that is, the references of rows that are no longer referenced
+// from anywhere, are cleaned up.
+func (rs References) UpdateReferences(other References) {
+	for spec, otherRefs := range other {
+		for to, from := range otherRefs {
+			rs.updateReference(spec, to, from)
+		}
+	}
+}
+
+// updateReference updates the references to a row at a specific location in the
+// schema
+func (rs References) updateReference(spec ReferenceSpec, to string, from []string) {
+	thisRefs, ok := rs[spec]
+	if !ok && len(from) > 0 {
+		// add references from a previously untracked location
+		rs[spec] = Reference{to: from}
+		return
+	}
+	if len(from) > 0 {
+		// replace references to this row at this specific location
+		thisRefs[to] = from
+		return
+	}
+	// otherwise remove previously tracked references
+	delete(thisRefs, to)
+	if len(thisRefs) == 0 {
+		delete(rs, spec)
+	}
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/info.go b/vendor/github.com/ovn-org/libovsdb/mapper/info.go
new file mode 100644
index 000000000..8ac436c79
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/mapper/info.go
@@ -0,0 +1,179 @@
+package mapper
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// ErrColumnNotFound is an error that can occur when the column does not exist for a table
+type ErrColumnNotFound struct {
+	column string
+	table  string
+}
+
+// Error implements the error interface
+func (e *ErrColumnNotFound) Error() string {
+	return fmt.Sprintf("column: %s not found in table: %s", e.column, e.table)
+}
+
+func NewErrColumnNotFound(column, table string) *ErrColumnNotFound {
+	return &ErrColumnNotFound{
+		column: column,
+		table:  table,
+	}
+}
+
+// Info is a struct that wraps an object with its metadata
+type Info struct {
+	// FieldName indexed by column
+	Obj      interface{}
+	Metadata Metadata
+}
+
+// Metadata represents the information needed to know how to map OVSDB columns into an object's fields
+type Metadata struct {
+	Fields      map[string]string  // Map of ColumnName -> FieldName
+	TableSchema *ovsdb.TableSchema // TableSchema associated
+	TableName   string             // Table name
+}
+
+// FieldByColumn returns the field value that corresponds to a column
+func (i *Info) FieldByColumn(column string) (interface{}, error) {
+	fieldName, ok := i.Metadata.Fields[column]
+	if !ok {
+		return nil, NewErrColumnNotFound(column, i.Metadata.TableName)
+	}
+	return reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName).Interface(), nil
+}
+
+// hasColumn returns whether the object has a field mapped to the given column
+func (i *Info) hasColumn(column string) bool {
+	_, ok := i.Metadata.Fields[column]
+	return ok
+}
+
+// SetField sets the field in the column to the specified value
+func (i *Info) SetField(column string, value interface{}) error {
+	fieldName, ok := i.Metadata.Fields[column]
+	if !ok {
+		return fmt.Errorf("SetField: column %s not found in orm info", column)
+	}
+	fieldValue := reflect.ValueOf(i.Obj).Elem().FieldByName(fieldName)
+
+	if !fieldValue.Type().AssignableTo(reflect.TypeOf(value)) {
+		return fmt.Errorf("column %s: native value %v (%s) is not assignable to field %s (%s)",
+			column, value, reflect.TypeOf(value), fieldName, fieldValue.Type())
+	}
+	fieldValue.Set(reflect.ValueOf(value))
+	return nil
+}
+
+// ColumnByPtr returns the column name that corresponds to the field by the field's pointer
+func (i *Info) ColumnByPtr(fieldPtr interface{}) (string, error) {
+	fieldPtrVal := reflect.ValueOf(fieldPtr)
+	if fieldPtrVal.Kind() != reflect.Ptr {
+		return "", ovsdb.NewErrWrongType("ColumnByPointer", "pointer to a field in the struct", fieldPtr)
+	}
+	offset := fieldPtrVal.Pointer() - reflect.ValueOf(i.Obj).Pointer()
+	objType := reflect.TypeOf(i.Obj).Elem()
+	for j := 0; j < objType.NumField(); j++ {
+		if objType.Field(j).Offset == offset {
+			column := objType.Field(j).Tag.Get("ovsdb")
+			if _, ok := i.Metadata.Fields[column]; !ok {
+				return "", fmt.Errorf("field does not have orm column information")
+			}
+			return column, nil
+		}
+	}
+	return "", fmt.Errorf("field pointer does not correspond to orm struct")
+}
+
+// getValidIndexes inspects the object and returns a list of indexes (sets of columns) for which
+// the object has non-default values
+func (i *Info) getValidIndexes() ([][]string, error) {
+	var validIndexes [][]string
+	var possibleIndexes [][]string
+
+	possibleIndexes = append(possibleIndexes, []string{"_uuid"})
+	possibleIndexes = append(possibleIndexes, i.Metadata.TableSchema.Indexes...)
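To make the selection step in the loop that follows concrete, here is a sketch with a hypothetical model (the table name, its schema index, and all field names are invented for illustration):

```go
// Hypothetical table "Bridge" whose schema declares the index [["name"]].
type Bridge struct {
	UUID string `ovsdb:"_uuid"`
	Name string `ovsdb:"name"`
}

// possibleIndexes is then [["_uuid"], ["name"]].
// For &Bridge{Name: "br-int"}, the "_uuid" field holds its default value (""),
// so the loop below rejects ["_uuid"] and returns only ["name"]. With UUID set
// and Name empty, only ["_uuid"] would survive instead.
```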
+
+	// Iterate through indexes and validate them
+OUTER:
+	for _, idx := range possibleIndexes {
+		for _, col := range idx {
+			if !i.hasColumn(col) {
+				continue OUTER
+			}
+			columnSchema := i.Metadata.TableSchema.Column(col)
+			if columnSchema == nil {
+				continue OUTER
+			}
+			field, err := i.FieldByColumn(col)
+			if err != nil {
+				return nil, err
+			}
+			if !reflect.ValueOf(field).IsValid() || ovsdb.IsDefaultValue(columnSchema, field) {
+				continue OUTER
+			}
+		}
+		validIndexes = append(validIndexes, idx)
+	}
+	return validIndexes, nil
+}
+
+// NewInfo creates an Info structure around an object based on a given table schema
+func NewInfo(tableName string, table *ovsdb.TableSchema, obj interface{}) (*Info, error) {
+	objPtrVal := reflect.ValueOf(obj)
+	if objPtrVal.Type().Kind() != reflect.Ptr {
+		return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj)
+	}
+	objVal := reflect.Indirect(objPtrVal)
+	if objVal.Kind() != reflect.Struct {
+		return nil, ovsdb.NewErrWrongType("NewMapperInfo", "pointer to a struct", obj)
+	}
+	objType := objVal.Type()
+
+	fields := make(map[string]string, objType.NumField())
+	for i := 0; i < objType.NumField(); i++ {
+		field := objType.Field(i)
+		colName := field.Tag.Get("ovsdb")
+		if colName == "" {
+			// Untagged fields are ignored
+			continue
+		}
+		column := table.Column(colName)
+		if column == nil {
+			return nil, &ErrMapper{
+				objType:   objType.String(),
+				field:     field.Name,
+				fieldType: field.Type.String(),
+				fieldTag:  colName,
+				reason:    "Column does not exist in schema",
+			}
+		}
+
+		// Perform schema-based type checking
+		expType := ovsdb.NativeType(column)
+		if expType != field.Type {
+			return nil, &ErrMapper{
+				objType:   objType.String(),
+				field:     field.Name,
+				fieldType: field.Type.String(),
+				fieldTag:  colName,
+				reason:    fmt.Sprintf("Wrong type, column expects %s", expType),
+			}
+		}
+		fields[colName] = field.Name
+	}
+
+	return &Info{
+		Obj: obj,
+		Metadata: Metadata{
+			Fields:      fields,
+			TableSchema: table,
+			TableName:   tableName,
+		},
+	}, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go
new file mode 100644
index 000000000..5ca7a412b
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go
@@ -0,0 +1,317 @@
+package mapper
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// Mapper offers functions to interact with libovsdb through user-provided native structs.
+// The way to specify what field of the struct goes
+// to what column in the database is through a field tag.
+// The tag used is "ovsdb" and has the following structure
+// 'ovsdb:"${COLUMN_NAME}"'
+// where COLUMN_NAME is the name of the column and must match the schema
+//
+// Example:
+//	type MyObj struct {
+//		Name string `ovsdb:"name"`
+//	}
+type Mapper struct {
+	Schema ovsdb.DatabaseSchema
+}
+
+// ErrMapper describes an error in a Mapper type
+type ErrMapper struct {
+	objType   string
+	field     string
+	fieldType string
+	fieldTag  string
+	reason    string
+}
+
+func (e *ErrMapper) Error() string {
+	return fmt.Sprintf("Mapper Error.
Object type %s contains field %s (%s) ovs tag %s: %s", + e.objType, e.field, e.fieldType, e.fieldTag, e.reason) +} + +// NewMapper returns a new mapper +func NewMapper(schema ovsdb.DatabaseSchema) Mapper { + return Mapper{ + Schema: schema, + } +} + +// GetRowData transforms a Row to a struct based on its tags +// The result object must be given as pointer to an object with the right tags +func (m Mapper) GetRowData(row *ovsdb.Row, result *Info) error { + if row == nil { + return nil + } + return m.getData(*row, result) +} + +// getData transforms a map[string]interface{} containing OvS types (e.g: a ResultRow +// has this format) to orm struct +// The result object must be given as pointer to an object with the right tags +func (m Mapper) getData(ovsData ovsdb.Row, result *Info) error { + for name, column := range result.Metadata.TableSchema.Columns { + if !result.hasColumn(name) { + // If provided struct does not have a field to hold this value, skip it + continue + } + + ovsElem, ok := ovsData[name] + if !ok { + // Ignore missing columns + continue + } + + nativeElem, err := ovsdb.OvsToNative(column, ovsElem) + if err != nil { + return fmt.Errorf("table %s, column %s: failed to extract native element: %s", + result.Metadata.TableName, name, err.Error()) + } + + if err := result.SetField(name, nativeElem); err != nil { + return err + } + } + return nil +} + +// NewRow transforms an orm struct to a map[string] interface{} that can be used as libovsdb.Row +// By default, default or null values are skipped. This behavior can be modified by specifying +// a list of fields (pointers to fields in the struct) to be added to the row +func (m Mapper) NewRow(data *Info, fields ...interface{}) (ovsdb.Row, error) { + columns := make(map[string]*ovsdb.ColumnSchema) + for k, v := range data.Metadata.TableSchema.Columns { + columns[k] = v + } + columns["_uuid"] = &ovsdb.UUIDColumn + ovsRow := make(map[string]interface{}, len(columns)) + for name, column := range columns { + nativeElem, err := data.FieldByColumn(name) + if err != nil { + // If provided struct does not have a field to hold this value, skip it + continue + } + + // add specific fields + if len(fields) > 0 { + found := false + for _, f := range fields { + col, err := data.ColumnByPtr(f) + if err != nil { + return nil, err + } + if col == name { + found = true + break + } + } + if !found { + continue + } + } + if len(fields) == 0 && ovsdb.IsDefaultValue(column, nativeElem) { + continue + } + ovsElem, err := ovsdb.NativeToOvs(column, nativeElem) + if err != nil { + return nil, fmt.Errorf("table %s, column %s: failed to generate ovs element. %s", data.Metadata.TableName, name, err.Error()) + } + ovsRow[name] = ovsElem + } + return ovsRow, nil +} + +// NewEqualityCondition returns a list of equality conditions that match a given object +// A list of valid columns that shall be used as a index can be provided. +// If none are provided, we will try to use object's field that matches the '_uuid' ovsdb tag +// If it does not exist or is null (""), then we will traverse all of the table indexes and +// use the first index (list of simultaneously unique columns) for which the provided mapper +// object has valid data. The order in which they are traversed matches the order defined +// in the schema. +// By `valid data` we mean non-default data. +func (m Mapper) NewEqualityCondition(data *Info, fields ...interface{}) ([]ovsdb.Condition, error) { + var conditions []ovsdb.Condition + var condIndex [][]string + + // If index is provided, use it. 
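As a usage sketch for this method, reusing the hypothetical Bridge model from the mapper/info.go notes (the `schema` value is assumed to be a parsed `ovsdb.DatabaseSchema`; all other names are invented):

```go
func bridgeConditions(m Mapper, schema ovsdb.DatabaseSchema) ([]ovsdb.Condition, error) {
	bridge := &Bridge{Name: "br-int"}
	info, err := NewInfo("Bridge", schema.Table("Bridge"), bridge)
	if err != nil {
		return nil, err
	}
	// No field pointers supplied: the fallback described here picks the first
	// valid index, ["name"], because _uuid holds its default value.
	// Result: where column name == br-int
	return m.NewEqualityCondition(info)
}
```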
If not, obtain the valid indexes from the mapper info + if len(fields) > 0 { + providedIndex := []string{} + for i := range fields { + if col, err := data.ColumnByPtr(fields[i]); err == nil { + providedIndex = append(providedIndex, col) + } else { + return nil, err + } + } + condIndex = append(condIndex, providedIndex) + } else { + var err error + condIndex, err = data.getValidIndexes() + if err != nil { + return nil, err + } + } + + if len(condIndex) == 0 { + return nil, fmt.Errorf("failed to find a valid index") + } + + // Pick the first valid index + for _, col := range condIndex[0] { + field, err := data.FieldByColumn(col) + if err != nil { + return nil, err + } + + column := data.Metadata.TableSchema.Column(col) + if column == nil { + return nil, fmt.Errorf("column %s not found", col) + } + ovsVal, err := ovsdb.NativeToOvs(column, field) + if err != nil { + return nil, err + } + conditions = append(conditions, ovsdb.NewCondition(col, ovsdb.ConditionEqual, ovsVal)) + } + return conditions, nil +} + +// EqualFields compares two mapped objects. +// The indexes to use for comparison are, the _uuid, the table indexes and the columns that correspond +// to the mapped fields pointed to by 'fields'. They must be pointers to fields on the first mapped element (i.e: one) +func (m Mapper) EqualFields(one, other *Info, fields ...interface{}) (bool, error) { + indexes := []string{} + for _, f := range fields { + col, err := one.ColumnByPtr(f) + if err != nil { + return false, err + } + indexes = append(indexes, col) + } + return m.equalIndexes(one, other, indexes...) +} + +// NewCondition returns a ovsdb.Condition based on the model +func (m Mapper) NewCondition(data *Info, field interface{}, function ovsdb.ConditionFunction, value interface{}) (*ovsdb.Condition, error) { + column, err := data.ColumnByPtr(field) + if err != nil { + return nil, err + } + + // Check that the condition is valid + columnSchema := data.Metadata.TableSchema.Column(column) + if columnSchema == nil { + return nil, fmt.Errorf("column %s not found", column) + } + if err := ovsdb.ValidateCondition(columnSchema, function, value); err != nil { + return nil, err + } + + ovsValue, err := ovsdb.NativeToOvs(columnSchema, value) + if err != nil { + return nil, err + } + + ovsdbCondition := ovsdb.NewCondition(column, function, ovsValue) + + return &ovsdbCondition, nil + +} + +// NewMutation creates a RFC7047 mutation object based on an ORM object and the mutation fields (in native format) +// It takes care of field validation against the column type +func (m Mapper) NewMutation(data *Info, column string, mutator ovsdb.Mutator, value interface{}) (*ovsdb.Mutation, error) { + // Check the column exists in the object + if !data.hasColumn(column) { + return nil, fmt.Errorf("mutation contains column %s that does not exist in object %v", column, data) + } + // Check that the mutation is valid + columnSchema := data.Metadata.TableSchema.Column(column) + if columnSchema == nil { + return nil, fmt.Errorf("column %s not found", column) + } + if err := ovsdb.ValidateMutation(columnSchema, mutator, value); err != nil { + return nil, err + } + + var ovsValue interface{} + var err error + // Usually a mutation value is of the same type of the value being mutated + // except for delete mutation of maps where it can also be a list of same type of + // keys (rfc7047 5.1). Handle this special case here. 
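Before the special case is handled below, a sketch of the call it exists for: deleting map keys by key only. Here `info` is assumed to wrap a model whose `external_ids` column is a `map[string]string`, and the helper name is invented:

```go
func deleteExternalIDs(m Mapper, info *Info) (*ovsdb.Mutation, error) {
	// The value is a []string of keys rather than a map: ValidateMutation
	// accepts a set of keys for "delete" mutations on maps, and the branch
	// below wraps it in an OvsSet for the wire.
	return m.NewMutation(info, "external_ids", ovsdb.MutateOperationDelete, []string{"k1", "k2"})
}
```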
+ if mutator == "delete" && columnSchema.Type == ovsdb.TypeMap && reflect.TypeOf(value).Kind() != reflect.Map { + // It's OK to cast the value to a list of elements because validation has passed + ovsSet, err := ovsdb.NewOvsSet(value) + if err != nil { + return nil, err + } + ovsValue = ovsSet + } else { + ovsValue, err = ovsdb.NativeToOvs(columnSchema, value) + if err != nil { + return nil, err + } + } + + return &ovsdb.Mutation{Column: column, Mutator: mutator, Value: ovsValue}, nil +} + +// equalIndexes returns whether both models are equal from the DB point of view +// Two objects are considered equal if any of the following conditions is true +// They have a field tagged with column name '_uuid' and their values match +// For any of the indexes defined in the Table Schema, the values all of its columns are simultaneously equal +// (as per RFC7047) +// The values of all of the optional indexes passed as variadic parameter to this function are equal. +func (m Mapper) equalIndexes(one, other *Info, indexes ...string) (bool, error) { + match := false + + oneIndexes, err := one.getValidIndexes() + if err != nil { + return false, err + } + + otherIndexes, err := other.getValidIndexes() + if err != nil { + return false, err + } + + oneIndexes = append(oneIndexes, indexes) + otherIndexes = append(otherIndexes, indexes) + + for _, lidx := range oneIndexes { + for _, ridx := range otherIndexes { + if reflect.DeepEqual(ridx, lidx) { + // All columns in an index must be simultaneously equal + for _, col := range lidx { + if !one.hasColumn(col) || !other.hasColumn(col) { + break + } + lfield, err := one.FieldByColumn(col) + if err != nil { + return false, err + } + rfield, err := other.FieldByColumn(col) + if err != nil { + return false, err + } + if reflect.DeepEqual(lfield, rfield) { + match = true + } else { + match = false + break + } + } + if match { + return true, nil + } + } + } + } + return false, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/model/client.go b/vendor/github.com/ovn-org/libovsdb/model/client.go new file mode 100644 index 000000000..5eb686244 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/model/client.go @@ -0,0 +1,171 @@ +package model + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// ColumnKey addresses a column and optionally a key within a column +type ColumnKey struct { + Column string + Key interface{} +} + +// ClientIndex defines a client index by a set of columns +type ClientIndex struct { + Columns []ColumnKey +} + +// ClientDBModel contains the client information needed to build a DatabaseModel +type ClientDBModel struct { + name string + types map[string]reflect.Type + indexes map[string][]ClientIndex +} + +// NewModel returns a new instance of a model from a specific string +func (db ClientDBModel) newModel(table string) (Model, error) { + mtype, ok := db.types[table] + if !ok { + return nil, fmt.Errorf("table %s not found in database model", string(table)) + } + model := reflect.New(mtype.Elem()) + return model.Interface().(Model), nil +} + +// Name returns the database name +func (db ClientDBModel) Name() string { + return db.name +} + +// Indexes returns the client indexes for a model +func (db ClientDBModel) Indexes(table string) []ClientIndex { + if len(db.indexes) == 0 { + return nil + } + if _, ok := db.indexes[table]; ok { + return copyIndexes(db.indexes)[table] + } + return nil +} + +// SetIndexes sets the client indexes. 
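A sketch of what a caller passes to SetIndexes, indexing a hypothetical Bridge table by the "owner" key of its external_ids map column (the Bridge model with an ExternalIDs map field is assumed, not shown here):

```go
dbModel, _ := NewClientDBModel("Open_vSwitch", map[string]Model{"Bridge": &Bridge{}})
dbModel.SetIndexes(map[string][]ClientIndex{
	"Bridge": {
		// ColumnKey.Key addresses a single map key within the column.
		{Columns: []ColumnKey{{Column: "external_ids", Key: "owner"}}},
	},
})
```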
Client indexes are optional, similar to +// schema indexes and are only tracked in the specific client instances that are +// provided with this client model. A client index may point to multiple models +// as uniqueness is not enforced. They are defined per table and multiple +// indexes can be defined for a table. Each index consists of a set of columns. +// If the column is a map, specific keys of that map can be addressed for the +// index. +func (db *ClientDBModel) SetIndexes(indexes map[string][]ClientIndex) { + db.indexes = copyIndexes(indexes) +} + +// Validate validates the DatabaseModel against the input schema +// Returns all the errors detected +func (db ClientDBModel) validate(schema ovsdb.DatabaseSchema) []error { + var errors []error + if db.name != schema.Name { + errors = append(errors, fmt.Errorf("database model name (%s) does not match schema (%s)", + db.name, schema.Name)) + } + + infos := make(map[string]*mapper.Info, len(db.types)) + for tableName := range db.types { + tableSchema := schema.Table(tableName) + if tableSchema == nil { + errors = append(errors, fmt.Errorf("database model contains a model for table %s that does not exist in schema", tableName)) + continue + } + model, err := db.newModel(tableName) + if err != nil { + errors = append(errors, err) + continue + } + info, err := mapper.NewInfo(tableName, tableSchema, model) + if err != nil { + errors = append(errors, err) + continue + } + infos[tableName] = info + } + + for tableName, indexSets := range db.indexes { + info, ok := infos[tableName] + if !ok { + errors = append(errors, fmt.Errorf("database model contains a client index for table %s that does not exist in schema", tableName)) + continue + } + for _, indexSet := range indexSets { + for _, indexColumn := range indexSet.Columns { + f, err := info.FieldByColumn(indexColumn.Column) + if err != nil { + errors = append( + errors, + fmt.Errorf("database model contains a client index for column %s that does not exist in table %s", + indexColumn.Column, + tableName)) + continue + } + if indexColumn.Key != nil && reflect.ValueOf(f).Kind() != reflect.Map { + errors = append( + errors, + fmt.Errorf("database model contains a client index for key %s in column %s of table %s that is not a map", + indexColumn.Key, + indexColumn.Column, + tableName)) + continue + } + } + } + } + return errors +} + +// NewClientDBModel constructs a ClientDBModel based on a database name and dictionary of models indexed by table name +func NewClientDBModel(name string, models map[string]Model) (ClientDBModel, error) { + types := make(map[string]reflect.Type, len(models)) + for table, model := range models { + modelType := reflect.TypeOf(model) + if modelType.Kind() != reflect.Ptr || modelType.Elem().Kind() != reflect.Struct { + return ClientDBModel{}, fmt.Errorf("model is expected to be a pointer to struct") + } + hasUUID := false + for i := 0; i < modelType.Elem().NumField(); i++ { + if field := modelType.Elem().Field(i); field.Tag.Get("ovsdb") == "_uuid" && + field.Type.Kind() == reflect.String { + hasUUID = true + break + } + } + if !hasUUID { + return ClientDBModel{}, fmt.Errorf("model is expected to have a string field called uuid") + } + + types[table] = modelType + } + return ClientDBModel{ + types: types, + name: name, + }, nil +} + +func copyIndexes(src map[string][]ClientIndex) map[string][]ClientIndex { + if len(src) == 0 { + return nil + } + dst := make(map[string][]ClientIndex, len(src)) + for table, indexSets := range src { + dst[table] = make([]ClientIndex, 0, 
len(indexSets))
+		for _, indexSet := range indexSets {
+			indexSetCopy := ClientIndex{
+				Columns: make([]ColumnKey, len(indexSet.Columns)),
+			}
+			copy(indexSetCopy.Columns, indexSet.Columns)
+			dst[table] = append(dst[table], indexSetCopy)
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/database.go b/vendor/github.com/ovn-org/libovsdb/model/database.go
new file mode 100644
index 000000000..0857d903f
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/database.go
@@ -0,0 +1,118 @@
+package model
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/ovn-org/libovsdb/mapper"
+	"github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// A DatabaseModel represents libovsdb's metadata about the database.
+// It's the result of combining the client's ClientDBModel and the server's Schema
+type DatabaseModel struct {
+	client   ClientDBModel
+	Schema   ovsdb.DatabaseSchema
+	Mapper   mapper.Mapper
+	metadata map[reflect.Type]mapper.Metadata
+}
+
+// NewDatabaseModel returns a new DatabaseModel
+func NewDatabaseModel(schema ovsdb.DatabaseSchema, client ClientDBModel) (DatabaseModel, []error) {
+	dbModel := &DatabaseModel{
+		Schema: schema,
+		client: client,
+	}
+	errs := client.validate(schema)
+	if len(errs) > 0 {
+		return DatabaseModel{}, errs
+	}
+	dbModel.Mapper = mapper.NewMapper(schema)
+	var metadata map[reflect.Type]mapper.Metadata
+	metadata, errs = generateModelInfo(schema, client.types)
+	if len(errs) > 0 {
+		return DatabaseModel{}, errs
+	}
+	dbModel.metadata = metadata
+	return *dbModel, nil
+}
+
+// NewPartialDatabaseModel returns a DatabaseModel that does not have a schema yet
+func NewPartialDatabaseModel(client ClientDBModel) DatabaseModel {
+	return DatabaseModel{
+		client: client,
+	}
+}
+
+// Valid returns whether the DatabaseModel is fully functional
+func (db DatabaseModel) Valid() bool {
+	return !reflect.DeepEqual(db.Schema, ovsdb.DatabaseSchema{})
+}
+
+// Client returns the DatabaseModel's client dbModel
+func (db DatabaseModel) Client() ClientDBModel {
+	return db.client
+}
+
+// NewModel returns a new instance of a model from a specific string
+func (db DatabaseModel) NewModel(table string) (Model, error) {
+	mtype, ok := db.client.types[table]
+	if !ok {
+		return nil, fmt.Errorf("table %s not found in database model", string(table))
+	}
+	model := reflect.New(mtype.Elem())
+	return model.Interface().(Model), nil
+}
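A sketch of how the two halves meet once the server's schema is known. `schemaJSON` is assumed to hold a raw get_schema reply, and the helper name is invented; `ovsdb.DatabaseSchema` unmarshals from that JSON directly:

```go
func buildModel(schemaJSON []byte, cdm ClientDBModel) (DatabaseModel, error) {
	var schema ovsdb.DatabaseSchema
	if err := json.Unmarshal(schemaJSON, &schema); err != nil {
		return DatabaseModel{}, err
	}
	dbModel, errs := NewDatabaseModel(schema, cdm)
	if len(errs) > 0 {
		return DatabaseModel{}, errs[0]
	}
	return dbModel, nil // dbModel.Mapper is now ready to translate rows
}
```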
+
+// Types returns the DatabaseModel Types.
+// The DatabaseModel types are a map of reflect.Types indexed by string.
+// The reflect.Type is a pointer to a struct that contains 'ovs' tags
+// as described above. Such a pointer to struct also implements the Model interface
+func (db DatabaseModel) Types() map[string]reflect.Type {
+	return db.client.types
+}
+
+// FindTable returns the string associated with a reflect.Type or ""
+func (db DatabaseModel) FindTable(mType reflect.Type) string {
+	for table, tType := range db.client.types {
+		if tType == mType {
+			return table
+		}
+	}
+	return ""
+}
+
+// generateModelInfo creates metadata objects from all models included in the
+// database and caches them for future re-use
+func generateModelInfo(dbSchema ovsdb.DatabaseSchema, modelTypes map[string]reflect.Type) (map[reflect.Type]mapper.Metadata, []error) {
+	errors := []error{}
+	metadata := make(map[reflect.Type]mapper.Metadata, len(modelTypes))
+	for tableName, tType := range modelTypes {
+		tableSchema := dbSchema.Table(tableName)
+		if tableSchema == nil {
+			errors = append(errors, fmt.Errorf("database Model contains model for table %s which is not present in schema", tableName))
+			continue
+		}
+
+		obj := reflect.New(tType.Elem()).Interface().(Model)
+		info, err := mapper.NewInfo(tableName, tableSchema, obj)
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+		metadata[tType] = info.Metadata
+	}
+	return metadata, errors
+}
+
+// NewModelInfo returns a mapper.Info object based on a provided model
+func (db DatabaseModel) NewModelInfo(obj interface{}) (*mapper.Info, error) {
+	meta, ok := db.metadata[reflect.TypeOf(obj)]
+	if !ok {
+		return nil, ovsdb.NewErrWrongType("NewModelInfo", "type that is part of the DatabaseModel", obj)
+	}
+	return &mapper.Info{
+		Obj:      obj,
+		Metadata: meta,
+	}, nil
+}
diff --git a/vendor/github.com/ovn-org/libovsdb/model/model.go b/vendor/github.com/ovn-org/libovsdb/model/model.go
new file mode 100644
index 000000000..c8575f5bf
--- /dev/null
+++ b/vendor/github.com/ovn-org/libovsdb/model/model.go
@@ -0,0 +1,130 @@
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+
+	"github.com/ovn-org/libovsdb/ovsdb"
+)
+
+// A Model is the base interface used to build Database Models. It is used
+// to express how data from a specific Database Table shall be translated into structs
+// A Model is a struct with at least one (most likely more) field tagged with the 'ovs' tag
+// The value of 'ovs' field must be a valid column name in the OVS Database
+// A field associated with the "_uuid" column is mandatory.
The rest of the columns are optional +// The struct may also have non-tagged fields (which will be ignored by the API calls) +// The Model interface must be implemented by the pointer to such type +// Example: +//type MyLogicalRouter struct { +// UUID string `ovsdb:"_uuid"` +// Name string `ovsdb:"name"` +// ExternalIDs map[string]string `ovsdb:"external_ids"` +// LoadBalancers []string `ovsdb:"load_balancer"` +//} +type Model interface{} + +type CloneableModel interface { + CloneModel() Model + CloneModelInto(Model) +} + +type ComparableModel interface { + EqualsModel(Model) bool +} + +// Clone creates a deep copy of a model +func Clone(a Model) Model { + if cloner, ok := a.(CloneableModel); ok { + return cloner.CloneModel() + } + + val := reflect.Indirect(reflect.ValueOf(a)) + b := reflect.New(val.Type()).Interface() + aBytes, _ := json.Marshal(a) + _ = json.Unmarshal(aBytes, b) + return b +} + +// CloneInto deep copies a model into another one +func CloneInto(src, dst Model) { + if cloner, ok := src.(CloneableModel); ok { + cloner.CloneModelInto(dst) + return + } + + aBytes, _ := json.Marshal(src) + _ = json.Unmarshal(aBytes, dst) +} + +func Equal(l, r Model) bool { + if comparator, ok := l.(ComparableModel); ok { + return comparator.EqualsModel(r) + } + + return reflect.DeepEqual(l, r) +} + +func modelSetUUID(model Model, uuid string) error { + modelVal := reflect.ValueOf(model).Elem() + for i := 0; i < modelVal.NumField(); i++ { + if field := modelVal.Type().Field(i); field.Tag.Get("ovsdb") == "_uuid" && + field.Type.Kind() == reflect.String { + modelVal.Field(i).Set(reflect.ValueOf(uuid)) + return nil + } + } + return fmt.Errorf("model is expected to have a string field mapped to column _uuid") +} + +// Condition is a model-based representation of an OVSDB Condition +type Condition struct { + // Pointer to the field of the model where the operation applies + Field interface{} + // Condition function + Function ovsdb.ConditionFunction + // Value to use in the condition + Value interface{} +} + +// Mutation is a model-based representation of an OVSDB Mutation +type Mutation struct { + // Pointer to the field of the model that shall be mutated + Field interface{} + // String representing the mutator (as per RFC7047) + Mutator ovsdb.Mutator + // Value to use in the mutation + Value interface{} +} + +// CreateModel creates a new Model instance based on an OVSDB Row information +func CreateModel(dbModel DatabaseModel, tableName string, row *ovsdb.Row, uuid string) (Model, error) { + if !dbModel.Valid() { + return nil, fmt.Errorf("database model not valid") + } + + table := dbModel.Schema.Table(tableName) + if table == nil { + return nil, fmt.Errorf("table %s not found", tableName) + } + model, err := dbModel.NewModel(tableName) + if err != nil { + return nil, err + } + info, err := dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + err = dbModel.Mapper.GetRowData(row, info) + if err != nil { + return nil, err + } + + if uuid != "" { + if err := info.SetField("_uuid", uuid); err != nil { + return nil, err + } + } + + return model, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go new file mode 100644 index 000000000..aebe2c2d0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go @@ -0,0 +1,427 @@ +package ovsdb + +import ( + "fmt" + "reflect" +) + +var ( + intType = reflect.TypeOf(0) + realType = reflect.TypeOf(0.0) + boolType = reflect.TypeOf(true) + strType = 
reflect.TypeOf("")
+)
+
+// ErrWrongType describes typing error
+type ErrWrongType struct {
+	from     string
+	expected string
+	got      interface{}
+}
+
+func (e *ErrWrongType) Error() string {
+	return fmt.Sprintf("Wrong Type (%s): expected %s but got %+v (%s)",
+		e.from, e.expected, e.got, reflect.TypeOf(e.got))
+}
+
+// NewErrWrongType creates a new ErrWrongType
+func NewErrWrongType(from, expected string, got interface{}) error {
+	return &ErrWrongType{
+		from:     from,
+		expected: expected,
+		got:      got,
+	}
+}
+
+// NativeTypeFromAtomic returns the native type that can hold a value of an
+// AtomicType
+func NativeTypeFromAtomic(basicType string) reflect.Type {
+	switch basicType {
+	case TypeInteger:
+		return intType
+	case TypeReal:
+		return realType
+	case TypeBoolean:
+		return boolType
+	case TypeString:
+		return strType
+	case TypeUUID:
+		return strType
+	default:
+		panic(fmt.Sprintf("unknown basic type %s", basicType))
+	}
+}
+
+// NativeType returns the reflect.Type that can hold the value of a column
+// OVS Type to Native Type conversions:
+//
+// OVS sets -> go slices or a go native type depending on the key
+// OVS uuid -> go strings
+// OVS map -> go map
+// OVS enum -> go native type depending on the type of the enum key
+func NativeType(column *ColumnSchema) reflect.Type {
+	switch column.Type {
+	case TypeInteger, TypeReal, TypeBoolean, TypeUUID, TypeString:
+		return NativeTypeFromAtomic(column.Type)
+	case TypeEnum:
+		return NativeTypeFromAtomic(column.TypeObj.Key.Type)
+	case TypeMap:
+		keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type)
+		valueType := NativeTypeFromAtomic(column.TypeObj.Value.Type)
+		return reflect.MapOf(keyType, valueType)
+	case TypeSet:
+		keyType := NativeTypeFromAtomic(column.TypeObj.Key.Type)
+		// optional type
+		if column.TypeObj.Min() == 0 && column.TypeObj.Max() == 1 {
+			return reflect.PtrTo(keyType)
+		}
+		// non-optional type with max 1
+		if column.TypeObj.Min() == 1 && column.TypeObj.Max() == 1 {
+			return keyType
+		}
+		return reflect.SliceOf(keyType)
+	default:
+		panic(fmt.Errorf("unknown extended type %s", column.Type))
+	}
+}
+
+// OvsToNativeAtomic returns the native value of the basic ovs element
+func OvsToNativeAtomic(basicType string, ovsElem interface{}) (interface{}, error) {
+	switch basicType {
+	case TypeReal, TypeString, TypeBoolean:
+		naType := NativeTypeFromAtomic(basicType)
+		if reflect.TypeOf(ovsElem) != naType {
+			return nil, NewErrWrongType("OvsToNativeAtomic", naType.String(), ovsElem)
+		}
+		return ovsElem, nil
+	case TypeInteger:
+		naType := NativeTypeFromAtomic(basicType)
+		// Default decoding of numbers is float64, convert them to int
+		if !reflect.TypeOf(ovsElem).ConvertibleTo(naType) {
+			return nil, NewErrWrongType("OvsToNativeAtomic", fmt.Sprintf("Convertible to %s", naType), ovsElem)
+		}
+		return reflect.ValueOf(ovsElem).Convert(naType).Interface(), nil
+	case TypeUUID:
+		uuid, ok := ovsElem.(UUID)
+		if !ok {
+			return nil, NewErrWrongType("OvsToNativeAtomic", "UUID", ovsElem)
+		}
+		return uuid.GoUUID, nil
+	default:
+		panic(fmt.Errorf("unknown atomic type %s", basicType))
+	}
+}
+
+func OvsToNativeSlice(baseType string, ovsElem interface{}) (interface{}, error) {
+	naType := NativeTypeFromAtomic(baseType)
+	var nativeSet reflect.Value
+	switch ovsSet := ovsElem.(type) {
+	case OvsSet:
+		nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, len(ovsSet.GoSet))
+		for _, v := range ovsSet.GoSet {
+			nv, err := OvsToNativeAtomic(baseType, v)
+			if err != nil {
+				return nil, err
+			}
+			nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv))
+		}
+
+	default:
+		nativeSet = reflect.MakeSlice(reflect.SliceOf(naType), 0, 1)
+		nv, err := OvsToNativeAtomic(baseType, ovsElem)
+		if err != nil {
+			return nil, err
+		}
+
+		nativeSet = reflect.Append(nativeSet, reflect.ValueOf(nv))
+	}
+	return nativeSet.Interface(), nil
+}
+
+// OvsToNative transforms an ovs type to native one based on the column type information
+func OvsToNative(column *ColumnSchema, ovsElem interface{}) (interface{}, error) {
+	switch column.Type {
+	case TypeReal, TypeString, TypeBoolean, TypeInteger, TypeUUID:
+		return OvsToNativeAtomic(column.Type, ovsElem)
+	case TypeEnum:
+		return OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem)
+	case TypeSet:
+		naType := NativeType(column)
+		// The inner slice is []interface{}
+		// We need to convert it to the real type of the slice
+		switch naType.Kind() {
+		case reflect.Ptr:
+			switch ovsSet := ovsElem.(type) {
+			case OvsSet:
+				if len(ovsSet.GoSet) > 1 {
+					return nil, fmt.Errorf("expected a slice of len <= 1, but got a slice with %d elements", len(ovsSet.GoSet))
+				}
+				if len(ovsSet.GoSet) == 0 {
+					return reflect.Zero(naType).Interface(), nil
+				}
+				native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsSet.GoSet[0])
+				if err != nil {
+					return nil, err
+				}
+				pv := reflect.New(naType.Elem())
+				pv.Elem().Set(reflect.ValueOf(native))
+				return pv.Interface(), nil
+			default:
+				native, err := OvsToNativeAtomic(column.TypeObj.Key.Type, ovsElem)
+				if err != nil {
+					return nil, err
+				}
+				pv := reflect.New(naType.Elem())
+				pv.Elem().Set(reflect.ValueOf(native))
+				return pv.Interface(), nil
+			}
+		case reflect.Slice:
+			return OvsToNativeSlice(column.TypeObj.Key.Type, ovsElem)
+		default:
+			return nil, fmt.Errorf("native type was not slice or pointer. got %s", naType.Kind())
+		}
+	case TypeMap:
+		naType := NativeType(column)
+		ovsMap, ok := ovsElem.(OvsMap)
+		if !ok {
+			return nil, NewErrWrongType("OvsToNative", "OvsMap", ovsElem)
+		}
+		// The inner type is map[interface{}]interface{}
+		// We need to convert it to the real type of the map
+		nativeMap := reflect.MakeMapWithSize(naType, len(ovsMap.GoMap))
+		for k, v := range ovsMap.GoMap {
+			nk, err := OvsToNativeAtomic(column.TypeObj.Key.Type, k)
+			if err != nil {
+				return nil, err
+			}
+			nv, err := OvsToNativeAtomic(column.TypeObj.Value.Type, v)
+			if err != nil {
+				return nil, err
+			}
+			nativeMap.SetMapIndex(reflect.ValueOf(nk), reflect.ValueOf(nv))
+		}
+		return nativeMap.Interface(), nil
+	default:
+		panic(fmt.Sprintf("Unknown Type: %v", column.Type))
+	}
+}
+
+// NativeToOvsAtomic returns the OVS type of the atomic native value
+func NativeToOvsAtomic(basicType string, nativeElem interface{}) (interface{}, error) {
+	naType := NativeTypeFromAtomic(basicType)
+	if reflect.TypeOf(nativeElem) != naType {
+		return nil, NewErrWrongType("NativeToOvsAtomic", naType.String(), nativeElem)
+	}
+	switch basicType {
+	case TypeUUID:
+		return UUID{GoUUID: nativeElem.(string)}, nil
+	default:
+		return nativeElem, nil
+	}
+}
+
+// NativeToOvs transforms a native type to an ovs type based on the column type information
+func NativeToOvs(column *ColumnSchema, rawElem interface{}) (interface{}, error) {
+	naType := NativeType(column)
+	if t := reflect.TypeOf(rawElem); t != naType {
+		return nil, NewErrWrongType("NativeToOvs", naType.String(), rawElem)
+	}
+
+	switch column.Type {
+	case TypeInteger, TypeReal, TypeString, TypeBoolean, TypeEnum:
+		return rawElem, nil
+	case TypeUUID:
+		return UUID{GoUUID: rawElem.(string)}, nil
+	case TypeSet:
+		var ovsSet OvsSet
+		if column.TypeObj.Key.Type == TypeUUID {
+			ovsSlice := []interface{}{}
+			if _, ok := rawElem.([]string); ok {
+				for _, v := range rawElem.([]string) {
+					uuid := UUID{GoUUID: v}
+					ovsSlice = append(ovsSlice, uuid)
+				}
+			} else if _, ok := rawElem.(*string); ok {
+				v := rawElem.(*string)
+				if v != nil {
+					uuid := UUID{GoUUID: *v}
+					ovsSlice = append(ovsSlice, uuid)
+				}
+			} else {
+				return nil, fmt.Errorf("uuid slice was neither []string nor *string")
+			}
+			ovsSet = OvsSet{GoSet: ovsSlice}
+
+		} else {
+			var err error
+			ovsSet, err = NewOvsSet(rawElem)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return ovsSet, nil
+	case TypeMap:
+		nativeMapVal := reflect.ValueOf(rawElem)
+		ovsMap := make(map[interface{}]interface{}, nativeMapVal.Len())
+		for _, key := range nativeMapVal.MapKeys() {
+			ovsKey, err := NativeToOvsAtomic(column.TypeObj.Key.Type, key.Interface())
+			if err != nil {
+				return nil, err
+			}
+			ovsVal, err := NativeToOvsAtomic(column.TypeObj.Value.Type, nativeMapVal.MapIndex(key).Interface())
+			if err != nil {
+				return nil, err
+			}
+			ovsMap[ovsKey] = ovsVal
+		}
+		return OvsMap{GoMap: ovsMap}, nil
+
+	default:
+		panic(fmt.Sprintf("Unknown Type: %v", column.Type))
+	}
+}
+
+// IsDefaultValue checks if a provided native element corresponds to the default value of its
+// designated column type
+func IsDefaultValue(column *ColumnSchema, nativeElem interface{}) bool {
+	switch column.Type {
+	case TypeEnum:
+		return isDefaultBaseValue(nativeElem, column.TypeObj.Key.Type)
+	default:
+		return isDefaultBaseValue(nativeElem, column.Type)
+	}
+}
+
+// validateMutationAtomic checks if the mutation is valid for a specific AtomicType
+func validateMutationAtomic(atype string, mutator Mutator, value interface{}) error {
+	nType := NativeTypeFromAtomic(atype)
+	if reflect.TypeOf(value) != nType {
+		return NewErrWrongType(fmt.Sprintf("Mutation of atomic type %s", atype), nType.String(), value)
+	}
+
+	switch atype {
+	case TypeUUID, TypeString, TypeBoolean:
+		return fmt.Errorf("atomictype %s does not support mutation", atype)
+	case TypeReal:
+		switch mutator {
+		case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide:
+			return nil
+		default:
+			return fmt.Errorf("wrong mutator for real type %s", mutator)
+		}
+	case TypeInteger:
+		switch mutator {
+		case MutateOperationAdd, MutateOperationSubtract, MutateOperationMultiply, MutateOperationDivide, MutateOperationModulo:
+			return nil
+		default:
+			return fmt.Errorf("wrong mutator for integer type: %s", mutator)
+		}
+	default:
+		panic("Unsupported Atomic Type")
+	}
+}
+
+// ValidateMutation checks if the mutation value and mutator string are appropriate
+// for a given column based on the rules specified in RFC7047
+func ValidateMutation(column *ColumnSchema, mutator Mutator, value interface{}) error {
+	if !column.Mutable() {
+		return fmt.Errorf("column is not mutable")
+	}
+	switch column.Type {
+	case TypeSet:
+		switch mutator {
+		case MutateOperationInsert, MutateOperationDelete:
+			// RFC7047 says a <set> may be an <atom> with a single
+			// element.
Check if we can store this value in our column + if reflect.TypeOf(value).Kind() != reflect.Slice { + if NativeType(column) != reflect.SliceOf(reflect.TypeOf(value)) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of single value in to column %s", mutator, column), + NativeType(column).String(), reflect.SliceOf(reflect.TypeOf(value)).String()) + } + return nil + } + if NativeType(column) != reflect.TypeOf(value) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + NativeType(column).String(), value) + } + return nil + default: + return validateMutationAtomic(column.TypeObj.Key.Type, mutator, value) + } + case TypeMap: + switch mutator { + case MutateOperationInsert: + // Value must be a map of the same kind + if reflect.TypeOf(value) != NativeType(column) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + NativeType(column).String(), value) + } + return nil + case MutateOperationDelete: + // Value must be a map of the same kind or a set of keys to delete + if reflect.TypeOf(value) != NativeType(column) && + reflect.TypeOf(value) != reflect.SliceOf(NativeTypeFromAtomic(column.TypeObj.Key.Type)) { + return NewErrWrongType(fmt.Sprintf("Mutation %s of column %s", mutator, column), + "compatible map type", value) + } + return nil + + default: + return fmt.Errorf("wrong mutator for map type: %s", mutator) + } + case TypeEnum: + // RFC does not clarify what to do with enums. + return fmt.Errorf("enums do not support mutation") + default: + return validateMutationAtomic(column.Type, mutator, value) + } +} + +func ValidateCondition(column *ColumnSchema, function ConditionFunction, nativeValue interface{}) error { + if NativeType(column) != reflect.TypeOf(nativeValue) { + return NewErrWrongType(fmt.Sprintf("Condition for column %s", column), + NativeType(column).String(), nativeValue) + } + + switch column.Type { + case TypeSet, TypeMap, TypeBoolean, TypeString, TypeUUID: + switch function { + case ConditionEqual, ConditionNotEqual, ConditionIncludes, ConditionExcludes: + return nil + default: + return fmt.Errorf("wrong condition function %s for type: %s", function, column.Type) + } + case TypeInteger, TypeReal: + // All functions are valid + return nil + default: + panic("Unsupported Type") + } +} + +func isDefaultBaseValue(elem interface{}, etype ExtendedType) bool { + value := reflect.ValueOf(elem) + if !value.IsValid() { + return true + } + if reflect.TypeOf(elem).Kind() == reflect.Ptr { + return reflect.ValueOf(elem).IsZero() + } + switch etype { + case TypeUUID: + return elem.(string) == "00000000-0000-0000-0000-000000000000" || elem.(string) == "" + case TypeMap, TypeSet: + if value.Kind() == reflect.Array { + return value.Len() == 0 + } + return value.IsNil() || value.Len() == 0 + case TypeString: + return elem.(string) == "" + case TypeInteger: + return elem.(int) == 0 + case TypeReal: + return elem.(float64) == 0 + default: + return false + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go new file mode 100644 index 000000000..783ac0f55 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go @@ -0,0 +1,223 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +type ConditionFunction string +type WaitCondition string + +const ( + // ConditionLessThan is the less than condition + ConditionLessThan ConditionFunction = "<" + // ConditionLessThanOrEqual is the less than or equal condition + 
ConditionLessThanOrEqual ConditionFunction = "<="
+	// ConditionEqual is the equal condition
+	ConditionEqual ConditionFunction = "=="
+	// ConditionNotEqual is the not equal condition
+	ConditionNotEqual ConditionFunction = "!="
+	// ConditionGreaterThan is the greater than condition
+	ConditionGreaterThan ConditionFunction = ">"
+	// ConditionGreaterThanOrEqual is the greater than or equal condition
+	ConditionGreaterThanOrEqual ConditionFunction = ">="
+	// ConditionIncludes is the includes condition
+	ConditionIncludes ConditionFunction = "includes"
+	// ConditionExcludes is the excludes condition
+	ConditionExcludes ConditionFunction = "excludes"
+
+	// WaitConditionEqual is the equal condition
+	WaitConditionEqual WaitCondition = "=="
+	// WaitConditionNotEqual is the not equal condition
+	WaitConditionNotEqual WaitCondition = "!="
+)
+
+// Condition is described in RFC 7047: 5.1
+type Condition struct {
+	Column   string
+	Function ConditionFunction
+	Value    interface{}
+}
+
+func (c Condition) String() string {
+	return fmt.Sprintf("where column %s %s %v", c.Column, c.Function, c.Value)
+}
+
+// NewCondition returns a new condition
+func NewCondition(column string, function ConditionFunction, value interface{}) Condition {
+	return Condition{
+		Column:   column,
+		Function: function,
+		Value:    value,
+	}
+}
+
+// MarshalJSON marshals a condition to a 3 element JSON array
+func (c Condition) MarshalJSON() ([]byte, error) {
+	v := []interface{}{c.Column, c.Function, c.Value}
+	return json.Marshal(v)
+}
+
+// UnmarshalJSON converts a 3 element JSON array to a Condition
+func (c *Condition) UnmarshalJSON(b []byte) error {
+	var v []interface{}
+	err := json.Unmarshal(b, &v)
+	if err != nil {
+		return err
+	}
+	if len(v) != 3 {
+		return fmt.Errorf("expected a 3 element json array. there are %d elements", len(v))
+	}
+	c.Column = v[0].(string)
+	function := ConditionFunction(v[1].(string))
+	switch function {
+	case ConditionEqual,
+		ConditionNotEqual,
+		ConditionIncludes,
+		ConditionExcludes,
+		ConditionGreaterThan,
+		ConditionGreaterThanOrEqual,
+		ConditionLessThan,
+		ConditionLessThanOrEqual:
+		c.Function = function
+	default:
+		return fmt.Errorf("%s is not a valid function", function)
+	}
+	vv, err := ovsSliceToGoNotation(v[2])
+	if err != nil {
+		return err
+	}
+	c.Value = vv
+	return nil
+}
+
+// Evaluate will evaluate the condition on the two provided values
+// The conditions operate differently depending on the type of
+// the provided values.
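A couple of concrete evaluations as a sketch (from outside the package the constants are qualified as shown; the function name is invented):

```go
func exampleEvaluate() {
	// true: every element of the second slice occurs in the first
	inc, _ := ovsdb.ConditionIncludes.Evaluate([]string{"a", "b"}, []string{"a"})
	// true: integers compare numerically
	gt, _ := ovsdb.ConditionGreaterThan.Evaluate(3, 2)
	fmt.Println(inc, gt) // true true
}
```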
The behavior is as described in RFC7047 +func (c ConditionFunction) Evaluate(a interface{}, b interface{}) (bool, error) { + x := reflect.ValueOf(a) + y := reflect.ValueOf(b) + if x.Kind() != y.Kind() { + return false, fmt.Errorf("comparison between %s and %s not supported", x.Kind(), y.Kind()) + } + switch c { + case ConditionEqual: + return reflect.DeepEqual(a, b), nil + case ConditionNotEqual: + return !reflect.DeepEqual(a, b), nil + case ConditionIncludes: + switch x.Kind() { + case reflect.Slice: + return sliceContains(x, y), nil + case reflect.Map: + return mapContains(x, y), nil + case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: + return reflect.DeepEqual(a, b), nil + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionExcludes: + switch x.Kind() { + case reflect.Slice: + return !sliceContains(x, y), nil + case reflect.Map: + return !mapContains(x, y), nil + case reflect.Int, reflect.Float64, reflect.Bool, reflect.String: + return !reflect.DeepEqual(a, b), nil + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionGreaterThan: + switch x.Kind() { + case reflect.Int: + return x.Int() > y.Int(), nil + case reflect.Float64: + return x.Float() > y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionGreaterThanOrEqual: + switch x.Kind() { + case reflect.Int: + return x.Int() >= y.Int(), nil + case reflect.Float64: + return x.Float() >= y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionLessThan: + switch x.Kind() { + case reflect.Int: + return x.Int() < y.Int(), nil + case reflect.Float64: + return x.Float() < y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + case ConditionLessThanOrEqual: + switch x.Kind() { + case reflect.Int: + return x.Int() <= y.Int(), nil + case reflect.Float64: + return x.Float() <= y.Float(), nil + case reflect.Bool, reflect.String, reflect.Slice, reflect.Map: + default: + return false, fmt.Errorf("condition not supported on %s", x.Kind()) + } + default: + return false, fmt.Errorf("unsupported condition function %s", c) + } + // we should never get here + return false, fmt.Errorf("unreachable condition") +} + +func sliceContains(x, y reflect.Value) bool { + for i := 0; i < y.Len(); i++ { + found := false + vy := y.Index(i) + for j := 0; j < x.Len(); j++ { + vx := x.Index(j) + if vy.Kind() == reflect.Interface { + if vy.Elem() == vx.Elem() { + found = true + break + } + } else { + if vy.Interface() == vx.Interface() { + found = true + break + } + } + } + if !found { + return false + } + } + return true +} + +func mapContains(x, y reflect.Value) bool { + iter := y.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + vx := x.MapIndex(k) + if !vx.IsValid() { + return false + } + if v.Kind() != reflect.Interface { + if v.Interface() != vx.Interface() { + return false + } + } else { + if v.Elem() != vx.Elem() { + return false + } + } + } + return true +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go new file mode 100644 index 000000000..4a85c541c --- /dev/null +++ 
b/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go @@ -0,0 +1,373 @@ +package ovsdb + +import "fmt" + +const ( + referentialIntegrityViolation = "referential integrity violation" + constraintViolation = "constraint violation" + resourcesExhausted = "resources exhausted" + ioError = "I/O error" + duplicateUUIDName = "duplicate uuid name" + domainError = "domain error" + rangeError = "range error" + timedOut = "timed out" + notSupported = "not supported" + aborted = "aborted" + notOwner = "not owner" +) + +// errorFromResult returns an specific OVSDB error type from +// an OperationResult +func errorFromResult(op *Operation, r OperationResult) OperationError { + if r.Error == "" { + return nil + } + switch r.Error { + case referentialIntegrityViolation: + return &ReferentialIntegrityViolation{r.Details, op} + case constraintViolation: + return &ConstraintViolation{r.Details, op} + case resourcesExhausted: + return &ResourcesExhausted{r.Details, op} + case ioError: + return &IOError{r.Details, op} + case duplicateUUIDName: + return &DuplicateUUIDName{r.Details, op} + case domainError: + return &DomainError{r.Details, op} + case rangeError: + return &RangeError{r.Details, op} + case timedOut: + return &TimedOut{r.Details, op} + case notSupported: + return &NotSupported{r.Details, op} + case aborted: + return &Aborted{r.Details, op} + case notOwner: + return &NotOwner{r.Details, op} + default: + return &Error{r.Error, r.Details, op} + } +} + +func ResultFromError(err error) OperationResult { + if err == nil { + panic("Program error: passed nil error to resultFromError") + } + switch e := err.(type) { + case *ReferentialIntegrityViolation: + return OperationResult{Error: referentialIntegrityViolation, Details: e.details} + case *ConstraintViolation: + return OperationResult{Error: constraintViolation, Details: e.details} + case *ResourcesExhausted: + return OperationResult{Error: resourcesExhausted, Details: e.details} + case *IOError: + return OperationResult{Error: ioError, Details: e.details} + case *DuplicateUUIDName: + return OperationResult{Error: duplicateUUIDName, Details: e.details} + case *DomainError: + return OperationResult{Error: domainError, Details: e.details} + case *RangeError: + return OperationResult{Error: rangeError, Details: e.details} + case *TimedOut: + return OperationResult{Error: timedOut, Details: e.details} + case *NotSupported: + return OperationResult{Error: notSupported, Details: e.details} + case *Aborted: + return OperationResult{Error: aborted, Details: e.details} + case *NotOwner: + return OperationResult{Error: notOwner, Details: e.details} + default: + return OperationResult{Error: e.Error()} + } +} + +// CheckOperationResults checks whether the provided operation was a success +// If the operation was a success, it will return nil, nil +// If the operation failed, due to a error committing the transaction it will +// return nil, error. +// Finally, in the case where one or more of the operations in the transaction +// failed, we return []OperationErrors, error +// Within []OperationErrors, the OperationErrors.Index() corresponds to the same index in +// the original Operations struct. You may also perform type assertions against +// the error so the caller can decide how best to handle it +func CheckOperationResults(result []OperationResult, ops []Operation) ([]OperationError, error) { + // this shouldn't happen, but we'll cover the case to be certain + if len(result) < len(ops) { + return nil, fmt.Errorf("ovsdb transaction error. 
%d operations submitted but only %d results received", len(ops), len(result)) + } + var errs []OperationError + for i, op := range result { + // RFC 7047: if all of the operations succeed, but the results cannot + // be committed, then "result" will have one more element than "params", + // with the additional element being an . + if i >= len(ops) { + return errs, errorFromResult(nil, op) + } + if err := errorFromResult(&ops[i], op); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return errs, fmt.Errorf("%d ovsdb operations failed", len(errs)) + } + return nil, nil +} + +// OperationError represents an error that occurred as part of an +// OVSDB Operation +type OperationError interface { + error + // Operation is a pointer to the operation which caused the error + Operation() *Operation +} + +// ReferentialIntegrityViolation is explained in RFC 7047 4.1.3 +type ReferentialIntegrityViolation struct { + details string + operation *Operation +} + +func NewReferentialIntegrityViolation(details string) *ReferentialIntegrityViolation { + return &ReferentialIntegrityViolation{details: details} +} + +// Error implements the error interface +func (e *ReferentialIntegrityViolation) Error() string { + msg := referentialIntegrityViolation + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ReferentialIntegrityViolation) Operation() *Operation { + return e.operation +} + +// ConstraintViolation is described in RFC 7047: 4.1.3 +type ConstraintViolation struct { + details string + operation *Operation +} + +func NewConstraintViolation(details string) *ConstraintViolation { + return &ConstraintViolation{details: details} +} + +// Error implements the error interface +func (e *ConstraintViolation) Error() string { + msg := constraintViolation + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ConstraintViolation) Operation() *Operation { + return e.operation +} + +// ResourcesExhausted is described in RFC 7047: 4.1.3 +type ResourcesExhausted struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *ResourcesExhausted) Error() string { + msg := resourcesExhausted + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *ResourcesExhausted) Operation() *Operation { + return e.operation +} + +// IOError is described in RFC7047: 4.1.3 +type IOError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *IOError) Error() string { + msg := ioError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *IOError) Operation() *Operation { + return e.operation +} + +// DuplicateUUIDName is described in RFC7047 5.2.1 +type DuplicateUUIDName struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *DuplicateUUIDName) Error() string { + msg := duplicateUUIDName + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *DuplicateUUIDName) Operation() *Operation { + return e.operation +} + +// DomainError is described in RFC 7047: 5.2.4 +type DomainError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e 
*DomainError) Error() string { + msg := domainError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *DomainError) Operation() *Operation { + return e.operation +} + +// RangeError is described in RFC 7047: 5.2.4 +type RangeError struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *RangeError) Error() string { + msg := rangeError + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *RangeError) Operation() *Operation { + return e.operation +} + +// TimedOut is described in RFC 7047: 5.2.6 +type TimedOut struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *TimedOut) Error() string { + msg := timedOut + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *TimedOut) Operation() *Operation { + return e.operation +} + +// NotSupported is described in RFC 7047: 5.2.7 +type NotSupported struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *NotSupported) Error() string { + msg := notSupported + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *NotSupported) Operation() *Operation { + return e.operation +} + +// Aborted is described in RFC 7047: 5.2.8 +type Aborted struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *Aborted) Error() string { + msg := aborted + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *Aborted) Operation() *Operation { + return e.operation +} + +// NotOwner is described in RFC 7047: 5.2.9 +type NotOwner struct { + details string + operation *Operation +} + +// Error implements the error interface +func (e *NotOwner) Error() string { + msg := notOwner + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *NotOwner) Operation() *Operation { + return e.operation +} + +// Error is a generic OVSDB Error type that implements the +// OperationError and error interfaces +type Error struct { + name string + details string + operation *Operation +} + +// Error implements the error interface +func (e *Error) Error() string { + msg := e.name + if e.details != "" { + msg += ": " + e.details + } + return msg +} + +// Operation implements the OperationError interface +func (e *Error) Operation() *Operation { + return e.operation +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go new file mode 100644 index 000000000..893a9774f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go @@ -0,0 +1,92 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// OvsMap is the JSON map structure used for OVSDB +// RFC 7047 uses the following notation for a map, as JSON doesn't support non-string keys for maps. + A 2-element JSON array that represents a database map value. The + first element of the array must be the string "map", and the + second element must be an array of zero or more <pair>s giving the + values in the map. All of the <pair>s must have the same key and + value types.
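+// For example, the Go map map[string]string{"color": "blue"} is represented +// on the wire as ["map", [["color", "blue"]]], and an empty map as ["map", []].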
+type OvsMap struct { + GoMap map[interface{}]interface{} +} + +// MarshalJSON marshals an OVSDB style Map to a byte array +func (o OvsMap) MarshalJSON() ([]byte, error) { + if len(o.GoMap) > 0 { + var ovsMap, innerMap []interface{} + ovsMap = append(ovsMap, "map") + for key, val := range o.GoMap { + var mapSeg []interface{} + mapSeg = append(mapSeg, key) + mapSeg = append(mapSeg, val) + innerMap = append(innerMap, mapSeg) + } + ovsMap = append(ovsMap, innerMap) + return json.Marshal(ovsMap) + } + return []byte("[\"map\",[]]"), nil +} + +// UnmarshalJSON unmarshals an OVSDB style Map from a byte array +func (o *OvsMap) UnmarshalJSON(b []byte) (err error) { + var oMap []interface{} + o.GoMap = make(map[interface{}]interface{}) + if err := json.Unmarshal(b, &oMap); err == nil && len(oMap) > 1 { + innerSlice := oMap[1].([]interface{}) + for _, val := range innerSlice { + f := val.([]interface{}) + var k interface{} + switch f[0].(type) { + case []interface{}: + vSet := f[0].([]interface{}) + if len(vSet) != 2 || vSet[0] == "map" { + return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} + } + goSlice, err := ovsSliceToGoNotation(vSet) + if err != nil { + return err + } + k = goSlice + default: + k = f[0] + } + switch f[1].(type) { + case []interface{}: + vSet := f[1].([]interface{}) + if len(vSet) != 2 || vSet[0] == "map" { + return &json.UnmarshalTypeError{Value: reflect.ValueOf(oMap).String(), Type: reflect.TypeOf(*o)} + } + goSlice, err := ovsSliceToGoNotation(vSet) + if err != nil { + return err + } + o.GoMap[k] = goSlice + default: + o.GoMap[k] = f[1] + } + } + } + return err +} + +// NewOvsMap will return an OVSDB style map from a provided Golang Map +func NewOvsMap(goMap interface{}) (OvsMap, error) { + v := reflect.ValueOf(goMap) + if v.Kind() != reflect.Map { + return OvsMap{}, fmt.Errorf("ovsmap supports only go map types") + } + + genMap := make(map[interface{}]interface{}) + keys := v.MapKeys() + for _, key := range keys { + genMap[key.Interface()] = v.MapIndex(key).Interface() + } + return OvsMap{genMap}, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go new file mode 100644 index 000000000..b97e06285 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go @@ -0,0 +1,88 @@ +package ovsdb + +import "encoding/json" + +// MonitorSelect represents a monitor select according to RFC7047 +type MonitorSelect struct { + initial *bool + insert *bool + delete *bool + modify *bool +} + +// NewMonitorSelect returns a new MonitorSelect with the provided values +func NewMonitorSelect(initial, insert, delete, modify bool) *MonitorSelect { + return &MonitorSelect{ + initial: &initial, + insert: &insert, + delete: &delete, + modify: &modify, + } +} + +// NewDefaultMonitorSelect returns a new MonitorSelect with default values +func NewDefaultMonitorSelect() *MonitorSelect { + return NewMonitorSelect(true, true, true, true) +} + +// Initial returns whether or not an initial response will be sent +func (m MonitorSelect) Initial() bool { + if m.initial == nil { + return true + } + return *m.initial +} + +// Insert returns whether we will receive updates for inserts +func (m MonitorSelect) Insert() bool { + if m.insert == nil { + return true + } + return *m.insert +} + +// Delete returns whether we will receive updates for deletions +func (m MonitorSelect) Delete() bool { + if m.delete == nil { + return true + } + return *m.delete +} + +// Modify
returns whether we will receive updates for modifications +func (m MonitorSelect) Modify() bool { + if m.modify == nil { + return true + } + return *m.modify +} + +type monitorSelect struct { + Initial *bool `json:"initial,omitempty"` + Insert *bool `json:"insert,omitempty"` + Delete *bool `json:"delete,omitempty"` + Modify *bool `json:"modify,omitempty"` +} + +func (m MonitorSelect) MarshalJSON() ([]byte, error) { + ms := monitorSelect{ + Initial: m.initial, + Insert: m.insert, + Delete: m.delete, + Modify: m.modify, + } + return json.Marshal(ms) +} + +func (m *MonitorSelect) UnmarshalJSON(data []byte) error { + var ms monitorSelect + err := json.Unmarshal(data, &ms) + if err != nil { + return err + } + m.initial = ms.Initial + m.insert = ms.Insert + m.delete = ms.Delete + m.modify = ms.Modify + return nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go new file mode 100644 index 000000000..dc8b0f6d4 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go @@ -0,0 +1,87 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" +) + +type Mutator string + +const ( + // MutateOperationDelete is the delete mutator + MutateOperationDelete Mutator = "delete" + // MutateOperationInsert is the insert mutator + MutateOperationInsert Mutator = "insert" + // MutateOperationAdd is the add mutator + MutateOperationAdd Mutator = "+=" + // MutateOperationSubtract is the subtract mutator + MutateOperationSubtract Mutator = "-=" + // MutateOperationMultiply is the multiply mutator + MutateOperationMultiply Mutator = "*=" + // MutateOperationDivide is the divide mutator + MutateOperationDivide Mutator = "/=" + // MutateOperationModulo is the modulo mutator + MutateOperationModulo Mutator = "%=" +) + +// Mutation is described in RFC 7047: 5.1 +type Mutation struct { + Column string + Mutator Mutator + Value interface{} +} + +// NewMutation returns a new mutation +func NewMutation(column string, mutator Mutator, value interface{}) *Mutation { + return &Mutation{ + Column: column, + Mutator: mutator, + Value: value, + } +} + +// MarshalJSON marshals a mutation to a 3 element JSON array +func (m Mutation) MarshalJSON() ([]byte, error) { + v := []interface{}{m.Column, m.Mutator, m.Value} + return json.Marshal(v) +} + +// UnmarshalJSON converts a 3 element JSON array to a Mutation +func (m *Mutation) UnmarshalJSON(b []byte) error { + var v []interface{} + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array. 
there are %d elements", len(v)) + } + ok := false + m.Column, ok = v[0].(string) + if !ok { + return fmt.Errorf("expected column name %v to be a valid string", v[0]) + } + mutatorString, ok := v[1].(string) + if !ok { + return fmt.Errorf("expected mutator %v to be a valid string", v[1]) + } + mutator := Mutator(mutatorString) + switch mutator { + case MutateOperationDelete, + MutateOperationInsert, + MutateOperationAdd, + MutateOperationSubtract, + MutateOperationMultiply, + MutateOperationDivide, + MutateOperationModulo: + m.Mutator = mutator + default: + return fmt.Errorf("%s is not a valid mutator", mutator) + } + vv, err := ovsSliceToGoNotation(v[2]) + if err != nil { + return err + } + m.Value = vv + return nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go new file mode 100644 index 000000000..29034ee9d --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go @@ -0,0 +1,165 @@ +package ovsdb + +import ( + "fmt" +) + +// ExpandNamedUUIDs replaces named UUIDs in columns that contain UUID types +// throughout the operation. The caller must ensure each input operation has +// a valid UUID, which may be replaced if a previous operation created a +// matching named UUID mapping. Returns the updated operations or an error. +func ExpandNamedUUIDs(ops []Operation, schema *DatabaseSchema) ([]Operation, error) { + uuidMap := make(map[string]string) + + // Pass 1: replace the named UUID with a real UUID for each operation and + // build the substitution map + for i := range ops { + op := &ops[i] + if op.Op != OperationInsert { + // Only Insert operations can specify a Named UUID + continue + } + + if err := ValidateUUID(op.UUID); err != nil { + return nil, fmt.Errorf("operation UUID %q invalid: %v", op.UUID, err) + } + + if op.UUIDName != "" { + if uuid, ok := uuidMap[op.UUIDName]; ok { + if op.UUID != "" && op.UUID != uuid { + return nil, fmt.Errorf("named UUID %q maps to UUID %q but found existing UUID %q", + op.UUIDName, uuid, op.UUID) + } + // If there's already a mapping for this named UUID use it + op.UUID = uuid + } else { + uuidMap[op.UUIDName] = op.UUID + } + op.UUIDName = "" + } + } + + // Pass 2: replace named UUIDs in operation fields with the real UUID + for i := range ops { + op := &ops[i] + tableSchema := schema.Table(op.Table) + if tableSchema == nil { + return nil, fmt.Errorf("table %q not found in schema %q", op.Table, schema.Name) + } + + for i, condition := range op.Where { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, condition.Column, condition.Value, uuidMap) + if err != nil { + return nil, err + } + op.Where[i].Value = newVal + } + for i, mutation := range op.Mutations { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, mutation.Column, mutation.Value, uuidMap) + if err != nil { + return nil, err + } + op.Mutations[i].Value = newVal + } + for _, row := range op.Rows { + for k, v := range row { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) + if err != nil { + return nil, err + } + row[k] = newVal + } + } + for k, v := range op.Row { + newVal, err := expandColumnNamedUUIDs(tableSchema, op.Table, k, v, uuidMap) + if err != nil { + return nil, err + } + op.Row[k] = newVal + } + } + + return ops, nil +} + +func expandColumnNamedUUIDs(tableSchema *TableSchema, tableName, columnName string, value interface{}, uuidMap map[string]string) (interface{}, error) { + column := tableSchema.Column(columnName) + if column == nil { 
+ return nil, fmt.Errorf("column %q not found in table %q", columnName, tableName) + } + return expandNamedUUID(column, value, uuidMap), nil +} + +func expandNamedUUID(column *ColumnSchema, value interface{}, namedUUIDs map[string]string) interface{} { + var keyType, valType ExtendedType + + switch column.Type { + case TypeUUID: + keyType = column.Type + case TypeSet: + keyType = column.TypeObj.Key.Type + case TypeMap: + keyType = column.TypeObj.Key.Type + valType = column.TypeObj.Value.Type + } + + if valType == TypeUUID { + if m, ok := value.(OvsMap); ok { + for k, v := range m.GoMap { + if newUUID, ok := expandNamedUUIDAtomic(keyType, k, namedUUIDs); ok { + m.GoMap[newUUID] = m.GoMap[k] + delete(m.GoMap, k) + k = newUUID + } + if newUUID, ok := expandNamedUUIDAtomic(valType, v, namedUUIDs); ok { + m.GoMap[k] = newUUID + } + } + } + } else if keyType == TypeUUID { + if ovsSet, ok := value.(OvsSet); ok { + for i, s := range ovsSet.GoSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + ovsSet.GoSet[i] = newUUID + } + } + return value + } else if strSet, ok := value.([]string); ok { + for i, s := range strSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + strSet[i] = newUUID.(string) + } + } + return value + } else if uuidSet, ok := value.([]UUID); ok { + for i, s := range uuidSet { + if newUUID, ok := expandNamedUUIDAtomic(keyType, s, namedUUIDs); ok { + uuidSet[i] = newUUID.(UUID) + } + } + return value + } + + if newUUID, ok := expandNamedUUIDAtomic(keyType, value, namedUUIDs); ok { + return newUUID + } + } + + // No expansion required; return original value + return value +} + +func expandNamedUUIDAtomic(valueType ExtendedType, value interface{}, namedUUIDs map[string]string) (interface{}, bool) { + if valueType == TypeUUID { + if uuid, ok := value.(UUID); ok { + if newUUID, ok := namedUUIDs[uuid.GoUUID]; ok { + return UUID{GoUUID: newUUID}, true + } + } else if uuid, ok := value.(string); ok { + if newUUID, ok := namedUUIDs[uuid]; ok { + return newUUID, true + } + } + } + return value, false +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go new file mode 100644 index 000000000..afad87cdc --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go @@ -0,0 +1,129 @@ +package ovsdb + +import ( + "encoding/json" +) + +const ( + // OperationInsert is an insert operation + OperationInsert = "insert" + // OperationSelect is a select operation + OperationSelect = "select" + // OperationUpdate is an update operation + OperationUpdate = "update" + // OperationMutate is a mutate operation + OperationMutate = "mutate" + // OperationDelete is a delete operation + OperationDelete = "delete" + // OperationWait is a wait operation + OperationWait = "wait" + // OperationCommit is a commit operation + OperationCommit = "commit" + // OperationAbort is an abort operation + OperationAbort = "abort" + // OperationComment is a comment operation + OperationComment = "comment" + // OperationAssert is an assert operation + OperationAssert = "assert" +) + +// Operation represents an operation according to RFC7047 section 5.2 +type Operation struct { + Op string `json:"op"` + Table string `json:"table,omitempty"` + Row Row `json:"row,omitempty"` + Rows []Row `json:"rows,omitempty"` + Columns []string `json:"columns,omitempty"` + Mutations []Mutation `json:"mutations,omitempty"` + Timeout *int `json:"timeout,omitempty"` + Where []Condition `json:"where,omitempty"` + Until 
string `json:"until,omitempty"` + Durable *bool `json:"durable,omitempty"` + Comment *string `json:"comment,omitempty"` + Lock *string `json:"lock,omitempty"` + UUID string `json:"uuid,omitempty"` + UUIDName string `json:"uuid-name,omitempty"` +} + +// MarshalJSON marshals 'Operation' to a byte array +// For 'select' operations, we don't omit the 'Where' field +// to allow selecting all rows of a table +func (o Operation) MarshalJSON() ([]byte, error) { + type OpAlias Operation + switch o.Op { + case "select": + where := o.Where + if where == nil { + where = make([]Condition, 0) + } + return json.Marshal(&struct { + Where []Condition `json:"where"` + OpAlias + }{ + Where: where, + OpAlias: (OpAlias)(o), + }) + default: + return json.Marshal(&struct { + OpAlias + }{ + OpAlias: (OpAlias)(o), + }) + } +} + +// MonitorRequests represents a group of monitor requests according to RFC7047 +// We cannot define MonitorRequests by inlining the MonitorRequest map structure +// until GoLang issue #6213 is resolved, so the only option for now is to use a +// raw map[string]interface{}. +// Refer to the MonitorAll() function in client.go for more details +type MonitorRequests struct { + Requests map[string]MonitorRequest `json:"requests"` +} + +// MonitorRequest represents a monitor request according to RFC7047 +type MonitorRequest struct { + Columns []string `json:"columns,omitempty"` + Where []Condition `json:"where,omitempty"` + Select *MonitorSelect `json:"select,omitempty"` +} + +// TransactResponse represents the response to a Transact Operation +type TransactResponse struct { + Result []OperationResult `json:"result"` + Error string `json:"error"` +} + +// OperationResult is the result of an Operation +type OperationResult struct { + Count int `json:"count,omitempty"` + Error string `json:"error,omitempty"` + Details string `json:"details,omitempty"` + UUID UUID `json:"uuid,omitempty"` + Rows []Row `json:"rows,omitempty"` +} + +func ovsSliceToGoNotation(val interface{}) (interface{}, error) { + switch sl := val.(type) { + case []interface{}: + bsliced, err := json.Marshal(sl) + if err != nil { + return nil, err + } + switch sl[0] { + case "uuid", "named-uuid": + var uuid UUID + err = json.Unmarshal(bsliced, &uuid) + return uuid, err + case "set": + var oSet OvsSet + err = json.Unmarshal(bsliced, &oSet) + return oSet, err + case "map": + var oMap OvsMap + err = json.Unmarshal(bsliced, &oMap) + return oMap, err + } + return val, nil + } + return val, nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go new file mode 100644 index 000000000..9a253f74f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go @@ -0,0 +1,26 @@ +package ovsdb + +import "encoding/json" + +// Row is a table Row according to RFC7047 +type Row map[string]interface{} + +// UnmarshalJSON unmarshals a byte array to an OVSDB Row +func (r *Row) UnmarshalJSON(b []byte) (err error) { + *r = make(map[string]interface{}) + var raw map[string]interface{} + err = json.Unmarshal(b, &raw) + for key, val := range raw { + val, err = ovsSliceToGoNotation(val) + if err != nil { + return err + } + (*r)[key] = val + } + return err +} + +// NewRow returns a new empty row +func NewRow() Row { + return Row(make(map[string]interface{})) +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go new file mode 100644 index 000000000..f1e598005 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go @@ -0,0
+1,79 @@ +package ovsdb + +const ( + // MonitorRPC is the monitor RPC method + MonitorRPC = "monitor" + // ConditionalMonitorRPC is the monitor_cond RPC method + ConditionalMonitorRPC = "monitor_cond" + // ConditionalMonitorSinceRPC is the monitor_cond_since RPC method + ConditionalMonitorSinceRPC = "monitor_cond_since" +) + +// NewEchoArgs creates a new set of arguments for an echo RPC +func NewEchoArgs() []interface{} { + return []interface{}{"libovsdb echo"} +} + +// NewGetSchemaArgs creates a new set of arguments for a get_schema RPC +func NewGetSchemaArgs(schema string) []interface{} { + return []interface{}{schema} +} + +// NewTransactArgs creates a new set of arguments for a transact RPC +func NewTransactArgs(database string, operations ...Operation) []interface{} { + dbSlice := make([]interface{}, 1) + dbSlice[0] = database + + opsSlice := make([]interface{}, len(operations)) + for i, d := range operations { + opsSlice[i] = d + } + + ops := append(dbSlice, opsSlice...) + return ops +} + +// NewCancelArgs creates a new set of arguments for a cancel RPC +func NewCancelArgs(id interface{}) []interface{} { + return []interface{}{id} +} + +// NewMonitorArgs creates a new set of arguments for a monitor RPC +func NewMonitorArgs(database string, value interface{}, requests map[string]MonitorRequest) []interface{} { + return []interface{}{database, value, requests} +} + +// NewMonitorCondSinceArgs creates a new set of arguments for a monitor_cond_since RPC +func NewMonitorCondSinceArgs(database string, value interface{}, requests map[string]MonitorRequest, lastTransactionID string) []interface{} { + return []interface{}{database, value, requests, lastTransactionID} +} + +// NewMonitorCancelArgs creates a new set of arguments for a monitor_cancel RPC +func NewMonitorCancelArgs(value interface{}) []interface{} { + return []interface{}{value} +} + +// NewLockArgs creates a new set of arguments for a lock, steal or unlock RPC +func NewLockArgs(id interface{}) []interface{} { + return []interface{}{id} +} + +// NotificationHandler is the interface that must be implemented to receive notifications +type NotificationHandler interface { + // RFC 7047 section 4.1.6 Update Notification + Update(context interface{}, tableUpdates TableUpdates) + + // ovsdb-server.7 update2 notifications + Update2(context interface{}, tableUpdates TableUpdates2) + + // RFC 7047 section 4.1.9 Locked Notification + Locked([]interface{}) + + // RFC 7047 section 4.1.10 Stolen Notification + Stolen([]interface{}) + + // RFC 7047 section 4.1.11 Echo Notification + Echo([]interface{}) + + Disconnected() +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go new file mode 100644 index 000000000..285d1e02a --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go @@ -0,0 +1,641 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "strings" +) + +// DatabaseSchema is a database schema according to RFC7047 +type DatabaseSchema struct { + Name string `json:"name"` + Version string `json:"version"` + Tables map[string]TableSchema `json:"tables"` + allTablesRoot *bool +} + +// UUIDColumn is a static column that represents the _uuid column, common to all tables +var UUIDColumn = ColumnSchema{ + Type: TypeUUID, +} + +// Table returns the TableSchema for a given table name +func (schema DatabaseSchema) Table(tableName string) *TableSchema { + if table, ok := schema.Tables[tableName]; ok { + return &table + } +
return nil +} + +// IsRoot returns whether a table is part of the root set or not +func (schema DatabaseSchema) IsRoot(tableName string) (bool, error) { + t := schema.Table(tableName) + if t == nil { + return false, fmt.Errorf("table %s not in schema", tableName) + } + if t.IsRoot { + return true, nil + } + // As per RFC7047, for compatibility with schemas created before + // "isRoot" was introduced, if "isRoot" is omitted or false in every + // <table-schema> in a given <database-schema>, then every table is part + // of the root set. + if schema.allTablesRoot == nil { + allTablesRoot := true + for _, tSchema := range schema.Tables { + if tSchema.IsRoot { + allTablesRoot = false + break + } + } + schema.allTablesRoot = &allTablesRoot + } + return *schema.allTablesRoot, nil +} + +// Print will print the contents of the DatabaseSchema +func (schema DatabaseSchema) Print(w io.Writer) { + fmt.Fprintf(w, "%s, (%s)\n", schema.Name, schema.Version) + for table, tableSchema := range schema.Tables { + fmt.Fprintf(w, "\t %s", table) + if len(tableSchema.Indexes) > 0 { + fmt.Fprintf(w, "(%v)\n", tableSchema.Indexes) + } else { + fmt.Fprintf(w, "\n") + } + for column, columnSchema := range tableSchema.Columns { + fmt.Fprintf(w, "\t\t %s => %s\n", column, columnSchema) + } + } +} + +// SchemaFromFile returns a DatabaseSchema from a file +func SchemaFromFile(f *os.File) (DatabaseSchema, error) { + data, err := ioutil.ReadAll(f) + if err != nil { + return DatabaseSchema{}, err + } + var schema DatabaseSchema + err = json.Unmarshal(data, &schema) + if err != nil { + return DatabaseSchema{}, err + } + return schema, nil +} + +// ValidateOperations performs basic validation for operations against a DatabaseSchema +func (schema DatabaseSchema) ValidateOperations(operations ...Operation) bool { + for _, op := range operations { + switch op.Op { + case OperationAbort, OperationAssert, OperationComment, OperationCommit, OperationWait: + continue + case OperationInsert, OperationSelect, OperationUpdate, OperationMutate, OperationDelete: + table, ok := schema.Tables[op.Table] + if ok { + for column := range op.Row { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + for _, row := range op.Rows { + for column := range row { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + } + for _, column := range op.Columns { + if _, ok := table.Columns[column]; !ok { + if column != "_uuid" && column != "_version" { + return false + } + } + } + } else { + return false + } + } + } + return true +} + +// TableSchema is a table schema according to RFC7047 +type TableSchema struct { + Columns map[string]*ColumnSchema `json:"columns"` + Indexes [][]string `json:"indexes,omitempty"` + IsRoot bool `json:"isRoot,omitempty"` +} + +// Column returns the Column object for a specific column name +func (t TableSchema) Column(columnName string) *ColumnSchema { + if columnName == "_uuid" { + return &UUIDColumn + } + if column, ok := t.Columns[columnName]; ok { + return column + } + return nil +} + +/*RFC7047 defines some atomic-types (e.g: integer, string, etc). However, the Column's type +can also hold other more complex types such as set, enum and map. The way to determine the type +depends on internal, not directly marshallable fields. Therefore, in order to simplify the usage +of this library, we define an ExtendedType that includes all possible column types (including +atomic fields).
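+For example, a column declared in the schema as {"type": {"key": "string", "min": 0, "max": "unlimited"}} +is exposed with Type == TypeSet, while a column declared simply as {"type": "string"} is exposed with +Type == TypeString.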
+*/ + +// ExtendedType includes atomic types as defined in the RFC plus Enum, Map and Set +type ExtendedType = string + +// RefType is used to define the possible RefTypes +type RefType = string + +// unlimited is not constant as we can't take the address of int constants +var ( + // Unlimited is used to express unlimited "Max" + Unlimited = -1 +) + +const ( + unlimitedString = "unlimited" + //Strong RefType + Strong RefType = "strong" + //Weak RefType + Weak RefType = "weak" + + //ExtendedType associated with Atomic Types + + //TypeInteger is equivalent to 'int' + TypeInteger ExtendedType = "integer" + //TypeReal is equivalent to 'float64' + TypeReal ExtendedType = "real" + //TypeBoolean is equivalent to 'bool' + TypeBoolean ExtendedType = "boolean" + //TypeString is equivalent to 'string' + TypeString ExtendedType = "string" + //TypeUUID is equivalent to 'libovsdb.UUID' + TypeUUID ExtendedType = "uuid" + + //Extended Types used to summarize the internal type of the field. + + //TypeEnum is an enumerator of type defined by Key.Type + TypeEnum ExtendedType = "enum" + //TypeMap is a map whose type depend on Key.Type and Value.Type + TypeMap ExtendedType = "map" + //TypeSet is a set whose type depend on Key.Type + TypeSet ExtendedType = "set" +) + +// BaseType is a base-type structure as per RFC7047 +type BaseType struct { + Type string + Enum []interface{} + minReal *float64 + maxReal *float64 + minInteger *int + maxInteger *int + minLength *int + maxLength *int + refTable *string + refType *RefType +} + +func (b *BaseType) simpleAtomic() bool { + return isAtomicType(b.Type) && b.Enum == nil && b.minReal == nil && b.maxReal == nil && b.minInteger == nil && b.maxInteger == nil && b.minLength == nil && b.maxLength == nil && b.refTable == nil && b.refType == nil +} + +// MinReal returns the minimum real value +// RFC7047 does not define a default, but we assume this to be +// the smallest non zero value a float64 could hold +func (b *BaseType) MinReal() (float64, error) { + if b.Type != TypeReal { + return 0, fmt.Errorf("%s is not a real", b.Type) + } + if b.minReal != nil { + return *b.minReal, nil + } + return math.SmallestNonzeroFloat64, nil +} + +// MaxReal returns the maximum real value +// RFC7047 does not define a default, but this would be the maximum +// value held by a float64 +func (b *BaseType) MaxReal() (float64, error) { + if b.Type != TypeReal { + return 0, fmt.Errorf("%s is not a real", b.Type) + } + if b.maxReal != nil { + return *b.maxReal, nil + } + return math.MaxFloat64, nil +} + +// MinInteger returns the minimum integer value +// RFC7047 specifies the minimum to be -2^63 +func (b *BaseType) MinInteger() (int, error) { + if b.Type != TypeInteger { + return 0, fmt.Errorf("%s is not an integer", b.Type) + } + if b.minInteger != nil { + return *b.minInteger, nil + } + return int(math.Pow(-2, 63)), nil +} + +// MaxInteger returns the maximum integer value +// RFC7047 specifies the maximum to be 2^63-1 +func (b *BaseType) MaxInteger() (int, error) { + if b.Type != TypeInteger { + return 0, fmt.Errorf("%s is not an integer", b.Type) + } + if b.maxInteger != nil { + return *b.maxInteger, nil + } + return int(math.Pow(2, 63)) - 1, nil +} + +// MinLength returns the minimum string length +// RFC7047 doesn't specify a default, but we assume +// that it must be >= 0 +func (b *BaseType) MinLength() (int, error) { + if b.Type != TypeString { + return 0, fmt.Errorf("%s is not a string", b.Type) + } + if b.minLength != nil { + return *b.minLength, nil + } + return 0, nil +} + +//
MaxLength returns the maximum string length +// RFC7047 doesn't specify a default, but we assume +// that it must be at most 2^63-1 +func (b *BaseType) MaxLength() (int, error) { + if b.Type != TypeString { + return 0, fmt.Errorf("%s is not a string", b.Type) + } + if b.maxLength != nil { + return *b.maxLength, nil + } + return int(math.Pow(2, 63)) - 1, nil +} + +// RefTable returns the table to which a UUID type refers +// It will return an empty string if not set +func (b *BaseType) RefTable() (string, error) { + if b.Type != TypeUUID { + return "", fmt.Errorf("%s is not a uuid", b.Type) + } + if b.refTable != nil { + return *b.refTable, nil + } + return "", nil +} + +// RefType returns the reference type for a UUID field +// RFC7047 infers the RefType is strong if omitted +func (b *BaseType) RefType() (RefType, error) { + if b.Type != TypeUUID { + return "", fmt.Errorf("%s is not a uuid", b.Type) + } + if b.refType != nil { + return *b.refType, nil + } + return Strong, nil +} + +// UnmarshalJSON unmarshals a json-formatted base type +func (b *BaseType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err == nil { + if isAtomicType(s) { + b.Type = s + } else { + return fmt.Errorf("non atomic type %s in <base-type>", s) + } + return nil + } + // temporary type to avoid recursive call to unmarshal + var bt struct { + Type string `json:"type"` + Enum interface{} `json:"enum,omitempty"` + MinReal *float64 `json:"minReal,omitempty"` + MaxReal *float64 `json:"maxReal,omitempty"` + MinInteger *int `json:"minInteger,omitempty"` + MaxInteger *int `json:"maxInteger,omitempty"` + MinLength *int `json:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` + RefTable *string `json:"refTable,omitempty"` + RefType *RefType `json:"refType,omitempty"` + } + err := json.Unmarshal(data, &bt) + if err != nil { + return err + } + + if bt.Enum != nil { + // 'enum' is a list or a single element representing a list of exactly one element + switch bt.Enum.(type) { + case []interface{}: + // it's an OvsSet + oSet := bt.Enum.([]interface{}) + innerSet := oSet[1].([]interface{}) + b.Enum = make([]interface{}, len(innerSet)) + copy(b.Enum, innerSet) + default: + b.Enum = []interface{}{bt.Enum} + } + } + b.Type = bt.Type + b.minReal = bt.MinReal + b.maxReal = bt.MaxReal + b.minInteger = bt.MinInteger + b.maxInteger = bt.MaxInteger + b.minLength = bt.MinLength + b.maxLength = bt.MaxLength + b.refTable = bt.RefTable + b.refType = bt.RefType + return nil +} + +// MarshalJSON marshals a base type to JSON +func (b BaseType) MarshalJSON() ([]byte, error) { + j := struct { + Type string `json:"type,omitempty"` + Enum *OvsSet `json:"enum,omitempty"` + MinReal *float64 `json:"minReal,omitempty"` + MaxReal *float64 `json:"maxReal,omitempty"` + MinInteger *int `json:"minInteger,omitempty"` + MaxInteger *int `json:"maxInteger,omitempty"` + MinLength *int `json:"minLength,omitempty"` + MaxLength *int `json:"maxLength,omitempty"` + RefTable *string `json:"refTable,omitempty"` + RefType *RefType `json:"refType,omitempty"` + }{ + Type: b.Type, + MinReal: b.minReal, + MaxReal: b.maxReal, + MinInteger: b.minInteger, + MaxInteger: b.maxInteger, + MinLength: b.minLength, + MaxLength: b.maxLength, + RefTable: b.refTable, + RefType: b.refType, + } + if len(b.Enum) > 0 { + set, err := NewOvsSet(b.Enum) + if err != nil { + return nil, err + } + j.Enum = &set + } + return json.Marshal(j) +} + +// ColumnType is a type object as per RFC7047 +// "key": required +// "value": optional +// "min": optional (default:
1) + // "max": <integer> or "unlimited", optional (default: 1) +type ColumnType struct { + Key *BaseType + Value *BaseType + min *int + max *int +} + +// Max returns the maximum value of a ColumnType. -1 is Unlimited +func (c *ColumnType) Max() int { + if c.max == nil { + return 1 + } + return *c.max +} + +// Min returns the minimum value of a ColumnType +func (c *ColumnType) Min() int { + if c.min == nil { + return 1 + } + return *c.min +} + +// UnmarshalJSON unmarshals a json-formatted column type +func (c *ColumnType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err == nil { + if isAtomicType(s) { + c.Key = &BaseType{Type: s} + } else { + return fmt.Errorf("non atomic type %s in <type>", s) + } + return nil + } + var colType struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value"` + Min *int `json:"min"` + Max interface{} `json:"max"` + } + err := json.Unmarshal(data, &colType) + if err != nil { + return err + } + c.Key = colType.Key + c.Value = colType.Value + c.min = colType.Min + switch v := colType.Max.(type) { + case string: + if v == unlimitedString { + c.max = &Unlimited + } else { + return fmt.Errorf("unexpected string value in max field") + } + case float64: + i := int(v) + c.max = &i + default: + c.max = nil + } + return nil +} + +// MarshalJSON marshals a column type to JSON +func (c ColumnType) MarshalJSON() ([]byte, error) { + if c.Value == nil && c.max == nil && c.min == nil && c.Key.simpleAtomic() { + return json.Marshal(c.Key.Type) + } + if c.Max() == Unlimited { + colType := struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value,omitempty"` + Min *int `json:"min,omitempty"` + Max string `json:"max,omitempty"` + }{ + Key: c.Key, + Value: c.Value, + Min: c.min, + Max: unlimitedString, + } + return json.Marshal(&colType) + } + colType := struct { + Key *BaseType `json:"key"` + Value *BaseType `json:"value,omitempty"` + Min *int `json:"min,omitempty"` + Max *int `json:"max,omitempty"` + }{ + Key: c.Key, + Value: c.Value, + Min: c.min, + Max: c.max, + } + return json.Marshal(&colType) +} + +// ColumnSchema is a column schema according to RFC7047 +type ColumnSchema struct { + // According to RFC7047, the "type" field can be either an <atomic-type> + // or a ColumnType defined below. To try to simplify the usage, the + // json message will be parsed manually and Type will indicate the "extended" + // type. Depending on its value, more information may be available in TypeObj.
+ // E.g: If Type == TypeEnum, TypeObj.Key.Enum contains the possible values + Type ExtendedType + TypeObj *ColumnType + ephemeral *bool + mutable *bool +} + +// Mutable returns whether a column is mutable +func (c *ColumnSchema) Mutable() bool { + if c.mutable != nil { + return *c.mutable + } + // default true + return true +} + +// Ephemeral returns whether a column is ephemeral +func (c *ColumnSchema) Ephemeral() bool { + if c.ephemeral != nil { + return *c.ephemeral + } + // default false + return false +} + +// UnmarshalJSON unmarshals a json-formatted column +func (c *ColumnSchema) UnmarshalJSON(data []byte) error { + // ColumnJSON represents the known json values for a Column + var colJSON struct { + Type *ColumnType `json:"type"` + Ephemeral *bool `json:"ephemeral,omitempty"` + Mutable *bool `json:"mutable,omitempty"` + } + + // Unmarshal known keys + if err := json.Unmarshal(data, &colJSON); err != nil { + return fmt.Errorf("cannot parse column object %s", err) + } + + c.ephemeral = colJSON.Ephemeral + c.mutable = colJSON.Mutable + c.TypeObj = colJSON.Type + + // Infer the ExtendedType from the TypeObj + if c.TypeObj.Value != nil { + c.Type = TypeMap + } else if c.TypeObj.Min() != 1 || c.TypeObj.Max() != 1 { + c.Type = TypeSet + } else if len(c.TypeObj.Key.Enum) > 0 { + c.Type = TypeEnum + } else { + c.Type = c.TypeObj.Key.Type + } + return nil +} + +// MarshalJSON marshals a column schema to JSON +func (c ColumnSchema) MarshalJSON() ([]byte, error) { + type colJSON struct { + Type *ColumnType `json:"type"` + Ephemeral *bool `json:"ephemeral,omitempty"` + Mutable *bool `json:"mutable,omitempty"` + } + column := colJSON{ + Type: c.TypeObj, + Ephemeral: c.ephemeral, + Mutable: c.mutable, + } + return json.Marshal(column) +} + +// String returns a string representation of the (native) column type +func (c *ColumnSchema) String() string { + var flags []string + var flagStr string + var typeStr string + if c.Ephemeral() { + flags = append(flags, "E") + } + if c.Mutable() { + flags = append(flags, "M") + } + if len(flags) > 0 { + flagStr = fmt.Sprintf("[%s]", strings.Join(flags, ",")) + } + + switch c.Type { + case TypeInteger, TypeReal, TypeBoolean, TypeString: + typeStr = string(c.Type) + case TypeUUID: + if c.TypeObj != nil && c.TypeObj.Key != nil { + // ignore err as we've already asserted this is a uuid + reftable, _ := c.TypeObj.Key.RefTable() + reftype := "" + if s, err := c.TypeObj.Key.RefType(); err == nil { + reftype = s + } + typeStr = fmt.Sprintf("uuid [%s (%s)]", reftable, reftype) + } else { + typeStr = "uuid" + } + + case TypeEnum: + typeStr = fmt.Sprintf("enum (type: %s): %v", c.TypeObj.Key.Type, c.TypeObj.Key.Enum) + case TypeMap: + typeStr = fmt.Sprintf("[%s]%s", c.TypeObj.Key.Type, c.TypeObj.Value.Type) + case TypeSet: + var keyStr string + if c.TypeObj.Key.Type == TypeUUID { + // ignore err as we've already asserted this is a uuid + reftable, _ := c.TypeObj.Key.RefTable() + reftype, _ := c.TypeObj.Key.RefType() + keyStr = fmt.Sprintf(" [%s (%s)]", reftable, reftype) + } else { + keyStr = string(c.TypeObj.Key.Type) + } + typeStr = fmt.Sprintf("[]%s (min: %d, max: %d)", keyStr, c.TypeObj.Min(), c.TypeObj.Max()) + default: + panic(fmt.Sprintf("Unsupported type %s", c.Type)) + } + + return strings.Join([]string{typeStr, flagStr}, " ") +} + +func isAtomicType(atype string) bool { + switch atype { + case TypeInteger, TypeReal, TypeBoolean, TypeString, TypeUUID: + return true + default: + return false + } +} diff --git
a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore new file mode 100644 index 000000000..33f8bff56 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema \ No newline at end of file diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go new file mode 100644 index 000000000..274a7164f --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go @@ -0,0 +1,182 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package serverdb + +import "github.com/ovn-org/libovsdb/model" + +const DatabaseTable = "Database" + +type ( + DatabaseModel = string +) + +var ( + DatabaseModelStandalone DatabaseModel = "standalone" + DatabaseModelClustered DatabaseModel = "clustered" + DatabaseModelRelay DatabaseModel = "relay" +) + +// Database defines an object in Database table +type Database struct { + UUID string `ovsdb:"_uuid"` + Cid *string `ovsdb:"cid"` + Connected bool `ovsdb:"connected"` + Index *int `ovsdb:"index"` + Leader bool `ovsdb:"leader"` + Model DatabaseModel `ovsdb:"model"` + Name string `ovsdb:"name"` + Schema *string `ovsdb:"schema"` + Sid *string `ovsdb:"sid"` +} + +func (a *Database) GetUUID() string { + return a.UUID +} + +func (a *Database) GetCid() *string { + return a.Cid +} + +func copyDatabaseCid(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseCid(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetConnected() bool { + return a.Connected +} + +func (a *Database) GetIndex() *int { + return a.Index +} + +func copyDatabaseIndex(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseIndex(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetLeader() bool { + return a.Leader +} + +func (a *Database) GetModel() DatabaseModel { + return a.Model +} + +func (a *Database) GetName() string { + return a.Name +} + +func (a *Database) GetSchema() *string { + return a.Schema +} + +func copyDatabaseSchema(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseSchema(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) GetSid() *string { + return a.Sid +} + +func copyDatabaseSid(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDatabaseSid(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Database) DeepCopyInto(b *Database) { + *b = *a + b.Cid = copyDatabaseCid(a.Cid) + b.Index = copyDatabaseIndex(a.Index) + b.Schema = copyDatabaseSchema(a.Schema) + b.Sid = copyDatabaseSid(a.Sid) +} + +func (a *Database) DeepCopy() *Database { + b := new(Database) + a.DeepCopyInto(b) + return b +} + +func (a *Database) CloneModelInto(b model.Model) { + c := b.(*Database) + a.DeepCopyInto(c) +} + +func (a *Database) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Database) Equals(b *Database) bool { + return a.UUID == b.UUID && + equalDatabaseCid(a.Cid, b.Cid) && + a.Connected == b.Connected && + 
equalDatabaseIndex(a.Index, b.Index) && + a.Leader == b.Leader && + a.Model == b.Model && + a.Name == b.Name && + equalDatabaseSchema(a.Schema, b.Schema) && + equalDatabaseSid(a.Sid, b.Sid) +} + +func (a *Database) EqualsModel(b model.Model) bool { + c := b.(*Database) + return a.Equals(c) +} + +var _ model.CloneableModel = &Database{} +var _ model.ComparableModel = &Database{} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go new file mode 100644 index 000000000..5923af60a --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go @@ -0,0 +1,6 @@ +package serverdb + +// server_model is a database model for the special _Server database that all +// ovsdb instances export. It reports back status of the server process itself. + +//go:generate ../../bin/modelgen --extended -p serverdb -o . _server.ovsschema diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go new file mode 100644 index 000000000..3c117faa2 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go @@ -0,0 +1,99 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package serverdb + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("_Server", map[string]model.Model{ + "Database": &Database{}, + }) +} + +var schema = `{ + "name": "_Server", + "version": "1.2.0", + "tables": { + "Database": { + "columns": { + "cid": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": 1 + } + }, + "connected": { + "type": "boolean" + }, + "index": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "leader": { + "type": "boolean" + }, + "model": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "standalone", + "clustered", + "relay" + ] + ] + } + } + }, + "name": { + "type": "string" + }, + "schema": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "sid": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go new file mode 100644 index 000000000..ae1ec59ae --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go @@ -0,0 +1,109 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "reflect" +) + +// OvsSet is an OVSDB style set +// RFC 7047 has a weird (but understandable) notation for a set, described as: +// Either an <atom>, representing a set with exactly one element, or +// a 2-element JSON array that represents a database set value. The +// first element of the array must be the string "set", and the +// second element must be an array of zero or more <atom>s giving the +// values in the set. All of the <atom>s must have the same type.
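+// For example, the single-element set {5} is encoded as the bare atom 5, while +// the set {1, 2} is encoded as ["set", [1, 2]].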
+type OvsSet struct { + GoSet []interface{} +} + +// NewOvsSet creates a new OVSDB style set from a Go interface (object) +func NewOvsSet(obj interface{}) (OvsSet, error) { + ovsSet := make([]interface{}, 0) + var v reflect.Value + if reflect.TypeOf(obj).Kind() == reflect.Ptr { + v = reflect.ValueOf(obj).Elem() + if v.Kind() == reflect.Invalid { + // must be a nil pointer, so just return an empty set + return OvsSet{ovsSet}, nil + } + } else { + v = reflect.ValueOf(obj) + } + + switch v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + ovsSet = append(ovsSet, v.Index(i).Interface()) + } + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.Bool: + ovsSet = append(ovsSet, v.Interface()) + case reflect.Struct: + if v.Type() == reflect.TypeOf(UUID{}) { + ovsSet = append(ovsSet, v.Interface()) + } else { + return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") + } + default: + return OvsSet{}, fmt.Errorf("ovsset supports only go slice/string/numbers/uuid or pointers to those types") + } + return OvsSet{ovsSet}, nil +} + +// MarshalJSON will marshal an OVSDB style Set into a JSON byte array +func (o OvsSet) MarshalJSON() ([]byte, error) { + switch l := len(o.GoSet); { + case l == 1: + return json.Marshal(o.GoSet[0]) + case l > 0: + var oSet []interface{} + oSet = append(oSet, "set") + oSet = append(oSet, o.GoSet) + return json.Marshal(oSet) + } + return []byte("[\"set\",[]]"), nil +} + +// UnmarshalJSON will unmarshal a JSON byte array to an OVSDB style Set +func (o *OvsSet) UnmarshalJSON(b []byte) (err error) { + o.GoSet = make([]interface{}, 0) + addToSet := func(o *OvsSet, v interface{}) error { + goVal, err := ovsSliceToGoNotation(v) + if err == nil { + o.GoSet = append(o.GoSet, goVal) + } + return err + } + + var inter interface{} + if err = json.Unmarshal(b, &inter); err != nil { + return err + } + switch inter.(type) { + case []interface{}: + var oSet []interface{} + oSet = inter.([]interface{}) + // it's a single uuid object + if len(oSet) == 2 && (oSet[0] == "uuid" || oSet[0] == "named-uuid") { + return addToSet(o, UUID{GoUUID: oSet[1].(string)}) + } + if oSet[0] != "set" { + // it is a slice, but is not a set + return &json.UnmarshalTypeError{Value: reflect.ValueOf(inter).String(), Type: reflect.TypeOf(*o)} + } + innerSet := oSet[1].([]interface{}) + for _, val := range innerSet { + err := addToSet(o, val) + if err != nil { + return err + } + } + return err + default: + // it is a single object + return addToSet(o, inter) + } +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go new file mode 100644 index 000000000..a24ce64ad --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go @@ -0,0 +1,51 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" +) + +type MonitorCondSinceReply struct { + Found bool + LastTransactionID string + Updates TableUpdates2 +} + +func (m MonitorCondSinceReply) MarshalJSON() ([]byte, error) { + v := []interface{}{m.Found, m.LastTransactionID, m.Updates} + return json.Marshal(v) +} + +func (m *MonitorCondSinceReply) UnmarshalJSON(b []byte) error { + var v []json.RawMessage + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + if len(v) != 3 { + return fmt.Errorf("expected a 3 element json array.
there are %d elements", len(v)) + } + + var found bool + err = json.Unmarshal(v[0], &found) + if err != nil { + return err + } + + var lastTransactionID string + err = json.Unmarshal(v[1], &lastTransactionID) + if err != nil { + return err + } + + var updates TableUpdates2 + err = json.Unmarshal(v[2], &updates) + if err != nil { + return err + } + + m.Found = found + m.LastTransactionID = lastTransactionID + m.Updates = updates + return nil +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go new file mode 100644 index 000000000..5a47d0c44 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go @@ -0,0 +1,35 @@ +package ovsdb + +// TableUpdates is an object that maps from a table name to a +// TableUpdate +type TableUpdates map[string]TableUpdate + +// TableUpdate is an object that maps from the row's UUID to a +// RowUpdate +type TableUpdate map[string]*RowUpdate + +// RowUpdate represents a row update according to RFC7047 +type RowUpdate struct { + New *Row `json:"new,omitempty"` + Old *Row `json:"old,omitempty"` +} + +// Insert returns true if this is an update for an insert operation +func (r RowUpdate) Insert() bool { + return r.New != nil && r.Old == nil +} + +// Modify returns true if this is an update for a modify operation +func (r RowUpdate) Modify() bool { + return r.New != nil && r.Old != nil +} + +// Delete returns true if this is an update for a delete operation +func (r RowUpdate) Delete() bool { + return r.New == nil && r.Old != nil +} + +func (r *RowUpdate) FromRowUpdate2(ru2 RowUpdate2) { + r.Old = ru2.Old + r.New = ru2.New +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go new file mode 100644 index 000000000..a040894c9 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go @@ -0,0 +1,19 @@ +package ovsdb + +// TableUpdates2 is an object that maps from a table name to a +// TableUpdate2 +type TableUpdates2 map[string]TableUpdate2 + +// TableUpdate2 is an object that maps from the row's UUID to a +// RowUpdate2 +type TableUpdate2 map[string]*RowUpdate2 + +// RowUpdate2 represents a row update according to ovsdb-server.7 +type RowUpdate2 struct { + Initial *Row `json:"initial,omitempty"` + Insert *Row `json:"insert,omitempty"` + Modify *Row `json:"modify,omitempty"` + Delete *Row `json:"delete,omitempty"` + Old *Row `json:"-"` + New *Row `json:"-"` +} diff --git a/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go new file mode 100644 index 000000000..6bc463653 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go @@ -0,0 +1,59 @@ +package ovsdb + +import ( + "encoding/json" + "fmt" + "regexp" +) + +var validUUID = regexp.MustCompile(`^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`) + +// UUID is a UUID according to RFC7047 +type UUID struct { + GoUUID string `json:"uuid"` +} + +// MarshalJSON will marshal an OVSDB style UUID to a JSON encoded byte array +func (u UUID) MarshalJSON() ([]byte, error) { + var uuidSlice []string + err := ValidateUUID(u.GoUUID) + if err == nil { + uuidSlice = []string{"uuid", u.GoUUID} + } else { + uuidSlice = []string{"named-uuid", u.GoUUID} + } + + return json.Marshal(uuidSlice) +} + +// UnmarshalJSON will unmarshal a JSON encoded byte array to an OVSDB style UUID +func (u *UUID) UnmarshalJSON(b []byte) (err error) { + var ovsUUID []string + if err := json.Unmarshal(b, &ovsUUID); err == nil {
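+ // ovsUUID is expected to be a 2-element tuple such as ["uuid", "..."] + // or ["named-uuid", "..."]; the second element carries the identifier.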
+ u.GoUUID = ovsUUID[1] + } + return err +} + +func ValidateUUID(uuid string) error { + if len(uuid) != 36 { + return fmt.Errorf("uuid must be exactly 36 characters long") + } + + if !validUUID.MatchString(uuid) { + return fmt.Errorf("uuid does not match regexp") + } + + return nil +} + +func IsNamedUUID(uuid string) bool { + return len(uuid) > 0 && !validUUID.MatchString(uuid) +} + +func IsValidUUID(uuid string) bool { + if err := ValidateUUID(uuid); err != nil { + return false + } + return true +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/difference.go b/vendor/github.com/ovn-org/libovsdb/updates/difference.go new file mode 100644 index 000000000..7ebfe8bb5 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/difference.go @@ -0,0 +1,209 @@ +package updates + +import "reflect" + +// difference between value 'a' and value 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the +// difference is 'b' in which case 'b' is returned unmodified. Also returns a +// boolean indicating if there is an actual difference. +func difference(a, b interface{}) (interface{}, bool) { + return mergeDifference(nil, a, b) +} + +// applyDifference returns the result of applying difference 'd' to value 'v' +// along with a boolean indicating if 'v' was changed. +func applyDifference(v, d interface{}) (interface{}, bool) { + if d == nil { + return v, false + } + // difference can be applied with the same algorithm used to calculate it + // f(x,f(x,y))=y + result, changed := difference(v, d) + dv := reflect.ValueOf(d) + switch dv.Kind() { + case reflect.Slice: + fallthrough + case reflect.Map: + // but we need to tweak the interpretation of change for map and slices: + // when there is no difference between the value and non-empty delta, it + // actually means the value needs to be emptied so there is actually a + // change + if !changed && dv.Len() > 0 { + return result, true + } + // there are no changes when delta is empty + return result, changed && dv.Len() > 0 + } + return result, changed +} + +// mergeDifference, given an original value 'o' and two differences 'a' and 'b', +// returns a new equivalent difference that, when applied on 'o', would have +// the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the result is +// 'b' in which case 'b' is returned unmodified. Also returns a boolean +// indicating if there is an actual difference. +func mergeDifference(o, a, b interface{}) (interface{}, bool) { + kind := reflect.ValueOf(b).Kind() + if kind == reflect.Invalid { + kind = reflect.ValueOf(a).Kind() + } + switch kind { + case reflect.Invalid: + return nil, false + case reflect.Slice: + // set differences are transitive + return setDifference(a, b) + case reflect.Map: + return mergeMapDifference(o, a, b) + case reflect.Array: + panic("Not implemented") + default: + return mergeAtomicDifference(o, a, b) + } +} + +// setDifference calculates the difference between set 'a' and set 'b'.
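+// For example, the difference between the sets {1, 2} and {2, 3} is {1, 3}.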
+// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the difference +// is 'b' in which case 'b' is returned unmodified. Also returns a boolean +// indicating if there is an actual difference. +func setDifference(a, b interface{}) (interface{}, bool) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + if !av.IsValid() && !bv.IsValid() { + return nil, false + } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { + return b, bv.Len() != 0 + } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { + return a, av.Len() != 0 + } + + // From https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + // The difference between two sets is all elements that only belong to one + // of the sets. + difference := make(map[interface{}]struct{}, bv.Len()) + for i := 0; i < bv.Len(); i++ { + // supposedly we are working with comparable atomic types with no + // pointers so we can use the values as map key + difference[bv.Index(i).Interface()] = struct{}{} + } + j := av.Len() + for i := 0; i < j; { + vv := av.Index(i) + vi := vv.Interface() + if _, ok := difference[vi]; ok { + // this value of 'a' is in 'b', so remove it from 'a'; to do that, + // overwrite it with the last value and re-evaluate + vv.Set(av.Index(j - 1)) + // decrease where the last 'a' value is at + j-- + // remove from 'b' values + delete(difference, vi) + } else { + // this value of 'a' is not in 'b', evaluate the next value + i++ + } + } + // trim the slice to the actual values held + av = av.Slice(0, j) + for item := range difference { + // this value of 'b' is not in 'a', so add it + av = reflect.Append(av, reflect.ValueOf(item)) + } + + if av.Len() == 0 { + return reflect.Zero(av.Type()).Interface(), false + } + + return av.Interface(), true +} + +// mergeMapDifference, given an original map 'o' and two differences 'a' and +// 'b', returns a new equivalent difference that when applied on 'o' it would +// have the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// The result is calculated in 'a' in-place and returned unless the result is +// 'b' in which case 'b' is returned unmodified. +// Returns a boolean indicating if there is an actual difference. +func mergeMapDifference(o, a, b interface{}) (interface{}, bool) { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + if !av.IsValid() && !bv.IsValid() { + return nil, false + } else if (!av.IsValid() || av.Len() == 0) && bv.IsValid() { + return b, bv.Len() != 0 + } else if (!bv.IsValid() || bv.Len() == 0) && av.IsValid() { + return a, av.Len() != 0 + } + + ov := reflect.ValueOf(o) + if !ov.IsValid() { + ov = reflect.Zero(av.Type()) + } + + // From + // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + // The difference between two maps is all key-value pairs whose keys + // appear in only one of the maps, plus the key-value pairs whose keys + // appear in both maps but with different values. For the latter elements, + // includes the value from the new column. + + // We can assume that difference is a transitive operation so we calculate + // the difference between 'a' and 'b' but we need to handle exceptions when + // the same key is present in all values.
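setDifference above computes this in place with reflection; as a plain illustration of the same semantics, including the f(x, f(x, y)) = y property that applyDifference exploits, here is a non-reflective sketch over string sets and maps (assuming OVSDB set semantics, i.e. no duplicate elements):

```go
package main

import (
	"fmt"
	"sort"
)

// symmetricDifference returns the elements that belong to exactly one of the
// two sets, which is how update2 notifications encode set deltas.
func symmetricDifference(a, b []string) []string {
	count := map[string]int{}
	for _, v := range a {
		count[v]++
	}
	for _, v := range b {
		count[v]++
	}
	d := []string{}
	for v, n := range count {
		if n == 1 {
			d = append(d, v)
		}
	}
	sort.Strings(d)
	return d
}

// mapDifference returns the update2-style map delta: keys present in exactly
// one of the maps, plus keys present in both with different values, reported
// with the value from the new map.
func mapDifference(old, cur map[string]string) map[string]string {
	d := map[string]string{}
	for k, v := range cur {
		if ov, ok := old[k]; !ok || ov != v {
			d[k] = v
		}
	}
	for k, v := range old {
		if _, ok := cur[k]; !ok {
			d[k] = v // deleted keys carry their old value
		}
	}
	return d
}

func main() {
	oldSet := []string{"a", "b", "c"}
	curSet := []string{"b", "c", "d"}
	delta := symmetricDifference(oldSet, curSet)
	fmt.Println(delta) // [a d]
	// Applying a delta with the same function recovers the new value:
	// f(x, f(x, y)) = y.
	fmt.Println(symmetricDifference(oldSet, delta)) // [b c d]

	oldMap := map[string]string{"mtu": "1500", "mac": "aa:bb"}
	curMap := map[string]string{"mtu": "9000", "mac": "aa:bb", "up": "true"}
	fmt.Println(mapDifference(oldMap, curMap)) // map[mtu:9000 up:true]
}
```

The map case handled by the loop that follows additionally needs the original value, because map differences, unlike set differences, are not transitive.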
+ for i := bv.MapRange(); i.Next(); { + kv := i.Key() + bvv := i.Value() + avv := av.MapIndex(kv) + ovv := ov.MapIndex(kv) + // supposedly we are working with comparable types with no pointers so + // we can compare directly here + switch { + case ovv.IsValid() && avv.IsValid() && ovv.Interface() == bvv.Interface(): + // key is present in the three values + // final result would restore key to the original value, delete from 'a' + av.SetMapIndex(kv, reflect.Value{}) + case ovv.IsValid() && avv.IsValid() && avv.Interface() == bvv.Interface(): + // key is present in the three values + // final result would remove key, set in 'a' with 'o' value + av.SetMapIndex(kv, ovv) + case avv.IsValid() && avv.Interface() == bvv.Interface(): + // key/value is in 'a' and 'b', delete from 'a' + av.SetMapIndex(kv, reflect.Value{}) + default: + // key/value in 'b' is not in 'a', set in 'a' with 'b' value + av.SetMapIndex(kv, bvv) + } + } + + if av.Len() == 0 { + return reflect.Zero(av.Type()).Interface(), false + } + + return av.Interface(), true +} + +// mergeAtomicDifference, given an original atomic value 'o' and two differences +// 'a' and 'b', returns a new equivalent difference that when applied on 'o' it +// would have the same result as applying 'a' and 'b' consecutively. +// If 'o' is nil, returns the difference between 'a' and 'b'. +// This difference is calculated as described in +// https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification +// Returns a boolean indicating if there is an actual difference. +func mergeAtomicDifference(o, a, b interface{}) (interface{}, bool) { + if o != nil { + return b, !reflect.DeepEqual(o, b) + } + return b, !reflect.DeepEqual(a, b) +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/doc.go b/vendor/github.com/ovn-org/libovsdb/updates/doc.go new file mode 100644 index 000000000..3e6fe18a0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/doc.go @@ -0,0 +1,15 @@ +/* +Package updates provides a utility to perform and aggregate model updates. + +As input, it supports OVSDB Operations, RowUpdate or RowUpdate2 notations via +the corresponding Add methods. + +As output, it supports both OVSDB RowUpdate2 as well as model notation via the +corresponding ForEach iterative methods. + +Several updates can be added and will be merged with any previous updates even +if they are for the same model. If several updates for the same model are +aggregated, the user is responsible that the provided model to be updated +matches the updated model of the previous update.
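As the package comment says, successive updates for the same model are merged. An illustrative subset of the collapsing rules that merge.go below implements, not the vendored code itself: an insert followed by a modify remains an insert, and an insert followed by a delete cancels out entirely.

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/ovsdb"
)

// collapse merges a later update b into an earlier update a for the same row,
// sketching two of the rules from merge.go: insert+modify stays an insert,
// and insert+delete cancels out. Illustrative subset only.
func collapse(a, b ovsdb.RowUpdate2) *ovsdb.RowUpdate2 {
	switch {
	case a.Insert != nil && b.Modify != nil:
		a.New = b.New
		a.Insert = b.New
		return &a
	case a.Insert != nil && b.Delete != nil:
		return nil // clients never need to hear about this row
	}
	return &a
}

func main() {
	inserted := ovsdb.Row{"name": "br-int"}
	modified := ovsdb.Row{"name": "br-int", "mtu": 9000}

	ins := ovsdb.RowUpdate2{Insert: &inserted, New: &inserted}
	mod := ovsdb.RowUpdate2{Modify: &ovsdb.Row{"mtu": 9000}, New: &modified}
	del := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}}

	fmt.Println(*collapse(ins, mod).Insert) // map[mtu:9000 name:br-int]
	fmt.Println(collapse(ins, del))         // <nil>
}
```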
+*/ +package updates diff --git a/vendor/github.com/ovn-org/libovsdb/updates/merge.go b/vendor/github.com/ovn-org/libovsdb/updates/merge.go new file mode 100644 index 000000000..562f22623 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/merge.go @@ -0,0 +1,160 @@ +package updates + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) { + // handle model update + switch { + case b.old == nil && b.new == nil: + // noop + case a.old == nil && a.new == nil: + // first op + a.old = b.old + a.new = b.new + case a.new != nil && b.old != nil && b.new != nil: + // update after an insert or an update + a.new = b.new + case b.old != nil && b.new == nil: + // a final delete + a.new = nil + default: + return modelUpdate{}, fmt.Errorf("sequence of updates not supported") + } + + // handle row update + ru2, err := mergeRowUpdate(ts, a.rowUpdate2, b.rowUpdate2) + if err != nil { + return modelUpdate{}, err + } + if ru2 == nil { + return modelUpdate{}, nil + } + a.rowUpdate2 = ru2 + + return a, nil +} + +func mergeRowUpdate(ts *ovsdb.TableSchema, a, b *rowUpdate2) (*rowUpdate2, error) { + switch { + case b == nil: + // noop + case a == nil: + // first op + a = b + case a.Insert != nil && b.Modify != nil: + // update after an insert + a.New = b.New + a.Insert = b.New + case a.Modify != nil && b.Modify != nil: + // update after update + a.New = b.New + a.Modify = mergeModifyRow(ts, a.Old, a.Modify, b.Modify) + if a.Modify == nil { + // we merged two modifications that brought back the row to its + // original value which is a no op + a = nil + } + case a.Insert != nil && b.Delete != nil: + // delete after insert + a = nil + case b.Delete != nil: + // a final delete + a.Initial = nil + a.Insert = nil + a.Modify = nil + a.New = nil + a.Delete = b.Delete + default: + return &rowUpdate2{}, fmt.Errorf("sequence of updates not supported") + } + return a, nil +} + +// mergeModifyRow merges two modification rows 'a' and 'b' with respect an +// original row 'o'. Two modifications that restore the original value cancel +// each other and won't be included in the result. Returns nil if there are no +// resulting modifications. +func mergeModifyRow(ts *ovsdb.TableSchema, o, a, b *ovsdb.Row) *ovsdb.Row { + original := *o + aMod := *a + bMod := *b + for k, v := range bMod { + if _, ok := aMod[k]; !ok { + aMod[k] = v + continue + } + + var result interface{} + var changed bool + + // handle maps or sets first + switch v.(type) { + // difference only supports set or map values that are comparable with + // no pointers. This should be currently fine because the set or map + // values should only be non pointer atomic types or the UUID struct. 
+ case ovsdb.OvsSet: + aSet := aMod[k].(ovsdb.OvsSet) + bSet := v.(ovsdb.OvsSet) + // handle sets of multiple values, single value sets are handled as + // atomic values + if ts.Column(k).TypeObj.Max() != 1 { + // set difference is a fully transitive operation so we don't + // need to do anything special to merge two differences + result, changed = setDifference(aSet.GoSet, bSet.GoSet) + result = ovsdb.OvsSet{GoSet: result.([]interface{})} + } + case ovsdb.OvsMap: + aMap := aMod[k].(ovsdb.OvsMap) + bMap := v.(ovsdb.OvsMap) + var originalMap ovsdb.OvsMap + if v, ok := original[k]; ok { + originalMap = v.(ovsdb.OvsMap) + } + // map difference is not transitive with respect to the original + // value so we have to take the original value into account when + // merging + result, changed = mergeMapDifference(originalMap.GoMap, aMap.GoMap, bMap.GoMap) + result = ovsdb.OvsMap{GoMap: result.(map[interface{}]interface{})} + } + + // was neither a map nor a set + if result == nil { + // atomic difference is not transitive with respect to the original + // value so we have to take the original value into account when + // merging + o := original[k] + if o == nil { + // assume zero value if original does not have the column + o = reflect.Zero(reflect.TypeOf(v)).Interface() + } + if set, ok := o.(ovsdb.OvsSet); ok { + // atomic optional values are cleared out with an empty set + // if the original value was also cleared out, use an empty set + // instead of a nil set so that mergeAtomicDifference notices + // that we are returning to the original value + if set.GoSet == nil { + set.GoSet = []interface{}{} + } + o = set + } + result, changed = mergeAtomicDifference(o, aMod[k], v) + } + + if !changed { + delete(aMod, k) + continue + } + aMod[k] = result + } + + if len(aMod) == 0 { + return nil + } + + return a +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/mutate.go b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go new file mode 100644 index 000000000..1d87737fc --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/mutate.go @@ -0,0 +1,297 @@ +package updates + +import ( + "reflect" + + "github.com/ovn-org/libovsdb/ovsdb" +) + +func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) { + for i := 0; i < a.Len(); i++ { + if a.Index(i).Interface() == b.Interface() { + v := reflect.AppendSlice(a.Slice(0, i), a.Slice(i+1, a.Len())) + return v, true + } + } + return a, false +} + +func insertToSlice(a, b reflect.Value) (reflect.Value, bool) { + for i := 0; i < a.Len(); i++ { + if a.Index(i).Interface() == b.Interface() { + return a, false + } + } + return reflect.Append(a, b), true +} + +func mutate(current interface{}, mutator ovsdb.Mutator, value interface{}) (interface{}, interface{}) { + switch current.(type) { + case bool, string: + return current, value + } + switch mutator { + case ovsdb.MutateOperationInsert: + // for insert, the delta will be the new value added + return mutateInsert(current, value) + case ovsdb.MutateOperationDelete: + return mutateDelete(current, value) + case ovsdb.MutateOperationAdd: + // for add, the delta is the new value + new := mutateAdd(current, value) + return new, new + case ovsdb.MutateOperationSubtract: + // for subtract, the delta is the new value + new := mutateSubtract(current, value) + return new, new + case ovsdb.MutateOperationMultiply: + new := mutateMultiply(current, value) + return new, new + case ovsdb.MutateOperationDivide: + new := mutateDivide(current, value) + return new, new + case ovsdb.MutateOperationModulo: + new := 
mutateModulo(current, value) + return new, new + } + return current, value +} + +func mutateInsert(current, value interface{}) (interface{}, interface{}) { + switch current.(type) { + case int, float64: + return current, current + } + vc := reflect.ValueOf(current) + vv := reflect.ValueOf(value) + if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { + v, ok := insertToSlice(vc, vv) + var diff interface{} + if ok { + diff = value + } + return v.Interface(), diff + } + if !vc.IsValid() { + if vv.IsValid() { + return vv.Interface(), vv.Interface() + } + return nil, nil + } + if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { + v := vc + diff := reflect.Indirect(reflect.New(vv.Type())) + for i := 0; i < vv.Len(); i++ { + var ok bool + v, ok = insertToSlice(v, vv.Index(i)) + if ok { + diff = reflect.Append(diff, vv.Index(i)) + } + } + if diff.Len() > 0 { + return v.Interface(), diff.Interface() + } + return v.Interface(), nil + } + if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { + if vc.IsNil() && vv.Len() > 0 { + return value, value + } + diff := reflect.MakeMap(vc.Type()) + iter := vv.MapRange() + for iter.Next() { + k := iter.Key() + if !vc.MapIndex(k).IsValid() { + vc.SetMapIndex(k, iter.Value()) + diff.SetMapIndex(k, iter.Value()) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + return current, nil +} + +func mutateDelete(current, value interface{}) (interface{}, interface{}) { + switch current.(type) { + case int, float64: + return current, nil + } + vc := reflect.ValueOf(current) + vv := reflect.ValueOf(value) + if vc.Kind() == reflect.Slice && vc.Type() == reflect.SliceOf(vv.Type()) { + v, ok := removeFromSlice(vc, vv) + diff := value + if !ok { + diff = nil + } + return v.Interface(), diff + } + if vc.Kind() == reflect.Slice && vv.Kind() == reflect.Slice { + v := vc + diff := reflect.Indirect(reflect.New(vv.Type())) + for i := 0; i < vv.Len(); i++ { + var ok bool + v, ok = removeFromSlice(v, vv.Index(i)) + if ok { + diff = reflect.Append(diff, vv.Index(i)) + } + } + if diff.Len() > 0 { + return v.Interface(), diff.Interface() + } + return v.Interface(), nil + } + if vc.Kind() == reflect.Map && vv.Type() == reflect.SliceOf(vc.Type().Key()) { + diff := reflect.MakeMap(vc.Type()) + for i := 0; i < vv.Len(); i++ { + if vc.MapIndex(vv.Index(i)).IsValid() { + diff.SetMapIndex(vv.Index(i), vc.MapIndex(vv.Index(i))) + vc.SetMapIndex(vv.Index(i), reflect.Value{}) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + if vc.Kind() == reflect.Map && vv.Kind() == reflect.Map { + diff := reflect.MakeMap(vc.Type()) + iter := vv.MapRange() + for iter.Next() { + vvk := iter.Key() + vvv := iter.Value() + vcv := vc.MapIndex(vvk) + if vcv.IsValid() && reflect.DeepEqual(vcv.Interface(), vvv.Interface()) { + diff.SetMapIndex(vvk, vcv) + vc.SetMapIndex(vvk, reflect.Value{}) + } + } + if diff.Len() > 0 { + return current, diff.Interface() + } + return current, nil + } + return current, nil +} + +func mutateAdd(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i + v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i + v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j + v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j + v + } + return is + } + return current +} + +func 
mutateSubtract(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i - v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i - v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j - v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j - v + } + return is + } + return current +} + +func mutateMultiply(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i * v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i * v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j * v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j * v + } + return is + } + return current +} + +func mutateDivide(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i / v + } + if i, ok := current.(float64); ok { + v := value.(float64) + return i / v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j / v + } + return is + } + if is, ok := current.([]float64); ok { + v := value.(float64) + for i, j := range is { + is[i] = j / v + } + return is + } + return current +} + +func mutateModulo(current, value interface{}) interface{} { + if i, ok := current.(int); ok { + v := value.(int) + return i % v + } + if is, ok := current.([]int); ok { + v := value.(int) + for i, j := range is { + is[i] = j % v + } + return is + } + return current +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/references.go b/vendor/github.com/ovn-org/libovsdb/updates/references.go new file mode 100644 index 000000000..938d02aae --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/references.go @@ -0,0 +1,797 @@ +package updates + +import ( + "fmt" + + "github.com/ovn-org/libovsdb/database" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// ReferenceProvider should be implemented by a database that tracks references +type ReferenceProvider interface { + // GetReferences provides the references to the provided row + GetReferences(database, table, uuid string) (database.References, error) + // Get provides the corresponding model + Get(database, table string, uuid string) (model.Model, error) +} + +// DatabaseUpdate bundles updates together with the updated +// reference information +type DatabaseUpdate struct { + ModelUpdates + referenceUpdates database.References +} + +func (u DatabaseUpdate) ForReferenceUpdates(do func(references database.References) error) error { + refsCopy := database.References{} + // since refsCopy is empty, this will just copy everything + applyReferenceModifications(refsCopy, u.referenceUpdates) + return do(refsCopy) +} + +func NewDatabaseUpdate(updates ModelUpdates, references database.References) DatabaseUpdate { + return DatabaseUpdate{ + ModelUpdates: updates, + referenceUpdates: references, + } +} + +// ProcessReferences tracks referential integrity for the provided set of +// updates. It returns an updated set of updates which includes additional +// updates and updated references as a result of the reference garbage +// collection described in RFC7047. These additional updates resulting from the +// reference garbage collection are also returned separately. 
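The mutate helpers above apply RFC7047 mutators to native values and report the delta that was actually applied. For context, a sketch of the wire-level mutate operation they model, the same shape updateRow later in this file emits for weak-reference cleanup; the table and column names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/ovsdb"
)

func main() {
	// Remove one UUID from a set column of a row selected by _uuid.
	// "Logical_Switch" and "ports" are hypothetical schema names.
	op := ovsdb.Operation{
		Op:    ovsdb.OperationMutate,
		Table: "Logical_Switch",
		Mutations: []ovsdb.Mutation{
			*ovsdb.NewMutation("ports", ovsdb.MutateOperationDelete, ovsdb.OvsSet{
				GoSet: []interface{}{ovsdb.UUID{GoUUID: "11111111-1111-1111-1111-111111111111"}},
			}),
		},
		Where: []ovsdb.Condition{
			ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual,
				ovsdb.UUID{GoUUID: "22222222-2222-2222-2222-222222222222"}),
		},
	}
	fmt.Printf("%+v\n", op)
}
```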
Any constraint or +// referential integrity violation is returned as an error. +func ProcessReferences(dbModel model.DatabaseModel, provider ReferenceProvider, updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { + referenceTracker := newReferenceTracker(dbModel, provider) + return referenceTracker.processReferences(updates) +} + +type referenceTracker struct { + dbModel model.DatabaseModel + provider ReferenceProvider + + // updates that are being processed + updates ModelUpdates + + // references are the updated references by the set of updates processed + references database.References + + // helper maps to track the rows that we are processing and their tables + tracked map[string]string + added map[string]string + deleted map[string]string +} + +func newReferenceTracker(dbModel model.DatabaseModel, provider ReferenceProvider) *referenceTracker { + return &referenceTracker{ + dbModel: dbModel, + provider: provider, + } +} + +func (rt *referenceTracker) processReferences(updates ModelUpdates) (ModelUpdates, ModelUpdates, database.References, error) { + rt.updates = updates + rt.tracked = make(map[string]string) + rt.added = make(map[string]string) + rt.deleted = make(map[string]string) + rt.references = make(database.References) + + referenceUpdates, err := rt.processReferencesLoop(updates) + if err != nil { + return ModelUpdates{}, ModelUpdates{}, nil, err + } + + // merge the updates generated from reference tracking into the main updates + err = updates.Merge(rt.dbModel, referenceUpdates) + if err != nil { + return ModelUpdates{}, ModelUpdates{}, nil, err + } + + return updates, referenceUpdates, rt.references, nil +} + +func (rt *referenceTracker) processReferencesLoop(updates ModelUpdates) (ModelUpdates, error) { + referenceUpdates := ModelUpdates{} + + // references can be transitive and deleting them can lead to further + // references having to be removed so loop until there are no updates to be + // made + for len(updates.updates) > 0 { + // update the references from the updates + err := rt.processModelUpdates(updates) + if err != nil { + return ModelUpdates{}, err + } + + // process strong reference integrity + updates, err = rt.processStrongReferences() + if err != nil { + return ModelUpdates{}, err + } + + // process weak reference integrity + weakUpdates, err := rt.processWeakReferences() + if err != nil { + return ModelUpdates{}, err + } + + // merge strong and weak reference updates + err = updates.Merge(rt.dbModel, weakUpdates) + if err != nil { + return ModelUpdates{}, err + } + + // merge updates from this iteration to the overall reference updates + err = referenceUpdates.Merge(rt.dbModel, updates) + if err != nil { + return ModelUpdates{}, err + } + } + + return referenceUpdates, nil +} + +// processModelUpdates keeps track of the updated references by a set of updates +func (rt *referenceTracker) processModelUpdates(updates ModelUpdates) error { + tables := updates.GetUpdatedTables() + for _, table := range tables { + err := updates.ForEachRowUpdate(table, func(uuid string, row ovsdb.RowUpdate2) error { + return rt.processRowUpdate(table, uuid, &row) + }) + if err != nil { + return err + } + } + return nil +} + +// processRowUpdate keeps track of the updated references by a given row update +func (rt *referenceTracker) processRowUpdate(table, uuid string, row *ovsdb.RowUpdate2) error { + + // getReferencesFromRowModify extracts updated references from the + // modifications. 
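The tracker's bookkeeping lives in database.References, which maps a reference location (from-table, from-column, to-table) to the referencing rows of each referenced UUID. A sketch of that shape, with hypothetical schema names:

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/database"
)

func main() {
	// Rows of Logical_Switch reference rows of Logical_Switch_Port through
	// the "ports" column (hypothetical names). Each referenced UUID maps to
	// the list of rows referencing it.
	spec := database.ReferenceSpec{
		ToTable:    "Logical_Switch_Port",
		FromTable:  "Logical_Switch",
		FromColumn: "ports",
	}
	refs := database.References{
		spec: database.Reference{
			"11111111-1111-1111-1111-111111111111": {"22222222-2222-2222-2222-222222222222"},
		},
	}
	fmt.Println(refs)
}
```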
Following the same strategy as the modify field of Update2 + // notification, it will extract a difference, that is, both old removed + // references and new added references are extracted. This difference will + // then be applied to currently tracked references to come up with the + // updated references. + + // For more info on the modify field of Update2 notification and the + // strategy used to apply differences, check + // https://docs.openvswitch.org/en/latest/ref/ovsdb-server.7/#update2-notification + + var updateRefs database.References + switch { + case row.Delete != nil: + rt.deleted[uuid] = table + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Old, row.Old) + case row.Modify != nil: + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Modify, row.Old) + case row.Insert != nil: + if !isRoot(&rt.dbModel, table) { + // track rows added that are not part of the root set, we might need + // to delete those later + rt.added[uuid] = table + rt.tracked[uuid] = table + } + updateRefs = getReferenceModificationsFromRow(&rt.dbModel, table, uuid, row.Insert, nil) + } + + // (lazy) initialize existing references to the same rows from the database + for spec, refs := range updateRefs { + for to := range refs { + err := rt.initReferences(spec.ToTable, to) + if err != nil { + return err + } + } + } + + // apply the reference modifications to the initialized references + applyReferenceModifications(rt.references, updateRefs) + + return nil +} + +// processStrongReferences adds delete operations for rows that are not part of +// the root set and are no longer strongly referenced. Returns a referential +// integrity violation if a nonexistent row is strongly referenced or a strongly +// referenced row has been deleted. 
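Both integrity checks below surface standard RFC7047 errors through constructors from the ovsdb package; the detail messages here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/ovsdb"
)

func main() {
	// Strong references to a missing row are fatal for the transaction...
	var riv error = ovsdb.NewReferentialIntegrityViolation(
		"Table A column ref row x references nonexistent or deleted row y in table B")
	// ...as is weak-reference cleanup that would underflow a column's
	// minimum length.
	var cv error = ovsdb.NewConstraintViolation(
		"Deletion of a weak reference left column ref in table A with an invalid length")
	fmt.Println(riv)
	fmt.Println(cv)
}
```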
+func (rt *referenceTracker) processStrongReferences() (ModelUpdates, error) { + // make sure that we are tracking the references to the deleted rows + err := rt.initReferencesOfDeletedRows() + if err != nil { + return ModelUpdates{}, err + } + + // track if rows are referenced or not + isReferenced := map[string]bool{} + + // go over the updated references + for spec, refs := range rt.references { + + // we only care about strong references + if !isStrong(&rt.dbModel, spec) { + continue + } + + for to, from := range refs { + // check if the referenced row exists + exists, err := rt.rowExists(spec.ToTable, to) + if err != nil { + return ModelUpdates{}, err + } + if !exists { + for _, uuid := range from { + // strong reference to a row that does not exist + return ModelUpdates{}, ovsdb.NewReferentialIntegrityViolation(fmt.Sprintf( + "Table %s column %s row %s references nonexistent or deleted row %s in table %s", + spec.FromTable, spec.FromColumn, uuid, to, spec.ToTable)) + } + // we deleted the row ourselves on a previous loop + continue + } + + // track if this row is referenced from this location spec + isReferenced[to] = isReferenced[to] || len(from) > 0 + } + } + + // inserted rows that are unreferenced and not part of the root set will + // silently be dropped from the updates + for uuid := range rt.added { + if isReferenced[uuid] { + continue + } + isReferenced[uuid] = false + } + + // delete rows that are not referenced + updates := ModelUpdates{} + for uuid, isReferenced := range isReferenced { + if isReferenced { + // row is still referenced, ignore + continue + } + + if rt.deleted[uuid] != "" { + // already deleted, ignore + continue + } + + table := rt.tracked[uuid] + if isRoot(&rt.dbModel, table) { + // table is part of the root set, ignore + continue + } + + // delete row that is not part of the root set and is no longer + // referenced + update, err := rt.deleteRow(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + err = updates.Merge(rt.dbModel, update) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +// processWeakReferences deletes weak references to rows that were deleted. 
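Conceptually, the strong-reference check above keeps a non-root row alive only while some live row still references it, i.e. while it is reachable from the root set. A toy, non-incremental version of that rule; the vendored code instead works incrementally from the updated references:

```go
package main

import "fmt"

// gc computes which rows survive strong-reference garbage collection: rows
// in the root set always live, and any row they transitively reference lives
// too; everything else is deleted.
func gc(root map[string]bool, refs map[string][]string) map[string]bool {
	live := map[string]bool{}
	var visit func(string)
	visit = func(uuid string) {
		if live[uuid] {
			return
		}
		live[uuid] = true
		for _, to := range refs[uuid] {
			visit(to)
		}
	}
	for uuid := range root {
		visit(uuid)
	}
	return live
}

func main() {
	// "ls1" is in the root set and strongly references "lsp1";
	// "orphan" is referenced by nothing and gets collected.
	refs := map[string][]string{"ls1": {"lsp1"}}
	live := gc(map[string]bool{"ls1": true}, refs)
	fmt.Println(live["lsp1"], live["orphan"]) // true false
}
```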
+// Returns a constraint violation if this results in invalid values +func (rt *referenceTracker) processWeakReferences() (ModelUpdates, error) { + // make sure that we are tracking the references to rows that might have + // been deleted as a result of strong reference garbage collection + err := rt.initReferencesOfDeletedRows() + if err != nil { + return ModelUpdates{}, err + } + + tables := map[string]string{} + originalRows := map[string]ovsdb.Row{} + updatedRows := map[string]ovsdb.Row{} + + for spec, refs := range rt.references { + // fetch some reference information from the schema + extendedType, minLenAllowed, refType, _ := refInfo(&rt.dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) + isEmptyAllowed := minLenAllowed == 0 + + if refType != ovsdb.Weak { + // we only care about weak references + continue + } + + for to, from := range refs { + if len(from) == 0 { + // not referenced from anywhere, ignore + continue + } + + // check if the referenced row exists + exists, err := rt.rowExists(spec.ToTable, to) + if err != nil { + return ModelUpdates{}, err + } + if exists { + // we only care about rows that have been deleted or otherwise + // don't exist + continue + } + + // generate the updates to remove the references to deleted rows + for _, uuid := range from { + if _, ok := updatedRows[uuid]; !ok { + updatedRows[uuid] = ovsdb.NewRow() + } + + if rt.deleted[uuid] != "" { + // already deleted, ignore + continue + } + + // fetch the original rows + if originalRows[uuid] == nil { + originalRow, err := rt.getRow(spec.FromTable, uuid) + if err != nil { + return ModelUpdates{}, err + } + if originalRow == nil { + return ModelUpdates{}, fmt.Errorf("reference from non-existent model with uuid %s", uuid) + } + originalRows[uuid] = *originalRow + } + + var becomesLen int + switch extendedType { + case ovsdb.TypeMap: + // a map referencing the row + // generate the mutation to remove the entry from the map + originalMap := originalRows[uuid][spec.FromColumn].(ovsdb.OvsMap).GoMap + var mutationMap map[interface{}]interface{} + value, ok := updatedRows[uuid][spec.FromColumn] + if !ok { + mutationMap = map[interface{}]interface{}{} + } else { + mutationMap = value.(ovsdb.OvsMap).GoMap + } + // copy the map entries referencing the row from the original map + mutationMap = copyMapKeyValues(originalMap, mutationMap, !spec.FromValue, ovsdb.UUID{GoUUID: to}) + + // track the new length of the map + if !isEmptyAllowed { + becomesLen = len(originalMap) - len(mutationMap) + } + + updatedRows[uuid][spec.FromColumn] = ovsdb.OvsMap{GoMap: mutationMap} + + case ovsdb.TypeSet: + // a set referencing the row + // generate the mutation to remove the entry from the set + var mutationSet []interface{} + value, ok := updatedRows[uuid][spec.FromColumn] + if !ok { + mutationSet = []interface{}{} + } else { + mutationSet = value.(ovsdb.OvsSet).GoSet + } + mutationSet = append(mutationSet, ovsdb.UUID{GoUUID: to}) + + // track the new length of the set + if !isEmptyAllowed { + originalSet := originalRows[uuid][spec.FromColumn].(ovsdb.OvsSet).GoSet + becomesLen = len(originalSet) - len(mutationSet) + } + + updatedRows[uuid][spec.FromColumn] = ovsdb.OvsSet{GoSet: mutationSet} + + case ovsdb.TypeUUID: + // this is an atomic UUID value that needs to be cleared + updatedRows[uuid][spec.FromColumn] = nil + becomesLen = 0 + } + + if becomesLen < minLenAllowed { + return ModelUpdates{}, ovsdb.NewConstraintViolation(fmt.Sprintf( + "Deletion of a weak reference to a deleted (or never-existing) row from column %s in 
table %s "+ + "row %s caused this column to have an invalid length.", + spec.FromColumn, spec.FromTable, uuid)) + } + + // track the table of the row we are going to update + tables[uuid] = spec.FromTable + } + } + } + + // process the updates + updates := ModelUpdates{} + for uuid, rowUpdate := range updatedRows { + update, err := rt.updateRow(tables[uuid], uuid, rowUpdate) + if err != nil { + return ModelUpdates{}, err + } + err = updates.Merge(rt.dbModel, update) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +func copyMapKeyValues(from, to map[interface{}]interface{}, isKey bool, keyValue ovsdb.UUID) map[interface{}]interface{} { + if isKey { + to[keyValue] = from[keyValue] + return to + } + for key, value := range from { + if value.(ovsdb.UUID) == keyValue { + to[key] = from[key] + } + } + return to +} + +// initReferences initializes the references to the provided row from the +// database +func (rt *referenceTracker) initReferences(table, uuid string) error { + if _, ok := rt.tracked[uuid]; ok { + // already initialized + return nil + } + existingRefs, err := rt.provider.GetReferences(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return err + } + rt.references.UpdateReferences(existingRefs) + rt.tracked[uuid] = table + return nil +} + +func (rt *referenceTracker) initReferencesOfDeletedRows() error { + for uuid, table := range rt.deleted { + err := rt.initReferences(table, uuid) + if err != nil { + return err + } + } + return nil +} + +// deleteRow adds an update to delete the provided row. +func (rt *referenceTracker) deleteRow(table, uuid string) (ModelUpdates, error) { + model, err := rt.getModel(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + row, err := rt.getRow(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + + updates := ModelUpdates{} + update := ovsdb.RowUpdate2{Delete: &ovsdb.Row{}, Old: row} + err = updates.AddRowUpdate2(rt.dbModel, table, uuid, model, update) + + rt.deleted[uuid] = table + + return updates, err +} + +// updateRow generates updates for the provided row +func (rt *referenceTracker) updateRow(table, uuid string, row ovsdb.Row) (ModelUpdates, error) { + model, err := rt.getModel(table, uuid) + if err != nil { + return ModelUpdates{}, err + } + + // In agreement with processWeakReferences, columns with values are assumed + // to be values of sets or maps that need to be mutated for deletion. + // Columns with no values are assumed to be atomic optional values that need + // to be cleared with an update. 
+ + mutations := make([]ovsdb.Mutation, 0, len(row)) + update := ovsdb.Row{} + for column, value := range row { + if value != nil { + mutations = append(mutations, *ovsdb.NewMutation(column, ovsdb.MutateOperationDelete, value)) + continue + } + update[column] = ovsdb.OvsSet{GoSet: []interface{}{}} + } + + updates := ModelUpdates{} + + if len(mutations) > 0 { + err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ + Op: ovsdb.OperationMutate, + Table: table, + Mutations: mutations, + Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, + }) + if err != nil { + return ModelUpdates{}, err + } + } + + if len(update) > 0 { + err = updates.AddOperation(rt.dbModel, table, uuid, model, &ovsdb.Operation{ + Op: ovsdb.OperationUpdate, + Table: table, + Row: update, + Where: []ovsdb.Condition{ovsdb.NewCondition("_uuid", ovsdb.ConditionEqual, ovsdb.UUID{GoUUID: uuid})}, + }) + if err != nil { + return ModelUpdates{}, err + } + } + + return updates, nil +} + +// getModel gets the model from the updates or the database +func (rt *referenceTracker) getModel(table, uuid string) (model.Model, error) { + if _, deleted := rt.deleted[uuid]; deleted { + // model has been deleted + return nil, nil + } + // look for the model in the updates + model := rt.updates.GetModel(table, uuid) + if model != nil { + return model, nil + } + // look for the model in the database + model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return nil, err + } + return model, nil +} + +// getRow gets the row from the updates or the database +func (rt *referenceTracker) getRow(table, uuid string) (*ovsdb.Row, error) { + if _, deleted := rt.deleted[uuid]; deleted { + // row has been deleted + return nil, nil + } + // look for the row in the updates + row := rt.updates.GetRow(table, uuid) + if row != nil { + return row, nil + } + // look for the model in the database and build the row + model, err := rt.provider.Get(rt.dbModel.Client().Name(), table, uuid) + if err != nil { + return nil, err + } + info, err := rt.dbModel.NewModelInfo(model) + if err != nil { + return nil, err + } + newRow, err := rt.dbModel.Mapper.NewRow(info) + if err != nil { + return nil, err + } + return &newRow, nil +} + +// rowExists returns whether the row exists either in the updates or the database +func (rt *referenceTracker) rowExists(table, uuid string) (bool, error) { + model, err := rt.getModel(table, uuid) + return model != nil, err +} + +func getReferenceModificationsFromRow(dbModel *model.DatabaseModel, table, uuid string, modify, old *ovsdb.Row) database.References { + refs := database.References{} + for column, value := range *modify { + var oldValue interface{} + if old != nil { + oldValue = (*old)[column] + } + crefs := getReferenceModificationsFromColumn(dbModel, table, uuid, column, value, oldValue) + refs.UpdateReferences(crefs) + } + return refs +} + +func getReferenceModificationsFromColumn(dbModel *model.DatabaseModel, table, uuid, column string, modify, old interface{}) database.References { + switch v := modify.(type) { + case ovsdb.UUID: + var oldUUID ovsdb.UUID + if old != nil { + oldUUID = old.(ovsdb.UUID) + } + return getReferenceModificationsFromAtom(dbModel, table, uuid, column, v, oldUUID) + case ovsdb.OvsSet: + var oldSet ovsdb.OvsSet + if old != nil { + oldSet = old.(ovsdb.OvsSet) + } + return getReferenceModificationsFromSet(dbModel, table, uuid, column, v, oldSet) + case ovsdb.OvsMap: + return 
getReferenceModificationsFromMap(dbModel, table, uuid, column, v) + } + return nil +} + +func getReferenceModificationsFromMap(dbModel *model.DatabaseModel, table, uuid, column string, value ovsdb.OvsMap) database.References { + if len(value.GoMap) == 0 { + return nil + } + + // get the referenced table + keyRefTable := refTable(dbModel, table, column, false) + valueRefTable := refTable(dbModel, table, column, true) + if keyRefTable == "" && valueRefTable == "" { + return nil + } + + from := uuid + keySpec := database.ReferenceSpec{ToTable: keyRefTable, FromTable: table, FromColumn: column, FromValue: false} + valueSpec := database.ReferenceSpec{ToTable: valueRefTable, FromTable: table, FromColumn: column, FromValue: true} + + refs := database.References{} + for k, v := range value.GoMap { + if keyRefTable != "" { + switch to := k.(type) { + case ovsdb.UUID: + if _, ok := refs[keySpec]; !ok { + refs[keySpec] = database.Reference{to.GoUUID: []string{from}} + } else if _, ok := refs[keySpec][to.GoUUID]; !ok { + refs[keySpec][to.GoUUID] = append(refs[keySpec][to.GoUUID], from) + } + } + } + if valueRefTable != "" { + switch to := v.(type) { + case ovsdb.UUID: + if _, ok := refs[valueSpec]; !ok { + refs[valueSpec] = database.Reference{to.GoUUID: []string{from}} + } else if _, ok := refs[valueSpec][to.GoUUID]; !ok { + refs[valueSpec][to.GoUUID] = append(refs[valueSpec][to.GoUUID], from) + } + } + } + } + + return refs +} + +func getReferenceModificationsFromSet(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.OvsSet) database.References { + // if the modify set is empty, it means the op is clearing an atomic value + // so pick the old value instead + value := modify + if len(modify.GoSet) == 0 { + value = old + } + + if len(value.GoSet) == 0 { + return nil + } + + // get the referenced table + refTable := refTable(dbModel, table, column, false) + if refTable == "" { + return nil + } + + spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} + from := uuid + refs := database.References{spec: database.Reference{}} + for _, v := range value.GoSet { + switch to := v.(type) { + case ovsdb.UUID: + refs[spec][to.GoUUID] = append(refs[spec][to.GoUUID], from) + } + } + return refs +} + +func getReferenceModificationsFromAtom(dbModel *model.DatabaseModel, table, uuid, column string, modify, old ovsdb.UUID) database.References { + // get the referenced table + refTable := refTable(dbModel, table, column, false) + if refTable == "" { + return nil + } + spec := database.ReferenceSpec{ToTable: refTable, FromTable: table, FromColumn: column} + from := uuid + to := modify.GoUUID + refs := database.References{spec: {to: {from}}} + if old.GoUUID != "" { + // extract the old value as well + refs[spec][old.GoUUID] = []string{from} + } + return refs +} + +// applyReferenceModifications updates references in 'a' from those in 'b' +func applyReferenceModifications(a, b database.References) { + for spec, bv := range b { + for to, bfrom := range bv { + if av, ok := a[spec]; ok { + if afrom, ok := av[to]; ok { + r, _ := applyDifference(afrom, bfrom) + av[to] = r.([]string) + } else { + // this reference is not in 'a', so add it + av[to] = bfrom + } + } else { + // this reference is not in 'a', so add it + a[spec] = database.Reference{to: bfrom} + } + } + } +} + +func refInfo(dbModel *model.DatabaseModel, table, column string, mapValue bool) (ovsdb.ExtendedType, int, ovsdb.RefType, string) { + tSchema := dbModel.Schema.Table(table) + if tSchema == nil { + 
panic(fmt.Sprintf("unexpected schema error: no schema for table %s", table)) + } + + cSchema := tSchema.Column(column) + if cSchema == nil { + panic(fmt.Sprintf("unexpected schema error: no schema for column %s", column)) + } + + cType := cSchema.TypeObj + if cType == nil { + // this is not a reference + return "", 0, "", "" + } + + var bType *ovsdb.BaseType + switch { + case !mapValue && cType.Key != nil: + bType = cType.Key + case mapValue && cType.Value != nil: + bType = cType.Value + default: + panic(fmt.Sprintf("unexpected schema error: no schema for map value on column %s", column)) + } + if bType.Type != ovsdb.TypeUUID { + // this is not a reference + return "", 0, "", "" + } + + // treat optional values represented with sets as atomic UUIDs + extendedType := cSchema.Type + if extendedType == ovsdb.TypeSet && cType.Min() == 0 && cType.Max() == 1 { + extendedType = ovsdb.TypeUUID + } + + rType, err := bType.RefType() + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + + rTable, err := bType.RefTable() + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + + return extendedType, cType.Min(), rType, rTable +} + +func refTable(dbModel *model.DatabaseModel, table, column string, mapValue bool) ovsdb.RefType { + _, _, _, refTable := refInfo(dbModel, table, column, mapValue) + return refTable +} + +func isRoot(dbModel *model.DatabaseModel, table string) bool { + isRoot, err := dbModel.Schema.IsRoot(table) + if err != nil { + panic(fmt.Sprintf("unexpected schema error: %v", err)) + } + return isRoot +} + +func isStrong(dbModel *model.DatabaseModel, spec database.ReferenceSpec) bool { + _, _, refType, _ := refInfo(dbModel, spec.FromTable, spec.FromColumn, spec.FromValue) + return refType == ovsdb.Strong +} diff --git a/vendor/github.com/ovn-org/libovsdb/updates/updates.go b/vendor/github.com/ovn-org/libovsdb/updates/updates.go new file mode 100644 index 000000000..4ff2363a0 --- /dev/null +++ b/vendor/github.com/ovn-org/libovsdb/updates/updates.go @@ -0,0 +1,528 @@ +package updates + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/mapper" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +type rowUpdate2 = ovsdb.RowUpdate2 + +// modelUpdate contains an update in model and OVSDB RowUpdate2 notation +type modelUpdate struct { + rowUpdate2 *rowUpdate2 + old model.Model + new model.Model +} + +// isEmpty returns whether this update is empty +func (mu modelUpdate) isEmpty() bool { + return mu == modelUpdate{} +} + +// ModelUpdates contains updates indexed by table and uuid +type ModelUpdates struct { + updates map[string]map[string]modelUpdate +} + +// GetUpdatedTables returns the tables that have updates +func (u ModelUpdates) GetUpdatedTables() []string { + tables := make([]string, 0, len(u.updates)) + for table, updates := range u.updates { + if len(updates) > 0 { + tables = append(tables, table) + } + } + return tables +} + +// ForEachModelUpdate processes each row update of a given table in model +// notation +func (u ModelUpdates) ForEachModelUpdate(table string, do func(uuid string, old, new model.Model) error) error { + models := u.updates[table] + for uuid, model := range models { + err := do(uuid, model.old, model.new) + if err != nil { + return err + } + } + return nil +} + +// ForEachRowUpdate processes each row update of a given table in OVSDB +// RowUpdate2 notation +func (u ModelUpdates) ForEachRowUpdate(table string, do func(uuid string, row ovsdb.RowUpdate2) error) error { + rows 
:= u.updates[table] + for uuid, row := range rows { + err := do(uuid, *row.rowUpdate2) + if err != nil { + return err + } + } + return nil +} + +// GetModel returns the last known state of the requested model. If the model is +// unknown or has been deleted, returns nil. +func (u ModelUpdates) GetModel(table, uuid string) model.Model { + if u.updates == nil { + return nil + } + if t, found := u.updates[table]; found { + if update, found := t[uuid]; found { + return update.new + } + } + return nil +} + +// GetRow returns the last known state of the requested row. If the row is +// unknown or has been deleted, returns nil. +func (u ModelUpdates) GetRow(table, uuid string) *ovsdb.Row { + if u.updates == nil { + return nil + } + if t, found := u.updates[table]; found { + if update, found := t[uuid]; found { + return update.rowUpdate2.New + } + } + return nil +} + +// Merge a set of updates with an earlier set of updates +func (u *ModelUpdates) Merge(dbModel model.DatabaseModel, new ModelUpdates) error { + for table, models := range new.updates { + for uuid, update := range models { + err := u.addUpdate(dbModel, table, uuid, update) + if err != nil { + return err + } + } + } + return nil +} + +// AddOperation adds an update for a model from an OVSDB Operation. If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddOperation(dbModel model.DatabaseModel, table, uuid string, current model.Model, op *ovsdb.Operation) error { + switch op.Op { + case ovsdb.OperationInsert: + return u.addInsertOperation(dbModel, table, uuid, op) + case ovsdb.OperationUpdate: + return u.addUpdateOperation(dbModel, table, uuid, current, op) + case ovsdb.OperationMutate: + return u.addMutateOperation(dbModel, table, uuid, current, op) + case ovsdb.OperationDelete: + return u.addDeleteOperation(dbModel, table, uuid, current, op) + default: + return fmt.Errorf("database update from operation %#v not supported", op.Op) + } +} + +// AddRowUpdate adds an update for a model from an OVSDB RowUpdate. If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddRowUpdate(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru ovsdb.RowUpdate) error { + switch { + case ru.Old == nil && ru.New != nil: + new, err := model.CreateModel(dbModel, table, ru.New, uuid) + if err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &rowUpdate2{New: ru.New}}) + if err != nil { + return err + } + case ru.Old != nil && ru.New != nil: + old := current + new := model.Clone(current) + info, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + changed, err := updateModel(dbModel, table, info, ru.New, nil) + if !changed || err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &rowUpdate2{Old: ru.Old, New: ru.New}}) + if err != nil { + return err + } + case ru.New == nil: + old := current + err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &rowUpdate2{Old: ru.Old}}) + if err != nil { + return err + } + } + return nil +} + +// AddRowUpdate2 adds an update for a model from an OVSDB RowUpdate2.
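A sketch of how a consumer might walk aggregated updates in model notation, using only the exported API above:

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/model"
	"github.com/ovn-org/libovsdb/updates"
)

// printUpdates iterates the updated tables and their row updates: a nil old
// model means an insert, a nil new model means a delete.
func printUpdates(u updates.ModelUpdates) error {
	for _, table := range u.GetUpdatedTables() {
		err := u.ForEachModelUpdate(table, func(uuid string, old, new model.Model) error {
			switch {
			case old == nil:
				fmt.Printf("%s/%s inserted\n", table, uuid)
			case new == nil:
				fmt.Printf("%s/%s deleted\n", table, uuid)
			default:
				fmt.Printf("%s/%s modified\n", table, uuid)
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var u updates.ModelUpdates // zero value holds no updates
	_ = printUpdates(u)
}
```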
If several +// updates for the same model are aggregated, the user is responsible that the +// provided model to be updated matches the updated model of the previous +// update. +func (u *ModelUpdates) AddRowUpdate2(dbModel model.DatabaseModel, table, uuid string, current model.Model, ru2 ovsdb.RowUpdate2) error { + switch { + case ru2.Initial != nil: + ru2.Insert = ru2.Initial + fallthrough + case ru2.Insert != nil: + new, err := model.CreateModel(dbModel, table, ru2.Insert, uuid) + if err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{new: new, rowUpdate2: &ru2}) + if err != nil { + return err + } + case ru2.Modify != nil: + old := current + new := model.Clone(current) + info, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + changed, err := modifyModel(dbModel, table, info, ru2.Modify) + if !changed || err != nil { + return err + } + err = u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, new: new, rowUpdate2: &ru2}) + if err != nil { + return err + } + default: + old := current + err := u.addUpdate(dbModel, table, uuid, modelUpdate{old: old, rowUpdate2: &ru2}) + if err != nil { + return err + } + } + return nil +} + +func (u *ModelUpdates) addUpdate(dbModel model.DatabaseModel, table, uuid string, update modelUpdate) error { + if u.updates == nil { + u.updates = map[string]map[string]modelUpdate{} + } + if _, ok := u.updates[table]; !ok { + u.updates[table] = make(map[string]modelUpdate) + } + + ts := dbModel.Schema.Table(table) + update, err := merge(ts, u.updates[table][uuid], update) + if err != nil { + return err + } + + if !update.isEmpty() { + u.updates[table][uuid] = update + return nil + } + + // If after the merge this amounts to no update, remove it from the list and + // clean up + delete(u.updates[table], uuid) + if len(u.updates[table]) == 0 { + delete(u.updates, table) + } + if len(u.updates) == 0 { + u.updates = nil + } + + return nil +} + +func (u *ModelUpdates) addInsertOperation(dbModel model.DatabaseModel, table, uuid string, op *ovsdb.Operation) error { + m := dbModel.Mapper + + model, err := dbModel.NewModel(table) + if err != nil { + return err + } + + mapperInfo, err := dbModel.NewModelInfo(model) + if err != nil { + return err + } + + err = m.GetRowData(&op.Row, mapperInfo) + if err != nil { + return err + } + + err = mapperInfo.SetField("_uuid", uuid) + if err != nil { + return err + } + + resultRow, err := m.NewRow(mapperInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: nil, + new: model, + rowUpdate2: &rowUpdate2{ + Insert: &resultRow, + New: &resultRow, + Old: nil, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addUpdateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + + oldInfo, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(oldInfo) + if err != nil { + return err + } + + new := model.Clone(old) + newInfo, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + + delta := ovsdb.NewRow() + changed, err := updateModel(dbModel, table, newInfo, &op.Row, &delta) + if err != nil { + return err + } + if !changed { + return nil + } + + newRow, err := m.NewRow(newInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: new, + rowUpdate2: &rowUpdate2{ + Modify: &delta, + Old: &oldRow, + New: &newRow, + }, + }, + ) + + return err +} + +func (u 
*ModelUpdates) addMutateOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + schema := dbModel.Schema.Table(table) + + oldInfo, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(oldInfo) + if err != nil { + return err + } + + new := model.Clone(old) + newInfo, err := dbModel.NewModelInfo(new) + if err != nil { + return err + } + + differences := make(map[string]interface{}) + for _, mutation := range op.Mutations { + column := schema.Column(mutation.Column) + if column == nil { + continue + } + + var nativeValue interface{} + // Usually a mutation value is of the same type of the value being mutated + // except for delete mutation of maps where it can also be a list of same type of + // keys (rfc7047 5.1). Handle this special case here. + if mutation.Mutator == "delete" && column.Type == ovsdb.TypeMap && reflect.TypeOf(mutation.Value) != reflect.TypeOf(ovsdb.OvsMap{}) { + nativeValue, err = ovsdb.OvsToNativeSlice(column.TypeObj.Key.Type, mutation.Value) + if err != nil { + return err + } + } else { + nativeValue, err = ovsdb.OvsToNative(column, mutation.Value) + if err != nil { + return err + } + } + + if err := ovsdb.ValidateMutation(column, mutation.Mutator, nativeValue); err != nil { + return err + } + + current, err := newInfo.FieldByColumn(mutation.Column) + if err != nil { + return err + } + + newValue, diff := mutate(current, mutation.Mutator, nativeValue) + if err := newInfo.SetField(mutation.Column, newValue); err != nil { + return err + } + + old, err := oldInfo.FieldByColumn(mutation.Column) + if err != nil { + return err + } + diff, changed := mergeDifference(old, differences[mutation.Column], diff) + if changed { + differences[mutation.Column] = diff + } else { + delete(differences, mutation.Column) + } + } + + if len(differences) == 0 { + return nil + } + + delta := ovsdb.NewRow() + for column, diff := range differences { + colSchema := schema.Column(column) + diffOvs, err := ovsdb.NativeToOvs(colSchema, diff) + if err != nil { + return err + } + delta[column] = diffOvs + } + + newRow, err := m.NewRow(newInfo) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: new, + rowUpdate2: &rowUpdate2{ + Modify: &delta, + Old: &oldRow, + New: &newRow, + }, + }, + ) + + return err +} + +func (u *ModelUpdates) addDeleteOperation(dbModel model.DatabaseModel, table, uuid string, old model.Model, op *ovsdb.Operation) error { + m := dbModel.Mapper + + info, err := dbModel.NewModelInfo(old) + if err != nil { + return err + } + + oldRow, err := m.NewRow(info) + if err != nil { + return err + } + + err = u.addUpdate(dbModel, table, uuid, + modelUpdate{ + old: old, + new: nil, + rowUpdate2: &rowUpdate2{ + Delete: &ovsdb.Row{}, + Old: &oldRow, + }, + }, + ) + + return err +} + +func updateModel(dbModel model.DatabaseModel, table string, info *mapper.Info, update, modify *ovsdb.Row) (bool, error) { + return updateOrModifyModel(dbModel, table, info, update, modify, false) +} + +func modifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, modify *ovsdb.Row) (bool, error) { + return updateOrModifyModel(dbModel, table, info, modify, nil, true) +} + +// updateOrModifyModel updates info about a model with a given row containing +// the change. The change row itself can be interpreted as an update or a +// modify. 
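The distinction matters for the shared helper documented here: a sketch of the same change expressed in both notations, with an illustrative column name:

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/ovsdb"
)

func main() {
	// An "update" row carries the full new value of each column, while a
	// "modify" row carries update2-style deltas for the same change.
	update := ovsdb.Row{
		"external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{
			"owner": "ovn", "stage": "final", // full new map
		}},
	}
	modify := ovsdb.Row{
		"external_ids": ovsdb.OvsMap{GoMap: map[interface{}]interface{}{
			"stage": "final", // only the changed pair
		}},
	}
	fmt.Println(update, modify)
}
```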
If the change is an update and a modify row is provided, it will be +// filled with the modify data. +func updateOrModifyModel(dbModel model.DatabaseModel, table string, info *mapper.Info, changeRow, modifyRow *ovsdb.Row, isModify bool) (bool, error) { + schema := dbModel.Schema.Table(table) + var changed bool + + for column, updateOvs := range *changeRow { + colSchema := schema.Column(column) + if colSchema == nil { + // ignore columns we don't know about in our schema + continue + } + + currentNative, err := info.FieldByColumn(column) + if err != nil { + return false, err + } + + updateNative, err := ovsdb.OvsToNative(colSchema, updateOvs) + if err != nil { + return false, err + } + + if isModify { + differenceNative, isDifferent := applyDifference(currentNative, updateNative) + if isDifferent && !colSchema.Mutable() { + return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) + } + changed = changed || isDifferent + err = info.SetField(column, differenceNative) + if err != nil { + return false, err + } + } else { + differenceNative, isDifferent := difference(currentNative, updateNative) + if isDifferent && !colSchema.Mutable() { + return false, ovsdb.NewConstraintViolation(fmt.Sprintf("column %q of table %q is not mutable", column, table)) + } + changed = changed || isDifferent + if isDifferent && modifyRow != nil { + deltaOvs, err := ovsdb.NativeToOvs(colSchema, differenceNative) + if err != nil { + return false, err + } + (*modifyRow)[column] = deltaOvs + } + err = info.SetField(column, updateNative) + if err != nil { + return false, err + } + } + } + + return changed, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
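A minimal, self-contained sketch (not the library code) of the delta semantics that updateOrModifyModel and difference implement earlier in this patch: for a set-valued column, the Modify row carries the symmetric difference between the old and new values, so only added and removed elements travel in the update.

	package main

	import "fmt"

	// symmetricDifference returns the elements present in exactly one of a, b.
	func symmetricDifference(a, b []string) []string {
		count := map[string]int{}
		for _, v := range a {
			count[v]++
		}
		for _, v := range b {
			count[v]++
		}
		out := []string{}
		for _, v := range append(a, b...) {
			if count[v] == 1 {
				out = append(out, v)
			}
		}
		return out
	}

	func main() {
		oldPorts := []string{"p1", "p2"}
		newPorts := []string{"p2", "p3"}
		fmt.Println(symmetricDifference(oldPorts, newPorts)) // [p1 p3]
	}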
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model/network_event.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model/network_event.go new file mode 100644 index 000000000..00fb19fcf --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model/network_event.go @@ -0,0 +1,61 @@ +package model + +import ( + "fmt" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type NetworkEvent interface { + String() string +} + +type ACLEvent struct { + NetworkEvent + Action string + Actor string + Name string + Namespace string + Direction string +} + +func (e *ACLEvent) String() string { + var action string + switch e.Action { + case nbdb.ACLActionAllow, nbdb.ACLActionAllowRelated, nbdb.ACLActionAllowStateless: + action = "Allowed" + case nbdb.ACLActionDrop: + action = "Dropped" + case nbdb.ACLActionPass: + action = "Delegated to network policy" + default: + action = "Action " + e.Action + } + var msg string + switch e.Actor { + case libovsdbops.AdminNetworkPolicyOwnerType: + msg = fmt.Sprintf("admin network policy %s, direction %s", e.Name, e.Direction) + case libovsdbops.BaselineAdminNetworkPolicyOwnerType: + msg = fmt.Sprintf("baseline admin network policy %s, direction %s", e.Name, e.Direction) + case libovsdbops.MulticastNamespaceOwnerType: + msg = fmt.Sprintf("multicast in namespace %s, direction %s", e.Namespace, e.Direction) + case libovsdbops.MulticastClusterOwnerType: + msg = fmt.Sprintf("cluster multicast policy, direction %s", e.Direction) + case libovsdbops.NetpolNodeOwnerType: + msg = fmt.Sprintf("default allow from local node policy, direction %s", e.Direction) + case libovsdbops.NetworkPolicyOwnerType: + if e.Namespace != "" { + msg = fmt.Sprintf("network policy %s in namespace %s, direction %s", e.Name, e.Namespace, e.Direction) + } else { + msg = fmt.Sprintf("network policy %s, direction %s", e.Name, e.Direction) + } + case libovsdbops.NetpolNamespaceOwnerType: + msg = fmt.Sprintf("network policies isolation in namespace %s, direction %s", e.Namespace, e.Direction) + case libovsdbops.EgressFirewallOwnerType: + msg = fmt.Sprintf("egress firewall in namespace %s", e.Namespace) + case libovsdbops.UDNIsolationOwnerType: + msg = fmt.Sprintf("UDN isolation of type %s", e.Name) + } + return fmt.Sprintf("%s by %s", action, msg) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore new file mode 100644 index 000000000..734ba1eff --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go new file mode 100644 index 000000000..d0135c488 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/bridge.go @@ -0,0 +1,570 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
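Given the ACLEvent.String implementation just above, a network-policy event renders as in this sketch (the event values are hypothetical; the constants are the ones declared in nbdb and libovsdbops):

	e := &model.ACLEvent{
		Action:    nbdb.ACLActionAllow,
		Actor:     libovsdbops.NetworkPolicyOwnerType,
		Name:      "allow-web",
		Namespace: "demo",
		Direction: "Ingress",
	}
	fmt.Println(e.String())
	// Allowed by network policy allow-web in namespace demo, direction Ingress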
+ +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +const BridgeTable = "Bridge" + +type ( + BridgeFailMode = string + BridgeProtocols = string +) + +var ( + BridgeFailModeStandalone BridgeFailMode = "standalone" + BridgeFailModeSecure BridgeFailMode = "secure" + BridgeProtocolsOpenflow10 BridgeProtocols = "OpenFlow10" + BridgeProtocolsOpenflow11 BridgeProtocols = "OpenFlow11" + BridgeProtocolsOpenflow12 BridgeProtocols = "OpenFlow12" + BridgeProtocolsOpenflow13 BridgeProtocols = "OpenFlow13" + BridgeProtocolsOpenflow14 BridgeProtocols = "OpenFlow14" + BridgeProtocolsOpenflow15 BridgeProtocols = "OpenFlow15" +) + +// Bridge defines an object in Bridge table +type Bridge struct { + UUID string `ovsdb:"_uuid"` + AutoAttach *string `ovsdb:"auto_attach"` + Controller []string `ovsdb:"controller"` + DatapathID *string `ovsdb:"datapath_id"` + DatapathType string `ovsdb:"datapath_type"` + DatapathVersion string `ovsdb:"datapath_version"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FailMode *BridgeFailMode `ovsdb:"fail_mode"` + FloodVLANs []int `ovsdb:"flood_vlans"` + FlowTables map[int]string `ovsdb:"flow_tables"` + IPFIX *string `ovsdb:"ipfix"` + McastSnoopingEnable bool `ovsdb:"mcast_snooping_enable"` + Mirrors []string `ovsdb:"mirrors"` + Name string `ovsdb:"name"` + Netflow *string `ovsdb:"netflow"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string `ovsdb:"ports"` + Protocols []BridgeProtocols `ovsdb:"protocols"` + RSTPEnable bool `ovsdb:"rstp_enable"` + RSTPStatus map[string]string `ovsdb:"rstp_status"` + Sflow *string `ovsdb:"sflow"` + Status map[string]string `ovsdb:"status"` + STPEnable bool `ovsdb:"stp_enable"` +} + +func (a *Bridge) GetUUID() string { + return a.UUID +} + +func (a *Bridge) GetAutoAttach() *string { + return a.AutoAttach +} + +func copyBridgeAutoAttach(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeAutoAttach(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetController() []string { + return a.Controller +} + +func copyBridgeController(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgeController(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetDatapathID() *string { + return a.DatapathID +} + +func copyBridgeDatapathID(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeDatapathID(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetDatapathType() string { + return a.DatapathType +} + +func (a *Bridge) GetDatapathVersion() string { + return a.DatapathVersion +} + +func (a *Bridge) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBridgeExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + 
+func (a *Bridge) GetFailMode() *BridgeFailMode { + return a.FailMode +} + +func copyBridgeFailMode(a *BridgeFailMode) *BridgeFailMode { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeFailMode(a, b *BridgeFailMode) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetFloodVLANs() []int { + return a.FloodVLANs +} + +func copyBridgeFloodVLANs(a []int) []int { + if a == nil { + return nil + } + b := make([]int, len(a)) + copy(b, a) + return b +} + +func equalBridgeFloodVLANs(a, b []int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetFlowTables() map[int]string { + return a.FlowTables +} + +func copyBridgeFlowTables(a map[int]string) map[int]string { + if a == nil { + return nil + } + b := make(map[int]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeFlowTables(a, b map[int]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetIPFIX() *string { + return a.IPFIX +} + +func copyBridgeIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetMcastSnoopingEnable() bool { + return a.McastSnoopingEnable +} + +func (a *Bridge) GetMirrors() []string { + return a.Mirrors +} + +func copyBridgeMirrors(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgeMirrors(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetName() string { + return a.Name +} + +func (a *Bridge) GetNetflow() *string { + return a.Netflow +} + +func copyBridgeNetflow(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeNetflow(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyBridgeOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetPorts() []string { + return a.Ports +} + +func copyBridgePorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalBridgePorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetProtocols() []BridgeProtocols { + return a.Protocols +} + +func 
copyBridgeProtocols(a []BridgeProtocols) []BridgeProtocols { + if a == nil { + return nil + } + b := make([]BridgeProtocols, len(a)) + copy(b, a) + return b +} + +func equalBridgeProtocols(a, b []BridgeProtocols) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Bridge) GetRSTPEnable() bool { + return a.RSTPEnable +} + +func (a *Bridge) GetRSTPStatus() map[string]string { + return a.RSTPStatus +} + +func copyBridgeRSTPStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeRSTPStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetSflow() *string { + return a.Sflow +} + +func copyBridgeSflow(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBridgeSflow(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Bridge) GetStatus() map[string]string { + return a.Status +} + +func copyBridgeStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBridgeStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Bridge) GetSTPEnable() bool { + return a.STPEnable +} + +func (a *Bridge) DeepCopyInto(b *Bridge) { + *b = *a + b.AutoAttach = copyBridgeAutoAttach(a.AutoAttach) + b.Controller = copyBridgeController(a.Controller) + b.DatapathID = copyBridgeDatapathID(a.DatapathID) + b.ExternalIDs = copyBridgeExternalIDs(a.ExternalIDs) + b.FailMode = copyBridgeFailMode(a.FailMode) + b.FloodVLANs = copyBridgeFloodVLANs(a.FloodVLANs) + b.FlowTables = copyBridgeFlowTables(a.FlowTables) + b.IPFIX = copyBridgeIPFIX(a.IPFIX) + b.Mirrors = copyBridgeMirrors(a.Mirrors) + b.Netflow = copyBridgeNetflow(a.Netflow) + b.OtherConfig = copyBridgeOtherConfig(a.OtherConfig) + b.Ports = copyBridgePorts(a.Ports) + b.Protocols = copyBridgeProtocols(a.Protocols) + b.RSTPStatus = copyBridgeRSTPStatus(a.RSTPStatus) + b.Sflow = copyBridgeSflow(a.Sflow) + b.Status = copyBridgeStatus(a.Status) +} + +func (a *Bridge) DeepCopy() *Bridge { + b := new(Bridge) + a.DeepCopyInto(b) + return b +} + +func (a *Bridge) CloneModelInto(b model.Model) { + c := b.(*Bridge) + a.DeepCopyInto(c) +} + +func (a *Bridge) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Bridge) Equals(b *Bridge) bool { + return a.UUID == b.UUID && + equalBridgeAutoAttach(a.AutoAttach, b.AutoAttach) && + equalBridgeController(a.Controller, b.Controller) && + equalBridgeDatapathID(a.DatapathID, b.DatapathID) && + a.DatapathType == b.DatapathType && + a.DatapathVersion == b.DatapathVersion && + equalBridgeExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalBridgeFailMode(a.FailMode, b.FailMode) && + equalBridgeFloodVLANs(a.FloodVLANs, b.FloodVLANs) && + equalBridgeFlowTables(a.FlowTables, b.FlowTables) && + equalBridgeIPFIX(a.IPFIX, b.IPFIX) && + 
a.McastSnoopingEnable == b.McastSnoopingEnable && + equalBridgeMirrors(a.Mirrors, b.Mirrors) && + a.Name == b.Name && + equalBridgeNetflow(a.Netflow, b.Netflow) && + equalBridgeOtherConfig(a.OtherConfig, b.OtherConfig) && + equalBridgePorts(a.Ports, b.Ports) && + equalBridgeProtocols(a.Protocols, b.Protocols) && + a.RSTPEnable == b.RSTPEnable && + equalBridgeRSTPStatus(a.RSTPStatus, b.RSTPStatus) && + equalBridgeSflow(a.Sflow, b.Sflow) && + equalBridgeStatus(a.Status, b.Status) && + a.STPEnable == b.STPEnable +} + +func (a *Bridge) EqualsModel(b model.Model) bool { + c := b.(*Bridge) + return a.Equals(c) +} + +var _ model.CloneableModel = &Bridge{} +var _ model.ComparableModel = &Bridge{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go new file mode 100644 index 000000000..57a26e805 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go @@ -0,0 +1,143 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" + +// FlowSampleCollectorSet defines an object in Flow_Sample_Collector_Set table +type FlowSampleCollectorSet struct { + UUID string `ovsdb:"_uuid"` + Bridge string `ovsdb:"bridge"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + IPFIX *string `ovsdb:"ipfix"` + LocalGroupID *int `ovsdb:"local_group_id"` +} + +func (a *FlowSampleCollectorSet) GetUUID() string { + return a.UUID +} + +func (a *FlowSampleCollectorSet) GetBridge() string { + return a.Bridge +} + +func (a *FlowSampleCollectorSet) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyFlowSampleCollectorSetExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalFlowSampleCollectorSetExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *FlowSampleCollectorSet) GetID() int { + return a.ID +} + +func (a *FlowSampleCollectorSet) GetIPFIX() *string { + return a.IPFIX +} + +func copyFlowSampleCollectorSetIPFIX(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetIPFIX(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) GetLocalGroupID() *int { + return a.LocalGroupID +} + +func copyFlowSampleCollectorSetLocalGroupID(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalFlowSampleCollectorSetLocalGroupID(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *FlowSampleCollectorSet) DeepCopyInto(b *FlowSampleCollectorSet) { + *b = *a + b.ExternalIDs = copyFlowSampleCollectorSetExternalIDs(a.ExternalIDs) + b.IPFIX = copyFlowSampleCollectorSetIPFIX(a.IPFIX) + b.LocalGroupID = copyFlowSampleCollectorSetLocalGroupID(a.LocalGroupID) +} + +func (a *FlowSampleCollectorSet) DeepCopy() *FlowSampleCollectorSet 
{ + b := new(FlowSampleCollectorSet) + a.DeepCopyInto(b) + return b +} + +func (a *FlowSampleCollectorSet) CloneModelInto(b model.Model) { + c := b.(*FlowSampleCollectorSet) + a.DeepCopyInto(c) +} + +func (a *FlowSampleCollectorSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FlowSampleCollectorSet) Equals(b *FlowSampleCollectorSet) bool { + return a.UUID == b.UUID && + a.Bridge == b.Bridge && + equalFlowSampleCollectorSetExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + equalFlowSampleCollectorSetIPFIX(a.IPFIX, b.IPFIX) && + equalFlowSampleCollectorSetLocalGroupID(a.LocalGroupID, b.LocalGroupID) +} + +func (a *FlowSampleCollectorSet) EqualsModel(b model.Model) bool { + c := b.(*FlowSampleCollectorSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &FlowSampleCollectorSet{} +var _ model.ComparableModel = &FlowSampleCollectorSet{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go new file mode 100644 index 000000000..c5aabca46 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/gen.go @@ -0,0 +1,3 @@ +package ovsdb + +//go:generate modelgen --extended -p ovsdb -o . vswitch.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go new file mode 100644 index 000000000..7ba2329e3 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb/observ_model.go @@ -0,0 +1,11 @@ +package ovsdb + +import "github.com/ovn-org/libovsdb/model" + +// ObservDatabaseModel returns the DatabaseModel object to be used by observability library. +func ObservDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("Open_vSwitch", map[string]model.Model{ + "Bridge": &Bridge{}, + "Flow_Sample_Collector_Set": &FlowSampleCollectorSet{}, + }) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go new file mode 100644 index 000000000..5ff1587a6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/db_client.go @@ -0,0 +1,118 @@ +package sampledecoder + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "k8s.io/klog/v2/textlogger" +) + +const OVSDBTimeout = 10 * time.Second + +func NewNBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) { + dbModel, err := nbdb.FullDatabaseModel() + if err != nil { + return nil, err + } + + // define client indexes for ACLs to quickly find them by sample_new or sample_est column. 
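+	// With these indexes registered, an equality lookup on either column is
+	// served from the cache index rather than a full scan; findACLBySample in
+	// sample_decoder.go below relies on this (sketch, with `sampleUUID` and
+	// `acls` hypothetical):
+	//
+	//	acl := &nbdb.ACL{SampleNew: &sampleUUID}
+	//	err := nbClient.Where(acl).List(ctx, &acls)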
+	dbModel.SetIndexes(map[string][]model.ClientIndex{
+		nbdb.ACLTable: {
+			{Columns: []model.ColumnKey{{Column: "sample_new"}}},
+			{Columns: []model.ColumnKey{{Column: "sample_est"}}},
+		},
+	})
+
+	c, err := newClient(cfg, dbModel)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.Monitor(ctx,
+		c.NewMonitor(
+			client.WithTable(&nbdb.ACL{}),
+			client.WithTable(&nbdb.Sample{}),
+		),
+	)
+
+	if err != nil {
+		c.Close()
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func NewOVSDBClientWithConfig(ctx context.Context, cfg dbConfig) (client.Client, error) {
+	dbModel, err := ovsdb.ObservDatabaseModel()
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newClient(cfg, dbModel)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = c.Monitor(ctx,
+		c.NewMonitor(
+			client.WithTable(&ovsdb.FlowSampleCollectorSet{}),
+			client.WithTable(&ovsdb.Bridge{}),
+		),
+	)
+	if err != nil {
+		c.Close()
+		return nil, err
+	}
+
+	return c, nil
+}
+
+// newClient creates a new client object given the provided config and
+// database model.
+func newClient(cfg dbConfig, dbModel model.ClientDBModel) (client.Client, error) {
+	const connectTimeout = OVSDBTimeout * 2
+	const inactivityTimeout = OVSDBTimeout * 18
+	// Don't log anything from the libovsdb client by default
+	config := textlogger.NewConfig(textlogger.Verbosity(0))
+	logger := textlogger.NewLogger(config)
+
+	options := []client.Option{
+		// Reading and parsing the DB after reconnect at scale can (unsurprisingly)
+		// take longer than a normal ovsdb operation. Give it a bit more time, so
+		// we don't time out and enter a reconnect loop. In addition, it also enables
+		// inactivity check on the ovsdb connection.
+		client.WithInactivityCheck(inactivityTimeout, connectTimeout, &backoff.ZeroBackOff{}),
+		client.WithLeaderOnly(true),
+		client.WithLogger(&logger),
+	}
+
+	for _, endpoint := range strings.Split(cfg.address, ",") {
+		options = append(options, client.WithEndpoint(endpoint))
+	}
+	if cfg.scheme != "unix" {
+		return nil, fmt.Errorf("only unix scheme is supported for now")
+	}
+
+	client, err := client.NewOVSDBClient(dbModel, options...)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), connectTimeout)
+	defer cancel()
+	err = client.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return client, nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go
new file mode 100644
index 000000000..d691fd9cc
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder/sample_decoder.go
@@ -0,0 +1,293 @@
+package sampledecoder
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"strings"
+
+	"github.com/ovn-org/libovsdb/client"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb"
+	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability"
+)
+
+type SampleDecoder struct {
+	nbClient          client.Client
+	ovsdbClient       client.Client
+	cleanupCollectors []int
+}
+
+type dbConfig struct {
+	address string
+	scheme  string
+}
+
+type Cookie struct {
+	ObsDomainID uint32
+	ObsPointID  uint32
+}
+
+const CookieSize = 8
+const bridgeName = "br-int"
+
+var SampleEndian = getEndian()
+
+func getEndian() binary.ByteOrder {
+	// Use network byte order
+	return binary.BigEndian
+}
+
+// getLocalNBClient only supports connecting to nbdb via unix socket.
+// address is the path to the unix socket, e.g. "/var/run/ovn/ovnnb_db.sock"
+func getLocalNBClient(ctx context.Context, address string) (client.Client, error) {
+	config := dbConfig{
+		address: "unix:" + address,
+		scheme:  "unix",
+	}
+	libovsdbOvnNBClient, err := NewNBClientWithConfig(ctx, config)
+	if err != nil {
+		return nil, fmt.Errorf("error creating libovsdb client: %w", err)
+	}
+	return libovsdbOvnNBClient, nil
+}
+
+func getLocalOVSDBClient(ctx context.Context) (client.Client, error) {
+	config := dbConfig{
+		address: "unix:/var/run/openvswitch/db.sock",
+		scheme:  "unix",
+	}
+	return NewOVSDBClientWithConfig(ctx, config)
+}
+
+// NewSampleDecoderWithDefaultCollector creates a new SampleDecoder, initializes the OVSDB client and adds the default collector.
+// It allows setting the groupID and ownerName for the created default collector.
+// If the default collector already exists with a different owner or a different groupID, an error is returned.
+// Shutdown should be called to clean up the collector from the OVSDB.
+func NewSampleDecoderWithDefaultCollector(ctx context.Context, nbdbSocketPath string, ownerName string, groupID int) (*SampleDecoder, error) {
+	nbClient, err := getLocalNBClient(ctx, nbdbSocketPath)
+	if err != nil {
+		return nil, err
+	}
+	ovsdbClient, err := getLocalOVSDBClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+	decoder := &SampleDecoder{
+		nbClient:    nbClient,
+		ovsdbClient: ovsdbClient,
+	}
+	err = decoder.AddCollector(observability.DefaultObservabilityCollectorSetID, groupID, ownerName)
+	if err != nil {
+		return nil, err
+	}
+	decoder.cleanupCollectors = append(decoder.cleanupCollectors, observability.DefaultObservabilityCollectorSetID)
+	return decoder, nil
+}
+
+// NewSampleDecoder creates a new SampleDecoder and initializes the OVSDB client.
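+// A typical caller (sketch; the socket path is the example given for
+// getLocalNBClient above, and cookie is a hypothetical 8-byte sample cookie
+// taken from a sampled packet):
+//
+//	decoder, err := NewSampleDecoder(ctx, "/var/run/ovn/ovnnb_db.sock")
+//	if err != nil {
+//		return err
+//	}
+//	event, err := decoder.DecodeCookie8Bytes(cookie)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(event.String())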
+func NewSampleDecoder(ctx context.Context, nbdbSocketPath string) (*SampleDecoder, error) {
+	nbClient, err := getLocalNBClient(ctx, nbdbSocketPath)
+	if err != nil {
+		return nil, err
+	}
+	return &SampleDecoder{
+		nbClient: nbClient,
+	}, nil
+}
+
+func (d *SampleDecoder) Shutdown() {
+	for _, collectorID := range d.cleanupCollectors {
+		err := d.DeleteCollector(collectorID)
+		if err != nil {
+			fmt.Printf("Error deleting collector with ID=%d: %v\n", collectorID, err)
+		}
+	}
+}
+
+func getObservAppID(obsDomainID uint32) uint8 {
+	return uint8(obsDomainID >> 24)
+}
+
+// findACLBySample relies on the client indexes based on the sample_new and sample_est columns.
+func findACLBySample(nbClient client.Client, acl *nbdb.ACL) ([]*nbdb.ACL, error) {
+	found := []*nbdb.ACL{}
+	err := nbClient.Where(acl).List(context.Background(), &found)
+	return found, err
+}
+
+func (d *SampleDecoder) DecodeCookieIDs(obsDomainID, obsPointID uint32) (model.NetworkEvent, error) {
+	// Find sample using obsPointID
+	sample, err := libovsdbops.FindSample(d.nbClient, int(obsPointID))
+	if err != nil || sample == nil {
+		return nil, fmt.Errorf("find sample failed: %w", err)
+	}
+	// Find the db object using the observability application ID.
+	// Since ACL is indexed both by sample_new and sample_est, when searching by one of them,
+	// we need to make sure the other one will not match.
+	// nil is a valid index value, therefore we have to use a non-existent UUID.
+	wrongUUID := "wrongUUID"
+	var dbObj interface{}
+	switch getObservAppID(obsDomainID) {
+	case observability.ACLNewTrafficSamplingID:
+		acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &sample.UUID, SampleEst: &wrongUUID})
+		if err != nil {
+			return nil, fmt.Errorf("find acl for sample failed: %w", err)
+		}
+		if len(acls) != 1 {
+			return nil, fmt.Errorf("expected 1 ACL, got %d", len(acls))
+		}
+		dbObj = acls[0]
+	case observability.ACLEstTrafficSamplingID:
+		acls, err := findACLBySample(d.nbClient, &nbdb.ACL{SampleNew: &wrongUUID, SampleEst: &sample.UUID})
+		if err != nil {
+			return nil, fmt.Errorf("find acl for sample failed: %w", err)
+		}
+		if len(acls) != 1 {
+			return nil, fmt.Errorf("expected 1 ACL, got %d", len(acls))
+		}
+		dbObj = acls[0]
+	default:
+		return nil, fmt.Errorf("unknown app ID: %d", getObservAppID(obsDomainID))
+	}
+	var event model.NetworkEvent
+	switch o := dbObj.(type) {
+	case *nbdb.ACL:
+		event, err = newACLEvent(o)
+		if err != nil {
+			return nil, fmt.Errorf("failed to build ACL network event: %w", err)
+		}
+	}
+	if event == nil {
+		return nil, fmt.Errorf("failed to build network event for db object %v", dbObj)
+	}
+	return event, nil
+}
+
+func newACLEvent(o *nbdb.ACL) (*model.ACLEvent, error) {
+	actor := o.ExternalIDs[libovsdbops.OwnerTypeKey.String()]
+	event := model.ACLEvent{
+		Action: o.Action,
+		Actor:  actor,
+	}
+	switch actor {
+	case libovsdbops.NetworkPolicyOwnerType:
+		objName := o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+		nsname := strings.SplitN(objName, ":", 2)
+		if len(nsname) == 2 {
+			event.Namespace = nsname[0]
+			event.Name = nsname[1]
+		} else {
+			return nil, fmt.Errorf("expected format namespace:name for Object Name, but found: %s", objName)
+		}
+		event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]
+	case libovsdbops.AdminNetworkPolicyOwnerType, libovsdbops.BaselineAdminNetworkPolicyOwnerType:
+		event.Name = o.ExternalIDs[libovsdbops.ObjectNameKey.String()]
+		event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()]
+	case libovsdbops.MulticastNamespaceOwnerType,
libovsdbops.NetpolNamespaceOwnerType: + event.Namespace = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()] + case libovsdbops.MulticastClusterOwnerType: + event.Direction = o.ExternalIDs[libovsdbops.PolicyDirectionKey.String()] + case libovsdbops.EgressFirewallOwnerType: + event.Namespace = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + event.Direction = "Egress" + case libovsdbops.UDNIsolationOwnerType: + event.Name = o.ExternalIDs[libovsdbops.ObjectNameKey.String()] + case libovsdbops.NetpolNodeOwnerType: + event.Direction = "Ingress" + } + return &event, nil +} + +func (d *SampleDecoder) DecodeCookieBytes(cookie []byte) (model.NetworkEvent, error) { + if uint64(len(cookie)) != CookieSize { + return nil, fmt.Errorf("invalid cookie size: %d", len(cookie)) + } + c := Cookie{} + err := binary.Read(bytes.NewReader(cookie), SampleEndian, &c) + if err != nil { + return nil, err + } + return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) +} + +func (d *SampleDecoder) DecodeCookie8Bytes(cookie [8]byte) (model.NetworkEvent, error) { + c := Cookie{} + err := binary.Read(bytes.NewReader(cookie[:]), SampleEndian, &c) + if err != nil { + return nil, err + } + return d.DecodeCookieIDs(c.ObsDomainID, c.ObsPointID) +} + +func getGroupID(groupID *int) string { + if groupID == nil { + return "unset" + } + return fmt.Sprintf("%d", *groupID) +} + +func (d *SampleDecoder) AddCollector(collectorID, groupID int, ownerName string) error { + if d.ovsdbClient == nil { + return fmt.Errorf("OVSDB client is not initialized") + } + // find existing collector with the same ID + collectors := []*ovsdb.FlowSampleCollectorSet{} + err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool { + return item.ID == collectorID + }).List(context.Background(), &collectors) + if err != nil { + return fmt.Errorf("failed finding existing collector: %w", err) + } + if len(collectors) > 0 && (collectors[0].ExternalIDs["owner"] != ownerName || + collectors[0].LocalGroupID == nil || *collectors[0].LocalGroupID != groupID) { + return fmt.Errorf("requested collector with id=%v already exists "+ + "with the external_ids=%+v, local_group_id=%v", collectorID, collectors[0].ExternalIDs["owner"], getGroupID(collectors[0].LocalGroupID)) + } + + // find br-int UUID to attach collector + bridges := []*ovsdb.Bridge{} + err = d.ovsdbClient.WhereCache(func(item *ovsdb.Bridge) bool { + return item.Name == bridgeName + }).List(context.Background(), &bridges) + if err != nil || len(bridges) != 1 { + return fmt.Errorf("failed finding br-int: %w", err) + } + + ops, err := d.ovsdbClient.Create(&ovsdb.FlowSampleCollectorSet{ + ID: collectorID, + Bridge: bridges[0].UUID, + LocalGroupID: &groupID, + ExternalIDs: map[string]string{"owner": ownerName}, + }) + if err != nil { + return fmt.Errorf("failed creating collector: %w", err) + } + _, err = d.ovsdbClient.Transact(context.Background(), ops...) 
+	return err
+}
+
+func (d *SampleDecoder) DeleteCollector(collectorID int) error {
+	collectors := []*ovsdb.FlowSampleCollectorSet{}
+	err := d.ovsdbClient.WhereCache(func(item *ovsdb.FlowSampleCollectorSet) bool {
+		return item.ID == collectorID
+	}).List(context.Background(), &collectors)
+	if err != nil {
+		return fmt.Errorf("failed finding existing collector: %w", err)
+	}
+	if len(collectors) != 1 {
+		return fmt.Errorf("expected exactly 1 collector with the given id")
+	}
+
+	ops, err := d.ovsdbClient.Where(collectors[0]).Delete()
+	if err != nil {
+		return fmt.Errorf("failed creating delete operation: %w", err)
+	}
+	_, err = d.ovsdbClient.Transact(context.Background(), ops...)
+	return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go
new file mode 100644
index 000000000..88b8f6a83
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types/types.go
@@ -0,0 +1,97 @@
+package types
+
+import (
+	"net"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+// NetConf is CNI NetConf with DeviceID
+type NetConf struct {
+	types.NetConf
+	// Role is valid only on L3 / L2 topologies. Not on localnet.
+	// It allows using this network as either a secondary or the
+	// primary user defined network for the pod.
+	// Primary user defined networks are used in order to achieve
+	// native network isolation.
+	// In order to ensure backwards compatibility, if empty the
+	// network is considered secondary
+	Role string `json:"role,omitempty"`
+	// specifies the OVN topology for this network configuration
+	// when not specified, by default it is Layer3AttachDefTopoType
+	Topology string `json:"topology,omitempty"`
+	// captures net-attach-def name in the form of namespace/name
+	NADName string `json:"netAttachDefName,omitempty"`
+	// Network MTU
+	MTU int `json:"mtu,omitempty"`
+	// comma-separated subnet cidr
+	// for secondary layer3 network, eg. 10.128.0.0/14/23
+	// for layer2 and localnet network, eg. 10.1.130.0/24
+	Subnets string `json:"subnets,omitempty"`
+	// comma-separated list of IPs, expressed in the form of subnets, to be excluded from being allocated for Pods
+	// valid for layer2 and localnet network topology
+	// eg. "10.1.130.0/27, 10.1.130.122/32"
+	ExcludeSubnets string `json:"excludeSubnets,omitempty"`
+	// join subnet cidr is required for supporting
+	// services and ingress for user defined networks
+	// in case of a dualstack cluster, use a comma-separated list
+	// expected format:
+	// 1) V4 single stack: "v4CIDR" (eg: "100.65.0.0/16")
+	// 2) V6 single stack: "v6CIDR" (eg: "fd99::/64")
+	// 3) dualstack: "v4CIDR,v6CIDR" (eg: "100.65.0.0/16,fd99::/64")
+	// valid for UDN layer3/layer2 network topology
+	// default value: 100.65.0.0/16,fd99::/64 if not provided
+	JoinSubnet string `json:"joinSubnet,omitempty"`
+	// VLANID, valid in localnet topology network only
+	VLANID int `json:"vlanID,omitempty"`
+	// AllowPersistentIPs is valid on both localnet / layer topologies.
+	// It allows for having IP allocations that outlive the pod for which
+	// they are originally created - e.g. a KubeVirt VM's migration, or
+	// restart.
+	AllowPersistentIPs bool `json:"allowPersistentIPs,omitempty"`
+
+	// PhysicalNetworkName indicates the name of the physical network to which
+	// the OVN overlay will connect. Only applies to `localnet` topologies.
+	// When omitted, the physical network name of the network will be the value
+	// of the `name` attribute.
+	// This attribute allows multiple overlays to share the same physical
+	// network mapping in the hosts.
+	PhysicalNetworkName string `json:"physicalNetworkName,omitempty"`
+
+	// PciAddrs in case of using sriov, or Auxiliary device name in case of SF
+	DeviceID string `json:"deviceID,omitempty"`
+	// LogFile is the path of the file for the CNI shim binary to log to
+	LogFile string `json:"logFile,omitempty"`
+	// Level is the logging verbosity level
+	LogLevel string `json:"logLevel,omitempty"`
+	// LogFileMaxSize is the maximum size in megabytes of the logfile
+	// before it gets rolled.
+	LogFileMaxSize int `json:"logfile-maxsize"`
+	// LogFileMaxBackups represents the maximum number of
+	// old log files to retain
+	LogFileMaxBackups int `json:"logfile-maxbackups"`
+	// LogFileMaxAge represents the maximum number
+	// of days to retain old log files
+	LogFileMaxAge int `json:"logfile-maxage"`
+	// Runtime arguments passed by the NPWG implementation (e.g. multus)
+	RuntimeConfig struct {
+		// see https://github.com/k8snetworkplumbingwg/device-info-spec
+		CNIDeviceInfoFile string `json:"CNIDeviceInfoFile,omitempty"`
+	} `json:"runtimeConfig,omitempty"`
+}
+
+// NetworkSelectionElement represents one element of the JSON format
+// Network Attachment Selection Annotation as described in section 4.1.2
+// of the CRD specification.
+type NetworkSelectionElement struct {
+	// Name contains the name of the Network object this element selects
+	Name string `json:"name"`
+	// Namespace contains the optional namespace that the network referenced
+	// by Name exists in
+	Namespace string `json:"namespace,omitempty"`
+	// MacRequest contains an optional requested MAC address for this
+	// network attachment
+	MacRequest string `json:"mac,omitempty"`
+	// GatewayRequest contains the default route IP addresses for the pod
+	GatewayRequest []net.IP `json:"default-route,omitempty"`
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go
new file mode 100644
index 000000000..3d935c5c6
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/cni.go
@@ -0,0 +1,173 @@
+package config
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containernetworking/cni/libcni"
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/version"
+
+	ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types"
+	ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+)
+
+var ErrorAttachDefNotOvnManaged = errors.New("net-attach-def not managed by OVN")
+var ErrorChainingNotSupported = errors.New("CNI plugin chaining is not supported")
+
+// WriteCNIConfig writes a CNI JSON config file to the directory given by the global
+// config if the file doesn't already exist, or if it differs from the content that
+// would be written.
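+// With the defaults in this package, the rendered file (CNIConfFileName,
+// "10-ovn-kubernetes.conf") looks roughly like the following; the output is
+// abridged and the values shown are the package defaults, not a guarantee:
+//
+//	{
+//	  "cniVersion": "0.4.0",
+//	  "name": "ovn-kubernetes",
+//	  "type": "ovn-k8s-cni-overlay",
+//	  "logLevel": "4",
+//	  "logfile-maxsize": 100,
+//	  "logfile-maxbackups": 5,
+//	  "logfile-maxage": 5
+//	}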
+func WriteCNIConfig() error {
+	netConf := &ovncnitypes.NetConf{
+		NetConf: types.NetConf{
+			CNIVersion: "0.4.0",
+			Name:       "ovn-kubernetes",
+			Type:       CNI.Plugin,
+		},
+		LogFile:           Logging.CNIFile,
+		LogLevel:          fmt.Sprintf("%d", Logging.Level),
+		LogFileMaxSize:    Logging.LogFileMaxSize,
+		LogFileMaxBackups: Logging.LogFileMaxBackups,
+		LogFileMaxAge:     Logging.LogFileMaxAge,
+	}
+
+	newBytes, err := json.Marshal(netConf)
+	if err != nil {
+		return fmt.Errorf("failed to marshal CNI config JSON: %v", err)
+	}
+
+	confFile := filepath.Join(CNI.ConfDir, CNIConfFileName)
+	if existingBytes, err := os.ReadFile(confFile); err == nil {
+		if bytes.Equal(newBytes, existingBytes) {
+			// No changes; do nothing
+			return nil
+		}
+	}
+
+	// Install the CNI config file after all initialization is done
+	// MkdirAll() returns no error if the path already exists
+	if err := os.MkdirAll(CNI.ConfDir, os.ModeDir); err != nil {
+		return err
+	}
+
+	var f *os.File
+	f, err = os.CreateTemp(CNI.ConfDir, "ovnkube-")
+	if err != nil {
+		return err
+	}
+
+	if _, err := f.Write(newBytes); err != nil {
+		return err
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+
+	return os.Rename(f.Name(), confFile)
+}
+
+// ParseNetConf parses the config in a NAD spec
+func ParseNetConf(bytes []byte) (*ovncnitypes.NetConf, error) {
+	var netconf *ovncnitypes.NetConf
+
+	confList, err := libcni.ConfListFromBytes(bytes)
+	if err == nil {
+		netconf, err = parseNetConfList(confList)
+		if err == nil {
+			if _, singleErr := parseNetConfSingle(bytes); singleErr == nil {
+				return nil, fmt.Errorf("CNI config cannot have both a plugin list and a single config")
+			}
+		}
+	} else {
+		netconf, err = parseNetConfSingle(bytes)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if netconf.Topology == "" {
+		// NAD of default network
+		netconf.Name = ovntypes.DefaultNetworkName
+	}
+
+	return netconf, nil
+}
+
+func parseNetConfSingle(bytes []byte) (*ovncnitypes.NetConf, error) {
+	netconf := &ovncnitypes.NetConf{MTU: Default.MTU}
+	err := json.Unmarshal(bytes, &netconf)
+	if err != nil {
+		return nil, err
+	}
+
+	// skip non-OVN NAD
+	if netconf.Type != "ovn-k8s-cni-overlay" {
+		return nil, ErrorAttachDefNotOvnManaged
+	}
+
+	err = ValidateNetConfNameFields(netconf)
+	if err != nil {
+		return nil, err
+	}
+
+	return netconf, nil
+}
+
+func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, error) {
+	if len(confList.Plugins) > 1 {
+		return nil, ErrorChainingNotSupported
+	}
+
+	netconf := &ovncnitypes.NetConf{MTU: Default.MTU}
+	if err := json.Unmarshal(confList.Plugins[0].Bytes, netconf); err != nil {
+		return nil, err
+	}
+
+	// skip non-OVN NAD
+	if netconf.Type != "ovn-k8s-cni-overlay" {
+		return nil, ErrorAttachDefNotOvnManaged
+	}
+
+	netconf.Name = confList.Name
+	netconf.CNIVersion = confList.CNIVersion
+
+	if err := ValidateNetConfNameFields(netconf); err != nil {
+		return nil, err
+	}
+
+	return netconf, nil
+}
+
+func ValidateNetConfNameFields(netconf *ovncnitypes.NetConf) error {
+	if netconf.Topology != "" {
+		if netconf.NADName == "" {
+			return fmt.Errorf("missing NADName in secondary network netconf %s", netconf.Name)
+		}
+		// the "ovn-kubernetes" network name is reserved for later use
+		if netconf.Name == "" || netconf.Name == ovntypes.DefaultNetworkName || netconf.Name == "ovn-kubernetes" {
+			return fmt.Errorf("invalid name in secondary network netconf (%s)", netconf.Name)
+		}
+	}
+
+	return nil
+}
+
+// ReadCNIConfig unmarshals a CNI JSON config into a NetConf structure
+func ReadCNIConfig(bytes []byte)
(*ovncnitypes.NetConf, error) { + conf, err := ParseNetConf(bytes) + if err != nil { + return nil, err + } + if conf.RawPrevResult != nil { + if err := version.ParsePrevResult(&conf.NetConf); err != nil { + return nil, err + } + } + return conf, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go new file mode 100644 index 000000000..3b129624b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/config.go @@ -0,0 +1,2714 @@ +package config + +import ( + "flag" + "fmt" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/urfave/cli/v2" + gcfg "gopkg.in/gcfg.v1" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + kexec "k8s.io/utils/exec" + utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +// DefaultEncapPort number used if not supplied +const DefaultEncapPort = 6081 + +const DefaultAPIServer = "http://localhost:8443" + +// Default IANA-assigned UDP port number for VXLAN +const DefaultVXLANPort = 4789 + +const DefaultDBTxnTimeout = time.Second * 100 + +// The following are global config parameters that other modules may access directly +var ( + // Build information. Populated at build-time. + // commit ID used to build ovn-kubernetes + Commit = "" + // branch used to build ovn-kubernetes + Branch = "" + // ovn-kubernetes build user + BuildUser = "" + // ovn-kubernetes build date + BuildDate = "" + // ovn-kubernetes version, to be changed with every release + Version = "1.0.0" + // version of the go runtime used to compile ovn-kubernetes + GoVersion = runtime.Version() + // os and architecture used to build ovn-kubernetes + OSArch = fmt.Sprintf("%s %s", runtime.GOOS, runtime.GOARCH) + + // ovn-kubernetes cni config file name + CNIConfFileName = "10-ovn-kubernetes.conf" + + // Default holds parsed config file parameters and command-line overrides + Default = DefaultConfig{ + MTU: 1400, + ConntrackZone: 64000, + EncapType: "geneve", + EncapIP: "", + EncapPort: DefaultEncapPort, + InactivityProbe: 100000, // in Milliseconds + OpenFlowProbe: 180, // in Seconds + OfctrlWaitBeforeClear: 0, // in Milliseconds + MonitorAll: true, + OVSDBTxnTimeout: DefaultDBTxnTimeout, + LFlowCacheEnable: true, + RawClusterSubnets: "10.128.0.0/14/23", + Zone: types.OvnDefaultZone, + RawUDNAllowedDefaultServices: "default/kubernetes,kube-system/kube-dns", + } + + // Logging holds logging-related parsed config file parameters and command-line overrides + Logging = LoggingConfig{ + File: "", // do not log to a file by default + CNIFile: "", + LibovsdbFile: "", + Level: 4, + LogFileMaxSize: 100, // Size in Megabytes + LogFileMaxBackups: 5, + LogFileMaxAge: 5, //days + ACLLoggingRateLimit: 20, + } + + // Monitoring holds monitoring-related parsed config file parameters and command-line overrides + Monitoring = MonitoringConfig{ + RawNetFlowTargets: "", + RawSFlowTargets: "", + RawIPFIXTargets: "", + } + + // IPFIX holds IPFIX-related performance configuration options. It requires that the + // IPFIXTargets value of the Monitoring section contains at least one endpoint. 
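+	// For example, a config file that enables IPFIX export might set
+	// (illustrative endpoint; the key name follows the gcfg tag on
+	// RawIPFIXTargets below): ipfix-targets=172.18.0.5:2055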
+ IPFIX = IPFIXConfig{ + Sampling: 400, + CacheActiveTimeout: 60, + CacheMaxFlows: 0, + } + + // CNI holds CNI-related parsed config file parameters and command-line overrides + CNI = CNIConfig{ + ConfDir: "/etc/cni/net.d", + Plugin: "ovn-k8s-cni-overlay", + } + + // Kubernetes holds Kubernetes-related parsed config file parameters and command-line overrides + Kubernetes = KubernetesConfig{ + APIServer: DefaultAPIServer, + RawServiceCIDRs: "172.16.1.0/24", + OVNConfigNamespace: "ovn-kubernetes", + HostNetworkNamespace: "", + DisableRequestedChassis: false, + PlatformType: "", + DNSServiceNamespace: "kube-system", + DNSServiceName: "kube-dns", + // By default, use a short lifetime length for certificates to ensure that the automatic rotation works well, + // might revisit in the future to use a more sensible value + CertDuration: 10 * time.Minute, + } + + // Metrics holds Prometheus metrics-related parameters. + Metrics MetricsConfig + + // OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides + OVNKubernetesFeature = OVNKubernetesFeatureConfig{ + EgressIPReachabiltyTotalTimeout: 1, + } + + // OvnNorth holds northbound OVN database client and server authentication and location details + OvnNorth OvnAuthConfig + + // OvnSouth holds southbound OVN database client and server authentication and location details + OvnSouth OvnAuthConfig + + // Gateway holds node gateway-related parsed config file parameters and command-line overrides + Gateway = GatewayConfig{ + V4JoinSubnet: "100.64.0.0/16", + V6JoinSubnet: "fd98::/64", + V4MasqueradeSubnet: "169.254.169.0/29", + V6MasqueradeSubnet: "fd69::/125", + MasqueradeIPs: MasqueradeIPsConfig{ + V4OVNMasqueradeIP: net.ParseIP("169.254.169.1"), + V6OVNMasqueradeIP: net.ParseIP("fd69::1"), + V4HostMasqueradeIP: net.ParseIP("169.254.169.2"), + V6HostMasqueradeIP: net.ParseIP("fd69::2"), + V4HostETPLocalMasqueradeIP: net.ParseIP("169.254.169.3"), + V6HostETPLocalMasqueradeIP: net.ParseIP("fd69::3"), + V4DummyNextHopMasqueradeIP: net.ParseIP("169.254.169.4"), + V6DummyNextHopMasqueradeIP: net.ParseIP("fd69::4"), + V4OVNServiceHairpinMasqueradeIP: net.ParseIP("169.254.169.5"), + V6OVNServiceHairpinMasqueradeIP: net.ParseIP("fd69::5"), + }, + } + + // Set Leaderelection config values based on + // https://github.com/openshift/enhancements/blame/84e894ead7b188a1013556e0ba6973b8463995f1/CONVENTIONS.md#L183 + + // MasterHA holds master HA related config options. + MasterHA = HAConfig{ + ElectionRetryPeriod: 26, + ElectionRenewDeadline: 107, + ElectionLeaseDuration: 137, + } + + // ClusterMgrHA holds cluster manager HA related config options. + ClusterMgrHA = HAConfig{ + ElectionRetryPeriod: 26, + ElectionRenewDeadline: 107, + ElectionLeaseDuration: 137, + } + + // HybridOverlay holds hybrid overlay feature config options. + HybridOverlay = HybridOverlayConfig{ + VXLANPort: DefaultVXLANPort, + } + + // UnprivilegedMode allows ovnkube-node to run without SYS_ADMIN capability, by performing interface setup in the CNI plugin + UnprivilegedMode bool + + // EnableMulticast enables multicast support between the pods within the same namespace + EnableMulticast bool + + // IPv4Mode captures whether we are using IPv4 for OVN logical topology. (ie, single-stack IPv4 or dual-stack) + IPv4Mode bool + + // IPv6Mode captures whether we are using IPv6 for OVN logical topology. 
+	IPv6Mode bool
+
+	// OvnKubeNode holds ovnkube-node parsed config file parameters and command-line overrides
+	OvnKubeNode = OvnKubeNodeConfig{
+		Mode: types.NodeModeFull,
+	}
+
+	ClusterManager = ClusterManagerConfig{
+		V4TransitSwitchSubnet: "100.88.0.0/16",
+		V6TransitSwitchSubnet: "fd97::/64",
+	}
+)
+
+const (
+	kubeServiceAccountPath       string = "/var/run/secrets/kubernetes.io/serviceaccount/"
+	kubeServiceAccountFileToken  string = "token"
+	kubeServiceAccountFileCACert string = "ca.crt"
+)
+
+// DefaultConfig holds parsed config file parameters and command-line overrides
+type DefaultConfig struct {
+	// MTU value used for the overlay networks.
+	MTU int `gcfg:"mtu"`
+	// RoutableMTU is the maximum routable MTU between nodes, used to facilitate
+	// an MTU migration procedure where different nodes might be using different
+	// MTU values
+	RoutableMTU int `gcfg:"routable-mtu"`
+	// ConntrackZone affects only the gateway nodes. This value is used to track connections
+	// that are initiated from the pods so that the reverse connections go back to the pods.
+	// This represents the conntrack zone used for the conntrack flow rules.
+	ConntrackZone int `gcfg:"conntrack-zone"`
+	// HostMasqConntrackZone is an unexposed config with the value of ConntrackZone+1
+	HostMasqConntrackZone int
+	// OVNMasqConntrackZone is an unexposed config with the value of ConntrackZone+2
+	OVNMasqConntrackZone int
+	// HostNodePortConntrackZone is an unexposed config with the value of ConntrackZone+3
+	HostNodePortConntrackZone int
+	// ReassemblyConntrackZone is an unexposed config with the value of ConntrackZone+4
+	ReassemblyConntrackZone int
+	// EncapType value defines the encapsulation protocol to use to transmit packets between
+	// hypervisors. By default the value is 'geneve'
+	EncapType string `gcfg:"encap-type"`
+	// The IP address of the encapsulation endpoint. If not specified, the IP address the
+	// NodeName resolves to will be used
+	EncapIP string `gcfg:"encap-ip"`
+	// The UDP port of the encapsulation endpoint. If not specified, the default port
+	// of 6081 will be used
+	EncapPort uint `gcfg:"encap-port"`
+	// Maximum number of milliseconds of idle time on the connection that
+	// ovn-controller waits before it will send a connection health probe.
+	InactivityProbe int `gcfg:"inactivity-probe"`
+	// Maximum number of seconds of idle time on the OpenFlow connection
+	// that ovn-controller will wait before it sends a connection health probe
+	OpenFlowProbe int `gcfg:"openflow-probe"`
+	// Maximum number of milliseconds that ovn-controller waits before clearing existing flows
+	// during start up, to make sure the initial flow compute is complete and avoid data plane
+	// interruptions.
+	OfctrlWaitBeforeClear int `gcfg:"ofctrl-wait-before-clear"`
+	// The boolean flag indicates if ovn-controller should monitor all data in SB DB
+	// instead of conditionally monitoring the data relevant to this node only.
+	// By default monitor-all is enabled.
+	MonitorAll bool `gcfg:"monitor-all"`
+	// OVSDBTxnTimeout is the timeout for db transactions; it may be useful to increase it for high-scale clusters.
+	// The default value is 100 seconds.
+	OVSDBTxnTimeout time.Duration `gcfg:"db-txn-timeout"`
+	// The boolean flag indicates if ovn-controller should
+	// enable/disable the logical flow in-memory cache it uses
+	// when processing Southbound database logical flow changes.
+	// By default caching is enabled.
+	LFlowCacheEnable bool `gcfg:"enable-lflow-cache"`
+	// Maximum number of logical flow cache entries ovn-controller
+	// may create when the logical flow cache is enabled. By
+	// default the size of the cache is unlimited.
+	LFlowCacheLimit uint `gcfg:"lflow-cache-limit"`
+	// Maximum size, in KB, of the logical flow cache ovn-controller
+	// may create when the logical flow cache is enabled. By
+	// default the size of the cache is unlimited.
+	LFlowCacheLimitKb uint `gcfg:"lflow-cache-limit-kb"`
+	// RawClusterSubnets holds the unparsed cluster subnets. Should only be
+	// used inside config module.
+	RawClusterSubnets string `gcfg:"cluster-subnets"`
+	// ClusterSubnets holds parsed cluster subnet entries and may be used
+	// outside the config module.
+	ClusterSubnets []CIDRNetworkEntry
+	// EnableUDPAggregation is true if ovn-kubernetes should use UDP Generic Receive
+	// Offload forwarding to improve the performance of containers that transmit lots
+	// of small UDP packets by allowing them to be aggregated before passing through
+	// the kernel network stack. This requires a new-enough kernel (5.15 or RHEL 8.5).
+	EnableUDPAggregation bool `gcfg:"enable-udp-aggregation"`
+
+	// Zone name to which ovnkube-node/ovnkube-controller belongs
+	Zone string `gcfg:"zone"`
+
+	// RawUDNAllowedDefaultServices holds the unparsed UDNAllowedDefaultServices. Should only be
+	// used inside config module.
+	RawUDNAllowedDefaultServices string `gcfg:"udn-allowed-default-services"`
+
+	// UDNAllowedDefaultServices holds a list of namespaced names of
+	// default cluster network services accessible from primary user-defined networks
+	UDNAllowedDefaultServices []string
+}
+
+// LoggingConfig holds logging-related parsed config file parameters and command-line overrides
+type LoggingConfig struct {
+	// File is the path of the file to log to
+	File string `gcfg:"logfile"`
+	// CNIFile is the path of the file for the CNI shim to log to
+	CNIFile string `gcfg:"cnilogfile"`
+	// LibovsdbFile is the path of the file for the libovsdb client to log to
+	LibovsdbFile string `gcfg:"libovsdblogfile"`
+	// Level is the logging verbosity level
+	Level int `gcfg:"loglevel"`
+	// LogFileMaxSize is the maximum size in megabytes of the logfile
+	// before it gets rolled.
+	LogFileMaxSize int `gcfg:"logfile-maxsize"`
+	// LogFileMaxBackups represents the maximum number of old log files to retain
+	LogFileMaxBackups int `gcfg:"logfile-maxbackups"`
+	// LogFileMaxAge represents the maximum number of days to retain old log files
+	LogFileMaxAge int `gcfg:"logfile-maxage"`
+	// Logging rate-limiting meter
+	ACLLoggingRateLimit int `gcfg:"acl-logging-rate-limit"`
+}
+
+// MonitoringConfig holds monitoring-related parsed config file parameters and command-line overrides
+type MonitoringConfig struct {
+	// RawNetFlowTargets holds the unparsed NetFlow targets. Should only be used inside the config module.
+	RawNetFlowTargets string `gcfg:"netflow-targets"`
+	// RawSFlowTargets holds the unparsed SFlow targets. Should only be used inside the config module.
+	RawSFlowTargets string `gcfg:"sflow-targets"`
+	// RawIPFIXTargets holds the unparsed IPFIX targets. Should only be used inside the config module.
+	RawIPFIXTargets string `gcfg:"ipfix-targets"`
+	// NetFlowTargets holds the parsed NetFlow targets and may be used outside the config module.
+	NetFlowTargets []HostPort
+	// SFlowTargets holds the parsed SFlow targets and may be used outside the config module.
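+	// Each target is parsed from an "IP:port" or ":port" string; when only a
+	// port is given, the node IP is used. For example (illustrative value),
+	// "10.128.0.150:6343".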
+	SFlowTargets []HostPort
+	// IPFIXTargets holds the parsed IPFIX targets and may be used outside the config module.
+	IPFIXTargets []HostPort
+}
+
+// IPFIXConfig holds IPFIX-related performance configuration options. It requires that the ipfix-targets
+// value of the [monitoring] section contains at least one endpoint.
+type IPFIXConfig struct {
+	// Sampling is an optional integer in range 1 to 4,294,967,295. It holds the rate at which
+	// packets should be sampled and sent to each target collector. If not specified, defaults to
+	// 400, which means one out of 400 packets, on average, will be sent to each target collector.
+	Sampling uint `gcfg:"sampling"`
+	// CacheActiveTimeout is an optional integer in range 0 to 4,200. It holds the maximum period in
+	// seconds for which an IPFIX flow record is cached and aggregated before being sent. If not
+	// specified, defaults to 60. If 0, caching is disabled.
+	CacheActiveTimeout uint `gcfg:"cache-active-timeout"`
+	// CacheMaxFlows is an optional integer in range 0 to 4,294,967,295. It holds the maximum number
+	// of IPFIX flow records that can be cached at a time. If not specified, defaults to 0.
+	// If 0, caching is disabled.
+	CacheMaxFlows uint `gcfg:"cache-max-flows"`
+}
+
+// CNIConfig holds CNI-related parsed config file parameters and command-line overrides
+type CNIConfig struct {
+	// ConfDir specifies the CNI config directory in which to write the overlay CNI config file
+	ConfDir string `gcfg:"conf-dir"`
+	// Plugin specifies the name of the CNI plugin
+	Plugin string `gcfg:"plugin"`
+}
+
+// KubernetesConfig holds Kubernetes-related parsed config file parameters and command-line overrides
+type KubernetesConfig struct {
+	BootstrapKubeconfig     string        `gcfg:"bootstrap-kubeconfig"`
+	CertDir                 string        `gcfg:"cert-dir"`
+	CertDuration            time.Duration `gcfg:"cert-duration"`
+	Kubeconfig              string        `gcfg:"kubeconfig"`
+	CACert                  string        `gcfg:"cacert"`
+	CAData                  []byte
+	APIServer               string `gcfg:"apiserver"`
+	Token                   string `gcfg:"token"`
+	TokenFile               string `gcfg:"tokenFile"`
+	CompatServiceCIDR       string `gcfg:"service-cidr"`
+	RawServiceCIDRs         string `gcfg:"service-cidrs"`
+	ServiceCIDRs            []*net.IPNet
+	OVNConfigNamespace      string `gcfg:"ovn-config-namespace"`
+	OVNEmptyLbEvents        bool   `gcfg:"ovn-empty-lb-events"`
+	PodIP                   string `gcfg:"pod-ip"` // UNUSED
+	RawNoHostSubnetNodes    string `gcfg:"no-hostsubnet-nodes"`
+	NoHostSubnetNodes       labels.Selector
+	HostNetworkNamespace    string `gcfg:"host-network-namespace"`
+	DisableRequestedChassis bool   `gcfg:"disable-requestedchassis"`
+	PlatformType            string `gcfg:"platform-type"`
+	HealthzBindAddress      string `gcfg:"healthz-bind-address"`
+
+	// CompatMetricsBindAddress is overridden by the corresponding option in MetricsConfig
+	CompatMetricsBindAddress string `gcfg:"metrics-bind-address"`
+	// CompatOVNMetricsBindAddress is overridden by the corresponding option in MetricsConfig
+	CompatOVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"`
+	// CompatMetricsEnablePprof is overridden by the corresponding option in MetricsConfig
+	CompatMetricsEnablePprof bool `gcfg:"metrics-enable-pprof"`
+
+	DNSServiceNamespace string `gcfg:"dns-service-namespace"`
+	DNSServiceName      string `gcfg:"dns-service-name"`
+}
+
+// MetricsConfig holds Prometheus metrics-related parameters.
+type MetricsConfig struct {
+	BindAddress           string `gcfg:"bind-address"`
+	OVNMetricsBindAddress string `gcfg:"ovn-metrics-bind-address"`
+	ExportOVSMetrics      bool   `gcfg:"export-ovs-metrics"`
+	EnablePprof           bool   `gcfg:"enable-pprof"`
+	NodeServerPrivKey     string `gcfg:"node-server-privkey"`
+	NodeServerCert        string `gcfg:"node-server-cert"`
+	// EnableConfigDuration holds the boolean flag to enable OVN-Kubernetes master to monitor OVN-Kubernetes master
+	// configuration duration and, optionally, its application to all nodes
+	EnableConfigDuration bool `gcfg:"enable-config-duration"`
+	EnableScaleMetrics   bool `gcfg:"enable-scale-metrics"`
+}
+
+// OVNKubernetesFeatureConfig holds OVN-Kubernetes feature enhancement config file parameters and command-line overrides
+type OVNKubernetesFeatureConfig struct {
+	// Admin Network Policy feature is enabled
+	EnableAdminNetworkPolicy bool `gcfg:"enable-admin-network-policy"`
+	// EgressIP feature is enabled
+	EnableEgressIP bool `gcfg:"enable-egress-ip"`
+	// EgressIP node reachability total timeout in seconds
+	EgressIPReachabiltyTotalTimeout int  `gcfg:"egressip-reachability-total-timeout"`
+	EnableEgressFirewall            bool `gcfg:"enable-egress-firewall"`
+	EnableEgressQoS                 bool `gcfg:"enable-egress-qos"`
+	EnableEgressService             bool `gcfg:"enable-egress-service"`
+	EgressIPNodeHealthCheckPort     int  `gcfg:"egressip-node-healthcheck-port"`
+	EnableMultiNetwork              bool `gcfg:"enable-multi-network"`
+	EnableNetworkSegmentation       bool `gcfg:"enable-network-segmentation"`
+	EnableMultiNetworkPolicy        bool `gcfg:"enable-multi-networkpolicy"`
+	EnableStatelessNetPol           bool `gcfg:"enable-stateless-netpol"`
+	EnableInterconnect              bool `gcfg:"enable-interconnect"`
+	EnableMultiExternalGateway      bool `gcfg:"enable-multi-external-gateway"`
+	EnablePersistentIPs             bool `gcfg:"enable-persistent-ips"`
+	EnableDNSNameResolver           bool `gcfg:"enable-dns-name-resolver"`
+	EnableServiceTemplateSupport    bool `gcfg:"enable-svc-template-support"`
+	EnableObservability             bool `gcfg:"enable-observability"`
+}
+
+// GatewayMode holds the node gateway mode
+type GatewayMode string
+
+const (
+	// GatewayModeDisabled indicates the node gateway mode is disabled
+	GatewayModeDisabled GatewayMode = ""
+	// GatewayModeShared indicates OVN shares a gateway interface with the node
+	GatewayModeShared GatewayMode = "shared"
+	// GatewayModeLocal indicates OVN creates a local NAT-ed interface for the gateway
+	GatewayModeLocal GatewayMode = "local"
+)
+
+// GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides
+type GatewayConfig struct {
+	// Mode is the gateway mode; it may be either empty (disabled), "shared", or "local"
+	Mode GatewayMode `gcfg:"mode"`
+	// Interface is the network interface to use for the gateway in "shared" mode
+	Interface string `gcfg:"interface"`
+	// EgressGWInterface is the optional network interface to use for external gw pods traffic.
+	EgressGWInterface string `gcfg:"egw-interface"`
+	// NextHop is the gateway IP address of Interface; will be autodetected if not given
+	NextHop string `gcfg:"next-hop"`
+	// VLANID is the optional VLAN tag to apply to gateway traffic for "shared" mode
+	VLANID uint `gcfg:"vlan-id"`
+	// NodeportEnable sets whether to provide Kubernetes NodePort service or not
+	NodeportEnable bool `gcfg:"nodeport"`
+	// DisableSNATMultipleGWs sets whether to disable SNAT of egress traffic in namespaces annotated with routing-external-gws
+	DisableSNATMultipleGWs bool `gcfg:"disable-snat-multiple-gws"`
+	// V4JoinSubnet to be used in the cluster
+	V4JoinSubnet string `gcfg:"v4-join-subnet"`
+	// V6JoinSubnet to be used in the cluster
+	V6JoinSubnet string `gcfg:"v6-join-subnet"`
+	// V4MasqueradeSubnet to be used in the cluster
+	V4MasqueradeSubnet string `gcfg:"v4-masquerade-subnet"`
+	// V6MasqueradeSubnet to be used in the cluster
+	V6MasqueradeSubnet string `gcfg:"v6-masquerade-subnet"`
+	// MasqueradeIPs are allocated from the masquerade subnets to enable host-to-service traffic
+	MasqueradeIPs MasqueradeIPsConfig
+
+	// DisablePacketMTUCheck disables adding openflow flows to check packets too large to be
+	// delivered to OVN due to pod MTU being lower than NIC MTU. Disabling this check will result in southbound packets
+	// exceeding pod MTU being dropped by OVN. With this check enabled, ICMP needs-frag/packet-too-big messages will be sent
+	// back to the original client
+	DisablePacketMTUCheck bool `gcfg:"disable-pkt-mtu-check"`
+	// RouterSubnet is the subnet to be used for the GR external port. Auto-detected if not given.
+	// Must match the kube node IP address. Currently valid for DPU only.
+	RouterSubnet string `gcfg:"router-subnet"`
+	// SingleNode indicates the cluster has only one node
+	SingleNode bool `gcfg:"single-node"`
+	// DisableForwarding controls whether forwarding is allowed on OVNK controlled interfaces (forwarding is enabled by default)
+	DisableForwarding bool `gcfg:"disable-forwarding"`
+	// AllowNoUplink (disabled by default) controls if the external gateway bridge without an uplink port is allowed in local gateway mode.
+	AllowNoUplink bool `gcfg:"allow-no-uplink"`
+}
+
+// OvnAuthConfig holds client authentication and location details for
+// an OVN database (either northbound or southbound)
+type OvnAuthConfig struct {
+	// e.g.: "ssl:192.168.1.2:6641,ssl:192.168.1.2:6642"
+	Address        string `gcfg:"address"`
+	PrivKey        string `gcfg:"client-privkey"`
+	Cert           string `gcfg:"client-cert"`
+	CACert         string `gcfg:"client-cacert"`
+	CertCommonName string `gcfg:"cert-common-name"`
+	Scheme         OvnDBScheme
+	ElectionTimer  uint `gcfg:"election-timer"`
+	northbound     bool
+
+	exec kexec.Interface
+}
+
+// HAConfig holds leader election configuration.
+type HAConfig struct {
+	ElectionLeaseDuration int `gcfg:"election-lease-duration"`
+	ElectionRenewDeadline int `gcfg:"election-renew-deadline"`
+	ElectionRetryPeriod   int `gcfg:"election-retry-period"`
+}
+
+// HybridOverlayConfig holds hybrid overlay configuration.
+type HybridOverlayConfig struct {
+	// Enabled indicates whether hybrid overlay features are enabled or not.
+	Enabled bool `gcfg:"enabled"`
+	// RawClusterSubnets holds the unparsed hybrid overlay cluster subnets.
+	// Should only be used inside config module.
+	RawClusterSubnets string `gcfg:"cluster-subnets"`
+	// ClusterSubnets holds parsed hybrid overlay cluster subnet entries and
+	// may be used outside the config module.
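+	// For example (illustrative), the entry "10.132.0.0/14/23" yields the
+	// 10.132.0.0/14 network carved into /23 host subnets per node.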
+	ClusterSubnets []CIDRNetworkEntry
+	// VXLANPort holds the VXLAN tunnel UDP port number.
+	VXLANPort uint `gcfg:"hybrid-overlay-vxlan-port"`
+}
+
+// OvnKubeNodeConfig holds ovnkube-node configurations
+type OvnKubeNodeConfig struct {
+	Mode                   string `gcfg:"mode"`
+	DPResourceDeviceIdsMap map[string][]string
+	MgmtPortNetdev         string `gcfg:"mgmt-port-netdev"`
+	MgmtPortDPResourceName string `gcfg:"mgmt-port-dp-resource-name"`
+}
+
+// ClusterManagerConfig holds configuration for ovnkube-cluster-manager
+type ClusterManagerConfig struct {
+	// V4TransitSwitchSubnet to be used in the cluster for interconnecting multiple zones
+	V4TransitSwitchSubnet string `gcfg:"v4-transit-switch-subnet"`
+	// V6TransitSwitchSubnet to be used in the cluster for interconnecting multiple zones
+	V6TransitSwitchSubnet string `gcfg:"v6-transit-switch-subnet"`
+}
+
+// OvnDBScheme describes the OVN database connection transport method
+type OvnDBScheme string
+
+const (
+	// OvnDBSchemeSSL specifies SSL as the OVN database transport method
+	OvnDBSchemeSSL OvnDBScheme = "ssl"
+	// OvnDBSchemeTCP specifies TCP as the OVN database transport method
+	OvnDBSchemeTCP OvnDBScheme = "tcp"
+	// OvnDBSchemeUnix specifies Unix domain sockets as the OVN database transport method
+	OvnDBSchemeUnix OvnDBScheme = "unix"
+)
+
+// config is used to read the structured config file and to cache config in testcases
+type config struct {
+	Default              DefaultConfig
+	Logging              LoggingConfig
+	Monitoring           MonitoringConfig
+	IPFIX                IPFIXConfig
+	CNI                  CNIConfig
+	OVNKubernetesFeature OVNKubernetesFeatureConfig
+	Kubernetes           KubernetesConfig
+	Metrics              MetricsConfig
+	OvnNorth             OvnAuthConfig
+	OvnSouth             OvnAuthConfig
+	Gateway              GatewayConfig
+	MasterHA             HAConfig
+	ClusterMgrHA         HAConfig
+	HybridOverlay        HybridOverlayConfig
+	OvnKubeNode          OvnKubeNodeConfig
+	ClusterManager       ClusterManagerConfig
+}
+
+var (
+	savedDefault              DefaultConfig
+	savedLogging              LoggingConfig
+	savedMonitoring           MonitoringConfig
+	savedIPFIX                IPFIXConfig
+	savedCNI                  CNIConfig
+	savedOVNKubernetesFeature OVNKubernetesFeatureConfig
+	savedKubernetes           KubernetesConfig
+	savedMetrics              MetricsConfig
+	savedOvnNorth             OvnAuthConfig
+	savedOvnSouth             OvnAuthConfig
+	savedGateway              GatewayConfig
+	savedMasterHA             HAConfig
+	savedClusterMgrHA         HAConfig
+	savedHybridOverlay        HybridOverlayConfig
+	savedOvnKubeNode          OvnKubeNodeConfig
+	savedClusterManager       ClusterManagerConfig
+
+	// legacy service-cluster-ip-range CLI option
+	serviceClusterIPRange string
+	// legacy cluster-subnet CLI option
+	clusterSubnet string
+	// legacy init-gateways CLI option
+	initGateways bool
+	// legacy gateway-local CLI option
+	gatewayLocal bool
+	// legacy disable-ovn-iface-id-ver CLI option
+	disableOVNIfaceIDVer bool
+)
+
+func init() {
+	// Cache original default config values
+	savedDefault = Default
+	savedLogging = Logging
+	savedMonitoring = Monitoring
+	savedIPFIX = IPFIX
+	savedCNI = CNI
+	savedOVNKubernetesFeature = OVNKubernetesFeature
+	savedKubernetes = Kubernetes
+	savedMetrics = Metrics
+	savedOvnNorth = OvnNorth
+	savedOvnSouth = OvnSouth
+	savedGateway = Gateway
+	savedMasterHA = MasterHA
+	savedClusterMgrHA = ClusterMgrHA
+	savedHybridOverlay = HybridOverlay
+	savedOvnKubeNode = OvnKubeNode
+	savedClusterManager = ClusterManager
+	cli.VersionPrinter = func(c *cli.Context) {
+		fmt.Printf("Version: %s\n", Version)
+		fmt.Printf("Git commit: %s\n", Commit)
+		fmt.Printf("Git branch: %s\n", Branch)
+		fmt.Printf("Go version: %s\n", GoVersion)
+		fmt.Printf("Build date: %s\n", BuildDate)
+		fmt.Printf("OS/Arch: %s\n", OSArch)
+	}
+	Flags = GetFlags([]cli.Flag{})
+}
+
+// PrepareTestConfig restores default config values. Used by testcases to
+// provide a pristine environment between tests.
+func PrepareTestConfig() error {
+	Default = savedDefault
+	Logging = savedLogging
+	Logging.Level = 5
+	Monitoring = savedMonitoring
+	IPFIX = savedIPFIX
+	CNI = savedCNI
+	OVNKubernetesFeature = savedOVNKubernetesFeature
+	Kubernetes = savedKubernetes
+	Metrics = savedMetrics
+	OvnNorth = savedOvnNorth
+	OvnSouth = savedOvnSouth
+	Gateway = savedGateway
+	MasterHA = savedMasterHA
+	ClusterMgrHA = savedClusterMgrHA
+	HybridOverlay = savedHybridOverlay
+	OvnKubeNode = savedOvnKubeNode
+	ClusterManager = savedClusterManager
+	Kubernetes.DisableRequestedChassis = false
+	EnableMulticast = false
+	Default.OVSDBTxnTimeout = 5 * time.Second
+
+	if err := completeConfig(); err != nil {
+		return err
+	}
+
+	// Don't pick up defaults from the environment
+	os.Unsetenv("KUBECONFIG")
+	os.Unsetenv("K8S_CACERT")
+	os.Unsetenv("K8S_APISERVER")
+	os.Unsetenv("K8S_TOKEN")
+	os.Unsetenv("K8S_TOKEN_FILE")
+
+	return nil
+}
+
+// copy members of struct 'src' into the corresponding fields of struct 'dst'
+// when the field carries a 'gcfg' tag and the 'src' value differs from the
+// supplied default value. This function should be called with pointers to structs.
+func overrideFields(dst, src, defaults interface{}) error {
+	dstStruct := reflect.ValueOf(dst).Elem()
+	srcStruct := reflect.ValueOf(src).Elem()
+	if dstStruct.Kind() != srcStruct.Kind() || dstStruct.Kind() != reflect.Struct {
+		return fmt.Errorf("mismatched value types")
+	}
+	if dstStruct.NumField() != srcStruct.NumField() {
+		return fmt.Errorf("mismatched struct types")
+	}
+
+	var defStruct reflect.Value
+	if defaults != nil {
+		defStruct = reflect.ValueOf(defaults).Elem()
+	}
+	// Iterate over each field in dst/src Type so we can get the tags,
+	// and use the field name to retrieve the field's actual value from
+	// the dst/src instance
+	var handled bool
+	dstType := reflect.TypeOf(dst).Elem()
+	for i := 0; i < dstType.NumField(); i++ {
+		structField := dstType.Field(i)
+		// Ignore private internal fields; we only care about overriding
+		// 'gcfg' tagged fields read from CLI or the config file
+		if _, ok := structField.Tag.Lookup("gcfg"); !ok {
+			continue
+		}
+		handled = true
+
+		dstField := dstStruct.FieldByName(structField.Name)
+		srcField := srcStruct.FieldByName(structField.Name)
+		var dv reflect.Value
+		if defStruct.IsValid() {
+			dv = defStruct.FieldByName(structField.Name)
+		}
+		if !dstField.IsValid() || !srcField.IsValid() {
+			return fmt.Errorf("invalid struct %q field %q", dstType.Name(), structField.Name)
+		}
+		if dstField.Kind() != srcField.Kind() {
+			return fmt.Errorf("mismatched struct %q fields %q", dstType.Name(), structField.Name)
+		}
+		if dv.IsValid() && reflect.DeepEqual(dv.Interface(), srcField.Interface()) {
+			continue
+		}
+		dstField.Set(srcField)
+	}
+	if !handled {
+		// No tags found in the struct so we don't know how to override
+		return fmt.Errorf("failed to find 'gcfg' tags in struct %q", dstType.Name())
+	}
+
+	return nil
+}
+
+var cliConfig config
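To make the override rule above concrete, here is a minimal illustrative sketch (not part of the vendored file; it assumes it runs inside this package so it can call the unexported `overrideFields`, and the struct and function names are hypothetical):

```go
// demoCfg is a hypothetical struct used only for this sketch.
type demoCfg struct {
	MTU  int    `gcfg:"mtu"`
	Mode string `gcfg:"mode"`
}

func demoOverride() {
	def := demoCfg{MTU: 1400, Mode: "shared"}
	dst := demoCfg{MTU: 1400, Mode: "shared"}
	src := demoCfg{MTU: 9000, Mode: "shared"} // Mode equals its default

	_ = overrideFields(&dst, &src, &def)
	// dst.MTU is now 9000 (the src value differed from the default);
	// dst.Mode stays "shared" (the src value matched the default and was skipped).
}
```

This defaults-aware comparison is what lets the later build functions layer service-account, environment, config-file, and CLI values on top of each other without a set value being clobbered by an untouched default.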
+// CommonFlags capture general options.
+var CommonFlags = []cli.Flag{
+	// Mode flags
+	&cli.StringFlag{
+		Name:  "init-master",
+		Usage: "initialize master (both cluster-manager and ovnkube-controller), requires the hostname as argument",
+	},
+	&cli.StringFlag{
+		Name:  "init-cluster-manager",
+		Usage: "initialize cluster manager (but not ovnkube-controller), requires the hostname as argument",
+	},
+	&cli.StringFlag{
+		Name:  "init-ovnkube-controller",
+		Usage: "initialize ovnkube-controller (but not cluster-manager), requires the hostname as argument",
+	},
+	&cli.StringFlag{
+		Name:  "init-node",
+		Usage: "initialize node, requires the name that the node is registered with in the kubernetes cluster",
+	},
+	&cli.StringFlag{
+		Name:  "cleanup-node",
+		Usage: "cleanup node, requires the name that the node is registered with in the kubernetes cluster",
+	},
+	&cli.StringFlag{
+		Name:  "pidfile",
+		Usage: "Name of file that will hold the ovnkube pid (optional)",
+	},
+	&cli.StringFlag{
+		Name:  "config-file",
+		Usage: "configuration file path (default: /etc/openvswitch/ovn_k8s.conf)",
+		//Value: "/etc/openvswitch/ovn_k8s.conf",
+	},
+	&cli.IntFlag{
+		Name:        "mtu",
+		Usage:       "MTU value used for the overlay networks (default: 1400)",
+		Destination: &cliConfig.Default.MTU,
+		Value:       Default.MTU,
+	},
+	&cli.IntFlag{
+		Name:        "routable-mtu",
+		Usage:       "Maximum routable MTU between nodes, used to facilitate an MTU migration procedure where different nodes might be using different MTU values",
+		Destination: &cliConfig.Default.RoutableMTU,
+	},
+	&cli.IntFlag{
+		Name:        "conntrack-zone",
+		Usage:       "For gateway nodes, the conntrack zone used for conntrack flow rules (default: 64000)",
+		Destination: &cliConfig.Default.ConntrackZone,
+		Value:       Default.ConntrackZone,
+	},
+	&cli.StringFlag{
+		Name:        "encap-type",
+		Usage:       "The encapsulation protocol to use to transmit packets between hypervisors (default: geneve)",
+		Destination: &cliConfig.Default.EncapType,
+		Value:       Default.EncapType,
+	},
+	&cli.StringFlag{
+		Name:        "encap-ip",
+		Usage:       "The IP address of the encapsulation endpoint (default: Node IP address resolved from Node hostname)",
+		Destination: &cliConfig.Default.EncapIP,
+	},
+	&cli.UintFlag{
+		Name:        "encap-port",
+		Usage:       "The UDP port used by the encapsulation endpoint (default: 6081)",
+		Destination: &cliConfig.Default.EncapPort,
+		Value:       Default.EncapPort,
+	},
+	&cli.IntFlag{
+		Name: "inactivity-probe",
+		Usage: "Maximum number of milliseconds of idle time on " +
+			"connection for ovn-controller before it sends an inactivity probe",
+		Destination: &cliConfig.Default.InactivityProbe,
+		Value:       Default.InactivityProbe,
+	},
+	&cli.IntFlag{
+		Name: "openflow-probe",
+		Usage: "Maximum number of seconds of idle time on the openflow " +
+			"connection for ovn-controller before it sends an inactivity probe",
+		Destination: &cliConfig.Default.OpenFlowProbe,
+		Value:       Default.OpenFlowProbe,
+	},
+	&cli.IntFlag{
+		Name: "ofctrl-wait-before-clear",
+		Usage: "Maximum number of milliseconds that ovn-controller waits before " +
+			"clearing existing flows during start up, to make sure the initial flow " +
+			"compute is complete and avoid data plane interruptions.",
+		Destination: &cliConfig.Default.OfctrlWaitBeforeClear,
+		Value:       Default.OfctrlWaitBeforeClear,
+	},
+	&cli.BoolFlag{
+		Name: "monitor-all",
+		Usage: "Enable monitoring all data from SB DB instead of conditionally " +
+			"monitoring the data relevant to this node only. " +
" + + "By default it is enabled.", + Destination: &cliConfig.Default.MonitorAll, + Value: Default.MonitorAll, + }, + &cli.DurationFlag{ + Name: "db-txn-timeout", + Usage: "OVSDBTxnTimeout is the timeout for db transaction in seconds, " + + "may be useful to increase for high-scale clusters. default value is 60 seconds.", + Destination: &cliConfig.Default.OVSDBTxnTimeout, + Value: Default.OVSDBTxnTimeout, + }, + &cli.BoolFlag{ + Name: "enable-lflow-cache", + Usage: "Enable the logical flow in-memory cache it uses " + + "when processing Southbound database logical flow changes. " + + "By default caching is enabled.", + Destination: &cliConfig.Default.LFlowCacheEnable, + Value: Default.LFlowCacheEnable, + }, + &cli.UintFlag{ + Name: "lflow-cache-limit", + Usage: "Maximum number of logical flow cache entries ovn-controller " + + "may create when the logical flow cache is enabled. By " + + "default the size of the cache is unlimited.", + Destination: &cliConfig.Default.LFlowCacheLimit, + Value: Default.LFlowCacheLimit, + }, + &cli.UintFlag{ + Name: "lflow-cache-limit-kb", + Usage: "Maximum size of the logical flow cache ovn-controller " + + "may create when the logical flow cache is enabled. By " + + "default the size of the cache is unlimited.", + Destination: &cliConfig.Default.LFlowCacheLimitKb, + Value: Default.LFlowCacheLimitKb, + }, + &cli.StringFlag{ + Name: "cluster-subnet", + Usage: "Deprecated alias for cluster-subnets.", + Destination: &clusterSubnet, + }, + &cli.StringFlag{ + Name: "cluster-subnets", + Value: Default.RawClusterSubnets, + Usage: "A comma separated set of IP subnets and the associated " + + "hostsubnet prefix lengths to use for the cluster (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " + + "Each entry is given in the form [IP address/prefix-length/hostsubnet-prefix-length] " + + "and cannot overlap with other entries. The hostsubnet-prefix-length " + + "defines how large a subnet is given to each node and may be different " + + "for each entry. For IPv6 subnets, it must be 64 (and does not need to " + + "be explicitly specified). For IPv4 subnets an explicit " + + "hostsubnet-prefix should be specified, but for backward compatibility " + + "it defaults to 24 if unspecified.", + Destination: &cliConfig.Default.RawClusterSubnets, + }, + &cli.BoolFlag{ + Name: "unprivileged-mode", + Usage: "Run ovnkube-node container in unprivileged mode. Valid only with --init-node option.", + Destination: &UnprivilegedMode, + }, + &cli.BoolFlag{ + Name: "enable-multicast", + Usage: "Adds multicast support. Valid only with --init-master option.", + Destination: &EnableMulticast, + }, + // Logging options + &cli.IntFlag{ + Name: "loglevel", + Usage: "log verbosity and level: info, warn, fatal, error are always printed no matter the log level. 
+			"Use 5 for debug (default: 4)",
+		Destination: &cliConfig.Logging.Level,
+		Value:       Logging.Level,
+	},
+	&cli.StringFlag{
+		Name:        "logfile",
+		Usage:       "path of a file to direct log output to",
+		Destination: &cliConfig.Logging.File,
+	},
+	&cli.StringFlag{
+		Name:        "cnilogfile",
+		Usage:       "path of a file to direct log from cni shim to output to (default: /var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log)",
+		Destination: &cliConfig.Logging.CNIFile,
+		Value:       "/var/log/ovn-kubernetes/ovn-k8s-cni-overlay.log",
+	},
+	&cli.StringFlag{
+		Name:        "libovsdblogfile",
+		Usage:       "path of a file to direct log from libovsdb client to output to (default is to use same as --logfile)",
+		Destination: &cliConfig.Logging.LibovsdbFile,
+	},
+	// Logfile rotation parameters
+	&cli.IntFlag{
+		Name:        "logfile-maxsize",
+		Usage:       "Maximum size in megabytes of the log file before it gets rolled",
+		Destination: &cliConfig.Logging.LogFileMaxSize,
+		Value:       Logging.LogFileMaxSize,
+	},
+	&cli.IntFlag{
+		Name:        "logfile-maxbackups",
+		Usage:       "Maximum number of old log files to retain",
+		Destination: &cliConfig.Logging.LogFileMaxBackups,
+		Value:       Logging.LogFileMaxBackups,
+	},
+	&cli.IntFlag{
+		Name:        "logfile-maxage",
+		Usage:       "Maximum number of days to retain old log files",
+		Destination: &cliConfig.Logging.LogFileMaxAge,
+		Value:       Logging.LogFileMaxAge,
+	},
+	&cli.IntFlag{
+		Name:        "acl-logging-rate-limit",
+		Usage:       "The largest number of messages per second that gets logged before drop (default 20)",
+		Destination: &cliConfig.Logging.ACLLoggingRateLimit,
+		Value:       20,
+	},
+	&cli.StringFlag{
+		Name:        "zone",
+		Usage:       "zone name to which ovnkube-node/ovnkube-controller belongs",
+		Value:       Default.Zone,
+		Destination: &cliConfig.Default.Zone,
+	},
+	&cli.StringFlag{
+		Name: "udn-allowed-default-services",
+		Usage: "a list of namespaced names of default cluster network services accessible from primary " +
+			"user-defined networks. If not specified, defaults to [\"default/kubernetes\", \"kube-system/kube-dns\"]. " +
+			"Only used when enable-network-segmentation is set",
+		Value:       Default.RawUDNAllowedDefaultServices,
+		Destination: &cliConfig.Default.RawUDNAllowedDefaultServices,
+	},
+}
+
+// MonitoringFlags capture monitoring-related options
+var MonitoringFlags = []cli.Flag{
+	// Monitoring options
+	&cli.StringFlag{
+		Name:  "netflow-targets",
+		Value: Monitoring.RawNetFlowTargets,
+		Usage: "A comma separated set of NetFlow collectors to export flow data (eg, \"10.128.0.150:2056,10.0.0.151:2056\"). " +
+			"Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP",
+		Destination: &cliConfig.Monitoring.RawNetFlowTargets,
+	},
+	&cli.StringFlag{
+		Name:  "sflow-targets",
+		Value: Monitoring.RawSFlowTargets,
+		Usage: "A comma separated set of SFlow collectors to export flow data (eg, \"10.128.0.150:6343,10.0.0.151:6343\"). " +
+			"Each entry is given in the form [IP address:port] or [:port]. If only port is provided, it uses the Node IP",
+		Destination: &cliConfig.Monitoring.RawSFlowTargets,
+	},
+	&cli.StringFlag{
+		Name:  "ipfix-targets",
+		Value: Monitoring.RawIPFIXTargets,
+		Usage: "A comma separated set of IPFIX collectors to export flow data (eg, \"10.128.0.150:2055,10.0.0.151:2055\"). " +
+			"Each entry is given in the form [IP address:port] or [:port]. " +
+			"If only port is provided, it uses the Node IP",
+		Destination: &cliConfig.Monitoring.RawIPFIXTargets,
+	},
+}
+
+// IPFIXFlags capture IPFIX-related options
+var IPFIXFlags = []cli.Flag{
+	&cli.UintFlag{
+		Name:        "ipfix-sampling",
+		Usage:       "Rate at which packets should be sampled and sent to each target collector (default: 400)",
+		Destination: &cliConfig.IPFIX.Sampling,
+		Value:       IPFIX.Sampling,
+	},
+	&cli.UintFlag{
+		Name:        "ipfix-cache-max-flows",
+		Usage:       "Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled (default: 0)",
+		Destination: &cliConfig.IPFIX.CacheMaxFlows,
+		Value:       IPFIX.CacheMaxFlows,
+	},
+	&cli.UintFlag{
+		Name:        "ipfix-cache-active-timeout",
+		Usage:       "Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled (default: 60)",
+		Destination: &cliConfig.IPFIX.CacheActiveTimeout,
+		Value:       IPFIX.CacheActiveTimeout,
+	},
+}
+
+// CNIFlags capture CNI-related options
+var CNIFlags = []cli.Flag{
+	// CNI options
+	&cli.StringFlag{
+		Name:        "cni-conf-dir",
+		Usage:       "the CNI config directory in which to write the overlay CNI config file (default: /etc/cni/net.d)",
+		Destination: &cliConfig.CNI.ConfDir,
+		Value:       CNI.ConfDir,
+	},
+	&cli.StringFlag{
+		Name:        "cni-plugin",
+		Usage:       "the name of the CNI plugin (default: ovn-k8s-cni-overlay)",
+		Destination: &cliConfig.CNI.Plugin,
+		Value:       CNI.Plugin,
+	},
+}
+
+// OVNK8sFeatureFlags capture OVN-Kubernetes feature related options
+var OVNK8sFeatureFlags = []cli.Flag{
+	&cli.BoolFlag{
+		Name:        "enable-admin-network-policy",
+		Usage:       "Configure to use Admin Network Policy CRD feature with ovn-kubernetes.",
+		Destination: &cliConfig.OVNKubernetesFeature.EnableAdminNetworkPolicy,
+		Value:       OVNKubernetesFeature.EnableAdminNetworkPolicy,
+	},
+	&cli.BoolFlag{
+		Name:        "enable-egress-ip",
+		Usage:       "Configure to use EgressIP CRD feature with ovn-kubernetes.",
+		Destination: &cliConfig.OVNKubernetesFeature.EnableEgressIP,
+		Value:       OVNKubernetesFeature.EnableEgressIP,
+	},
+	&cli.IntFlag{
+		Name:        "egressip-reachability-total-timeout",
+		Usage:       "EgressIP node reachability total timeout in seconds (default: 1)",
+		Destination: &cliConfig.OVNKubernetesFeature.EgressIPReachabiltyTotalTimeout,
+		Value:       1,
+	},
+	&cli.BoolFlag{
+		Name:        "enable-egress-firewall",
+		Usage:       "Configure to use EgressFirewall CRD feature with ovn-kubernetes.",
+		Destination: &cliConfig.OVNKubernetesFeature.EnableEgressFirewall,
+		Value:       OVNKubernetesFeature.EnableEgressFirewall,
+	},
+	&cli.BoolFlag{
+		Name:        "enable-egress-qos",
+		Usage:       "Configure to use EgressQoS CRD feature with ovn-kubernetes.",
+		Destination: &cliConfig.OVNKubernetesFeature.EnableEgressQoS,
+		Value:       OVNKubernetesFeature.EnableEgressQoS,
+	},
+	&cli.IntFlag{
+		Name:        "egressip-node-healthcheck-port",
+		Usage:       "Configure EgressIP node reachability using gRPC on this TCP port.",
+		Destination: &cliConfig.OVNKubernetesFeature.EgressIPNodeHealthCheckPort,
+	},
+	&cli.BoolFlag{
+		Name:        "enable-multi-network",
+		Usage:       "Configure to use multiple NetworkAttachmentDefinition CRD feature with ovn-kubernetes.",
+		Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetwork,
+		Value:       OVNKubernetesFeature.EnableMultiNetwork,
+	},
+	&cli.BoolFlag{
+		Name:        "enable-multi-networkpolicy",
+		Usage:       "Configure to use MultiNetworkPolicy CRD feature with ovn-kubernetes.",
+		Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetworkPolicy,
+		Value:       OVNKubernetesFeature.EnableMultiNetworkPolicy,
+	},
+	&cli.BoolFlag{
Name: "enable-network-segmentation", + Usage: "Configure to use network segmentation feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkSegmentation, + Value: OVNKubernetesFeature.EnableNetworkSegmentation, + }, + &cli.BoolFlag{ + Name: "enable-stateless-netpol", + Usage: "Configure to use stateless network policy feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableStatelessNetPol, + Value: OVNKubernetesFeature.EnableStatelessNetPol, + }, + &cli.BoolFlag{ + Name: "enable-interconnect", + Usage: "Configure to enable interconnecting multiple zones.", + Destination: &cliConfig.OVNKubernetesFeature.EnableInterconnect, + Value: OVNKubernetesFeature.EnableInterconnect, + }, + &cli.BoolFlag{ + Name: "enable-egress-service", + Usage: "Configure to use EgressService CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableEgressService, + Value: OVNKubernetesFeature.EnableEgressService, + }, + &cli.BoolFlag{ + Name: "enable-multi-external-gateway", + Usage: "Configure to use AdminPolicyBasedExternalRoute CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableMultiExternalGateway, + Value: OVNKubernetesFeature.EnableMultiExternalGateway, + }, + &cli.BoolFlag{ + Name: "enable-persistent-ips", + Usage: "Configure to use the persistent ips feature for virtualization with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnablePersistentIPs, + Value: OVNKubernetesFeature.EnablePersistentIPs, + }, + &cli.BoolFlag{ + Name: "enable-dns-name-resolver", + Usage: "Configure to use DNSNameResolver CRD feature with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableDNSNameResolver, + Value: OVNKubernetesFeature.EnableDNSNameResolver, + }, + &cli.BoolFlag{ + Name: "enable-svc-template-support", + Usage: "Configure to use svc-template with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableServiceTemplateSupport, + Value: OVNKubernetesFeature.EnableServiceTemplateSupport, + }, + &cli.BoolFlag{ + Name: "enable-observability", + Usage: "Configure to use OVN sampling with ovn-kubernetes.", + Destination: &cliConfig.OVNKubernetesFeature.EnableObservability, + Value: OVNKubernetesFeature.EnableObservability, + }, +} + +// K8sFlags capture Kubernetes-related options +var K8sFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "service-cluster-ip-range", + Usage: "Deprecated alias for k8s-service-cidrs.", + Destination: &serviceClusterIPRange, + }, + &cli.StringFlag{ + Name: "k8s-service-cidr", + Usage: "Deprecated alias for k8s-service-cidrs.", + Destination: &cliConfig.Kubernetes.CompatServiceCIDR, + }, + &cli.StringFlag{ + Name: "k8s-service-cidrs", + Usage: "A comma-separated set of CIDR notation IP ranges from which k8s assigns " + + "service cluster IPs. This should be the same as the value " + + "provided for kube-apiserver \"--service-cluster-ip-range\" " + + "option. 
(default: 172.16.1.0/24)",
+		Destination: &cliConfig.Kubernetes.RawServiceCIDRs,
+		Value:       Kubernetes.RawServiceCIDRs,
+	},
+	&cli.StringFlag{
+		Name:        "k8s-kubeconfig",
+		Usage:       "absolute path to the Kubernetes kubeconfig file (not required if the --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)",
+		Destination: &cliConfig.Kubernetes.Kubeconfig,
+	},
+	&cli.StringFlag{
+		Name:        "bootstrap-kubeconfig",
+		Usage:       "absolute path to the Kubernetes kubeconfig file that is used to create the initial, per node, client certificates (should only be used together with 'cert-dir')",
+		Destination: &cliConfig.Kubernetes.BootstrapKubeconfig,
+	},
+	&cli.StringFlag{
+		Name:        "k8s-apiserver",
+		Usage:       "URL of the Kubernetes API server (not required if --k8s-kubeconfig is given) (default: http://localhost:8443)",
+		Destination: &cliConfig.Kubernetes.APIServer,
+		Value:       Kubernetes.APIServer,
+	},
+	&cli.StringFlag{
+		Name:        "cert-dir",
+		Usage:       "absolute path to the directory of the client key and certificate (not required if --k8s-kubeconfig or --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)",
+		Destination: &cliConfig.Kubernetes.CertDir,
+	},
+	&cli.DurationFlag{
+		Name:        "cert-duration",
+		Usage:       "requested certificate duration, default: 10min",
+		Destination: &cliConfig.Kubernetes.CertDuration,
+		Value:       Kubernetes.CertDuration,
+	},
+	&cli.StringFlag{
+		Name:        "k8s-cacert",
+		Usage:       "the absolute path to the Kubernetes API CA certificate (not required if --k8s-kubeconfig is given)",
+		Destination: &cliConfig.Kubernetes.CACert,
+	},
+	&cli.StringFlag{
+		Name:        "k8s-token",
+		Usage:       "the Kubernetes API authentication token (not required if --k8s-kubeconfig is given)",
+		Destination: &cliConfig.Kubernetes.Token,
+	},
+	&cli.StringFlag{
+		Name:        "k8s-token-file",
+		Usage:       "the path to the Kubernetes API token. If set, it is periodically read and takes precedence over k8s-token",
+		Destination: &cliConfig.Kubernetes.TokenFile,
+	},
+	&cli.StringFlag{
+		Name:        "ovn-config-namespace",
+		Usage:       "specify a namespace which will contain services to configure the OVN databases",
+		Destination: &cliConfig.Kubernetes.OVNConfigNamespace,
+		Value:       Kubernetes.OVNConfigNamespace,
+	},
+	&cli.BoolFlag{
+		Name: "ovn-empty-lb-events",
+		Usage: "If set, then load balancers do not get deleted when all backends are removed. " +
+			"Instead, ovn-kubernetes monitors the OVN southbound database for empty lb backends " +
+			"controller events. If one arrives, then a NeedPods event is sent so that Kubernetes " +
+			"will spin up pods for the load balancer to send traffic to.",
+		Destination: &cliConfig.Kubernetes.OVNEmptyLbEvents,
+	},
+	&cli.StringFlag{
+		Name:  "pod-ip",
+		Usage: "UNUSED",
+	},
+	&cli.StringFlag{
+		Name:        "no-hostsubnet-nodes",
+		Usage:       "Specify a label for nodes that will manage their own hostsubnets",
+		Destination: &cliConfig.Kubernetes.RawNoHostSubnetNodes,
+	},
+	&cli.StringFlag{
+		Name:        "host-network-namespace",
+		Usage:       "specify a namespace which will be used to classify host network traffic for network policy",
+		Destination: &cliConfig.Kubernetes.HostNetworkNamespace,
+		Value:       Kubernetes.HostNetworkNamespace,
+	},
+	&cli.BoolFlag{
+		Name:        "disable-requestedchassis",
+		Usage:       "If set to true, the requested-chassis option will not be set during pod creation",
+		Destination: &cliConfig.Kubernetes.DisableRequestedChassis,
+		Value:       Kubernetes.DisableRequestedChassis,
+	},
+	&cli.StringFlag{
+		Name: "platform-type",
+		Usage: "The cloud provider platform type ovn-kubernetes is deployed on. " +
" + + "Valid values can be found in: https://github.com/ovn-org/ovn-kubernetes/blob/master/go-controller/vendor/github.com/openshift/api/config/v1/types_infrastructure.go#L130-L172", + Destination: &cliConfig.Kubernetes.PlatformType, + Value: Kubernetes.PlatformType, + }, + &cli.StringFlag{ + Name: "healthz-bind-address", + Usage: "The IP address and port for the node proxy healthz server to serve on (set to '0.0.0.0:10256' or '[::]:10256' for listening in all interfaces and IP families). Disabled by default.", + Destination: &cliConfig.Kubernetes.HealthzBindAddress, + }, + &cli.StringFlag{ + Name: "dns-service-namespace", + Usage: "DNS kubernetes service namespace used to expose name resolving to live migratable vms.", + Destination: &cliConfig.Kubernetes.DNSServiceNamespace, + Value: Kubernetes.DNSServiceNamespace, + }, + &cli.StringFlag{ + Name: "dns-service-name", + Usage: "DNS kubernetes service name used to expose name resolving to live migratable vms.", + Destination: &cliConfig.Kubernetes.DNSServiceName, + Value: Kubernetes.DNSServiceName, + }, +} + +// MetricsFlags capture metrics-related options +var MetricsFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "metrics-bind-address", + Usage: "The IP address and port for the OVN K8s metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces)", + Destination: &cliConfig.Metrics.BindAddress, + }, + &cli.StringFlag{ + Name: "ovn-metrics-bind-address", + Usage: "The IP address and port for the OVN metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces)", + Destination: &cliConfig.Metrics.OVNMetricsBindAddress, + }, + &cli.BoolFlag{ + Name: "export-ovs-metrics", + Usage: "When true exports OVS metrics from the OVN metrics server", + Destination: &cliConfig.Metrics.ExportOVSMetrics, + }, + &cli.BoolFlag{ + Name: "metrics-enable-pprof", + Usage: "If true, then also accept pprof requests on the metrics port.", + Destination: &cliConfig.Metrics.EnablePprof, + Value: Metrics.EnablePprof, + }, + &cli.StringFlag{ + Name: "node-server-privkey", + Usage: "Private key that the OVN node K8s metrics server uses to serve metrics over TLS.", + Destination: &cliConfig.Metrics.NodeServerPrivKey, + }, + &cli.StringFlag{ + Name: "node-server-cert", + Usage: "Certificate that the OVN node K8s metrics server uses to serve metrics over TLS.", + Destination: &cliConfig.Metrics.NodeServerCert, + }, + &cli.BoolFlag{ + Name: "metrics-enable-config-duration", + Usage: "Enables monitoring OVN-Kubernetes master and OVN configuration duration", + Destination: &cliConfig.Metrics.EnableConfigDuration, + }, + &cli.BoolFlag{ + Name: "metrics-enable-scale", + Usage: "Enables metrics related to scaling", + Destination: &cliConfig.Metrics.EnableScaleMetrics, + }, +} + +// OvnNBFlags capture OVN northbound database options +var OvnNBFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "nb-address", + Usage: "IP address and port of the OVN northbound API " + + "(eg, ssl:1.2.3.4:6641,ssl:1.2.3.5:6642). Leave empty to " + + "use a local unix socket.", + Destination: &cliConfig.OvnNorth.Address, + }, + &cli.StringFlag{ + Name: "nb-client-privkey", + Usage: "Private key that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-privkey.pem). 
" + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnNorth.PrivKey, + }, + &cli.StringFlag{ + Name: "nb-client-cert", + Usage: "Client certificate that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-cert.pem). " + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnNorth.Cert, + }, + &cli.StringFlag{ + Name: "nb-client-cacert", + Usage: "CA certificate that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnnb-ca.cert)." + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnNorth.CACert, + }, + &cli.StringFlag{ + Name: "nb-cert-common-name", + Usage: "Common Name of the certificate used for TLS server certificate verification. " + + "In cases where the certificate doesn't have any SAN Extensions, this parameter " + + "should match the DNS(hostname) of the server. In case the certificate has a " + + "SAN extension, this parameter should match one of the SAN fields.", + Destination: &cliConfig.OvnNorth.CertCommonName, + }, + &cli.UintFlag{ + Name: "nb-raft-election-timer", + Usage: "The desired northbound database election timer.", + Destination: &cliConfig.OvnNorth.ElectionTimer, + }, +} + +// OvnSBFlags capture OVN southbound database options +var OvnSBFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "sb-address", + Usage: "IP address and port of the OVN southbound API " + + "(eg, ssl:1.2.3.4:6642,ssl:1.2.3.5:6642). " + + "Leave empty to use a local unix socket.", + Destination: &cliConfig.OvnSouth.Address, + }, + &cli.StringFlag{ + Name: "sb-client-privkey", + Usage: "Private key that the client should use for talking to the OVN database (default when ssl address is used: /etc/openvswitch/ovnsb-privkey.pem)." + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnSouth.PrivKey, + }, + &cli.StringFlag{ + Name: "sb-client-cert", + Usage: "Client certificate that the client should use for talking to the OVN database(default when ssl address is used: /etc/openvswitch/ovnsb-cert.pem). " + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnSouth.Cert, + }, + &cli.StringFlag{ + Name: "sb-client-cacert", + Usage: "CA certificate that the client should use for talking to the OVN database (default when ssl address is used /etc/openvswitch/ovnsb-ca.cert). " + + "Default value for this setting is empty which defaults to use local unix socket.", + Destination: &cliConfig.OvnSouth.CACert, + }, + &cli.StringFlag{ + Name: "sb-cert-common-name", + Usage: "Common Name of the certificate used for TLS server certificate verification. " + + "In cases where the certificate doesn't have any SAN Extensions, this parameter " + + "should match the DNS(hostname) of the server. In case the certificate has a " + + "SAN extension, this parameter should match one of the SAN fields.", + Destination: &cliConfig.OvnSouth.CertCommonName, + }, + &cli.UintFlag{ + Name: "sb-raft-election-timer", + Usage: "The desired southbound database election timer.", + Destination: &cliConfig.OvnSouth.ElectionTimer, + }, +} + +// OVNGatewayFlags capture L3 Gateway related flags +var OVNGatewayFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "gateway-mode", + Usage: "Sets the cluster gateway mode. 
One of \"shared\", " + + "or \"local\". If not given, gateway functionality is disabled.", + }, + &cli.StringFlag{ + Name: "gateway-interface", + Usage: "The interface on nodes that will be the gateway interface. " + + "If none specified, then the node's interface on which the " + + "default gateway is configured will be used as the gateway " + + "interface. Only useful with \"init-gateways\"", + Destination: &cliConfig.Gateway.Interface, + }, + &cli.StringFlag{ + Name: "exgw-interface", + Usage: "The interface on nodes that will be used for external gw network traffic. " + + "If none specified, ovnk will use the default interface", + Destination: &cliConfig.Gateway.EgressGWInterface, + }, + &cli.StringFlag{ + Name: "gateway-nexthop", + Usage: "The external default gateway which is used as a next hop by " + + "OVN gateway. This is many times just the default gateway " + + "of the node in question. If not specified, the default gateway" + + "configured in the node is used. Only useful with " + + "\"init-gateways\"", + Destination: &cliConfig.Gateway.NextHop, + }, + &cli.UintFlag{ + Name: "gateway-vlanid", + Usage: "The VLAN on which the external network is available. " + + "Valid only for Shared Gateway interface mode.", + Destination: &cliConfig.Gateway.VLANID, + }, + &cli.BoolFlag{ + Name: "nodeport", + Usage: "Setup nodeport based ingress on gateways.", + Destination: &cliConfig.Gateway.NodeportEnable, + }, + &cli.BoolFlag{ + Name: "disable-snat-multiple-gws", + Usage: "Disable SNAT for egress traffic with multiple gateways.", + Destination: &cliConfig.Gateway.DisableSNATMultipleGWs, + }, + &cli.BoolFlag{ + Name: "disable-forwarding", + Usage: "Disable forwarding on OVNK controlled interfaces.", + Destination: &cliConfig.Gateway.DisableForwarding, + }, + &cli.StringFlag{ + Name: "gateway-v4-join-subnet", + Usage: "The v4 join subnet used for assigning join switch IPv4 addresses", + Destination: &cliConfig.Gateway.V4JoinSubnet, + Value: Gateway.V4JoinSubnet, + }, + &cli.StringFlag{ + Name: "gateway-v6-join-subnet", + Usage: "The v6 join subnet used for assigning join switch IPv6 addresses", + Destination: &cliConfig.Gateway.V6JoinSubnet, + Value: Gateway.V6JoinSubnet, + }, + &cli.StringFlag{ + Name: "gateway-v4-masquerade-subnet", + Usage: "The v4 masquerade subnet used for assigning masquerade IPv4 addresses", + Destination: &cliConfig.Gateway.V4MasqueradeSubnet, + Value: Gateway.V4MasqueradeSubnet, + }, + &cli.StringFlag{ + Name: "gateway-v6-masquerade-subnet", + Usage: "The v6 masquerade subnet used for assigning masquerade IPv6 addresses", + Destination: &cliConfig.Gateway.V6MasqueradeSubnet, + Value: Gateway.V6MasqueradeSubnet, + }, + &cli.BoolFlag{ + Name: "disable-pkt-mtu-check", + Usage: "Disable OpenFlow checks for if packet size is greater than pod MTU", + Destination: &cliConfig.Gateway.DisablePacketMTUCheck, + }, + &cli.StringFlag{ + Name: "gateway-router-subnet", + Usage: "The Subnet to be used for the gateway router external port (shared mode only). " + + "auto-detected if not given. Must match the the kube node IP address. " + + "Currently valid for DPUs only", + Destination: &cliConfig.Gateway.RouterSubnet, + Value: Gateway.RouterSubnet, + }, + &cli.BoolFlag{ + Name: "single-node", + Usage: "Enable single node optimizations. 
" + + "Single node indicates a one node cluster and allows to simplify ovn-kubernetes gateway logic", + Destination: &cliConfig.Gateway.SingleNode, + }, + &cli.BoolFlag{ + Name: "allow-no-uplink", + Usage: "Allow the external gateway bridge without an uplink port in local gateway mode", + Destination: &cliConfig.Gateway.AllowNoUplink, + }, + // Deprecated CLI options + &cli.BoolFlag{ + Name: "init-gateways", + Usage: "DEPRECATED; use --gateway-mode instead", + Destination: &initGateways, + }, + &cli.BoolFlag{ + Name: "gateway-local", + Usage: "DEPRECATED; use --gateway-mode instead", + Destination: &gatewayLocal, + }, +} + +// MasterHAFlags capture leader election flags for master +var MasterHAFlags = []cli.Flag{ + &cli.IntFlag{ + Name: "ha-election-lease-duration", + Usage: "Leader election lease duration (in secs) (default: 60)", + Destination: &cliConfig.MasterHA.ElectionLeaseDuration, + Value: MasterHA.ElectionLeaseDuration, + }, + &cli.IntFlag{ + Name: "ha-election-renew-deadline", + Usage: "Leader election renew deadline (in secs) (default: 30)", + Destination: &cliConfig.MasterHA.ElectionRenewDeadline, + Value: MasterHA.ElectionRenewDeadline, + }, + &cli.IntFlag{ + Name: "ha-election-retry-period", + Usage: "Leader election retry period (in secs) (default: 20)", + Destination: &cliConfig.MasterHA.ElectionRetryPeriod, + Value: MasterHA.ElectionRetryPeriod, + }, +} + +// ClusterMgrHAFlags capture leader election flags for cluster manager +var ClusterMgrHAFlags = []cli.Flag{ + &cli.IntFlag{ + Name: "cluster-manager-ha-election-lease-duration", + Usage: "Leader election lease duration (in secs) (default: 60)", + Destination: &cliConfig.ClusterMgrHA.ElectionLeaseDuration, + Value: ClusterMgrHA.ElectionLeaseDuration, + }, + &cli.IntFlag{ + Name: "cluster-manager-ha-election-renew-deadline", + Usage: "Leader election renew deadline (in secs) (default: 30)", + Destination: &cliConfig.ClusterMgrHA.ElectionRenewDeadline, + Value: ClusterMgrHA.ElectionRenewDeadline, + }, + &cli.IntFlag{ + Name: "cluster-manager-ha-election-retry-period", + Usage: "Leader election retry period (in secs) (default: 20)", + Destination: &cliConfig.ClusterMgrHA.ElectionRetryPeriod, + Value: ClusterMgrHA.ElectionRetryPeriod, + }, +} + +// HybridOverlayFlags capture hybrid overlay feature options +var HybridOverlayFlags = []cli.Flag{ + &cli.BoolFlag{ + Name: "enable-hybrid-overlay", + Usage: "Enables hybrid overlay functionality", + Destination: &cliConfig.HybridOverlay.Enabled, + }, + &cli.StringFlag{ + Name: "hybrid-overlay-cluster-subnets", + Value: HybridOverlay.RawClusterSubnets, + Usage: "A comma separated set of IP subnets and the associated" + + "hostsubnetlengths (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " + + "to use with the extended hybrid network. Each entry is given " + + "in the form IP address/subnet mask/hostsubnetlength, " + + "the hostsubnetlength is optional and if unspecified defaults to 24. 
The " + + "hostsubnetlength defines how many IP addresses are dedicated to each node.", + Destination: &cliConfig.HybridOverlay.RawClusterSubnets, + }, + &cli.UintFlag{ + Name: "hybrid-overlay-vxlan-port", + Value: HybridOverlay.VXLANPort, + Usage: "The UDP port used by the VXLAN protocol for hybrid networks.", + Destination: &cliConfig.HybridOverlay.VXLANPort, + }, +} + +// OvnKubeNodeFlags captures ovnkube-node specific configurations +var OvnKubeNodeFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "ovnkube-node-mode", + Usage: "ovnkube-node operating mode full(default), dpu, dpu-host", + Value: OvnKubeNode.Mode, + Destination: &cliConfig.OvnKubeNode.Mode, + }, + &cli.StringFlag{ + Name: "ovnkube-node-mgmt-port-netdev", + Usage: "When provided, use this netdev as management port. It will be renamed to ovn-k8s-mp0 " + + "and used to allow host network services and pods to access k8s pod and service networks. ", + Value: OvnKubeNode.MgmtPortNetdev, + Destination: &cliConfig.OvnKubeNode.MgmtPortNetdev, + }, + &cli.StringFlag{ + Name: "ovnkube-node-mgmt-port-dp-resource-name", + Usage: "When provided, use this device plugin resource name to find the allocated resource as management port. " + + "The interface chosen from this resource will be renamed to ovn-k8s-mp0 " + + "and used to allow host network services and pods to access k8s pod and service networks. ", + Value: OvnKubeNode.MgmtPortDPResourceName, + Destination: &cliConfig.OvnKubeNode.MgmtPortDPResourceName, + }, + &cli.BoolFlag{ + Name: "disable-ovn-iface-id-ver", + Usage: "Deprecated; iface-id-ver is always enabled", + Destination: &disableOVNIfaceIDVer, + }, +} + +// ClusterManagerFlags captures ovnkube-cluster-manager specific configurations +var ClusterManagerFlags = []cli.Flag{ + &cli.StringFlag{ + Name: "cluster-manager-v4-transit-switch-subnet", + Usage: "The v4 transit switch subnet used for assigning transit switch IPv4 addresses for interconnect", + Destination: &cliConfig.ClusterManager.V4TransitSwitchSubnet, + Value: ClusterManager.V4TransitSwitchSubnet, + }, + &cli.StringFlag{ + Name: "cluster-manager-v6-transit-switch-subnet", + Usage: "The v6 transit switch subnet used for assigning transit switch IPv6 addresses for interconnect", + Destination: &cliConfig.ClusterManager.V6TransitSwitchSubnet, + Value: ClusterManager.V6TransitSwitchSubnet, + }, +} + +// Flags are general command-line flags. Apps should add these flags to their +// own urfave/cli flags and call InitConfig() early in the application. +var Flags []cli.Flag + +// GetFlags returns an array of all command-line flags necessary to configure +// ovn-kubernetes +func GetFlags(customFlags []cli.Flag) []cli.Flag { + flags := CommonFlags + flags = append(flags, CNIFlags...) + flags = append(flags, OVNK8sFeatureFlags...) + flags = append(flags, K8sFlags...) + flags = append(flags, MetricsFlags...) + flags = append(flags, OvnNBFlags...) + flags = append(flags, OvnSBFlags...) + flags = append(flags, OVNGatewayFlags...) + flags = append(flags, MasterHAFlags...) + flags = append(flags, ClusterMgrHAFlags...) + flags = append(flags, HybridOverlayFlags...) + flags = append(flags, MonitoringFlags...) + flags = append(flags, IPFIXFlags...) + flags = append(flags, OvnKubeNodeFlags...) + flags = append(flags, ClusterManagerFlags...) + flags = append(flags, customFlags...) 
+	return flags
+}
+
+// Defaults are a set of flags to indicate which options should be read from
+// ovs-vsctl and used as default values if an option is not found via the
+// config file or command line
+type Defaults struct {
+	OvnNorthAddress bool
+	K8sAPIServer    bool
+	K8sToken        bool
+	K8sTokenFile    bool
+	K8sCert         bool
+}
+
+const (
+	ovsVsctlCommand = "ovs-vsctl"
+)
+
+// Can't use pkg/ovs or pkg/util here because those packages import this one
+func rawExec(exec kexec.Interface, cmd string, args ...string) (string, error) {
+	cmdPath, err := exec.LookPath(cmd)
+	if err != nil {
+		return "", err
+	}
+
+	klog.V(5).Infof("Exec: %s %s", cmdPath, strings.Join(args, " "))
+	out, err := exec.Command(cmdPath, args...).CombinedOutput()
+	if err != nil {
+		klog.V(5).Infof("Exec: %s %s => %v", cmdPath, strings.Join(args, " "), err)
+		return "", err
+	}
+	return strings.TrimSpace(string(out)), nil
+}
+
+// Can't use pkg/ovs or pkg/util here because those packages import this one
+func runOVSVsctl(exec kexec.Interface, args ...string) (string, error) {
+	newArgs := append([]string{"--timeout=15"}, args...)
+	out, err := rawExec(exec, ovsVsctlCommand, newArgs...)
+	if err != nil {
+		return "", err
+	}
+	return strings.Trim(strings.TrimSpace(out), "\""), nil
+}
+
+func getOVSExternalID(exec kexec.Interface, name string) string {
+	out, err := runOVSVsctl(exec,
+		"--if-exists",
+		"get",
+		"Open_vSwitch",
+		".",
+		"external_ids:"+name)
+	if err != nil {
+		klog.V(5).Infof("Failed to get OVS external_id %s: %v\n\t%s", name, err, out)
+		return ""
+	}
+	return out
+}
+
+func setOVSExternalID(exec kexec.Interface, key, value string) error {
+	out, err := runOVSVsctl(exec,
+		"set",
+		"Open_vSwitch",
+		".",
+		fmt.Sprintf("external_ids:%s=%s", key, value))
+	if err != nil {
+		return fmt.Errorf("error setting OVS external ID '%s=%s': %v\n %q", key, value, err, out)
+	}
+	return nil
+}
+
+func buildKubernetesConfig(exec kexec.Interface, cli, file *config, saPath string, defaults *Defaults) error {
+	// token and ca.crt may come from files mounted in the container.
+	saConfig := savedKubernetes
+	if data, err := os.ReadFile(filepath.Join(saPath, kubeServiceAccountFileToken)); err == nil {
+		saConfig.Token = string(data)
+		saConfig.TokenFile = filepath.Join(saPath, kubeServiceAccountFileToken)
+	}
+	if _, err2 := os.Stat(filepath.Join(saPath, kubeServiceAccountFileCACert)); err2 == nil {
+		saConfig.CACert = filepath.Join(saPath, kubeServiceAccountFileCACert)
+	}
+
+	if err := overrideFields(&Kubernetes, &saConfig, &savedKubernetes); err != nil {
+		return err
+	}
+
+	// values for token, cacert, kubeconfig, api-server may be found in several places.
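Aside: the layering that this comment describes can be seen in miniature below. The overlay helper is a toy stand-in for this package's unexported overrideFields, not its actual implementation:

    package main

    import "fmt"

    // kubeCfg is a one-field toy config used to illustrate "later layers win if set".
    type kubeCfg struct{ APIServer string }

    // overlay copies non-empty fields of src over dst (a simplified stand-in
    // for the overrideFields helper used throughout this file).
    func overlay(dst, src *kubeCfg) {
        if src.APIServer != "" {
            dst.APIServer = src.APIServer
        }
    }

    func main() {
        result := kubeCfg{APIServer: "http://localhost:8443"} // built-in default
        fileCfg := kubeCfg{APIServer: "https://file.example:6443"}
        cliCfg := kubeCfg{} // not set on the command line
        overlay(&result, &fileCfg) // config file beats the default
        overlay(&result, &cliCfg)  // CLI would beat the file, but is unset
        fmt.Println(result.APIServer) // https://file.example:6443
    }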
+ // Priority order (highest first): OVS config, command line options, config file, + // environment variables, service account files + + envConfig := savedKubernetes + envVarsMap := map[string]string{ + "Kubeconfig": "KUBECONFIG", + "BootstrapKubeconfig": "BOOTSTRAP_KUBECONFIG", + "CertDir": "CERT_DIR", + "CACert": "K8S_CACERT", + "APIServer": "K8S_APISERVER", + "Token": "K8S_TOKEN", + "TokenFile": "K8S_TOKEN_FILE", + "HostNetworkNamespace": "OVN_HOST_NETWORK_NAMESPACE", + } + for k, v := range envVarsMap { + if x, exists := os.LookupEnv(v); exists && len(x) > 0 { + reflect.ValueOf(&envConfig).Elem().FieldByName(k).SetString(x) + } + } + + if err := overrideFields(&Kubernetes, &envConfig, &savedKubernetes); err != nil { + return err + } + + // Copy config file values over default values + if err := overrideFields(&Kubernetes, &file.Kubernetes, &savedKubernetes); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&Kubernetes, &cli.Kubernetes, &savedKubernetes); err != nil { + return err + } + + // Grab default values from OVS external IDs + if defaults.K8sAPIServer { + Kubernetes.APIServer = getOVSExternalID(exec, "k8s-api-server") + } + if defaults.K8sToken { + Kubernetes.Token = getOVSExternalID(exec, "k8s-api-token") + } + if defaults.K8sTokenFile { + Kubernetes.TokenFile = getOVSExternalID(exec, "k8s-api-token-file") + } + + if defaults.K8sCert { + Kubernetes.CACert = getOVSExternalID(exec, "k8s-ca-certificate") + } + + if Kubernetes.Kubeconfig != "" && !pathExists(Kubernetes.Kubeconfig) { + return fmt.Errorf("kubernetes kubeconfig file %q not found", Kubernetes.Kubeconfig) + } + + if Kubernetes.CACert != "" { + bytes, err := os.ReadFile(Kubernetes.CACert) + if err != nil { + return err + } + Kubernetes.CAData = bytes + } + + url, err := url.Parse(Kubernetes.APIServer) + if err != nil { + return fmt.Errorf("kubernetes API server address %q invalid: %v", Kubernetes.APIServer, err) + } else if url.Scheme != "https" && url.Scheme != "http" { + return fmt.Errorf("kubernetes API server URL scheme %q invalid", url.Scheme) + } + + // Legacy --service-cluster-ip-range or --k8s-service-cidr options override config file or --k8s-service-cidrs. + if serviceClusterIPRange != "" { + Kubernetes.RawServiceCIDRs = serviceClusterIPRange + } else if Kubernetes.CompatServiceCIDR != "" { + Kubernetes.RawServiceCIDRs = Kubernetes.CompatServiceCIDR + } + if Kubernetes.RawServiceCIDRs == "" { + return fmt.Errorf("kubernetes service-cidrs is required") + } + + return nil +} + +// completeKubernetesConfig completes the Kubernetes config by parsing raw values +// into their final form. 
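Aside: the dual-stack rule enforced by completeKubernetesConfig below (at most two service CIDRs, and a pair must span both IP families) in an isolated, illustrative sketch; the CIDR values are examples only:

    package main

    import (
        "fmt"
        "net"
        "strings"

        utilnet "k8s.io/utils/net"
    )

    // validateServiceCIDRs is a toy stand-in for the check in completeKubernetesConfig:
    // at most two service CIDRs, and a pair must be one IPv4 plus one IPv6 subnet.
    func validateServiceCIDRs(raw string) error {
        var cidrs []*net.IPNet
        for _, s := range strings.Split(raw, ",") {
            _, cidr, err := net.ParseCIDR(s)
            if err != nil {
                return err
            }
            cidrs = append(cidrs, cidr)
        }
        if len(cidrs) > 2 || (len(cidrs) == 2 && utilnet.IsIPv6CIDR(cidrs[0]) == utilnet.IsIPv6CIDR(cidrs[1])) {
            return fmt.Errorf("must be a single CIDR or an IPv4/IPv6 pair")
        }
        return nil
    }

    func main() {
        fmt.Println(validateServiceCIDRs("10.96.0.0/16,fd00:10:96::/112")) // <nil>
        fmt.Println(validateServiceCIDRs("10.96.0.0/16,10.97.0.0/16"))     // error
    }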
+func completeKubernetesConfig(allSubnets *ConfigSubnets) error { + Kubernetes.ServiceCIDRs = []*net.IPNet{} + for _, cidrString := range strings.Split(Kubernetes.RawServiceCIDRs, ",") { + _, serviceCIDR, err := net.ParseCIDR(cidrString) + if err != nil { + return fmt.Errorf("kubernetes service network CIDR %q invalid: %v", cidrString, err) + } + Kubernetes.ServiceCIDRs = append(Kubernetes.ServiceCIDRs, serviceCIDR) + allSubnets.Append(ConfigSubnetService, serviceCIDR) + } + if len(Kubernetes.ServiceCIDRs) > 2 { + return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair") + } else if len(Kubernetes.ServiceCIDRs) == 2 && utilnet.IsIPv6CIDR(Kubernetes.ServiceCIDRs[0]) == utilnet.IsIPv6CIDR(Kubernetes.ServiceCIDRs[1]) { + return fmt.Errorf("kubernetes service-cidrs must contain either a single CIDR or else an IPv4/IPv6 pair") + } + + if Kubernetes.RawNoHostSubnetNodes != "" { + nodeSelector, err := metav1.ParseToLabelSelector(Kubernetes.RawNoHostSubnetNodes) + if err != nil { + return fmt.Errorf("labelSelector \"%s\" is invalid: %v", Kubernetes.RawNoHostSubnetNodes, err) + } + selector, err := metav1.LabelSelectorAsSelector(nodeSelector) + if err != nil { + return fmt.Errorf("failed to convert %v into a labels.Selector: %v", nodeSelector, err) + } + Kubernetes.NoHostSubnetNodes = selector + } + + return nil +} + +func buildMetricsConfig(cli, file *config) error { + // Copy KubernetesConfig backwards-compat values over default values + if Kubernetes.CompatMetricsBindAddress != "" { + Metrics.BindAddress = Kubernetes.CompatMetricsBindAddress + } + if Kubernetes.CompatOVNMetricsBindAddress != "" { + Metrics.OVNMetricsBindAddress = Kubernetes.CompatOVNMetricsBindAddress + } + Metrics.EnablePprof = Kubernetes.CompatMetricsEnablePprof + + // Copy config file values over Kubernetes and default values + if err := overrideFields(&Metrics, &file.Metrics, &savedMetrics); err != nil { + return err + } + + // And CLI overrides over config file, Kubernetes, and default values + if err := overrideFields(&Metrics, &cli.Metrics, &savedMetrics); err != nil { + return err + } + + return nil +} + +func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&Gateway, &file.Gateway, &savedGateway); err != nil { + return err + } + + cli.Gateway.Mode = GatewayMode(ctx.String("gateway-mode")) + if cli.Gateway.Mode == GatewayModeDisabled { + // Handle legacy CLI options + if ctx.Bool("init-gateways") { + cli.Gateway.Mode = GatewayModeShared + if ctx.Bool("gateway-local") { + cli.Gateway.Mode = GatewayModeLocal + } + } + } + // And CLI overrides over config file and default values + if err := overrideFields(&Gateway, &cli.Gateway, &savedGateway); err != nil { + return err + } + + if Gateway.Mode != GatewayModeDisabled { + validModes := []string{string(GatewayModeShared), string(GatewayModeLocal)} + var found bool + for _, mode := range validModes { + if string(Gateway.Mode) == mode { + found = true + break + } + } + if !found { + return fmt.Errorf("invalid gateway mode %q: expect one of %s", string(Gateway.Mode), strings.Join(validModes, ",")) + } + } + + // Options are only valid if Mode is not disabled + if Gateway.Mode == GatewayModeDisabled { + if Gateway.Interface != "" { + return fmt.Errorf("gateway interface option %q not allowed when gateway is disabled", Gateway.Interface) + } + if Gateway.NextHop != "" { + return fmt.Errorf("gateway next-hop option %q not allowed when gateway is 
disabled", Gateway.NextHop) + } + } + + if Gateway.Mode != GatewayModeShared && Gateway.VLANID != 0 { + return fmt.Errorf("gateway VLAN ID option: %d is supported only in shared gateway mode", Gateway.VLANID) + } + + return nil +} + +func completeGatewayConfig(allSubnets *ConfigSubnets, masqueradeIPs *MasqueradeIPsConfig) error { + // Validate v4 and v6 join subnets + v4IP, v4JoinCIDR, err := net.ParseCIDR(Gateway.V4JoinSubnet) + if err != nil || utilnet.IsIPv6(v4IP) { + return fmt.Errorf("invalid gateway v4 join subnet specified, subnet: %s: error: %v", Gateway.V4JoinSubnet, err) + } + + v6IP, v6JoinCIDR, err := net.ParseCIDR(Gateway.V6JoinSubnet) + if err != nil || !utilnet.IsIPv6(v6IP) { + return fmt.Errorf("invalid gateway v6 join subnet specified, subnet: %s: error: %v", Gateway.V6JoinSubnet, err) + } + allSubnets.Append(ConfigSubnetJoin, v4JoinCIDR) + allSubnets.Append(ConfigSubnetJoin, v6JoinCIDR) + + //validate v4 and v6 masquerade subnets + v4MasqueradeIP, v4MasqueradeCIDR, err := net.ParseCIDR(Gateway.V4MasqueradeSubnet) + if err != nil || utilnet.IsIPv6(v4MasqueradeCIDR.IP) { + return fmt.Errorf("invalid gateway v4 masquerade subnet specified, subnet: %s: error: %v", Gateway.V4MasqueradeSubnet, err) + } + if err = AllocateV4MasqueradeIPs(v4MasqueradeIP, masqueradeIPs); err != nil { + return fmt.Errorf("unable to allocate V4MasqueradeIPs: %s", err) + } + + v6MasqueradeIP, v6MasqueradeCIDR, err := net.ParseCIDR(Gateway.V6MasqueradeSubnet) + if err != nil || !utilnet.IsIPv6(v6MasqueradeCIDR.IP) { + return fmt.Errorf("invalid gateway v6 masquerade subnet specified, subnet: %s: error: %v", Gateway.V6MasqueradeSubnet, err) + } + if err = AllocateV6MasqueradeIPs(v6MasqueradeIP, masqueradeIPs); err != nil { + return fmt.Errorf("unable to allocate V6MasqueradeIPs: %s", err) + } + + allSubnets.Append(ConfigSubnetMasquerade, v4MasqueradeCIDR) + allSubnets.Append(ConfigSubnetMasquerade, v6MasqueradeCIDR) + + return nil +} + +func buildOVNKubernetesFeatureConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&OVNKubernetesFeature, &file.OVNKubernetesFeature, &savedOVNKubernetesFeature); err != nil { + return err + } + // And CLI overrides over config file and default values + if err := overrideFields(&OVNKubernetesFeature, &cli.OVNKubernetesFeature, &savedOVNKubernetesFeature); err != nil { + return err + } + return nil +} + +func buildMasterHAConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&MasterHA, &file.MasterHA, &savedMasterHA); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&MasterHA, &cli.MasterHA, &savedMasterHA); err != nil { + return err + } + + if MasterHA.ElectionLeaseDuration <= MasterHA.ElectionRenewDeadline { + return fmt.Errorf("invalid HA election lease duration '%d'. "+ + "It should be greater than HA election renew deadline '%d'", + MasterHA.ElectionLeaseDuration, MasterHA.ElectionRenewDeadline) + } + + if MasterHA.ElectionRenewDeadline <= MasterHA.ElectionRetryPeriod { + return fmt.Errorf("invalid HA election renew deadline duration '%d'. 
"+ + "It should be greater than HA election retry period '%d'", + MasterHA.ElectionRenewDeadline, MasterHA.ElectionRetryPeriod) + } + return nil +} + +func buildClusterMgrHAConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&ClusterMgrHA, &file.ClusterMgrHA, &savedClusterMgrHA); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&ClusterMgrHA, &cli.ClusterMgrHA, &savedClusterMgrHA); err != nil { + return err + } + + if ClusterMgrHA.ElectionLeaseDuration <= ClusterMgrHA.ElectionRenewDeadline { + return fmt.Errorf("invalid HA election lease duration '%d'. "+ + "It should be greater than HA election renew deadline '%d'", + ClusterMgrHA.ElectionLeaseDuration, ClusterMgrHA.ElectionRenewDeadline) + } + + if ClusterMgrHA.ElectionRenewDeadline <= ClusterMgrHA.ElectionRetryPeriod { + return fmt.Errorf("invalid HA election renew deadline duration '%d'. "+ + "It should be greater than HA election retry period '%d'", + ClusterMgrHA.ElectionRenewDeadline, ClusterMgrHA.ElectionRetryPeriod) + } + return nil +} + +func buildMonitoringConfig(ctx *cli.Context, cli, file *config) error { + var err error + if err = overrideFields(&Monitoring, &file.Monitoring, &savedMonitoring); err != nil { + return err + } + if err = overrideFields(&Monitoring, &cli.Monitoring, &savedMonitoring); err != nil { + return err + } + return nil +} + +// completeMonitoringConfig completes the Monitoring config by parsing raw values +// into their final form. +func completeMonitoringConfig() error { + var err error + if Monitoring.RawNetFlowTargets != "" { + Monitoring.NetFlowTargets, err = ParseFlowCollectors(Monitoring.RawNetFlowTargets) + if err != nil { + return fmt.Errorf("netflow targets invalid: %v", err) + } + } + if Monitoring.RawSFlowTargets != "" { + Monitoring.SFlowTargets, err = ParseFlowCollectors(Monitoring.RawSFlowTargets) + if err != nil { + return fmt.Errorf("sflow targets invalid: %v", err) + } + } + if Monitoring.RawIPFIXTargets != "" { + Monitoring.IPFIXTargets, err = ParseFlowCollectors(Monitoring.RawIPFIXTargets) + if err != nil { + return fmt.Errorf("ipfix targets invalid: %v", err) + } + } + return nil +} + +func buildIPFIXConfig(cli, file *config) error { + if err := overrideFields(&IPFIX, &file.IPFIX, &savedIPFIX); err != nil { + return err + } + return overrideFields(&IPFIX, &cli.IPFIX, &savedIPFIX) +} + +func buildHybridOverlayConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&HybridOverlay, &file.HybridOverlay, &savedHybridOverlay); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&HybridOverlay, &cli.HybridOverlay, &savedHybridOverlay); err != nil { + return err + } + + if HybridOverlay.Enabled && HybridOverlay.VXLANPort > 65535 { + return fmt.Errorf("hybrid overlay vxlan port is invalid. The port cannot be larger than 65535") + } + + return nil +} + +// completeHybridOverlayConfig completes the HybridOverlay config by parsing raw values +// into their final form. 
+func completeHybridOverlayConfig(allSubnets *ConfigSubnets) error { + if !HybridOverlay.Enabled || len(HybridOverlay.RawClusterSubnets) == 0 { + return nil + } + + var err error + HybridOverlay.ClusterSubnets, err = ParseClusterSubnetEntries(HybridOverlay.RawClusterSubnets) + if err != nil { + return fmt.Errorf("hybrid overlay cluster subnet invalid: %v", err) + } + for _, subnet := range HybridOverlay.ClusterSubnets { + allSubnets.Append(ConfigSubnetHybrid, subnet.CIDR) + } + + return nil +} + +func buildClusterManagerConfig(ctx *cli.Context, cli, file *config) error { + // Copy config file values over default values + if err := overrideFields(&ClusterManager, &file.ClusterManager, &savedClusterManager); err != nil { + return err + } + + // And CLI overrides over config file and default values + if err := overrideFields(&ClusterManager, &cli.ClusterManager, &savedClusterManager); err != nil { + return err + } + + return nil +} + +// completeClusterManagerConfig completes the ClusterManager config by parsing raw values +// into their final form. +func completeClusterManagerConfig(allSubnets *ConfigSubnets) error { + // Validate v4 and v6 transit switch subnets + v4IP, v4TransitCIDR, err := net.ParseCIDR(ClusterManager.V4TransitSwitchSubnet) + if err != nil || utilnet.IsIPv6(v4IP) { + return fmt.Errorf("invalid transit switch v4 subnet specified, subnet: %s: error: %v", ClusterManager.V4TransitSwitchSubnet, err) + } + + v6IP, v6TransitCIDR, err := net.ParseCIDR(ClusterManager.V6TransitSwitchSubnet) + if err != nil || !utilnet.IsIPv6(v6IP) { + return fmt.Errorf("invalid transit switch v6 subnet specified, subnet: %s: error: %v", ClusterManager.V6TransitSwitchSubnet, err) + } + allSubnets.Append(ConfigSubnetTransit, v4TransitCIDR) + allSubnets.Append(ConfigSubnetTransit, v6TransitCIDR) + return nil +} + +func buildDefaultConfig(cli, file *config) error { + if err := overrideFields(&Default, &file.Default, &savedDefault); err != nil { + return err + } + + if err := overrideFields(&Default, &cli.Default, &savedDefault); err != nil { + return err + } + + // Legacy cluster-subnet CLI option overrides config file or --cluster-subnets + if clusterSubnet != "" { + Default.RawClusterSubnets = clusterSubnet + } + if Default.RawClusterSubnets == "" { + return fmt.Errorf("cluster subnet is required") + } + + if Default.Zone == "" { + Default.Zone = types.OvnDefaultZone + } + return nil +} + +// completeDefaultConfig completes the Default config by parsing raw values +// into their final form. +func completeDefaultConfig(allSubnets *ConfigSubnets) error { + var err error + Default.ClusterSubnets, err = ParseClusterSubnetEntries(Default.RawClusterSubnets) + if err != nil { + return fmt.Errorf("cluster subnet invalid: %v", err) + } + for _, subnet := range Default.ClusterSubnets { + allSubnets.Append(ConfigSubnetCluster, subnet.CIDR) + } + + Default.UDNAllowedDefaultServices, err = parseServicesNamespacedNames(Default.RawUDNAllowedDefaultServices) + if err != nil { + return fmt.Errorf("UDN allowed services field is invalid: %v", err) + } + + Default.HostMasqConntrackZone = Default.ConntrackZone + 1 + Default.OVNMasqConntrackZone = Default.ConntrackZone + 2 + Default.HostNodePortConntrackZone = Default.ConntrackZone + 3 + Default.ReassemblyConntrackZone = Default.ConntrackZone + 4 + return nil +} + +// parseServicesNamespacedNames splits the input string by `,` and returns a slice +// of keys that were verified to be a valid namespaced service name. It ignores spaces between the elements. 
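Aside: a usage sketch for the function that follows, using the same client-go helper it calls; the service keys are illustrative:

    package main

    import (
        "fmt"
        "strings"

        "k8s.io/client-go/tools/cache"
    )

    func main() {
        // "namespace/name" keys, comma separated, surrounding spaces ignored:
        // the input shape parseServicesNamespacedNames accepts.
        raw := "kube-system/kube-dns, openshift-dns/dns-default"
        for _, key := range strings.Split(raw, ",") {
            ns, name, err := cache.SplitMetaNamespaceKey(strings.TrimSpace(key))
            fmt.Println(ns, name, err) // e.g. "kube-system kube-dns <nil>"
        }
    }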
+func parseServicesNamespacedNames(servicesRaw string) ([]string, error) {
+	var services []string
+	for _, udnEnabledSVC := range strings.Split(servicesRaw, ",") {
+		svcKey := strings.TrimSpace(udnEnabledSVC)
+		namespace, name, err := cache.SplitMetaNamespaceKey(svcKey)
+		if namespace == "" {
+			return nil, fmt.Errorf("UDN enabled service %q has no namespace set: %v", svcKey, err)
+		}
+		if errs := validation.ValidateNamespaceName(namespace, false); len(errs) != 0 {
+			return nil, fmt.Errorf("UDN enabled service %q has an invalid namespace: %v", svcKey, errs)
+		}
+		if errs := validation.NameIsDNSSubdomain(name, false); len(errs) != 0 {
+			return nil, fmt.Errorf("UDN enabled service %q has an invalid name: %v", svcKey, errs)
+		}
+		services = append(services, svcKey)
+	}
+	return services, nil
+}
+
+// getConfigFilePath returns the config file path and 'true' if the config file is
+// the fallback path (i.e. not given by the user), 'false' if given explicitly
+// by the user
+func getConfigFilePath(ctx *cli.Context) (string, bool) {
+	configFile := ctx.String("config-file")
+	if configFile != "" {
+		return configFile, false
+	}
+	return "/etc/openvswitch/ovn_k8s.conf", true
+}
+
+// InitConfig reads the config file and common command-line options and
+// constructs the global config object from them. It returns the config file
+// path (if explicitly specified) or an error
+func InitConfig(ctx *cli.Context, exec kexec.Interface, defaults *Defaults) (string, error) {
+	return initConfigWithPath(ctx, exec, kubeServiceAccountPath, defaults)
+}
+
+// InitConfigSa reads the config file and common command-line options and
+// constructs the global config object from them. It passes the service account directory.
+// It returns the config file path (if explicitly specified) or an error
+func InitConfigSa(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) {
+	return initConfigWithPath(ctx, exec, saPath, defaults)
+}
+
+// stripTokenFromK8sConfig removes the k8s SA token & CAData values
+// from the KubernetesConfig struct used for logging.
+func stripTokenFromK8sConfig() KubernetesConfig {
+	k8sConf := Kubernetes
+	// Token and CAData are sensitive fields, so strip
+	// them before logging.
+	k8sConf.Token = ""
+	k8sConf.CAData = []byte{}
+	return k8sConf
+}
+
+// initConfigWithPath reads the given config file (or if empty, reads the config file
+// specified by command-line arguments, or if that is also empty, the default config file) and
+// common command-line options and constructs the global config object from
+// them.
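Aside: a minimal sketch of how a consumer binary wires GetFlags and InitConfig (defined just above) together, assuming the urfave/cli v2 API these flag definitions are built on; the app name is illustrative and this is separate from the patch itself:

    package main

    import (
        "os"

        "github.com/urfave/cli/v2"
        kexec "k8s.io/utils/exec"

        "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
    )

    func main() {
        app := &cli.App{
            Name:  "ovnkube-demo", // illustrative name
            Flags: config.GetFlags(nil),
            Action: func(ctx *cli.Context) error {
                // Layers built-in defaults, the config file, env vars and CLI flags.
                // Returns an error unless required options (e.g. cluster subnets,
                // service CIDRs) are supplied.
                _, err := config.InitConfig(ctx, kexec.New(), nil)
                return err
            },
        }
        if err := app.Run(os.Args); err != nil {
            os.Exit(1)
        }
    }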
It returns the config file path (if explicitly specified) or an error +func initConfigWithPath(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) { + var retConfigFile string + var configFile string + var configFileIsDefault bool + var err error + // initialize cfg with default values, allow file read to override + cfg := config{ + Default: savedDefault, + Logging: savedLogging, + IPFIX: savedIPFIX, + CNI: savedCNI, + OVNKubernetesFeature: savedOVNKubernetesFeature, + Kubernetes: savedKubernetes, + OvnNorth: savedOvnNorth, + OvnSouth: savedOvnSouth, + Gateway: savedGateway, + MasterHA: savedMasterHA, + ClusterMgrHA: savedClusterMgrHA, + HybridOverlay: savedHybridOverlay, + OvnKubeNode: savedOvnKubeNode, + ClusterManager: savedClusterManager, + } + + configFile, configFileIsDefault = getConfigFilePath(ctx) + + if !configFileIsDefault { + // Only return explicitly specified config file + retConfigFile = configFile + } + + f, err := os.Open(configFile) + // Failure to find a default config file is not a hard error + if err != nil && !configFileIsDefault { + return "", fmt.Errorf("failed to open config file %s: %v", configFile, err) + } + if f != nil { + defer f.Close() + + // Parse ovn-k8s config file. + if err = gcfg.ReadInto(&cfg, f); err != nil { + if gcfg.FatalOnly(err) != nil { + return "", fmt.Errorf("failed to parse config file %s: %v", f.Name(), err) + } + // error is only a warning -> log it but continue + klog.Warningf("Warning on parsing config file: %s", err) + } + klog.Infof("Parsed config file %s", f.Name()) + klog.Infof("Parsed config: %+v", cfg) + } + + if defaults == nil { + defaults = &Defaults{} + } + + // Build config that needs no special processing + if err = overrideFields(&CNI, &cfg.CNI, &savedCNI); err != nil { + return "", err + } + if err = overrideFields(&CNI, &cliConfig.CNI, &savedCNI); err != nil { + return "", err + } + + // Logging setup + if err = overrideFields(&Logging, &cfg.Logging, &savedLogging); err != nil { + return "", err + } + if err = overrideFields(&Logging, &cliConfig.Logging, &savedLogging); err != nil { + return "", err + } + + var level klog.Level + if err := level.Set(strconv.Itoa(Logging.Level)); err != nil { + return "", fmt.Errorf("failed to set klog log level %v", err) + } + if Logging.File != "" { + klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) + klog.InitFlags(klogFlags) + if err := klogFlags.Set("logtostderr", "false"); err != nil { + klog.Errorf("Error setting klog logtostderr: %v", err) + } + if err := klogFlags.Set("alsologtostderr", "true"); err != nil { + klog.Errorf("Error setting klog alsologtostderr: %v", err) + } + klog.SetOutput(&lumberjack.Logger{ + Filename: Logging.File, + MaxSize: Logging.LogFileMaxSize, // megabytes + MaxBackups: Logging.LogFileMaxBackups, + MaxAge: Logging.LogFileMaxAge, // days + Compress: true, + }) + } + + if err = buildDefaultConfig(&cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildKubernetesConfig(exec, &cliConfig, &cfg, saPath, defaults); err != nil { + return "", err + } + + // Metrics must be built after Kubernetes to ensure metrics options override + // legacy Kubernetes metrics options + if err = buildMetricsConfig(&cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildOVNKubernetesFeatureConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildGatewayConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildMasterHAConfig(ctx, &cliConfig, &cfg); err != nil { 
+ return "", err + } + + if err = buildClusterMgrHAConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildMonitoringConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildIPFIXConfig(&cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildHybridOverlayConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildOvnKubeNodeConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + if err = buildClusterManagerConfig(ctx, &cliConfig, &cfg); err != nil { + return "", err + } + + tmpAuth, err := buildOvnAuth(exec, true, &cliConfig.OvnNorth, &cfg.OvnNorth, defaults.OvnNorthAddress) + if err != nil { + return "", err + } + OvnNorth = *tmpAuth + + tmpAuth, err = buildOvnAuth(exec, false, &cliConfig.OvnSouth, &cfg.OvnSouth, false) + if err != nil { + return "", err + } + OvnSouth = *tmpAuth + + if err := completeConfig(); err != nil { + return "", err + } + + klog.V(5).Infof("Default config: %+v", Default) + klog.V(5).Infof("Logging config: %+v", Logging) + klog.V(5).Infof("Monitoring config: %+v", Monitoring) + klog.V(5).Infof("IPFIX config: %+v", IPFIX) + klog.V(5).Infof("CNI config: %+v", CNI) + klog.V(5).Infof("Kubernetes config: %+v", stripTokenFromK8sConfig()) + klog.V(5).Infof("Gateway config: %+v", Gateway) + klog.V(5).Infof("OVN North config: %+v", OvnNorth) + klog.V(5).Infof("OVN South config: %+v", OvnSouth) + klog.V(5).Infof("Hybrid Overlay config: %+v", HybridOverlay) + klog.V(5).Infof("Ovnkube Node config: %+v", OvnKubeNode) + klog.V(5).Infof("Ovnkube Cluster Manager config: %+v", ClusterManager) + + return retConfigFile, nil +} + +func completeConfig() error { + allSubnets := NewConfigSubnets() + + if err := completeKubernetesConfig(allSubnets); err != nil { + return err + } + if err := completeDefaultConfig(allSubnets); err != nil { + return err + } + + if err := completeGatewayConfig(allSubnets, &Gateway.MasqueradeIPs); err != nil { + return err + } + if err := completeMonitoringConfig(); err != nil { + return err + } + if err := completeHybridOverlayConfig(allSubnets); err != nil { + return err + } + + if err := completeClusterManagerConfig(allSubnets); err != nil { + return err + } + + if err := allSubnets.CheckForOverlaps(); err != nil { + return err + } + + var err error + IPv4Mode, IPv6Mode, err = allSubnets.checkIPFamilies() + if err != nil { + return err + } + + return nil +} + +func pathExists(path string) bool { + _, err := os.Stat(path) + if err != nil && os.IsNotExist(err) { + return false + } + return true +} + +// parseAddress parses an OVN database address, which can be of form +// "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" (OVS/OVN format) or +// "ssl://1.2.3.4:6641,ssl://1.2.3.5:6641" (legacy ovnkube format) +// or "ssl:[fd01::1]:6641,ssl:[fd01::2]:6641 +// and returns the validated address(es) and the scheme +func parseAddress(urlString string) (string, OvnDBScheme, error) { + var parsedAddress, scheme string + var parsedScheme OvnDBScheme + + urlString = strings.Replace(urlString, "//", "", -1) + for _, ovnAddress := range strings.Split(urlString, ",") { + splits := strings.SplitN(ovnAddress, ":", 2) + if len(splits) != 2 { + return "", "", fmt.Errorf("failed to parse OVN address %s", urlString) + } + + if scheme == "" { + scheme = splits[0] + } else if scheme != splits[0] { + return "", "", fmt.Errorf("invalid protocols in OVN address %s", + urlString) + } + + if scheme == "unix" { + if parsedAddress != "" { + parsedAddress += "," + } + parsedAddress += ovnAddress + } 
else { + host, port, err := net.SplitHostPort(splits[1]) + if err != nil { + return "", "", fmt.Errorf("failed to parse OVN DB host/port %q: %v", + splits[1], err) + } + + if parsedAddress != "" { + parsedAddress += "," + } + parsedAddress += fmt.Sprintf("%s:%s", scheme, net.JoinHostPort(host, port)) + } + } + + switch { + case scheme == "ssl": + parsedScheme = OvnDBSchemeSSL + case scheme == "tcp": + parsedScheme = OvnDBSchemeTCP + case scheme == "unix": + parsedScheme = OvnDBSchemeUnix + default: + return "", "", fmt.Errorf("unknown OVN DB scheme %q", scheme) + } + return parsedAddress, parsedScheme, nil +} + +// buildOvnAuth returns an OvnAuthConfig object describing the connection to an +// OVN database, given a connection description string and authentication +// details +func buildOvnAuth(exec kexec.Interface, northbound bool, cliAuth, confAuth *OvnAuthConfig, readAddress bool) (*OvnAuthConfig, error) { + auth := &OvnAuthConfig{ + northbound: northbound, + exec: exec, + } + + var direction string + var defaultAuth *OvnAuthConfig + if northbound { + direction = "nb" + defaultAuth = &savedOvnNorth + } else { + direction = "sb" + defaultAuth = &savedOvnSouth + } + + // Determine final address so we know how to set cert/key defaults + address := cliAuth.Address + if address == "" { + address = confAuth.Address + } + if address == "" && readAddress { + address = getOVSExternalID(exec, "ovn-"+direction) + } + if strings.HasPrefix(address, "ssl") { + // Set up default SSL cert/key paths + auth.CACert = "/etc/openvswitch/ovn" + direction + "-ca.cert" + auth.PrivKey = "/etc/openvswitch/ovn" + direction + "-privkey.pem" + auth.Cert = "/etc/openvswitch/ovn" + direction + "-cert.pem" + } + + // Build the final auth config with overrides from CLI and config file + if err := overrideFields(auth, confAuth, defaultAuth); err != nil { + return nil, err + } + if err := overrideFields(auth, cliAuth, defaultAuth); err != nil { + return nil, err + } + + if address == "" { + if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" { + return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?") + } + auth.Scheme = OvnDBSchemeUnix + auth.Address = fmt.Sprintf("unix:/var/run/ovn/ovn%s_db.sock", direction) + return auth, nil + } + + var err error + auth.Address, auth.Scheme, err = parseAddress(address) + if err != nil { + return nil, err + } + + switch { + case auth.Scheme == OvnDBSchemeSSL: + if auth.PrivKey == "" || auth.Cert == "" || auth.CACert == "" || auth.CertCommonName == "" { + return nil, fmt.Errorf("must specify private key, certificate, CA certificate, and common name used in the certificate for 'ssl' scheme") + } + case auth.Scheme == OvnDBSchemeTCP: + if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" { + return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?") + } + case auth.Scheme == OvnDBSchemeUnix: + if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" { + return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?") + } + } + + return auth, nil +} + +func (a *OvnAuthConfig) ensureCACert() error { + if pathExists(a.CACert) { + // CA file exists, nothing to do + return nil + } + + // Client can bootstrap the CA from the OVN API. Use nbctl for both + // SB and NB since ovn-sbctl only supports --bootstrap-ca-cert from + // 2.9.90+. 
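Aside on parseAddress above: the OVS form (ssl:IP:port) and the legacy ovnkube form (ssl://IP:port) normalize to the same output, and IPv6 hosts keep their brackets. A simplified restatement:

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    // normalize restates parseAddress's handling of a single list entry;
    // error handling for malformed input is trimmed down for brevity.
    func normalize(entry string) (string, error) {
        entry = strings.Replace(entry, "//", "", -1) // accept the legacy scheme:// form
        splits := strings.SplitN(entry, ":", 2)
        host, port, err := net.SplitHostPort(splits[1])
        if err != nil {
            return "", err
        }
        return fmt.Sprintf("%s:%s", splits[0], net.JoinHostPort(host, port)), nil
    }

    func main() {
        fmt.Println(normalize("ssl://1.2.3.4:6641")) // ssl:1.2.3.4:6641 <nil>
        fmt.Println(normalize("ssl:[fd01::1]:6641")) // ssl:[fd01::1]:6641 <nil>
    }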
+ // FIXME: change back to a.ctlCmd when sbctl supports --bootstrap-ca-cert + // https://github.com/openvswitch/ovs/pull/226 + args := []string{ + "--db=" + a.GetURL(), + "--timeout=5", + } + if a.Scheme == OvnDBSchemeSSL { + args = append(args, "--private-key="+a.PrivKey) + args = append(args, "--certificate="+a.Cert) + args = append(args, "--bootstrap-ca-cert="+a.CACert) + } + args = append(args, "list", "nb_global") + _, _ = rawExec(a.exec, "ovn-nbctl", args...) + if _, err := os.Stat(a.CACert); os.IsNotExist(err) { + klog.Warningf("Bootstrapping %s CA certificate failed", a.CACert) + } + return nil +} + +// GetURL returns a URL suitable for passing to ovn-northd which describes the +// transport mechanism for connection to the database +func (a *OvnAuthConfig) GetURL() string { + return a.Address +} + +// SetDBAuth sets the authentication configuration and connection method +// for the OVN northbound or southbound database server or client +func (a *OvnAuthConfig) SetDBAuth() error { + if a.Scheme == OvnDBSchemeSSL { + // Both server and client SSL schemes require privkey and cert + if !pathExists(a.PrivKey) { + return fmt.Errorf("private key file %s not found", a.PrivKey) + } + if !pathExists(a.Cert) { + return fmt.Errorf("certificate file %s not found", a.Cert) + } + + // Client can bootstrap the CA cert from the DB + if err := a.ensureCACert(); err != nil { + return err + } + + // Tell Southbound DB clients (like ovn-controller) + // which certificates to use to talk to the DB. + // Must happen *before* setting the "ovn-remote" + // external-id. + if !a.northbound { + out, err := runOVSVsctl(a.exec, "del-ssl") + if err != nil { + return fmt.Errorf("error deleting ovs-vsctl SSL "+ + "configuration: %q (%v)", out, err) + } + + out, err = runOVSVsctl(a.exec, "set-ssl", a.PrivKey, a.Cert, a.CACert) + if err != nil { + return fmt.Errorf("error setting client southbound DB SSL options: %v\n %q", err, out) + } + } + } + + if !a.northbound { + // store the Southbound Database address in an external id - "external_ids:ovn-remote" + if err := setOVSExternalID(a.exec, "ovn-remote", "\""+a.GetURL()+"\""); err != nil { + return err + } + } + + return nil +} + +func (a *OvnAuthConfig) updateIP(newIPs []string, port string) { + newAddresses := make([]string, 0, len(newIPs)) + for _, ipAddress := range newIPs { + newAddresses = append(newAddresses, fmt.Sprintf("%v:%s", a.Scheme, net.JoinHostPort(ipAddress, port))) + } + a.Address = strings.Join(newAddresses, ",") +} + +// UpdateOVNNodeAuth updates the host and URL in ClientAuth +// for both OvnNorth and OvnSouth. It updates them with the new masterIP. +func UpdateOVNNodeAuth(masterIP []string, southboundDBPort, northboundDBPort string) { + klog.V(5).Infof("Update OVN node auth with new master ip: %s", masterIP) + OvnNorth.updateIP(masterIP, northboundDBPort) + OvnSouth.updateIP(masterIP, southboundDBPort) +} + +// ovnKubeNodeModeSupported validates the provided mode is supported by ovnkube node +func ovnKubeNodeModeSupported(mode string) error { + found := false + supportedModes := []string{types.NodeModeFull, types.NodeModeDPU, types.NodeModeDPUHost} + for _, m := range supportedModes { + if mode == m { + found = true + break + } + } + if !found { + return fmt.Errorf("unexpected ovnkube-node-mode: %s. 
supported modes: %v", mode, supportedModes)
+	}
+	return nil
+}
+
+// buildOvnKubeNodeConfig updates OvnKubeNode config from cli and config file
+func buildOvnKubeNodeConfig(ctx *cli.Context, cli, file *config) error {
+	// Copy config file values over default values
+	if err := overrideFields(&OvnKubeNode, &file.OvnKubeNode, &savedOvnKubeNode); err != nil {
+		return err
+	}
+
+	// And CLI overrides over config file and default values
+	if err := overrideFields(&OvnKubeNode, &cli.OvnKubeNode, &savedOvnKubeNode); err != nil {
+		return err
+	}
+
+	// validate ovnkube-node-mode
+	if err := ovnKubeNodeModeSupported(OvnKubeNode.Mode); err != nil {
+		return err
+	}
+
+	// ovnkube-node-mode dpu/dpu-host does not support hybrid overlay
+	if OvnKubeNode.Mode != types.NodeModeFull && HybridOverlay.Enabled {
+		return fmt.Errorf("hybrid overlay is not supported with ovnkube-node mode %s", OvnKubeNode.Mode)
+	}
+
+	// Warn the user if both MgmtPortNetdev and MgmtPortDPResourceName are specified, since they
+	// both configure the management port.
+	if OvnKubeNode.MgmtPortNetdev != "" && OvnKubeNode.MgmtPortDPResourceName != "" {
+		klog.Warningf("ovnkube-node-mgmt-port-netdev (%s) and ovnkube-node-mgmt-port-dp-resource-name (%s) "+
+			"both specified. The provided netdev in ovnkube-node-mgmt-port-netdev will be overridden by a netdev "+
+			"chosen by the resource provided by ovnkube-node-mgmt-port-dp-resource-name.",
+			OvnKubeNode.MgmtPortNetdev, OvnKubeNode.MgmtPortDPResourceName)
+	}
+
+	// When DPU is used, the management port is always backed by a representor. On the
+	// host side, it needs to be provided through --ovnkube-node-mgmt-port-netdev.
+	// On the DPU, it is derived from the annotation exposed on the host side.
+	if OvnKubeNode.Mode == types.NodeModeDPU && !(OvnKubeNode.MgmtPortNetdev == "" && OvnKubeNode.MgmtPortDPResourceName == "") {
+		return fmt.Errorf("ovnkube-node-mgmt-port-netdev or ovnkube-node-mgmt-port-dp-resource-name must not be provided")
+	}
+	if OvnKubeNode.Mode == types.NodeModeDPUHost && OvnKubeNode.MgmtPortNetdev == "" && OvnKubeNode.MgmtPortDPResourceName == "" {
+		return fmt.Errorf("ovnkube-node-mgmt-port-netdev or ovnkube-node-mgmt-port-dp-resource-name must be provided")
+	}
+	return nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go
new file mode 100644
index 000000000..b27be4d7c
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config/utils.go
@@ -0,0 +1,329 @@
+package config
+
+import (
+	"fmt"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+
+	iputils "github.com/containernetworking/plugins/pkg/ip"
+	utilnet "k8s.io/utils/net"
+)
+
+// HostPort is the object that holds the definition for a host and port tuple
+type HostPort struct {
+	Host *net.IP
+	Port int32
+}
+
+// String representation of a HostPort entry
+func (hp *HostPort) String() string {
+	switch {
+	case hp.Host == nil:
+		return fmt.Sprintf(":%d", hp.Port)
+	case hp.Host.To4() != nil:
+		return fmt.Sprintf("%s:%d", *hp.Host, hp.Port)
+	default:
+		return fmt.Sprintf("[%s]:%d", *hp.Host, hp.Port)
+	}
+}
+
+// CIDRNetworkEntry is the object that holds the definition for a single network CIDR range
+type CIDRNetworkEntry struct {
+	CIDR             *net.IPNet
+	HostSubnetLength int
+}
+
+func (c CIDRNetworkEntry) String() string {
+	return fmt.Sprintf("%s/%d", c.CIDR.String(), c.HostSubnetLength)
+}
+
+// ParseClusterSubnetEntriesWithDefaults returns the parsed set of
CIDRNetworkEntries. These entries define a network space by specifying a set +// of CIDR and netmasks the SDN can allocate addresses from including how that +// network space is partitioned for each of the cluster nodes. When no host +// specific prefix length is specified, the provided ones are assumed as +// default. The host specific prefix length is validated to be greater than the +// overall subnet length. When 0 is specified as default host specific prefix +// length, no host specific prefix length is allowed or validated. +func ParseClusterSubnetEntriesWithDefaults(clusterSubnetCmd string, ipv4HostLength, ipv6HostLength int) ([]CIDRNetworkEntry, error) { + var parsedClusterList []CIDRNetworkEntry + clusterEntriesList := strings.Split(clusterSubnetCmd, ",") + + ipv4HostLengthAllowed := ipv4HostLength != 0 + ipv6HostLengthAllowed := ipv6HostLength != 0 + + for _, clusterEntry := range clusterEntriesList { + clusterEntry := strings.TrimSpace(clusterEntry) + splitClusterEntry := strings.Split(clusterEntry, "/") + + if len(splitClusterEntry) < 2 || len(splitClusterEntry) > 3 { + return nil, fmt.Errorf("CIDR %q not properly formatted", clusterEntry) + } + + var err error + var parsedClusterEntry CIDRNetworkEntry + _, parsedClusterEntry.CIDR, err = net.ParseCIDR(fmt.Sprintf("%s/%s", splitClusterEntry[0], splitClusterEntry[1])) + if err != nil { + return nil, err + } + + ipv6 := utilnet.IsIPv6(parsedClusterEntry.CIDR.IP) + hostLengthAllowed := (ipv6 && ipv6HostLengthAllowed) || (!ipv6 && ipv4HostLengthAllowed) + + entryMaskLength, _ := parsedClusterEntry.CIDR.Mask.Size() + if len(splitClusterEntry) == 3 { + if !hostLengthAllowed { + return nil, fmt.Errorf("CIDR %q not properly formatted", clusterEntry) + } + tmp, err := strconv.Atoi(splitClusterEntry[2]) + if err != nil { + return nil, err + } + parsedClusterEntry.HostSubnetLength = tmp + } else { + if ipv6 { + parsedClusterEntry.HostSubnetLength = ipv6HostLength + } else { + // default for backward compatibility + parsedClusterEntry.HostSubnetLength = ipv4HostLength + } + } + + if hostLengthAllowed { + if ipv6 && ipv6HostLengthAllowed && parsedClusterEntry.HostSubnetLength != 64 { + return nil, fmt.Errorf("IPv6 only supports /64 host subnets") + } + + if !ipv6 && parsedClusterEntry.HostSubnetLength > 32 { + return nil, fmt.Errorf("invalid host subnet, IPv4 subnet must be < 32") + } + + if parsedClusterEntry.HostSubnetLength <= entryMaskLength { + return nil, fmt.Errorf("cannot use a host subnet length mask shorter than or equal to the cluster subnet mask. "+ + "host subnet length: %d, cluster subnet length: %d", parsedClusterEntry.HostSubnetLength, entryMaskLength) + } + } + + parsedClusterList = append(parsedClusterList, parsedClusterEntry) + } + + if len(parsedClusterList) == 0 { + return nil, fmt.Errorf("failed to parse any CIDRs from %q", clusterSubnetCmd) + } + + return parsedClusterList, nil +} + +// ParseClusterSubnetEntries returns the parsed set of +// CIDRNetworkEntries. If not specified, it assumes a default host specific +// prefix length of 24 or 64 bits for ipv4 and ipv6 respectively. +func ParseClusterSubnetEntries(clusterSubnetCmd string) ([]CIDRNetworkEntry, error) { + // default to 24 bits host specific prefix length for backward compatibility + return ParseClusterSubnetEntriesWithDefaults(clusterSubnetCmd, 24, 64) +} + +// ParseFlowCollectors returns the parsed set of HostPorts passed by the user on the command line +// These entries define the flow collectors OVS will send flow metadata by using NetFlow/SFlow/IPFIX. 
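Aside: a usage sketch for ParseFlowCollectors below; an empty host means the node IP is assumed, duplicate entries are silently dropped, and the targets shown are examples:

    package main

    import (
        "fmt"

        "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
    )

    func main() {
        // the duplicate 10.0.0.1:2055 is ignored; ":2056" carries no collector IP
        hps, err := config.ParseFlowCollectors("10.0.0.1:2055,:2056,10.0.0.1:2055")
        if err != nil {
            panic(err)
        }
        for _, hp := range hps {
            fmt.Println(hp.String()) // "10.0.0.1:2055", then ":2056"
        }
    }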
+func ParseFlowCollectors(flowCollectors string) ([]HostPort, error) { + var parsedFlowsCollectors []HostPort + readCollectors := map[string]struct{}{} + collectors := strings.Split(flowCollectors, ",") + for _, v := range collectors { + host, port, err := net.SplitHostPort(v) + if err != nil { + return nil, fmt.Errorf("cannot parse hostport: %v", err) + } + var ipp *net.IP + // If the host IP is not provided, we keep it nil and later will assume the Node IP + if host != "" { + ip := net.ParseIP(host) + if ip == nil { + return nil, fmt.Errorf("collector IP %s is not a valid IP", host) + } + ipp = &ip + } + parsedPort, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return nil, fmt.Errorf("collector port %s is not a valid port: %v", port, err) + } + // checking if HostPort entry is duplicate + hostPort := HostPort{Host: ipp, Port: int32(parsedPort)} + hps := hostPort.String() + if _, ok := readCollectors[hps]; ok { + // duplicate flow collector. Ignore it + continue + } + readCollectors[hps] = struct{}{} + parsedFlowsCollectors = append(parsedFlowsCollectors, hostPort) + } + + return parsedFlowsCollectors, nil +} + +type ConfigSubnetType string + +const ( + ConfigSubnetJoin ConfigSubnetType = "built-in join subnet" + ConfigSubnetCluster ConfigSubnetType = "cluster subnet" + ConfigSubnetService ConfigSubnetType = "service subnet" + ConfigSubnetHybrid ConfigSubnetType = "hybrid overlay subnet" + ConfigSubnetMasquerade ConfigSubnetType = "masquerade subnet" + ConfigSubnetTransit ConfigSubnetType = "transit switch subnet" + UserDefinedSubnets ConfigSubnetType = "user defined subnet" + UserDefinedJoinSubnet ConfigSubnetType = "user defined join subnet" +) + +type ConfigSubnet struct { + SubnetType ConfigSubnetType + Subnet *net.IPNet +} + +// ConfigSubnets represents a set of configured subnets (and their names) +type ConfigSubnets struct { + Subnets []ConfigSubnet + V4 map[ConfigSubnetType]bool + V6 map[ConfigSubnetType]bool +} + +// NewConfigSubnets returns a new ConfigSubnets +func NewConfigSubnets() *ConfigSubnets { + return &ConfigSubnets{ + V4: make(map[ConfigSubnetType]bool), + V6: make(map[ConfigSubnetType]bool), + } +} + +// append adds a single subnet to cs +func (cs *ConfigSubnets) Append(subnetType ConfigSubnetType, subnet *net.IPNet) { + cs.Subnets = append(cs.Subnets, ConfigSubnet{SubnetType: subnetType, Subnet: subnet}) + if subnetType == ConfigSubnetCluster || subnetType == ConfigSubnetService || subnetType == ConfigSubnetHybrid { + if utilnet.IsIPv6CIDR(subnet) { + cs.V6[subnetType] = true + } else { + cs.V4[subnetType] = true + } + } +} + +// CheckForOverlaps checks if any of the subnets in cs overlap +func (cs *ConfigSubnets) CheckForOverlaps() error { + for i, si := range cs.Subnets { + for j := 0; j < i; j++ { + sj := cs.Subnets[j] + if si.Subnet.Contains(sj.Subnet.IP) || sj.Subnet.Contains(si.Subnet.IP) { + return fmt.Errorf("illegal network configuration: %s %q overlaps %s %q", + si.SubnetType, si.Subnet.String(), + sj.SubnetType, sj.Subnet.String()) + } + } + } + return nil +} + +func (cs *ConfigSubnets) describeSubnetType(subnetType ConfigSubnetType) string { + ipv4 := cs.V4[subnetType] + ipv6 := cs.V6[subnetType] + var familyType string + switch { + case ipv4 && !ipv6: + familyType = "IPv4" + case !ipv4 && ipv6: + familyType = "IPv6" + case ipv4 && ipv6: + familyType = "dual-stack" + default: + familyType = "unknown type" + } + return familyType + " " + string(subnetType) +} + +// checkIPFamilies determines if cs contains a valid single-stack IPv4 
configuration, a
+// valid single-stack IPv6 configuration, a valid dual-stack configuration, or none of the
+// above.
+func (cs *ConfigSubnets) checkIPFamilies() (usingIPv4, usingIPv6 bool, err error) {
+	if len(cs.V6) == 0 {
+		// Single-stack IPv4
+		return true, false, nil
+	} else if len(cs.V4) == 0 {
+		// Single-stack IPv6
+		return false, true, nil
+	} else if reflect.DeepEqual(cs.V4, cs.V6) {
+		// Dual-stack
+		return true, true, nil
+	}
+
+	netConfig := cs.describeSubnetType(ConfigSubnetCluster)
+	netConfig += ", " + cs.describeSubnetType(ConfigSubnetService)
+	if cs.V4[ConfigSubnetHybrid] || cs.V6[ConfigSubnetHybrid] {
+		netConfig += ", " + cs.describeSubnetType(ConfigSubnetHybrid)
+	}
+
+	return false, false, fmt.Errorf("illegal network configuration: %s", netConfig)
+}
+
+// MasqueradeIPsConfig holds the masquerade IPs used by the masquerade subnets for host-to-service traffic
+type MasqueradeIPsConfig struct {
+	V4OVNMasqueradeIP               net.IP
+	V6OVNMasqueradeIP               net.IP
+	V4HostMasqueradeIP              net.IP
+	V6HostMasqueradeIP              net.IP
+	V4HostETPLocalMasqueradeIP      net.IP
+	V6HostETPLocalMasqueradeIP      net.IP
+	V4DummyNextHopMasqueradeIP      net.IP
+	V6DummyNextHopMasqueradeIP      net.IP
+	V4OVNServiceHairpinMasqueradeIP net.IP
+	V6OVNServiceHairpinMasqueradeIP net.IP
+}
+
+// AllocateV4MasqueradeIPs and AllocateV6MasqueradeIPs allocate the masquerade IPs from the
+// passed-in masquerade subnet's network address (.0), cascading down from the initial IP
+// to the .5 currently (more masquerade IPs may be added in the future).
+func AllocateV4MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIPs *MasqueradeIPsConfig) error {
+	masqueradeIPs.V4OVNMasqueradeIP = iputils.NextIP(masqueradeSubnetNetworkAddress)
+	if masqueradeIPs.V4OVNMasqueradeIP == nil {
+		return fmt.Errorf("error setting V4OVNMasqueradeIP: %s", masqueradeSubnetNetworkAddress)
+	}
+	masqueradeIPs.V4HostMasqueradeIP = iputils.NextIP(masqueradeIPs.V4OVNMasqueradeIP) // using the last set IP we can cascade down from the .0
+	if masqueradeIPs.V4HostMasqueradeIP == nil {
+		return fmt.Errorf("error setting V4HostMasqueradeIP: %s", masqueradeIPs.V4OVNMasqueradeIP)
+	}
+	masqueradeIPs.V4HostETPLocalMasqueradeIP = iputils.NextIP(masqueradeIPs.V4HostMasqueradeIP)
+	if masqueradeIPs.V4HostETPLocalMasqueradeIP == nil {
+		return fmt.Errorf("error setting V4HostETPLocalMasqueradeIP: %s", masqueradeIPs.V4HostMasqueradeIP)
+	}
+	masqueradeIPs.V4DummyNextHopMasqueradeIP = iputils.NextIP(masqueradeIPs.V4HostETPLocalMasqueradeIP)
+	if masqueradeIPs.V4DummyNextHopMasqueradeIP == nil {
+		return fmt.Errorf("error setting V4DummyNextHopMasqueradeIP: %s", masqueradeIPs.V4HostETPLocalMasqueradeIP)
+	}
+	masqueradeIPs.V4OVNServiceHairpinMasqueradeIP = iputils.NextIP(masqueradeIPs.V4DummyNextHopMasqueradeIP)
+	if masqueradeIPs.V4OVNServiceHairpinMasqueradeIP == nil {
+		return fmt.Errorf("error setting V4OVNServiceHairpinMasqueradeIP: %s", masqueradeIPs.V4DummyNextHopMasqueradeIP)
+	}
+	return nil
+}
+
+func AllocateV6MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIPs *MasqueradeIPsConfig) error {
+	masqueradeIPs.V6OVNMasqueradeIP = iputils.NextIP(masqueradeSubnetNetworkAddress)
+	if masqueradeIPs.V6OVNMasqueradeIP == nil {
+		return fmt.Errorf("error setting V6OVNMasqueradeIP: %s", masqueradeSubnetNetworkAddress)
+	}
+	masqueradeIPs.V6HostMasqueradeIP = iputils.NextIP(masqueradeIPs.V6OVNMasqueradeIP) // using the last set IP we can cascade down from the .0
+	if masqueradeIPs.V6HostMasqueradeIP == nil {
+		return fmt.Errorf("error setting V6HostMasqueradeIP: %s",
masqueradeIPs.V6OVNMasqueradeIP) + } + masqueradeIPs.V6HostETPLocalMasqueradeIP = iputils.NextIP(masqueradeIPs.V6HostMasqueradeIP) + if masqueradeIPs.V6HostETPLocalMasqueradeIP == nil { + return fmt.Errorf("error setting V6HostETPLocalMasqueradeIP: %s", masqueradeIPs.V6HostMasqueradeIP) + } + masqueradeIPs.V6DummyNextHopMasqueradeIP = iputils.NextIP(masqueradeIPs.V6HostETPLocalMasqueradeIP) + if masqueradeIPs.V6DummyNextHopMasqueradeIP == nil { + return fmt.Errorf("error setting V6DummyNextHopMasqueradeIP: %s", masqueradeIPs.V6HostETPLocalMasqueradeIP) + } + masqueradeIPs.V6OVNServiceHairpinMasqueradeIP = iputils.NextIP(masqueradeIPs.V6DummyNextHopMasqueradeIP) + if masqueradeIPs.V6OVNServiceHairpinMasqueradeIP == nil { + return fmt.Errorf("error setting V6OVNServiceHairpinMasqueradeIP: %s", masqueradeIPs.V6DummyNextHopMasqueradeIP) + } + return nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go new file mode 100644 index 000000000..9924c9b1d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand/cryptorand.go @@ -0,0 +1,47 @@ +package cryptorand + +import ( + "crypto/rand" + "encoding/binary" + "k8s.io/klog/v2" + "math/big" +) + +func Intn(n int64) uint64 { + val := new(big.Int).SetInt64(n) + randNum, err := rand.Int(rand.Reader, val) + if err != nil { + klog.Errorf("Error generating random number using crypto/rand : %v", err) + return 0 + } + return randNum.Uint64() +} + +func Uint32() uint32 { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + klog.Errorf("Error reading bytes for random number generation using crypto/rand: %v", err) + return 0 + } + return binary.LittleEndian.Uint32(b) +} + +func Uint64() uint64 { + b := make([]byte, 8) + _, err := rand.Read(b) + if err != nil { + klog.Errorf("Error reading bytes for random number generation using crypto/rand: %v", err) + return 0 + } + return binary.LittleEndian.Uint64(b) +} + +func Read(randBytes []byte) []byte { + _, err := rand.Read(randBytes) + if err != nil { + klog.Errorf("Error reading bytes using crypto/rand: %v", err) + return nil + } + return randBytes +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go new file mode 100644 index 000000000..3d6e81514 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/acl.go @@ -0,0 +1,181 @@ +package ops + +import ( + "context" + "fmt" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// GetACLName returns the ACL name if it has one otherwise returns +// an empty string. 
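Aside on the cryptorand helpers added above: on an entropy failure they log and return 0 or nil rather than an error, so callers must treat those values accordingly. A usage sketch:

    package main

    import (
        "fmt"

        "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand"
    )

    func main() {
        fmt.Println(cryptorand.Intn(100)) // uniform in [0, 100); 0 can also mean "entropy failed"
        fmt.Println(cryptorand.Uint32())  // random 32-bit value (note: reads 8 bytes, uses 4)
        buf := cryptorand.Read(make([]byte, 16))
        fmt.Printf("%x\n", buf) // nil on failure, so check before use
    }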
+func GetACLName(acl *nbdb.ACL) string { + if acl.Name != nil { + return *acl.Name + } + return "" +} + +func getACLMutableFields(acl *nbdb.ACL) []interface{} { + return []interface{}{&acl.Action, &acl.Direction, &acl.ExternalIDs, &acl.Log, &acl.Match, &acl.Meter, + &acl.Name, &acl.Options, &acl.Priority, &acl.Severity, &acl.Tier, &acl.SampleNew, &acl.SampleEst} +} + +type aclPredicate func(*nbdb.ACL) bool + +// FindACLsWithPredicate looks up ACLs from the cache based on a given predicate +func FindACLsWithPredicate(nbClient libovsdbclient.Client, p aclPredicate) ([]*nbdb.ACL, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + acls := []*nbdb.ACL{} + err := nbClient.WhereCache(p).List(ctx, &acls) + return acls, err +} + +func FindACLs(nbClient libovsdbclient.Client, acls []*nbdb.ACL) ([]*nbdb.ACL, error) { + opModels := make([]operationModel, 0, len(acls)) + foundACLs := make([]*nbdb.ACL, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + found := []*nbdb.ACL{} + opModel := operationModel{ + Model: acl, + ExistingResult: &found, + ErrNotFound: false, + BulkOp: false, + DoAfter: func() { + if len(found) > 0 { + foundACLs = append(foundACLs, found[0]) + } + }, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + err := modelClient.Lookup(opModels...) + return foundACLs, err +} + +// BuildACL builds an ACL with empty optional properties unset +func BuildACL(name string, direction nbdb.ACLDirection, priority int, match string, action nbdb.ACLAction, meter string, + severity nbdb.ACLSeverity, log bool, externalIds map[string]string, options map[string]string, tier int) *nbdb.ACL { + name = fmt.Sprintf("%.63s", name) + + var realName *string + var realMeter *string + var realSeverity *string + if len(name) != 0 { + realName = &name + } + if len(meter) != 0 { + realMeter = &meter + } + if len(severity) != 0 { + realSeverity = &severity + } + acl := &nbdb.ACL{ + Name: realName, + Direction: direction, + Match: match, + Action: action, + Priority: priority, + Severity: realSeverity, + Log: log, + Meter: realMeter, + ExternalIDs: externalIds, + Options: options, + Tier: tier, + } + + return acl +} + +func SetACLLogging(acl *nbdb.ACL, severity nbdb.ACLSeverity, log bool) { + var realSeverity *string + if len(severity) != 0 { + realSeverity = &severity + } + acl.Severity = realSeverity + acl.Log = log +} + +// CreateOrUpdateACLsOps creates or updates the provided ACLs returning the +// corresponding ops +func CreateOrUpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + // ensure names are truncated (let's cover our bases from snippets that don't call BuildACL and call this directly) + if acl.Name != nil { + // node ACLs won't have names set + *acl.Name = fmt.Sprintf("%.63s", *acl.Name) + } + opModels = addSample(samplingConfig, opModels, acl) + opModel := operationModel{ + Model: acl, + OnModelUpdates: getACLMutableFields(acl), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) 
+} + +func UpdateACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + opModel := operationModel{ + Model: acl, + OnModelUpdates: getACLMutableFields(acl), + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +// CreateOrUpdateACLs creates or updates the provided ACLs +func CreateOrUpdateACLs(nbClient libovsdbclient.Client, samplingConfig *SamplingConfig, acls ...*nbdb.ACL) error { + ops, err := CreateOrUpdateACLsOps(nbClient, nil, samplingConfig, acls...) + if err != nil { + return err + } + + _, err = TransactAndCheckAndSetUUIDs(nbClient, acls, ops) + return err +} + +// UpdateACLsLoggingOps updates the log and severity on the provided ACLs and +// returns the corresponding ops +func UpdateACLsLoggingOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(acls)) + for i := range acls { + // can't use i in the predicate, for loop replaces it in-memory + acl := acls[i] + opModel := operationModel{ + Model: acl, + OnModelUpdates: []interface{}{&acl.Severity, &acl.Log}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go new file mode 100644 index 000000000..920bde95f --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/address_set.go @@ -0,0 +1,231 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type addressSetPredicate func(*nbdb.AddressSet) bool + +// getNonZeroAddressSetMutableFields builds a list of address set +// mutable fields with non zero values to be used as the list of fields to +// Update. +// The purpose is to prevent libovsdb interpreting non-nil empty maps/slices +// as default and thus being filtered out of the update. The intention is to +// use non-nil empty maps/slices to clear them out in the update. 
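Aside: the nil-versus-empty distinction this comment describes, in miniature (illustrative only):

    package main

    import "fmt"

    // fieldsToUpdate mirrors the rule used by getNonZeroAddressSetMutableFields:
    // a nil field means "leave as is", while a non-nil empty map/slice means
    // "clear this column in the update".
    func fieldsToUpdate(addresses []string, externalIDs map[string]string) []string {
        var fields []string
        if addresses != nil {
            fields = append(fields, "addresses")
        }
        if externalIDs != nil {
            fields = append(fields, "external_ids")
        }
        return fields
    }

    func main() {
        fmt.Println(fieldsToUpdate(nil, nil))                  // []: nothing updated
        fmt.Println(fieldsToUpdate([]string{}, nil))           // [addresses]: clears the column
        fmt.Println(fieldsToUpdate([]string{"10.0.0.5"}, nil)) // [addresses]: sets the column
    }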
+// See: https://github.com/ovn-org/libovsdb/issues/226
+func getNonZeroAddressSetMutableFields(as *nbdb.AddressSet) []interface{} {
+	fields := []interface{}{}
+	if as.Addresses != nil {
+		fields = append(fields, &as.Addresses)
+	}
+	if as.ExternalIDs != nil {
+		fields = append(fields, &as.ExternalIDs)
+	}
+	return fields
+}
+
+// FindAddressSetsWithPredicate looks up address sets from the cache based on a
+// given predicate
+func FindAddressSetsWithPredicate(nbClient libovsdbclient.Client, p addressSetPredicate) ([]*nbdb.AddressSet, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	found := []*nbdb.AddressSet{}
+	err := nbClient.WhereCache(p).List(ctx, &found)
+	return found, err
+}
+
+// GetAddressSet looks up an address set from the cache
+func GetAddressSet(nbClient libovsdbclient.Client, as *nbdb.AddressSet) (*nbdb.AddressSet, error) {
+	found := []*nbdb.AddressSet{}
+	opModel := operationModel{
+		Model:          as,
+		ExistingResult: &found,
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	err := m.Lookup(opModel)
+	if err != nil {
+		return nil, err
+	}
+
+	return found[0], nil
+}
+
+// CreateAddressSetsOps creates the create-ops for the provided address sets
+func CreateAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) {
+	opModels := make([]operationModel, 0, len(addrSets))
+	for i := range addrSets {
+		as := addrSets[i]
+		opModel := operationModel{
+			Model:          as,
+			OnModelUpdates: onModelUpdatesNone(),
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	m := newModelClient(nbClient)
+	return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateAddressSets creates the provided address sets
+func CreateAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+	ops, err := CreateAddressSetsOps(nbClient, nil, addrSets...)
+	if err != nil {
+		return err
+	}
+
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// CreateOrUpdateAddressSetsOps creates or updates the provided address sets
+// and returns the corresponding ops
+func CreateOrUpdateAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+	addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) {
+	opModels := make([]operationModel, 0, len(addrSets))
+	for i := range addrSets {
+		as := addrSets[i]
+		opModel := operationModel{
+			Model:          as,
+			OnModelUpdates: getNonZeroAddressSetMutableFields(as),
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	m := newModelClient(nbClient)
+	return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateOrUpdateAddressSets creates or updates the provided address sets
+func CreateOrUpdateAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+	ops, err := CreateOrUpdateAddressSetsOps(nbClient, nil, addrSets...)
+	if err != nil {
+		return err
+	}
+
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// UpdateAddressSetsAddresses updates the Addresses on the provided address sets
+func UpdateAddressSetsAddresses(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error {
+	opModels := make([]operationModel, 0, len(addrSets))
+	for i := range addrSets {
+		as := addrSets[i]
+		opModel := operationModel{
+			Model:          as,
+			OnModelUpdates: []interface{}{&as.Addresses},
+			ErrNotFound:    true,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	m := newModelClient(nbClient)
+	_, err := m.CreateOrUpdate(opModels...)
+ return err +} + +// AddAddressesToAddressSetOps adds the provided addresses to the provided address set and +// returns the corresponding ops +func AddAddressesToAddressSetOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, as *nbdb.AddressSet, addresses ...string) ([]libovsdb.Operation, error) { + originalAddresses := as.Addresses + as.Addresses = addresses + opModel := operationModel{ + Model: as, + OnModelMutations: []interface{}{&as.Addresses}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModel) + as.Addresses = originalAddresses + return ops, err +} + +// DeleteAddressesFromAddressSetOps removes the provided addresses from the provided address +// set and returns the corresponding ops +func DeleteAddressesFromAddressSetOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, as *nbdb.AddressSet, addresses ...string) ([]libovsdb.Operation, error) { + originalAddresses := as.Addresses + as.Addresses = addresses + opModel := operationModel{ + Model: as, + OnModelMutations: []interface{}{&as.Addresses}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(ops, opModel) + as.Addresses = originalAddresses + return ops, err +} + +func DeleteAddressSetsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, addrSets ...*nbdb.AddressSet) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeleteAddressSets deletes the provided address sets +func DeleteAddressSets(nbClient libovsdbclient.Client, addrSets ...*nbdb.AddressSet) error { + opModels := make([]operationModel, 0, len(addrSets)) + for i := range addrSets { + as := addrSets[i] + opModel := operationModel{ + Model: as, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.Delete(opModels...) 
+}
+
+// DeleteAddressSetsWithPredicateOps returns the ops to delete address sets based on a given predicate
+func DeleteAddressSetsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p addressSetPredicate) ([]libovsdb.Operation, error) {
+	deleted := []*nbdb.AddressSet{}
+	opModel := operationModel{
+		ModelPredicate: p,
+		ExistingResult: &deleted,
+		ErrNotFound:    false,
+		BulkOp:         true,
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModel)
+}
+
+// DeleteAddressSetsWithPredicate looks up address sets from the cache based on
+// a given predicate and deletes them
+func DeleteAddressSetsWithPredicate(nbClient libovsdbclient.Client, p addressSetPredicate) error {
+	ops, err := DeleteAddressSetsWithPredicateOps(nbClient, nil, p)
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go
new file mode 100644
index 000000000..3cc17b64f
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/chassis.go
@@ -0,0 +1,166 @@
+package ops
+
+import (
+	"context"
+
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+)
+
+// ListChassis looks up all chassis from the cache
+func ListChassis(sbClient libovsdbclient.Client) ([]*sbdb.Chassis, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	searchedChassis := []*sbdb.Chassis{}
+	err := sbClient.List(ctx, &searchedChassis)
+	return searchedChassis, err
+}
+
+// ListChassisPrivate looks up all chassis private models from the cache
+func ListChassisPrivate(sbClient libovsdbclient.Client) ([]*sbdb.ChassisPrivate, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	found := []*sbdb.ChassisPrivate{}
+	err := sbClient.List(ctx, &found)
+	return found, err
+}
+
+// GetChassis looks up a chassis from the cache using the 'Name' column, which
+// is an indexed column.
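+//
+// A minimal usage sketch (the chassis name is illustrative):
+//
+//	chassis, err := GetChassis(sbClient, &sbdb.Chassis{Name: "node1"})
+//
+// Since ErrNotFound is set, the lookup returns an error when no such chassis
+// row exists in the cache.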
+func GetChassis(sbClient libovsdbclient.Client, chassis *sbdb.Chassis) (*sbdb.Chassis, error) { + found := []*sbdb.Chassis{} + opModel := operationModel{ + Model: chassis, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// DeleteChassis deletes the provided chassis and associated private chassis +func DeleteChassis(sbClient libovsdbclient.Client, chassis ...*sbdb.Chassis) error { + opModels := make([]operationModel, 0, len(chassis)) + for i := range chassis { + foundChassis := []*sbdb.Chassis{} + chassisPrivate := sbdb.ChassisPrivate{ + Name: chassis[i].Name, + } + chassisUUID := "" + opModel := []operationModel{ + { + Model: chassis[i], + ExistingResult: &foundChassis, + ErrNotFound: false, + BulkOp: false, + DoAfter: func() { + if len(foundChassis) > 0 { + chassisPrivate.Name = foundChassis[0].Name + chassisUUID = foundChassis[0].UUID + } + }, + }, + { + Model: &chassisPrivate, + ErrNotFound: false, + BulkOp: false, + }, + // IGMPGroup has a weak link to chassis, deleting multiple chassis may result in IGMP_Groups + // with identical values on columns "address", "datapath", and "chassis", when "chassis" goes empty + { + Model: &sbdb.IGMPGroup{}, + ModelPredicate: func(group *sbdb.IGMPGroup) bool { + return group.Chassis != nil && chassisUUID != "" && *group.Chassis == chassisUUID + }, + ErrNotFound: false, + BulkOp: true, + }, + } + opModels = append(opModels, opModel...) + } + + m := newModelClient(sbClient) + err := m.Delete(opModels...) + return err +} + +type chassisPredicate func(*sbdb.Chassis) bool + +// DeleteChassisWithPredicate looks up chassis from the cache based on a given +// predicate and deletes them as well as the associated private chassis +func DeleteChassisWithPredicate(sbClient libovsdbclient.Client, p chassisPredicate) error { + foundChassis := []*sbdb.Chassis{} + foundChassisNames := sets.NewString() + foundChassisUUIDS := sets.NewString() + opModels := []operationModel{ + { + Model: &sbdb.Chassis{}, + ModelPredicate: p, + ExistingResult: &foundChassis, + ErrNotFound: false, + BulkOp: true, + DoAfter: func() { + for _, chassis := range foundChassis { + foundChassisNames.Insert(chassis.Name) + foundChassisUUIDS.Insert(chassis.UUID) + } + }, + }, + { + Model: &sbdb.ChassisPrivate{}, + ModelPredicate: func(item *sbdb.ChassisPrivate) bool { return foundChassisNames.Has(item.Name) }, + ErrNotFound: false, + BulkOp: true, + }, + // IGMPGroup has a weak link to chassis, deleting multiple chassis may result in IGMP_Groups + // with identical values on columns "address", "datapath", and "chassis", when "chassis" goes empty + { + Model: &sbdb.IGMPGroup{}, + ModelPredicate: func(group *sbdb.IGMPGroup) bool { return group.Chassis != nil && foundChassisUUIDS.Has(*group.Chassis) }, + ErrNotFound: false, + BulkOp: true, + }, + } + m := newModelClient(sbClient) + err := m.Delete(opModels...) 
+	return err
+}
+
+// CreateOrUpdateChassis creates or updates the chassis record along with the encap record
+func CreateOrUpdateChassis(sbClient libovsdbclient.Client, chassis *sbdb.Chassis, encap *sbdb.Encap) error {
+	m := newModelClient(sbClient)
+	opModels := []operationModel{
+		{
+			Model: encap,
+			DoAfter: func() {
+				chassis.Encaps = []string{encap.UUID}
+			},
+			OnModelUpdates: onModelUpdatesAllNonDefault(),
+			ErrNotFound:    false,
+			BulkOp:         false,
+		},
+		{
+			Model:            chassis,
+			OnModelMutations: []interface{}{&chassis.OtherConfig},
+			OnModelUpdates:   []interface{}{&chassis.Encaps},
+			ErrNotFound:      false,
+			BulkOp:           false,
+		},
+	}
+
+	if _, err := m.CreateOrUpdate(opModels...); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go
new file mode 100644
index 000000000..dac95c5c0
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/copp.go
@@ -0,0 +1,47 @@
+package ops
+
+import (
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	"github.com/ovn-org/libovsdb/ovsdb"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type coppPredicate func(*nbdb.Copp) bool
+
+// CreateOrUpdateCOPPsOps creates or updates the provided COPPs returning the
+// corresponding ops
+func CreateOrUpdateCOPPsOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, copps ...*nbdb.Copp) ([]ovsdb.Operation, error) {
+	opModels := make([]operationModel, 0, len(copps))
+	for i := range copps {
+		// can't use i in the predicate, for loop replaces it in-memory
+		copp := copps[i]
+		opModel := operationModel{
+			Model:          copp,
+			OnModelUpdates: onModelUpdatesAllNonDefault(),
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	modelClient := newModelClient(nbClient)
+	return modelClient.CreateOrUpdateOps(ops, opModels...)
+}
+
+// DeleteCOPPsWithPredicateOps deletes the COPPs found using the predicate, returning the
+// corresponding ops
+func DeleteCOPPsWithPredicateOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, p coppPredicate) ([]ovsdb.Operation, error) {
+	copp := nbdb.Copp{}
+	opModels := []operationModel{
+		{
+			Model:          &copp,
+			ModelPredicate: p,
+			ErrNotFound:    false,
+			BulkOp:         true,
+		},
+	}
+
+	modelClient := newModelClient(nbClient)
+	return modelClient.DeleteOps(ops, opModels...)
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go
new file mode 100644
index 000000000..b242d7d3e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_ids.go
@@ -0,0 +1,332 @@
+package ops
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+)
+
+type dbObjType int
+type ownerType = string
+type ExternalIDKey string
+
+func (key ExternalIDKey) String() string {
+	return string(key)
+}
+
+// ObjectIDsType defines which ExternalIDs are used to identify db objects.
+// ExternalIDs are defined based on dbObjType and ownerType, e.g. the default network controller creates address
+// sets for namespaces and network policies, and needs to use different sets of ids for them. So it will
+// create ObjectIDsType with the same dbObjType=addressSet, but different ownerTypes NamespaceOwnerType and
+// NetworkPolicyOwnerType.
Then it can define a set of ExternalIDs that will be used for each type.
+// From the db perspective, dbObjType is identified based on the db table, and ownerType is used directly
+// in the ExternalIDs with the OwnerTypeKey key.
+type ObjectIDsType struct {
+	dbTable         dbObjType
+	ownerObjectType ownerType
+	// externalIDKeys is a slice, because the primary id for a given ObjectIDsType will be built from the
+	// ExternalIDKey values in the given order
+	externalIDKeys []ExternalIDKey
+	externalIDsMap map[ExternalIDKey]bool
+}
+
+func (it ObjectIDsType) GetExternalIDKeys() []ExternalIDKey {
+	return it.externalIDKeys
+}
+
+func (it ObjectIDsType) HasKey(key ExternalIDKey) bool {
+	return it.externalIDsMap[key]
+}
+
+func (it ObjectIDsType) IsSameType(it2 *ObjectIDsType) bool {
+	return it.ownerObjectType == it2.ownerObjectType && it.dbTable == it2.dbTable
+}
+
+const (
+	// ExternalIDs keys that will be a part of a client index.
+	// OwnerControllerKey and OwnerTypeKey define the managing entity (a.k.a. owner) for a given db object.
+	// All the other ids are object-related.
+	// PrimaryIDKey will be used as a primary client index.
+	// A combination of OwnerControllerKey, OwnerTypeKey, and ObjectNameKey will be used as a secondary client index.
+	// While owner-related keys together with PrimaryIDKey will always be present in the ExternalIDs,
+	// ObjectNameKey may or may not be used, based on ObjectIDsType.
+	OwnerControllerKey ExternalIDKey = types.OvnK8sPrefix + "/owner-controller"
+	OwnerTypeKey       ExternalIDKey = types.OvnK8sPrefix + "/owner-type"
+	// ObjectNameKey is a part of a secondary index, together with OwnerControllerKey and OwnerTypeKey.
+	// May be used by controllers to store e.g. namespace+name of the object.
+	ObjectNameKey ExternalIDKey = types.OvnK8sPrefix + "/name"
+	// PrimaryIDKey will be used as a primary index, that is unique for every db object,
+	// and can be built based on the combination of all the other ids.
+	PrimaryIDKey ExternalIDKey = types.PrimaryIDKey
+)
+
+// ObjectNameKey may be used as a secondary ID in the future. To ensure easy filtering for namespaced
+// objects, you can combine namespace and name in that key. To unify this process (and potential parsing of the key)
+// the following 2 functions exist:
+// - BuildNamespaceNameKey to combine namespace and name into one key
+// - ParseNamespaceNameKey to split the key back into namespace and name
+
+func BuildNamespaceNameKey(namespace, name string) string {
+	return namespace + ":" + name
+}
+
+func ParseNamespaceNameKey(key string) (namespace, name string, err error) {
+	s := strings.Split(key, ":")
+	if len(s) != 2 {
+		err = fmt.Errorf("failed to parse namespaced name key %v, expected format <namespace>:<name>", key)
+		return
+	}
+	return s[0], s[1], nil
+}
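+
+// For example (a hypothetical key, just to illustrate the round trip):
+//
+//	key := BuildNamespaceNameKey("ns1", "policy1") // "ns1:policy1"
+//	namespace, name, err := ParseNamespaceNameKey(key)
+//
+// Note that ParseNamespaceNameKey rejects keys with more or fewer than two
+// ":"-separated parts, so names containing ":" cannot round-trip.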
+
+// dbIDsMap is used to make sure the same ownerType is not defined twice for the same dbObjType to avoid conflicts.
+// It is filled in newObjectIDsType when registering a new ObjectIDsType.
+var dbIDsMap = map[dbObjType]map[ownerType]bool{}
+
+func newObjectIDsType(dbTable dbObjType, ownerObjectType ownerType, keys []ExternalIDKey) *ObjectIDsType {
+	if dbIDsMap[dbTable][ownerObjectType] {
+		panic(fmt.Sprintf("ObjectIDsType for params %v %v is already registered", dbTable, ownerObjectType))
+	}
+	if dbIDsMap[dbTable] == nil {
+		dbIDsMap[dbTable] = map[ownerType]bool{}
+	}
+	dbIDsMap[dbTable][ownerObjectType] = true
+	keysMap := map[ExternalIDKey]bool{}
+	for _, key := range keys {
+		keysMap[key] = true
+	}
+	return &ObjectIDsType{dbTable, ownerObjectType, keys, keysMap}
+}
+
+// DbObjectIDs is a structure representing a set of db object ExternalIDs, used to identify
+// an object in the db (as a primary/secondary index) or for a predicate search.
+// DbObjectIDs consists of 3 parts:
+// - idsType defines which IDs are used for a given object, as an ObjectIDsType;
+// idsType.ownerObjectType will be written to ExternalIDs[OwnerTypeKey]
+// - ownerControllerName defines who manages the given object. It is required in case there is more than one controller
+// using the same idsType, to make sure every controller only updates objects it owns.
+// - objectIDs provide values for keys that are used by the given idsType. To create a new object, all fields should be set.
+// For a predicate search, only the values that need to be matched may be set.
+//
+//	dbIndex := NewDbObjectIDs(AddressSetEgressFirewallDNS, "DefaultController",
+//		map[ExternalIDKey]string{
+//			ObjectNameKey: "dns.name",
+//			IPFamilyKey:   "ipv4",
+//		})
+//
+// uses AddressSetEgressFirewallDNS = newObjectIDsType(addressSet, EgressFirewallDNSOwnerType, []ExternalIDKey{
+//	// dnsName
+//	ObjectNameKey,
+//	IPFamilyKey,
+// })
+//
+// its dbIndex will be mapped to the following ExternalIDs
+//
+//	{
+//		"k8s.ovn.org/owner-controller": "DefaultController"
+//		"k8s.ovn.org/owner-type": "EgressFirewallDNS" (value of EgressFirewallDNSOwnerType)
+//		"k8s.ovn.org/name": "dns.name"
+//		"k8s.ovn.org/ipFamily": "ipv4"
+//		"k8s.ovn.org/id": "DefaultController:EgressFirewallDNS:dns.name:ipv4"
+//	}
+type DbObjectIDs struct {
+	idsType *ObjectIDsType
+	// ownerControllerName specifies which controller owns the object.
+	// A controller should only change objects it owns, so make sure to always set this field.
+	ownerControllerName string
+	// objectIDs store values for keys required by the given ObjectIDsType, and may be different for different ObjectIDsTypes.
+	// These ids should uniquely identify a db object with the same ownerControllerName and OwnerTypeKey.
+	objectIDs map[ExternalIDKey]string
+}
+
+// NewDbObjectIDs is used to construct DbObjectIDs; idsType and controller are always required,
+// objectIds may be empty, or half-filled for predicate search.
+// objectIds keys that are not used by the given idsType will cause panic.
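+//
+// For a predicate search, the ids may be half-filled (a hypothetical sketch):
+//
+//	lookupIDs := NewDbObjectIDs(AddressSetEgressFirewallDNS, "DefaultController",
+//		map[ExternalIDKey]string{ObjectNameKey: "dns.name"})
+//
+// which, when passed to GetPredicate, matches that dns name for any IP family.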
+func NewDbObjectIDs(idsType *ObjectIDsType, controller string, objectIds map[ExternalIDKey]string) *DbObjectIDs {
+	if controller == "" {
+		panic("NewDbObjectIDs failed: controller should not be empty")
+	}
+	externalIDKeys := idsType.GetExternalIDKeys()
+	if externalIDKeys == nil {
+		// can only happen if ObjectIDsType{} is passed
+		panic(fmt.Sprintf("NewDbObjectIDs failed: ObjectIDsType %v should not be empty", idsType))
+	}
+	// only use values for keys from idsType
+	for key := range objectIds {
+		if !idsType.HasKey(key) {
+			panic(fmt.Sprintf("NewDbObjectIDs failed: key %v is unknown", key))
+		}
+	}
+	if objectIds == nil {
+		objectIds = map[ExternalIDKey]string{}
+	}
+	objectIDs := &DbObjectIDs{
+		idsType:             idsType,
+		ownerControllerName: controller,
+		objectIDs:           objectIds,
+	}
+	return objectIDs
+}
+
+// AddIDs creates new DbObjectIDs with the additional extraObjectIds.
+// If at least one of the extraObjectIds keys is not used by objectIDs.idsType, it panics.
+func (objectIDs *DbObjectIDs) AddIDs(extraObjectIds map[ExternalIDKey]string) *DbObjectIDs {
+	ids := deepcopyMap(objectIDs.objectIDs)
+	for key, value := range extraObjectIds {
+		ids[key] = value
+	}
+	return &DbObjectIDs{objectIDs.idsType, objectIDs.ownerControllerName, ids}
+}
+
+func (objectIDs *DbObjectIDs) RemoveIDs(idsToDelete ...ExternalIDKey) *DbObjectIDs {
+	ids := deepcopyMap(objectIDs.objectIDs)
+	for _, keyToDel := range idsToDelete {
+		delete(ids, keyToDel)
+	}
+	return &DbObjectIDs{objectIDs.idsType, objectIDs.ownerControllerName, ids}
+}
+
+func (objectIDs *DbObjectIDs) HasSameOwner(ownerController string, objectIDsType *ObjectIDsType) bool {
+	return objectIDs.ownerControllerName == ownerController && objectIDs.idsType.IsSameType(objectIDsType)
+}
+
+func (objectIDs *DbObjectIDs) GetUnsetKeys() []ExternalIDKey {
+	unsetKeys := []ExternalIDKey{}
+	for _, key := range objectIDs.idsType.GetExternalIDKeys() {
+		if _, ok := objectIDs.objectIDs[key]; !ok {
+			unsetKeys = append(unsetKeys, key)
+		}
+	}
+	return unsetKeys
+}
+
+// GetObjectID returns the value from the objectIDs.objectIDs map, or an empty string for values that are not found.
+// Usually objectIDs.objectIDs doesn't include PrimaryIDKey, OwnerTypeKey, and OwnerControllerKey.
+func (objectIDs *DbObjectIDs) GetObjectID(key ExternalIDKey) string {
+	return objectIDs.objectIDs[key]
+}
+
+// GetExternalIDs should only be used to build ids before creating the new db object.
+// If at least one of the keys required by DbObjectIDs.idsType is not present in DbObjectIDs.objectIDs, it panics.
+// GetExternalIDs returns a map of ids, that always includes the keys
+// - OwnerControllerKey
+// - OwnerTypeKey
+// - PrimaryIDKey
+// and also all keys that are present in objectIDs.objectIDs.
+// The PrimaryIDKey value consists of the following values joined with ":"
+// - objectIDs.ownerControllerName
+// - objectIDs.idsType.ownerObjectType
+// - values from DbObjectIDs.objectIDs, added in the order set in ObjectIDsType.externalIDKeys
+func (objectIDs *DbObjectIDs) GetExternalIDs() map[string]string {
+	return objectIDs.getExternalIDs(false)
+}
+
+func (objectIDs *DbObjectIDs) getExternalIDs(allowEmptyKeys bool) map[string]string {
+	externalIDs := map[string]string{
+		OwnerControllerKey.String(): objectIDs.ownerControllerName,
+		OwnerTypeKey.String():       objectIDs.idsType.ownerObjectType,
+	}
+	for key, value := range objectIDs.objectIDs {
+		externalIDs[key.String()] = value
+	}
+	primaryID, err := objectIDs.getUniqueID()
+	if err == nil {
+		// err == nil => primary id was properly built
+		externalIDs[PrimaryIDKey.String()] = primaryID
+	} else if !allowEmptyKeys {
+		panic(fmt.Sprintf("Failed to build Primary ID for %+v: %v", objectIDs, err))
+	}
+	return externalIDs
+}
+
+// String returns a string that is similar to the PrimaryIDKey value, but if some required keys are not present
+// in DbObjectIDs.objectIDs, they will be replaced with empty strings.
+// String returns the representation of all the information set in DbObjectIDs.
+func (objectIDs *DbObjectIDs) String() string {
+	id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType
+	for _, key := range objectIDs.idsType.GetExternalIDKeys() {
+		id += ":" + objectIDs.objectIDs[key]
+	}
+	return id
+}
+
+func (objectIDs *DbObjectIDs) GetIDsType() *ObjectIDsType {
+	return objectIDs.idsType
+}
+
+// getUniqueID returns the primary id that is built from the objectIDs values.
+// If at least one required key is missing, an error will be returned.
+func (objectIDs *DbObjectIDs) getUniqueID() (string, error) {
+	id := objectIDs.ownerControllerName + ":" + objectIDs.idsType.ownerObjectType
+	for _, key := range objectIDs.idsType.GetExternalIDKeys() {
+		value, ok := objectIDs.objectIDs[key]
+		if !ok {
+			return "", fmt.Errorf("key %v is required but not present", key)
+		}
+		id += ":" + value
+	}
+	return id, nil
+}
+
+// NewDbObjectIDsFromExternalIDs is used to parse object ExternalIDs; it sets DbObjectIDs.ownerControllerName based
+// on the OwnerControllerKey key, and verifies the OwnerTypeKey value matches the given objectIDsType.
+// All the other ids from objectIDsType will be set on DbObjectIDs.objectIDs.
+func NewDbObjectIDsFromExternalIDs(objectIDsType *ObjectIDsType, externalIDs map[string]string) (*DbObjectIDs, error) {
+	if externalIDs[OwnerTypeKey.String()] != objectIDsType.ownerObjectType {
+		return nil, fmt.Errorf("expected ExternalID %s to equal %s, got %s",
+			OwnerTypeKey, objectIDsType.ownerObjectType, externalIDs[OwnerTypeKey.String()])
+	}
+	if externalIDs[OwnerControllerKey.String()] == "" {
+		return nil, fmt.Errorf("required ExternalID %s is empty", OwnerControllerKey)
+	}
+	objIDs := map[ExternalIDKey]string{}
+	for key, value := range externalIDs {
+		if objectIDsType.HasKey(ExternalIDKey(key)) {
+			objIDs[ExternalIDKey(key)] = value
+		}
+	}
+	return NewDbObjectIDs(objectIDsType, externalIDs[OwnerControllerKey.String()], objIDs), nil
+}
+
+// hasExternalIDs interface should only include types that use the new ExternalIDs from DbObjectIDs.
+type hasExternalIDs interface {
+	GetExternalIDs() map[string]string
+}
+
+// GetNoOwnerPredicate should only be used on the initial sync when switching to the new ExternalIDs.
+// Otherwise, use GetPredicate with the specific OwnerControllerKey id.
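+//
+// A hypothetical sync-time sketch (assuming the model type implements
+// hasExternalIDs):
+//
+//	stale, err := FindAddressSetsWithPredicate(nbClient, GetNoOwnerPredicate[*nbdb.AddressSet]())
+//
+// which finds address sets whose owner controller has not been set yet.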
+func GetNoOwnerPredicate[T hasExternalIDs]() func(item T) bool {
+	return func(item T) bool {
+		return item.GetExternalIDs()[OwnerControllerKey.String()] == ""
+	}
+}
+
+// GetPredicate returns a predicate to search for db objects of type nbdbT.
+// Only non-empty ids will be matched (that always includes DbObjectIDs.OwnerTypeKey and DbObjectIDs.ownerControllerName);
+// the other IDs may be empty and will then be ignored in the filtering. An additional filter function f may be
+// passed, or set to nil.
+func GetPredicate[nbdbT hasExternalIDs](objectIDs *DbObjectIDs, f func(item nbdbT) bool) func(item nbdbT) bool {
+	predicateIDs := objectIDs.getExternalIDs(true)
+	if primaryID, ok := predicateIDs[PrimaryIDKey.String()]; ok {
+		// when the primary id is set, other ids are not required
+		predicateIDs = map[string]string{PrimaryIDKey.String(): primaryID}
+	}
+	return func(item nbdbT) bool {
+		dbExternalIDs := item.GetExternalIDs()
+		for predKey, predValue := range predicateIDs {
+			if dbExternalIDs[predKey] != predValue {
+				return false
+			}
+		}
+		return f == nil || f(item)
+	}
+}
+
+func deepcopyMap(m map[ExternalIDKey]string) map[ExternalIDKey]string {
+	result := map[ExternalIDKey]string{}
+	for key, value := range m {
+		result[key] = value
+	}
+	return result
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go
new file mode 100644
index 000000000..9f0e8dfe8
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/db_object_types.go
@@ -0,0 +1,328 @@
+package ops
+
+import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types"
+
+const (
+	addressSet dbObjType = iota
+	acl
+	dhcpOptions
+	portGroup
+	logicalRouterPolicy
+	qos
+)
+
+const (
+	// owner types
+	EgressFirewallDNSOwnerType          ownerType = "EgressFirewallDNS"
+	EgressFirewallOwnerType             ownerType = "EgressFirewall"
+	EgressQoSOwnerType                  ownerType = "EgressQoS"
+	AdminNetworkPolicyOwnerType         ownerType = "AdminNetworkPolicy"
+	BaselineAdminNetworkPolicyOwnerType ownerType = "BaselineAdminNetworkPolicy"
+	// NetworkPolicyOwnerType is deprecated for address sets and should only be used for sync.
+	// The new owner of network policy address sets is PodSelectorOwnerType.
+ NetworkPolicyOwnerType ownerType = "NetworkPolicy" + NetpolDefaultOwnerType ownerType = "NetpolDefault" + PodSelectorOwnerType ownerType = "PodSelector" + NamespaceOwnerType ownerType = "Namespace" + // HybridNodeRouteOwnerType is transferred from egressgw to apbRoute controller with the same dbIDs + HybridNodeRouteOwnerType ownerType = "HybridNodeRoute" + EgressIPOwnerType ownerType = "EgressIP" + EgressServiceOwnerType ownerType = "EgressService" + MulticastNamespaceOwnerType ownerType = "MulticastNS" + MulticastClusterOwnerType ownerType = "MulticastCluster" + NetpolNodeOwnerType ownerType = "NetpolNode" + NetpolNamespaceOwnerType ownerType = "NetpolNamespace" + VirtualMachineOwnerType ownerType = "VirtualMachine" + // NetworkPolicyPortIndexOwnerType is the old version of NetworkPolicyOwnerType, kept for sync only + NetworkPolicyPortIndexOwnerType ownerType = "NetworkPolicyPortIndexOwnerType" + // ClusterOwnerType means the object is cluster-scoped and doesn't belong to any k8s objects + ClusterOwnerType ownerType = "Cluster" + // UDNIsolationOwnerType means the object is needed to implement UserDefinedNetwork isolation + UDNIsolationOwnerType ownerType = "UDNIsolation" + + // owner extra IDs, make sure to define only 1 ExternalIDKey for every string value + PriorityKey ExternalIDKey = "priority" + PolicyDirectionKey ExternalIDKey = "direction" + GressIdxKey ExternalIDKey = "gress-index" + IPFamilyKey ExternalIDKey = "ip-family" + TypeKey ExternalIDKey = "type" + IpKey ExternalIDKey = "ip" + PortPolicyIndexKey ExternalIDKey = "port-policy-index" + IpBlockIndexKey ExternalIDKey = "ip-block-index" + RuleIndex ExternalIDKey = "rule-index" + CIDRKey ExternalIDKey = types.OvnK8sPrefix + "/cidr" + PortPolicyProtocolKey ExternalIDKey = "port-policy-protocol" +) + +// ObjectIDsTypes should only be created here + +var AddressSetAdminNetworkPolicy = newObjectIDsType(addressSet, AdminNetworkPolicyOwnerType, []ExternalIDKey{ + // anp name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule's index + GressIdxKey, + IPFamilyKey, +}) + +var AddressSetBaselineAdminNetworkPolicy = newObjectIDsType(addressSet, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{ + // banp name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule's index + GressIdxKey, + IPFamilyKey, +}) + +var AddressSetEgressFirewallDNS = newObjectIDsType(addressSet, EgressFirewallDNSOwnerType, []ExternalIDKey{ + // dnsName + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetHybridNodeRoute = newObjectIDsType(addressSet, HybridNodeRouteOwnerType, []ExternalIDKey{ + // nodeName + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetEgressQoS = newObjectIDsType(addressSet, EgressQoSOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + // egress qos priority + PriorityKey, + IPFamilyKey, +}) + +var AddressSetPodSelector = newObjectIDsType(addressSet, PodSelectorOwnerType, []ExternalIDKey{ + // pod selector string representation + ObjectNameKey, + IPFamilyKey, +}) + +// deprecated, should only be used for sync +var AddressSetNetworkPolicy = newObjectIDsType(addressSet, NetworkPolicyOwnerType, []ExternalIDKey{ + // namespace_name + ObjectNameKey, + // egress or ingress + PolicyDirectionKey, + // gress rule index + GressIdxKey, + IPFamilyKey, +}) + +var AddressSetNamespace = newObjectIDsType(addressSet, NamespaceOwnerType, []ExternalIDKey{ + // namespace + ObjectNameKey, + IPFamilyKey, +}) + +var AddressSetEgressIP = newObjectIDsType(addressSet, EgressIPOwnerType, []ExternalIDKey{ + 
// cluster-wide address set name
+	ObjectNameKey,
+	IPFamilyKey,
+})
+
+var AddressSetEgressService = newObjectIDsType(addressSet, EgressServiceOwnerType, []ExternalIDKey{
+	// cluster-wide address set name
+	ObjectNameKey,
+	IPFamilyKey,
+})
+
+var ACLAdminNetworkPolicy = newObjectIDsType(acl, AdminNetworkPolicyOwnerType, []ExternalIDKey{
+	// anp name
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+	// gress rule's index
+	GressIdxKey,
+	// gress rule's peer port's protocol index
+	PortPolicyProtocolKey,
+})
+
+var ACLBaselineAdminNetworkPolicy = newObjectIDsType(acl, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{
+	// banp name
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+	// gress rule's index
+	GressIdxKey,
+	// gress rule's peer port's protocol index
+	PortPolicyProtocolKey,
+})
+
+var ACLNetpolDefault = newObjectIDsType(acl, NetpolDefaultOwnerType, []ExternalIDKey{
+	// for now there is only 1 acl of this type, but we use a name in case more types are needed in the future
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+})
+
+var ACLMulticastNamespace = newObjectIDsType(acl, MulticastNamespaceOwnerType, []ExternalIDKey{
+	// namespace
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+})
+
+var ACLMulticastCluster = newObjectIDsType(acl, MulticastClusterOwnerType, []ExternalIDKey{
+	// cluster-scoped multicast acls
+	// there are 2 possible TypeKey values for the cluster default multicast acl: DefaultDeny and AllowInterNode
+	TypeKey,
+	// egress or ingress
+	PolicyDirectionKey,
+})
+
+var ACLNetpolNode = newObjectIDsType(acl, NetpolNodeOwnerType, []ExternalIDKey{
+	// node name
+	ObjectNameKey,
+	// exact ip for the management port; every node may have more than 1 management ip
+	IpKey,
+})
+
+// ACLNetworkPolicyPortIndex defines a unique index for every network policy ACL.
+// ingress/egress + NetworkPolicy[In/E]gressRule idx - defines a given gressPolicy.
+// ACLs are created for every gp.portPolicies:
+// - for an empty policy (no selectors and no ip blocks) - empty ACL (see allIPsMatch)
+// OR
+// - all selector-based peers ACL
+// - for every IPBlock +1 ACL
+// Therefore the unique id for a given gressPolicy is the portPolicy idx + IPBlock idx
+// (empty policy and all selector-based peers ACLs will have idx=-1)
+// Note: keep for backward compatibility only
+// Deprecated, should only be used for sync
+var ACLNetworkPolicyPortIndex = newObjectIDsType(acl, NetworkPolicyPortIndexOwnerType, []ExternalIDKey{
+	// policy namespace+name
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+	// gress rule index
+	GressIdxKey,
+	PortPolicyIndexKey,
+	IpBlockIndexKey,
+})
+
+// ACLNetworkPolicy defines a unique index for every network policy ACL.
+// ingress/egress + NetworkPolicy[In/E]gressRule idx - defines a given gressPolicy.
+// ACLs are created for gp.portPolicies which are grouped by protocol:
+// - for an empty policy (no selectors and no ip blocks) - empty ACL (see allIPsMatch)
+// OR
+// - all selector-based peers ACL
+// - for every IPBlock +1 ACL
+// Therefore the unique id for a given gressPolicy is the protocol name + IPBlock idx
+// (protocol will be "None" if no port policy is defined, and empty policy and all
+// selector-based peers ACLs will have idx=-1)
+var ACLNetworkPolicy = newObjectIDsType(acl, NetworkPolicyOwnerType, []ExternalIDKey{
+	// policy namespace+name
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+	// gress rule index
+	GressIdxKey,
+	PortPolicyProtocolKey,
+	IpBlockIndexKey,
+})
+
+var ACLNetpolNamespace = newObjectIDsType(acl, NetpolNamespaceOwnerType, []ExternalIDKey{
+	// namespace
+	ObjectNameKey,
+	// in the same namespace there can be 2 default deny port groups, egress and ingress
+	PolicyDirectionKey,
+	// every port group has a default deny and an arp allow acl.
+	TypeKey,
+})
+
+var ACLEgressFirewall = newObjectIDsType(acl, EgressFirewallOwnerType, []ExternalIDKey{
+	// namespace
+	ObjectNameKey,
+	// there can only be 1 egress firewall object in every namespace, named "default".
+	// The only additional id we need is the index of the EgressFirewall.Spec.Egress rule.
+	RuleIndex,
+})
+
+var ACLUDN = newObjectIDsType(acl, UDNIsolationOwnerType, []ExternalIDKey{
+	// name of a UDN-related ACL
+	ObjectNameKey,
+	// egress or ingress
+	PolicyDirectionKey,
+})
+
+var VirtualMachineDHCPOptions = newObjectIDsType(dhcpOptions, VirtualMachineOwnerType, []ExternalIDKey{
+	// We can have multiple VMs with the same CIDR; they may have different
+	// hostnames.
+	// vm "namespace/name"
+	ObjectNameKey,
+	// CIDR field from DHCPOptions with ":" replaced by "."
+	CIDRKey,
+})
+
+var PortGroupNamespace = newObjectIDsType(portGroup, NamespaceOwnerType, []ExternalIDKey{
+	// namespace name
+	ObjectNameKey,
+})
+
+// Every namespace that has at least one network policy has resources that are shared by all network policies
+// in that namespace.
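+// For example (controller name and direction value are illustrative):
+//
+//	pgIDs := NewDbObjectIDs(PortGroupNetpolNamespace, "DefaultController",
+//		map[ExternalIDKey]string{ObjectNameKey: "ns1", PolicyDirectionKey: "Egress"})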
+var PortGroupNetpolNamespace = newObjectIDsType(portGroup, NetpolNamespaceOwnerType, []ExternalIDKey{
+	// namespace
+	ObjectNameKey,
+	// in the same namespace there can be 2 default deny port groups, egress and ingress
+	PolicyDirectionKey,
+})
+
+var PortGroupNetworkPolicy = newObjectIDsType(portGroup, NetworkPolicyOwnerType, []ExternalIDKey{
+	// policy namespace+name
+	ObjectNameKey,
+})
+
+var PortGroupAdminNetworkPolicy = newObjectIDsType(portGroup, AdminNetworkPolicyOwnerType, []ExternalIDKey{
+	// ANP name
+	ObjectNameKey,
+})
+
+var PortGroupBaselineAdminNetworkPolicy = newObjectIDsType(portGroup, BaselineAdminNetworkPolicyOwnerType, []ExternalIDKey{
+	// BANP name
+	ObjectNameKey,
+})
+
+var PortGroupCluster = newObjectIDsType(portGroup, ClusterOwnerType, []ExternalIDKey{
+	// name of a global port group
+	// currently ClusterPortGroup and ClusterRtrPortGroup are present
+	ObjectNameKey,
+})
+
+var PortGroupUDN = newObjectIDsType(portGroup, UDNIsolationOwnerType, []ExternalIDKey{
+	// name of a UDN port group
+	// currently uses:
+	// secondaryPods - on the default network switch to distinguish non-primary pods
+	ObjectNameKey,
+})
+
+var LogicalRouterPolicyEgressIP = newObjectIDsType(logicalRouterPolicy, EgressIPOwnerType, []ExternalIDKey{
+	// the priority of the LRP
+	PriorityKey,
+	// for the reroute policies it should be the "EIPName_Namespace/podName"
+	// for the no-reroute global policies it should be the unique global name
+	ObjectNameKey,
+	// the IP Family for this policy, ip4 or ip6 or ip (dualstack)
+	IPFamilyKey,
+})
+
+var QoSEgressQoS = newObjectIDsType(qos, EgressQoSOwnerType, []ExternalIDKey{
+	// the priority of the QoSRule (the OVN priority is the same as the rule index priority for this feature)
+	// this value will be unique in a given namespace
+	PriorityKey,
+	// namespace
+	ObjectNameKey,
+})
+
+var QoSRuleEgressIP = newObjectIDsType(qos, EgressIPOwnerType, []ExternalIDKey{
+	// the priority of the QoSRule
+	PriorityKey,
+	// should be the unique global name
+	ObjectNameKey,
+	// the IP Family for this policy, ip4 or ip6 or ip (dualstack)
+	IPFamilyKey,
+})
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go
new file mode 100644
index 000000000..8eb17c6df
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/dhcp.go
@@ -0,0 +1,84 @@
+package ops
+
+import (
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type DHCPOptionsPredicate func(*nbdb.DHCPOptions) bool
+
+// CreateOrUpdateDhcpOptionsOps will configure the logical switch port DHCPv4Options and DHCPv6Options fields with
+// the options passed in the dhcpIPv4Options and dhcpIPv6Options arguments, creating or updating the corresponding
+// DHCPOptions objects. When an options argument is nil, the matching LSP field is updated to nil.
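+//
+// A hypothetical usage sketch (the CIDR and port are illustrative):
+//
+//	v4opts := &nbdb.DHCPOptions{Cidr: "10.244.1.0/24"}
+//	ops, err := CreateOrUpdateDhcpOptionsOps(nbClient, nil, lsp, v4opts, nil)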
+func CreateOrUpdateDhcpOptionsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lsp *nbdb.LogicalSwitchPort, dhcpIPv4Options, dhcpIPv6Options *nbdb.DHCPOptions) ([]libovsdb.Operation, error) {
+	opModels := []operationModel{}
+	if dhcpIPv4Options != nil {
+		opModel := operationModel{
+			Model:          dhcpIPv4Options,
+			OnModelUpdates: onModelUpdatesAllNonDefault(),
+			DoAfter:        func() { lsp.Dhcpv4Options = &dhcpIPv4Options.UUID },
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+	if dhcpIPv6Options != nil {
+		opModel := operationModel{
+			Model:          dhcpIPv6Options,
+			OnModelUpdates: onModelUpdatesAllNonDefault(),
+			DoAfter:        func() { lsp.Dhcpv6Options = &dhcpIPv6Options.UUID },
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+	opModels = append(opModels, operationModel{
+		Model: lsp,
+		OnModelUpdates: []interface{}{
+			&lsp.Dhcpv4Options,
+			&lsp.Dhcpv6Options,
+		},
+		ErrNotFound: true,
+		BulkOp:      false,
+	})
+
+	m := newModelClient(nbClient)
+	return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// CreateOrUpdateDhcpOptions sets the DHCP options on the provided logical switch port and transacts the result
+func CreateOrUpdateDhcpOptions(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort, dhcpIPv4Options, dhcpIPv6Options *nbdb.DHCPOptions) error {
+	ops, err := CreateOrUpdateDhcpOptionsOps(nbClient, nil, lsp, dhcpIPv4Options, dhcpIPv6Options)
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// DeleteDHCPOptions deletes the provided DHCP options
+func DeleteDHCPOptions(nbClient libovsdbclient.Client, dhcpOptions *nbdb.DHCPOptions) error {
+	opModels := []operationModel{}
+	opModel := operationModel{
+		Model:       dhcpOptions,
+		ErrNotFound: false,
+		BulkOp:      true,
+	}
+	opModels = append(opModels, opModel)
+	m := newModelClient(nbClient)
+	return m.Delete(opModels...)
+}
+
+// DeleteDHCPOptionsWithPredicate deletes the DHCP options matching the given predicate
+func DeleteDHCPOptionsWithPredicate(nbClient libovsdbclient.Client, p DHCPOptionsPredicate) error {
+	opModels := []operationModel{}
+	opModel := operationModel{
+		Model:          &nbdb.DHCPOptions{},
+		ModelPredicate: p,
+		ErrNotFound:    false,
+		BulkOp:         true,
+	}
+	opModels = append(opModels, opModel)
+	m := newModelClient(nbClient)
+	return m.Delete(opModels...)
+ +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go new file mode 100644 index 000000000..854c8f2b2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/lbgroup.go @@ -0,0 +1,88 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// CreateOrUpdateLoadBalancerGroupOps returns the ops to create or update the +// provided load balancer group +func CreateOrUpdateLoadBalancerGroupOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, group *nbdb.LoadBalancerGroup) ([]ovsdb.Operation, error) { + // lb group has no fields other than name, safe to update just with non-default values + opModel := operationModel{ + Model: group, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModel) + if err != nil { + return nil, err + } + return ops, nil +} + +// AddLoadBalancersToGroupOps adds the provided load balancers to the provided +// group and returns the corresponding ops +func AddLoadBalancersToGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, group *nbdb.LoadBalancerGroup, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := group.LoadBalancer + group.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + group.LoadBalancer = append(group.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: group, + ModelPredicate: func(item *nbdb.LoadBalancerGroup) bool { return item.Name == group.Name }, + OnModelMutations: []interface{}{&group.LoadBalancer}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.CreateOrUpdateOps(ops, opModel) + group.LoadBalancer = originalLBs + return ops, err +} + +// RemoveLoadBalancersFromGroupOps removes the provided load balancers from the +// provided group and returns the corresponding ops +func RemoveLoadBalancersFromGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, group *nbdb.LoadBalancerGroup, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := group.LoadBalancer + group.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + group.LoadBalancer = append(group.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: group, + ModelPredicate: func(item *nbdb.LoadBalancerGroup) bool { return item.Name == group.Name }, + OnModelMutations: []interface{}{&group.LoadBalancer}, + // if we want to delete loadbalancer from the port group that doesn't exist, that is noop + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(ops, opModel) + group.LoadBalancer = originalLBs + return ops, err +} + +type loadBalancerGroupPredicate func(*nbdb.LoadBalancerGroup) bool + +// FindLoadBalancerGroupsWithPredicate looks up load balancer groups from the +// cache based on a given predicate +func FindLoadBalancerGroupsWithPredicate(nbClient libovsdbclient.Client, p loadBalancerGroupPredicate) ([]*nbdb.LoadBalancerGroup, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() 
+ groups := []*nbdb.LoadBalancerGroup{} + err := nbClient.WhereCache(p).List(ctx, &groups) + return groups, err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go new file mode 100644 index 000000000..097d37b4c --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/loadbalancer.go @@ -0,0 +1,151 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// getNonZeroLoadBalancerMutableFields builds a list of load balancer +// mutable fields with non zero values to be used as the list of fields to +// Update. +// The purpose is to prevent libovsdb interpreting non-nil empty maps/slices +// as default and thus being filtered out of the update. The intention is to +// use non-nil empty maps/slices to clear them out in the update. +// See: https://github.com/ovn-org/libovsdb/issues/226 +func getNonZeroLoadBalancerMutableFields(lb *nbdb.LoadBalancer) []interface{} { + fields := []interface{}{} + if lb.Name != "" { + fields = append(fields, &lb.Name) + } + if lb.ExternalIDs != nil { + fields = append(fields, &lb.ExternalIDs) + } + if lb.HealthCheck != nil { + fields = append(fields, &lb.HealthCheck) + } + if lb.IPPortMappings != nil { + fields = append(fields, &lb.IPPortMappings) + } + if lb.Options != nil { + fields = append(fields, &lb.Options) + } + if lb.Protocol != nil { + fields = append(fields, &lb.Protocol) + } + if lb.SelectionFields != nil { + fields = append(fields, &lb.SelectionFields) + } + if lb.Vips != nil { + fields = append(fields, &lb.Vips) + } + return fields +} + +// BuildLoadBalancer builds a load balancer +func BuildLoadBalancer(name string, protocol nbdb.LoadBalancerProtocol, selectionFields []nbdb.LoadBalancerSelectionFields, vips, options, externalIds map[string]string) *nbdb.LoadBalancer { + return &nbdb.LoadBalancer{ + Name: name, + Protocol: &protocol, + Vips: vips, + SelectionFields: selectionFields, + Options: options, + ExternalIDs: externalIds, + } +} + +// CreateOrUpdateLoadBalancersOps creates or updates the provided load balancers +// returning the corresponding ops +func CreateOrUpdateLoadBalancersOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(lbs)) + for i := range lbs { + // can't use i in the predicate, for loop replaces it in-memory + lb := lbs[i] + opModel := operationModel{ + Model: lb, + OnModelUpdates: getNonZeroLoadBalancerMutableFields(lb), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) 
+}
+
+// RemoveLoadBalancerVipsOps removes the provided VIPs from the provided load
+// balancer and returns the corresponding ops
+func RemoveLoadBalancerVipsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lb *nbdb.LoadBalancer, vips ...string) ([]libovsdb.Operation, error) {
+	originalVips := lb.Vips
+	lb.Vips = make(map[string]string, len(vips))
+	for _, vip := range vips {
+		lb.Vips[vip] = ""
+	}
+	opModel := operationModel{
+		Model:            lb,
+		OnModelMutations: []interface{}{&lb.Vips},
+		ErrNotFound:      true,
+		BulkOp:           false,
+	}
+
+	modelClient := newModelClient(nbClient)
+	ops, err := modelClient.DeleteOps(ops, opModel)
+	lb.Vips = originalVips
+	return ops, err
+}
+
+// DeleteLoadBalancersOps deletes the provided load balancers and returns the
+// corresponding ops
+func DeleteLoadBalancersOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+	opModels := make([]operationModel, 0, len(lbs))
+	for i := range lbs {
+		// can't use i in the predicate, for loop replaces it in-memory
+		lb := lbs[i]
+		opModel := operationModel{
+			Model:       lb,
+			ErrNotFound: false,
+			BulkOp:      false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	modelClient := newModelClient(nbClient)
+	return modelClient.DeleteOps(ops, opModels...)
+}
+
+// DeleteLoadBalancers deletes the provided load balancers
+func DeleteLoadBalancers(nbClient libovsdbclient.Client, lbs []*nbdb.LoadBalancer) error {
+	ops, err := DeleteLoadBalancersOps(nbClient, nil, lbs...)
+	if err != nil {
+		return err
+	}
+
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// ListLoadBalancers looks up all load balancers from the cache
+func ListLoadBalancers(nbClient libovsdbclient.Client) ([]*nbdb.LoadBalancer, error) {
+	lbs := []*nbdb.LoadBalancer{}
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	err := nbClient.List(ctx, &lbs)
+	return lbs, err
+}
+
+type loadBalancerPredicate func(*nbdb.LoadBalancer) bool
+
+// FindLoadBalancersWithPredicate looks up load balancers from the cache
+// based on a given predicate
+func FindLoadBalancersWithPredicate(nbClient libovsdbclient.Client, p loadBalancerPredicate) ([]*nbdb.LoadBalancer, error) {
+	found := []*nbdb.LoadBalancer{}
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	err := nbClient.WhereCache(p).List(ctx, &found)
+	return found, err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go
new file mode 100644
index 000000000..1f7a76ba8
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/mac_binding.go
@@ -0,0 +1,61 @@
+package ops
+
+import (
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// CreateOrUpdateStaticMacBinding creates or updates the provided static MAC bindings
+func CreateOrUpdateStaticMacBinding(nbClient libovsdbclient.Client, smbs ...*nbdb.StaticMACBinding) error {
+	opModels := make([]operationModel, len(smbs))
+	for i := range smbs {
+		opModel := operationModel{
+			Model:          smbs[i],
+			OnModelUpdates: onModelUpdatesAllNonDefault(),
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels[i] = opModel
+	}
+
+	m := newModelClient(nbClient)
+	_, err := m.CreateOrUpdate(opModels...)
+ return err +} + +// DeleteStaticMacBindings deletes the provided static mac bindings +func DeleteStaticMacBindings(nbClient libovsdbclient.Client, smbs ...*nbdb.StaticMACBinding) error { + opModels := make([]operationModel, len(smbs)) + for i := range smbs { + opModel := operationModel{ + Model: smbs[i], + ErrNotFound: false, + BulkOp: false, + } + opModels[i] = opModel + } + + m := newModelClient(nbClient) + return m.Delete(opModels...) +} + +type staticMACBindingPredicate func(*nbdb.StaticMACBinding) bool + +// DeleteStaticMACBindingWithPredicate deletes a Static MAC entry for a logical port from the cache +func DeleteStaticMACBindingWithPredicate(nbClient libovsdbclient.Client, p staticMACBindingPredicate) error { + found := []*nbdb.StaticMACBinding{} + opModel := operationModel{ + ModelPredicate: p, + ExistingResult: &found, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Delete(opModel) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go new file mode 100644 index 000000000..8f1814968 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/meter.go @@ -0,0 +1,66 @@ +package ops + +import ( + "reflect" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +func equalsMeterBand(a, b *nbdb.MeterBand) bool { + return a.Action == b.Action && + a.BurstSize == b.BurstSize && + a.Rate == b.Rate && + reflect.DeepEqual(a.ExternalIDs, b.ExternalIDs) +} + +// CreateMeterBandOps creates the provided meter band if it does not exist +func CreateMeterBandOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, meterBand *nbdb.MeterBand) ([]ovsdb.Operation, error) { + bands := []*nbdb.MeterBand{} + opModel := operationModel{ + Model: meterBand, + ModelPredicate: func(item *nbdb.MeterBand) bool { return equalsMeterBand(item, meterBand) }, + OnModelUpdates: onModelUpdatesNone(), + ExistingResult: &bands, + DoAfter: func() { + // in case we have multiple equal bands, pick the first one for + // convergence, OVSDB will remove unreferenced ones + if len(bands) > 0 { + uuids := sets.NewString() + for _, band := range bands { + uuids.Insert(band.UUID) + } + meterBand.UUID = uuids.List()[0] + } + }, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// CreateOrUpdateMeterOps creates or updates the provided meter associated to +// the provided meter bands and returns the corresponding ops +func CreateOrUpdateMeterOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, meter *nbdb.Meter, meterBands []*nbdb.MeterBand, fields ...interface{}) ([]ovsdb.Operation, error) { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + meter.Bands = make([]string, 0, len(meterBands)) + for _, band := range meterBands { + meter.Bands = append(meter.Bands, band.UUID) + } + opModel := operationModel{ + Model: meter, + OnModelUpdates: fields, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go new 
file mode 100644 index 000000000..b40dd4104 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model.go @@ -0,0 +1,516 @@ +package ops + +import ( + "fmt" + "reflect" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +func getUUID(model model.Model) string { + switch t := model.(type) { + case *nbdb.ACL: + return t.UUID + case *nbdb.AddressSet: + return t.UUID + case *nbdb.BFD: + return t.UUID + case *nbdb.Copp: + return t.UUID + case *nbdb.GatewayChassis: + return t.UUID + case *nbdb.LoadBalancer: + return t.UUID + case *nbdb.LoadBalancerGroup: + return t.UUID + case *nbdb.LogicalRouter: + return t.UUID + case *nbdb.LogicalRouterPolicy: + return t.UUID + case *nbdb.LogicalRouterPort: + return t.UUID + case *nbdb.LogicalRouterStaticRoute: + return t.UUID + case *nbdb.LogicalSwitch: + return t.UUID + case *nbdb.LogicalSwitchPort: + return t.UUID + case *nbdb.NAT: + return t.UUID + case *nbdb.PortGroup: + return t.UUID + case *nbdb.NBGlobal: + return t.UUID + case *nbdb.MeterBand: + return t.UUID + case *nbdb.Meter: + return t.UUID + case *nbdb.Sample: + return t.UUID + case *nbdb.SampleCollector: + return t.UUID + case *nbdb.SamplingApp: + return t.UUID + case *nbdb.StaticMACBinding: + return t.UUID + case *sbdb.Chassis: + return t.UUID + case *sbdb.ChassisPrivate: + return t.UUID + case *sbdb.IGMPGroup: + return t.UUID + case *sbdb.Encap: + return t.UUID + case *sbdb.PortBinding: + return t.UUID + case *sbdb.SBGlobal: + return t.UUID + case *nbdb.QoS: + return t.UUID + case *nbdb.ChassisTemplateVar: + return t.UUID + case *nbdb.DHCPOptions: + return t.UUID + default: + panic(fmt.Sprintf("getUUID: unknown model %T", t)) + } +} + +func setUUID(model model.Model, uuid string) { + switch t := model.(type) { + case *nbdb.ACL: + t.UUID = uuid + case *nbdb.AddressSet: + t.UUID = uuid + case *nbdb.BFD: + t.UUID = uuid + case *nbdb.Copp: + t.UUID = uuid + case *nbdb.GatewayChassis: + t.UUID = uuid + case *nbdb.LoadBalancer: + t.UUID = uuid + case *nbdb.LoadBalancerGroup: + t.UUID = uuid + case *nbdb.LogicalRouter: + t.UUID = uuid + case *nbdb.LogicalRouterPolicy: + t.UUID = uuid + case *nbdb.LogicalRouterPort: + t.UUID = uuid + case *nbdb.LogicalRouterStaticRoute: + t.UUID = uuid + case *nbdb.LogicalSwitch: + t.UUID = uuid + case *nbdb.LogicalSwitchPort: + t.UUID = uuid + case *nbdb.NAT: + t.UUID = uuid + case *nbdb.PortGroup: + t.UUID = uuid + case *nbdb.NBGlobal: + t.UUID = uuid + case *nbdb.MeterBand: + t.UUID = uuid + case *nbdb.Meter: + t.UUID = uuid + case *nbdb.Sample: + t.UUID = uuid + case *nbdb.SampleCollector: + t.UUID = uuid + case *nbdb.SamplingApp: + t.UUID = uuid + case *nbdb.StaticMACBinding: + t.UUID = uuid + case *sbdb.Chassis: + t.UUID = uuid + case *sbdb.ChassisPrivate: + t.UUID = uuid + case *sbdb.IGMPGroup: + t.UUID = uuid + case *sbdb.Encap: + t.UUID = uuid + case *sbdb.PortBinding: + t.UUID = uuid + case *sbdb.SBGlobal: + t.UUID = uuid + case *nbdb.QoS: + t.UUID = uuid + case *nbdb.ChassisTemplateVar: + t.UUID = uuid + case *nbdb.DHCPOptions: + t.UUID = uuid + default: + panic(fmt.Sprintf("setUUID: unknown model %T", t)) + } +} + +func copyIndexes(model model.Model) model.Model { + switch t := model.(type) { + case *nbdb.ACL: + return &nbdb.ACL{ + UUID: t.UUID, + ExternalIDs: 
map[string]string{ + types.PrimaryIDKey: t.ExternalIDs[types.PrimaryIDKey], + }, + } + case *nbdb.AddressSet: + return &nbdb.AddressSet{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.BFD: + return &nbdb.BFD{ + UUID: t.UUID, + LogicalPort: t.LogicalPort, + DstIP: t.DstIP, + } + case *nbdb.Copp: + return &nbdb.Copp{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.GatewayChassis: + return &nbdb.GatewayChassis{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LoadBalancer: + return &nbdb.LoadBalancer{ + UUID: t.UUID, + // client index + Name: t.Name, + } + case *nbdb.LoadBalancerGroup: + return &nbdb.LoadBalancerGroup{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalRouter: + return &nbdb.LogicalRouter{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalRouterPolicy: + return &nbdb.LogicalRouterPolicy{ + UUID: t.UUID, + } + case *nbdb.LogicalRouterPort: + return &nbdb.LogicalRouterPort{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalRouterStaticRoute: + return &nbdb.LogicalRouterStaticRoute{ + UUID: t.UUID, + } + case *nbdb.LogicalSwitch: + return &nbdb.LogicalSwitch{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.LogicalSwitchPort: + return &nbdb.LogicalSwitchPort{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.NAT: + return &nbdb.NAT{ + UUID: t.UUID, + } + case *nbdb.PortGroup: + return &nbdb.PortGroup{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.NBGlobal: + return &nbdb.NBGlobal{ + UUID: t.UUID, + } + case *nbdb.MeterBand: + return &nbdb.MeterBand{ + UUID: t.UUID, + } + case *nbdb.Meter: + return &nbdb.Meter{ + UUID: t.UUID, + Name: t.Name, + } + case *nbdb.Sample: + return &nbdb.Sample{ + UUID: t.UUID, + Metadata: t.Metadata, + } + case *nbdb.SampleCollector: + return &nbdb.SampleCollector{ + UUID: t.UUID, + ID: t.ID, + } + case *nbdb.SamplingApp: + return &nbdb.SamplingApp{ + UUID: t.UUID, + Type: t.Type, + } + case *nbdb.StaticMACBinding: + return &nbdb.StaticMACBinding{ + UUID: t.UUID, + LogicalPort: t.LogicalPort, + IP: t.IP, + } + case *sbdb.Chassis: + return &sbdb.Chassis{ + UUID: t.UUID, + Name: t.Name, + } + case *sbdb.ChassisPrivate: + return &sbdb.ChassisPrivate{ + UUID: t.UUID, + Name: t.Name, + } + case *sbdb.IGMPGroup: + return &sbdb.IGMPGroup{ + UUID: t.UUID, + } + case *sbdb.Encap: + return &sbdb.Encap{ + UUID: t.UUID, + Type: t.Type, + IP: t.IP, + ChassisName: t.ChassisName, + } + case *sbdb.PortBinding: + return &sbdb.PortBinding{ + UUID: t.UUID, + LogicalPort: t.LogicalPort, + Datapath: t.Datapath, + TunnelKey: t.TunnelKey, + } + case *sbdb.SBGlobal: + return &sbdb.SBGlobal{ + UUID: t.UUID, + } + case *nbdb.QoS: + return &nbdb.QoS{ + UUID: t.UUID, + ExternalIDs: map[string]string{ + types.PrimaryIDKey: t.ExternalIDs[types.PrimaryIDKey], + }, + } + case *nbdb.ChassisTemplateVar: + return &nbdb.ChassisTemplateVar{ + UUID: t.UUID, + Chassis: t.Chassis, + } + case *nbdb.DHCPOptions: + return &nbdb.DHCPOptions{ + UUID: t.UUID, + ExternalIDs: copyExternalIDs(t.ExternalIDs, types.PrimaryIDKey), + } + default: + panic(fmt.Sprintf("copyIndexes: unknown model %T", t)) + } +} + +func getListFromModel(model model.Model) interface{} { + switch t := model.(type) { + case *nbdb.ACL: + return &[]*nbdb.ACL{} + case *nbdb.AddressSet: + return &[]*nbdb.AddressSet{} + case *nbdb.BFD: + return &[]*nbdb.BFD{} + case *nbdb.Copp: + return &[]*nbdb.Copp{} + case *nbdb.GatewayChassis: + return &[]*nbdb.GatewayChassis{} + case *nbdb.LoadBalancer: + return &[]*nbdb.LoadBalancer{} + case *nbdb.LoadBalancerGroup: + return &[]*nbdb.LoadBalancerGroup{} + case 
*nbdb.LogicalRouter: + return &[]*nbdb.LogicalRouter{} + case *nbdb.LogicalRouterPolicy: + return &[]*nbdb.LogicalRouterPolicy{} + case *nbdb.LogicalRouterPort: + return &[]*nbdb.LogicalRouterPort{} + case *nbdb.LogicalRouterStaticRoute: + return &[]*nbdb.LogicalRouterStaticRoute{} + case *nbdb.LogicalSwitch: + return &[]*nbdb.LogicalSwitch{} + case *nbdb.LogicalSwitchPort: + return &[]*nbdb.LogicalSwitchPort{} + case *nbdb.NAT: + return &[]*nbdb.NAT{} + case *nbdb.PortGroup: + return &[]*nbdb.PortGroup{} + case *nbdb.NBGlobal: + return &[]*nbdb.NBGlobal{} + case *nbdb.MeterBand: + return &[]*nbdb.MeterBand{} + case *nbdb.Meter: + return &[]*nbdb.Meter{} + case *nbdb.Sample: + return &[]*nbdb.Sample{} + case *nbdb.SampleCollector: + return &[]*nbdb.SampleCollector{} + case *nbdb.SamplingApp: + return &[]*nbdb.SamplingApp{} + case *nbdb.StaticMACBinding: + return &[]*nbdb.StaticMACBinding{} + case *sbdb.Chassis: + return &[]*sbdb.Chassis{} + case *sbdb.ChassisPrivate: + return &[]*sbdb.ChassisPrivate{} + case *sbdb.IGMPGroup: + return &[]*sbdb.IGMPGroup{} + case *sbdb.Encap: + return &[]*sbdb.Encap{} + case *sbdb.PortBinding: + return &[]*sbdb.PortBinding{} + case *nbdb.QoS: + return &[]nbdb.QoS{} + case *nbdb.ChassisTemplateVar: + return &[]*nbdb.ChassisTemplateVar{} + case *nbdb.DHCPOptions: + return &[]nbdb.DHCPOptions{} + default: + panic(fmt.Sprintf("getListFromModel: unknown model %T", t)) + } +} + +// onModels applies the provided function to a collection of +// models presented in different ways: +// - a single model (pointer to a struct) +// - a slice of models or pointer to slice of models +// - a slice of structs or pointer to a slice of structs +// If the provided function returns an error, iteration stops and +// that error is returned. +func onModels(models interface{}, do func(interface{}) error) error { + v := reflect.ValueOf(models) + if !v.IsValid() { + return nil + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return nil + } + v = v.Elem() + } + switch v.Kind() { + case reflect.Slice: + switch v.Type().Elem().Kind() { + case reflect.Struct: + for i := 0; i < v.Len(); i++ { + model := v.Index(i).Addr().Interface() + err := do(model) + if err != nil { + return err + } + } + case reflect.Interface: + fallthrough + case reflect.Ptr: + for i := 0; i < v.Len(); i++ { + model := v.Index(i).Interface() + err := do(model) + if err != nil { + return err + } + } + default: + panic(fmt.Sprintf("Expected slice of pointers or structs but got %s", v.Type().Elem().Kind())) + } + case reflect.Struct: + err := do(models) + if err != nil { + return err + } + default: + panic(fmt.Sprintf("Expected slice or struct but got %s", v.Kind())) + } + return nil +} + +// buildFailOnDuplicateOps builds a wait operation on a condition that will fail +// if a duplicate of the provided model is found. We use this +// to avoid duplicates on certain unknown scenarios that are still to be tracked +// down. See: https://bugzilla.redhat.com/show_bug.cgi?id=2042001. +// When no specific operation is required for the provided model, returns an empty +// array for convenience. +func buildFailOnDuplicateOps(c client.Client, m model.Model) ([]ovsdb.Operation, error) { + // Right now we mostly consider models with a "Name" field that is not an + // index for which we don't expect duplicate names. + // A duplicate Name field that is an index will fail without the + // need of this wait operation.
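+ // For example (illustrative): for a LogicalRouter named "rtr" this builds a + // wait op on the Logical_Router table conditioned on name == "rtr", which + // makes the transaction fail if a row with that name already exists.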
+ // Some models that require a complex condition to detect duplicates are not + // considered for the time being due to the performance hit (e.g. ACLs). + timeout := types.OVSDBWaitTimeout + var field interface{} + var value string + switch t := m.(type) { + case *nbdb.LogicalRouter: + field = &t.Name + value = t.Name + case *nbdb.LogicalSwitch: + field = &t.Name + value = t.Name + case *nbdb.LogicalRouterPolicy: + condPriority := model.Condition{ + Field: &t.Priority, + Function: ovsdb.ConditionEqual, + Value: t.Priority, + } + condMatch := model.Condition{ + Field: &t.Match, + Function: ovsdb.ConditionEqual, + Value: t.Match, + } + return c.WhereAll(t, condPriority, condMatch).Wait( + ovsdb.WaitConditionNotEqual, + &timeout, + t, + &t.Priority, + &t.Match, + ) + default: + return []ovsdb.Operation{}, nil + } + + cond := model.Condition{ + Field: field, + Function: ovsdb.ConditionEqual, + Value: value, + } + return c.WhereAny(m, cond).Wait(ovsdb.WaitConditionNotEqual, &timeout, m, field) +} + +// getAllUpdatableFields returns a list of all of the columns/fields that can be updated for a model +func getAllUpdatableFields(model model.Model) []interface{} { + switch t := model.(type) { + case *nbdb.LogicalSwitchPort: + return []interface{}{&t.Addresses, &t.Type, &t.TagRequest, &t.Options, &t.PortSecurity} + case *nbdb.PortGroup: + return []interface{}{&t.ACLs, &t.Ports, &t.ExternalIDs} + default: + panic(fmt.Sprintf("getAllUpdatableFields: unknown model %T", t)) + } +} + +func copyExternalIDs(externalIDs map[string]string, keys ...string) map[string]string { + var externalIDsCopy map[string]string + for _, key := range keys { + value, ok := externalIDs[key] + if ok { + if externalIDsCopy == nil { + externalIDsCopy = map[string]string{} + } + externalIDsCopy[key] = value + } + } + return externalIDsCopy +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go new file mode 100644 index 000000000..04f54f0e8 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/model_client.go @@ -0,0 +1,511 @@ +package ops + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/klog/v2" +) + +var errMultipleResults = errors.New("unexpectedly found multiple results for provided predicate") +var errNoIndexes = errors.New("no indexes found for given model") + +type modelClient struct { + client client.Client +} + +func newModelClient(client client.Client) modelClient { + return modelClient{ + client: client, + } +} + +/* +extractUUIDsFromModels is a helper function which extracts the UUIDs +of the provided models, returning nil when none are found. +*/ +func extractUUIDsFromModels(models interface{}) []string { + ids := []string{} + _ = onModels(models, func(model interface{}) error { + uuid := getUUID(model) + if uuid != "" { + ids = append(ids, uuid) + } + return nil + }) + if len(ids) == 0 { + return nil + } + return ids +} + +// buildMutationsFromFields builds mutations that use the provided fields as values.
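+// For example (illustrative): with mutator ovsdb.MutateOperationInsert and a +// *map[string]string field holding {"k": "v"}, this yields a delete mutation for +// key "k" followed by an insert mutation of {"k": "v"}, effectively replacing the key.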
+func buildMutationsFromFields(fields []interface{}, mutator ovsdb.Mutator) ([]model.Mutation, error) { + mutations := []model.Mutation{} + for _, field := range fields { + switch v := field.(type) { + case *map[string]string: + if v == nil || len(*v) == 0 { + continue + } + if mutator == ovsdb.MutateOperationDelete { + // turn empty map values into a mutation to remove the key for + // delete mutations + removeKeys := make([]string, 0, len(*v)) + updateKeys := make(map[string]string, len(*v)) + for key, value := range *v { + if value == "" { + removeKeys = append(removeKeys, key) + } else { + updateKeys[key] = value + } + } + if len(removeKeys) > 0 { + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: removeKeys, + } + mutations = append(mutations, mutation) + } + if len(updateKeys) > 0 { + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: updateKeys, + } + mutations = append(mutations, mutation) + } + continue + } + // Per RFC 7047, section 5.1, an insert mutation on a map does not + // replace existing keys, so a delete mutation is generated first for + // every updated key. + removeKeys := make([]string, 0, len(*v)) + for key := range *v { + removeKeys = append(removeKeys, key) + } + if len(removeKeys) > 0 { + mutation := model.Mutation{ + Field: field, + Mutator: ovsdb.MutateOperationDelete, + Value: removeKeys, + } + mutations = append(mutations, mutation) + } + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: *v, + } + mutations = append(mutations, mutation) + case *[]string: + if v == nil || len(*v) == 0 { + continue + } + if mutator == ovsdb.MutateOperationInsert { + // Most string sets are UUIDs. The real server does not allow + // this to be empty but the test server does for now. On other + // types of sets most probably there is no need to have empty + // items. So catch this early. + for _, value := range *v { + if value == "" { + return nil, fmt.Errorf("unsupported mutation of set with empty values: %v", *v) + } + } + } + mutation := model.Mutation{ + Field: field, + Mutator: mutator, + Value: *v, + } + mutations = append(mutations, mutation) + default: + return nil, fmt.Errorf("mutation for type %T not implemented", v) + } + } + + return mutations, nil +} + +/* +operationModel is a struct which uses reflection to determine and perform +idempotent operations against OVS DB (NB DB by default). +*/ +type operationModel struct { + // Model specifies the model to be created, or to look up in the cache. + // The values in the fields of the Model are used for mutations and updates + // as well. If this Model is looked up or created, it will have its UUID set + // after the operation. + Model interface{} + // ModelPredicate specifies a predicate to look up models in the cache. + // If Model is provided with non-zero index column values, ModelPredicate + // will be ignored. + ModelPredicate interface{} + // ExistingResult is where the results of the lookup are stored. + // Required when Model is not specified. + ExistingResult interface{} + // OnModelMutations specifies the fields from Model that will be used as + // the mutation value. + OnModelMutations []interface{} + // OnModelUpdates specifies the fields from Model that will be used as + // the update value. + // Note: while it is okay to have update and mutate operations on the same row, it + // is an undefined behavior if the same column is used in both update and mutate. + OnModelUpdates []interface{} + // ErrNotFound flags this operation to fail with ErrNotFound if a model is + // not found.
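+ // When ErrNotFound is false, a failed lookup is simply ignored for Delete and + // Lookup, while CreateOrUpdate falls through to a create when Model is set.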
+ ErrNotFound bool + // BulkOp flags this operation as a bulk operation capable of updating or + // mutating more than 1 model. + BulkOp bool + // DoAfter is invoked at the end of the operation and allows setting up a + // subsequent operation with values obtained from this one. + // If model lookup was successful, or a new db entry was created, + // Model will have UUID set, and it can be used in DoAfter. This only works + // if BulkOp is false and Model != nil. + DoAfter func() +} + +func onModelUpdatesNone() []interface{} { + return nil +} + +func onModelUpdatesAllNonDefault() []interface{} { + return []interface{}{} +} + +/* +CreateOrUpdate performs idempotent operations against libovsdb according to the +following logic: + +a) performs a lookup of the models in the cache by ModelPredicate if provided, +or by Model otherwise. If the models do not exist and ErrNotFound is set, +it returns ErrNotFound + +b) if OnModelUpdates is specified, it performs a direct update of the model if +it exists. + +c) if OnModelMutations is specified, it performs a direct mutation (insert) of +the Model if it exists. + +d) if b) and c) are not true, but Model is provided, it creates the Model +if it does not exist. + +e) if none of the above are true, ErrNotFound is returned. + +If BulkOp is set, update or mutate can happen across multiple models found. +*/ +func (m *modelClient) CreateOrUpdate(opModels ...operationModel) ([]ovsdb.OperationResult, error) { + created, ops, err := m.createOrUpdateOps(nil, opModels...) + if err != nil { + return nil, err + } + return TransactAndCheckAndSetUUIDs(m.client, created, ops) +} + +func (m *modelClient) CreateOrUpdateOps(ops []ovsdb.Operation, opModels ...operationModel) ([]ovsdb.Operation, error) { + _, ops, err := m.createOrUpdateOps(ops, opModels...) + return ops, err +} + +func (m *modelClient) createOrUpdateOps(ops []ovsdb.Operation, opModels ...operationModel) (interface{}, []ovsdb.Operation, error) { + hasGuardOp := len(ops) > 0 && isGuardOp(&ops[0]) + guardOp := []ovsdb.Operation{} + doWhenFound := func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + // nil represents onModelUpdatesNone + if opModel.OnModelUpdates != nil { + o, err = m.update(model, opModel) + } + // Note: while it is okay to have update and mutate operations on the same row, it is + // an undefined behavior if the same exact column is used in both update and mutate. + if err == nil && opModel.OnModelMutations != nil { + var o2 []ovsdb.Operation + o2, err = m.mutate(model, opModel, ovsdb.MutateOperationInsert) + o = append(o, o2...) + } + return + } + doWhenNotFound := func(model interface{}, opModel *operationModel) ([]ovsdb.Operation, error) { + if !hasGuardOp { + // for the first insert of certain models, build a wait operation + // that checks for duplicates as a guard op to protect against + // duplicate transactions + var err error + guardOp, err = buildFailOnDuplicateOps(m.client, opModel.Model) + if err != nil { + return nil, err + } + hasGuardOp = len(guardOp) > 0 + } + return m.create(opModel) + } + created, ops, err := m.buildOps(ops, doWhenFound, doWhenNotFound, opModels...) + if len(guardOp) > 0 { + // set the guard op as the first of the list + ops = append(guardOp, ops...) + } + return created, ops, err +} + +/* +Delete performs idempotent delete operations against libovsdb according to the +following logic: + +a) performs a lookup of the models in the cache by ModelPredicate if provided, +or by Model otherwise.
If the models do not exist and ErrNotFound is set, +it returns ErrNotFound. + +b) if OnModelMutations is specified, it performs a direct mutation (delete) of the +Model if it exists. + +c) if b) is not true, it performs a direct delete of the Model if it exists. + +If BulkOp is set, delete or mutate can happen across multiple models found. +*/ +func (m *modelClient) Delete(opModels ...operationModel) error { + ops, err := m.DeleteOps(nil, opModels...) + if err != nil { + return err + } + _, err = TransactAndCheck(m.client, ops) + return err +} + +func (m *modelClient) DeleteOps(ops []ovsdb.Operation, opModels ...operationModel) ([]ovsdb.Operation, error) { + doWhenFound := func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + if opModel.OnModelMutations != nil { + return m.mutate(model, opModel, ovsdb.MutateOperationDelete) + } else { + return m.delete(model, opModel) + } + } + _, ops, err := m.buildOps(ops, doWhenFound, nil, opModels...) + return ops, err +} + +type opModelToOpMapper func(model interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) + +func (m *modelClient) buildOps(ops []ovsdb.Operation, doWhenFound opModelToOpMapper, doWhenNotFound opModelToOpMapper, opModels ...operationModel) (interface{}, []ovsdb.Operation, error) { + if ops == nil { + ops = []ovsdb.Operation{} + } + notfound := []interface{}{} + for _, opModel := range opModels { + // do lookup + err := m.lookup(&opModel) + if err != nil && !errors.Is(err, client.ErrNotFound) { + return nil, nil, fmt.Errorf("unable to lookup model %+v: %w", opModel, err) + } + + // do updates + var hadExistingResults bool + err = onModels(opModel.ExistingResult, func(model interface{}) error { + if hadExistingResults && !opModel.BulkOp { + return errMultipleResults + } + hadExistingResults = true + + if doWhenFound != nil { + o, err := doWhenFound(model, &opModel) + if err != nil { + return err + } + ops = append(ops, o...) + } + return nil + }) + if err != nil { + return nil, nil, err + } + + // otherwise act when not found + if !hadExistingResults { + // return ErrNotFound, + // - if the caller explicitly requested it or + // - failed to provide a Model for us to apply the operation on + if opModel.ErrNotFound || (doWhenNotFound != nil && opModel.Model == nil) { + return nil, nil, client.ErrNotFound + } + if doWhenNotFound != nil && opModel.Model != nil { + o, err := doWhenNotFound(nil, &opModel) + if err != nil { + return nil, nil, err + } + ops = append(ops, o...) + notfound = append(notfound, opModel.Model) + } + } + + if opModel.DoAfter != nil { + opModel.DoAfter() + } + } + + return notfound, ops, nil +} + +/* +create does a bit more than just "create". create needs to set the generated +UUID (because if this function is called we know the item does not exist yet) +then create the item.
The guard wait operation that avoids +https://bugzilla.redhat.com/show_bug.cgi?id=2042001 is generated by the caller, see createOrUpdateOps. +*/ +func (m *modelClient) create(opModel *operationModel) ([]ovsdb.Operation, error) { + uuid := getUUID(opModel.Model) + if uuid == "" { + setUUID(opModel.Model, buildNamedUUID()) + } + + ops, err := m.client.Create(opModel.Model) + if err != nil { + return nil, fmt.Errorf("unable to create model, err: %w", err) + } + + klog.V(5).Infof("Create operations generated as: %+v", ops) + return ops, nil +} + +func (m *modelClient) update(lookUpModel interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + o, err = m.client.Where(lookUpModel).Update(opModel.Model, opModel.OnModelUpdates...) + if err != nil { + return nil, fmt.Errorf("unable to update model, err: %w", err) + } + klog.V(5).Infof("Update operations generated as: %+v", o) + return o, nil +} + +func (m *modelClient) mutate(lookUpModel interface{}, opModel *operationModel, mutator ovsdb.Mutator) (o []ovsdb.Operation, err error) { + if opModel.OnModelMutations == nil { + return nil, nil + } + modelMutations, err := buildMutationsFromFields(opModel.OnModelMutations, mutator) + if len(modelMutations) == 0 || err != nil { + return nil, err + } + o, err = m.client.Where(lookUpModel).Mutate(opModel.Model, modelMutations...) + if err != nil { + return nil, fmt.Errorf("unable to mutate model, err: %w", err) + } + klog.V(5).Infof("Mutate operations generated as: %+v", o) + return o, nil +} + +func (m *modelClient) delete(lookUpModel interface{}, opModel *operationModel) (o []ovsdb.Operation, err error) { + o, err = m.client.Where(lookUpModel).Delete() + if err != nil { + return nil, fmt.Errorf("unable to delete model, err: %w", err) + } + klog.V(5).Infof("Delete operations generated as: %+v", o) + return o, nil +} + +func (m *modelClient) Lookup(opModels ...operationModel) error { + _, _, err := m.buildOps(nil, nil, nil, opModels...) + return err +} + +// CreateOrUpdate, Delete and Lookup can be called to +// 1. create or update a single model +// Model should be set, BulkOp = false, ErrNotFound = false +// 2. update/delete/lookup 0..n models (create can't be done for multiple models at the same time) +// Model index or predicate should be set +// +// The allowed combination of operationModel fields is different for these cases.
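+// A hypothetical case-1 usage (the switch name is illustrative): +// +// opModel := operationModel{ +// Model: &nbdb.LogicalSwitch{Name: "sw0"}, +// OnModelUpdates: onModelUpdatesAllNonDefault(), +// } +// _, err := newModelClient(nbClient).CreateOrUpdate(opModel) +//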
+// Both Model db index, and ModelPredicate can only be empty for the first case +func lookupRequired(opModel *operationModel) bool { + // we know create is not supposed to be performed, if these fields are set + if opModel.BulkOp || opModel.ErrNotFound { + return true + } + return false +} + +// lookup the model in the cache prioritizing provided indexes over a +// predicate +// If lookup was successful, opModel.Model will have UUID set, +// so that further user operations with the same model are indexed by UUID +func (m *modelClient) lookup(opModel *operationModel) error { + if opModel.ExistingResult == nil && opModel.Model != nil { + opModel.ExistingResult = getListFromModel(opModel.Model) + } + + var err error + if opModel.Model != nil { + err = m.where(opModel) + if err != errNoIndexes { + // if index wasn't provided by the Model, try predicate search + // otherwise return where result + return err + } + } + // if index wasn't provided by the Model (errNoIndexes) or Model == nil, try predicate search + if opModel.ModelPredicate != nil { + return m.whereCache(opModel) + } + // the only operation that can be performed without a lookup (it can have no db indexes and no ModelPredicate set) + // is Create. + if lookupRequired(opModel) { + return fmt.Errorf("missing model indexes or predicate when a lookup was required") + } + return nil +} + +func (m *modelClient) where(opModel *operationModel) error { + copyModel := copyIndexes(opModel.Model) + if reflect.ValueOf(copyModel).Elem().IsZero() { + // no indexes available + return errNoIndexes + } + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + var err error + if err = m.client.Where(copyModel).List(ctx, opModel.ExistingResult); err != nil { + return err + } + if opModel.Model == nil || opModel.BulkOp { + return nil + } + // for non-bulk op cases, copy (the one) uuid found to model provided. + // so that further user operations with the same model are indexed by UUID + err = onModels(opModel.ExistingResult, func(model interface{}) error { + uuid := getUUID(model) + setUUID(opModel.Model, uuid) + return nil + }) + return err +} + +func (m *modelClient) whereCache(opModel *operationModel) error { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + var err error + if err = m.client.WhereCache(opModel.ModelPredicate).List(ctx, opModel.ExistingResult); err != nil { + return err + } + + if opModel.Model == nil || opModel.BulkOp { + return nil + } + + // for non-bulk op cases, copy (the one) uuid found to model provided. 
+ // so that further user operations with the same model are indexed by UUID + err = onModels(opModel.ExistingResult, func(model interface{}) error { + uuid := getUUID(model) + setUUID(opModel.Model, uuid) + return nil + }) + return err +} + +func isGuardOp(op *ovsdb.Operation) bool { + return op != nil && op.Op == ovsdb.OperationWait && op.Timeout != nil && *op.Timeout == types.OVSDBWaitTimeout +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go new file mode 100644 index 000000000..1860668a7 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/named_uuid.go @@ -0,0 +1,28 @@ +package ops + +import ( + "fmt" + "sync/atomic" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand" +) + +const ( + namedUUIDPrefix = 'u' +) + +var ( + namedUUIDCounter = cryptorand.Uint32() +) + +// isNamedUUID checks if the passed id is a named-uuid built with +// buildNamedUUID +func isNamedUUID(id string) bool { + return id != "" && id[0] == namedUUIDPrefix +} + +// buildNamedUUID builds an id that can be used as a named-uuid +// as per OVSDB RFC 7047 section 5.1 +func buildNamedUUID() string { + return fmt.Sprintf("%c%010d", namedUUIDPrefix, atomic.AddUint32(&namedUUIDCounter, 1)) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go new file mode 100644 index 000000000..2bb46d3a8 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/nb_global.go @@ -0,0 +1,64 @@ +package ops + +import ( + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// GetNBGlobal looks up the NB Global entry from the cache +func GetNBGlobal(nbClient libovsdbclient.Client, nbGlobal *nbdb.NBGlobal) (*nbdb.NBGlobal, error) { + found := []*nbdb.NBGlobal{} + opModel := operationModel{ + Model: nbGlobal, + ModelPredicate: func(item *nbdb.NBGlobal) bool { return true }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// UpdateNBGlobalSetOptions sets options on the NB Global entry, adding any +// missing ones, removing the ones set to an empty value, and updating existing ones +func UpdateNBGlobalSetOptions(nbClient libovsdbclient.Client, nbGlobal *nbdb.NBGlobal) error { + // find the nbGlobal table's UUID, we don't have any other way to reliably look up this table entry since it can + // only be indexed by UUID + updatedNbGlobal, err := GetNBGlobal(nbClient, nbGlobal) + if err != nil { + return err + } + + if updatedNbGlobal.Options == nil { + updatedNbGlobal.Options = map[string]string{} + } + + for k, v := range nbGlobal.Options { + if v == "" { + delete(updatedNbGlobal.Options, k) + } else { + updatedNbGlobal.Options[k] = v + } + } + + // Update the options column in the nbGlobal entry since we already performed a lookup + opModel := operationModel{ + Model: updatedNbGlobal, + OnModelUpdates: []interface{}{ + &updatedNbGlobal.Options, + }, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go
b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go new file mode 100644 index 000000000..861a63cb9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portbinding.go @@ -0,0 +1,53 @@ +package ops + +import ( + "fmt" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" +) + +// UpdatePortBindingSetChassis sets the chassis column of the 'portBinding' row so that OVN considers +// the port binding 'portBinding' bound on the chassis. Ideally it's ovn-controller that claims/binds +// a port binding. But for a remote chassis, we have to bind it as we created the remote chassis +// record for the remote zone nodes. +// TODO (numans) remove this function once OVN supports binding a port binding for a remote +// chassis. +func UpdatePortBindingSetChassis(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding, chassis *sbdb.Chassis) error { + ch, err := GetChassis(sbClient, chassis) + if err != nil { + return fmt.Errorf("failed to get chassis id %s(%s), error: %v", chassis.Name, chassis.Hostname, err) + } + portBinding.Chassis = &ch.UUID + + opModel := operationModel{ + Model: portBinding, + OnModelUpdates: []interface{}{&portBinding.Chassis}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + _, err = m.CreateOrUpdate(opModel) + return err +} + +// GetPortBinding looks up a portBinding in SBDB +func GetPortBinding(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding) (*sbdb.PortBinding, error) { + found := []*sbdb.PortBinding{} + opModel := operationModel{ + Model: portBinding, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(sbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go new file mode 100644 index 000000000..c8045c09e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/portgroup.go @@ -0,0 +1,329 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type portGroupPredicate func(group *nbdb.PortGroup) bool + +// FindPortGroupsWithPredicate looks up port groups from the cache based on a +// given predicate +func FindPortGroupsWithPredicate(nbClient libovsdbclient.Client, p portGroupPredicate) ([]*nbdb.PortGroup, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.PortGroup{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdatePortGroupsOps creates or updates the provided port groups, +// returning the corresponding ops +func CreateOrUpdatePortGroupsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, pgs ...*nbdb.PortGroup) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(pgs)) + for i := range pgs { + pg := pgs[i] + opModel := operationModel{ + Model: pg, + OnModelUpdates: getAllUpdatableFields(pg), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m :=
newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// CreateOrUpdatePortGroups creates or updates the provided port groups +func CreateOrUpdatePortGroups(nbClient libovsdbclient.Client, pgs ...*nbdb.PortGroup) error { + ops, err := CreateOrUpdatePortGroupsOps(nbClient, nil, pgs...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// CreatePortGroup creates the provided port group if it doesn't exist +func CreatePortGroup(nbClient libovsdbclient.Client, portGroup *nbdb.PortGroup) error { + opModel := operationModel{ + Model: portGroup, + OnModelUpdates: onModelUpdatesNone(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +// GetPortGroup looks up a port group from the cache +func GetPortGroup(nbClient libovsdbclient.Client, pg *nbdb.PortGroup) (*nbdb.PortGroup, error) { + found := []*nbdb.PortGroup{} + opModel := operationModel{ + Model: pg, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +func AddPortsToPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, ports ...string) ([]libovsdb.Operation, error) { + if len(ports) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + Ports: ports, + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.Ports}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// AddPortsToPortGroup adds the provided ports to the provided port group +func AddPortsToPortGroup(nbClient libovsdbclient.Client, name string, ports ...string) error { + ops, err := AddPortsToPortGroupOps(nbClient, nil, name, ports...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeletePortsFromPortGroupOps removes the provided ports from the provided port +// group and returns the corresponding ops +func DeletePortsFromPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, ports ...string) ([]libovsdb.Operation, error) { + if len(ports) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + Ports: ports, + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.Ports}, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeletePortsFromPortGroup removes the provided ports from the provided port +// group +func DeletePortsFromPortGroup(nbClient libovsdbclient.Client, name string, ports ...string) error { + ops, err := DeletePortsFromPortGroupOps(nbClient, nil, name, ports...) 
+ if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// AddACLsToPortGroupOps adds the provided ACLs to the provided port group and +// returns the corresponding ops +func AddACLsToPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + if len(acls) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.ACLs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// UpdatePortGroupSetACLsOps updates the provided ACLs on the provided port group and +// returns the corresponding ops. It entirely replaces the existing ACLs on the PG with +// the newly provided list +func UpdatePortGroupSetACLsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls []*nbdb.ACL) ([]libovsdb.Operation, error) { + pg := nbdb.PortGroup{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + opModel := operationModel{ + Model: &pg, + OnModelUpdates: []interface{}{&pg.ACLs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModel) +} + +// DeleteACLsFromPortGroupOps removes the provided ACLs from the provided port +// group and returns the corresponding ops +func DeleteACLsFromPortGroupOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + if len(acls) == 0 { + return ops, nil + } + + pg := nbdb.PortGroup{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + + opModel := operationModel{ + Model: &pg, + OnModelMutations: []interface{}{&pg.ACLs}, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +func DeleteACLsFromPortGroups(nbClient libovsdbclient.Client, names []string, acls ...*nbdb.ACL) error { + var err error + var ops []libovsdb.Operation + for _, pgName := range names { + ops, err = DeleteACLsFromPortGroupOps(nbClient, ops, pgName, acls...) 
+ if err != nil { + return err + } + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +func DeleteACLsFromAllPortGroups(nbClient libovsdbclient.Client, acls ...*nbdb.ACL) error { + if len(acls) == 0 { + return nil + } + + pg := nbdb.PortGroup{ + ACLs: make([]string, 0, len(acls)), + } + + for _, acl := range acls { + pg.ACLs = append(pg.ACLs, acl.UUID) + } + + opModel := operationModel{ + Model: &pg, + ModelPredicate: func(item *nbdb.PortGroup) bool { return true }, + OnModelMutations: []interface{}{&pg.ACLs}, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(nil, opModel) + if err != nil { + return err + } + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeletePortGroupsOps deletes the provided port groups and returns the +// corresponding ops +func DeletePortGroupsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, names ...string) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(names)) + for _, name := range names { + pg := nbdb.PortGroup{ + Name: name, + } + opModel := operationModel{ + Model: &pg, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeletePortGroups deletes the provided port groups +func DeletePortGroups(nbClient libovsdbclient.Client, names ...string) error { + ops, err := DeletePortGroupsOps(nbClient, nil, names...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeletePortGroupsWithPredicateOps returns the corresponding ops to delete port groups based on +// a given predicate +func DeletePortGroupsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p portGroupPredicate) ([]libovsdb.Operation, error) { + deleted := []*nbdb.PortGroup{} + opModel := operationModel{ + ModelPredicate: p, + ExistingResult: &deleted, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeletePortGroupsWithPredicate deletes the port groups based on the provided predicate +func DeletePortGroupsWithPredicate(nbClient libovsdbclient.Client, p portGroupPredicate) error { + ops, err := DeletePortGroupsWithPredicateOps(nbClient, nil, p) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go new file mode 100644 index 000000000..a83a176df --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/qos.go @@ -0,0 +1,119 @@ +package ops + +import ( + "context" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +type QoSPredicate func(*nbdb.QoS) bool + +// FindQoSesWithPredicate looks up QoSes from the cache based on a +// given predicate +func FindQoSesWithPredicate(nbClient libovsdbclient.Client, p QoSPredicate) ([]*nbdb.QoS, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.QoS{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateQoSesOps
returns the ops to create or update the provided QoSes. +func CreateOrUpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // take a local copy; the loop variable is reused across iterations + qos := qoses[i] + opModel := operationModel{ + Model: qos, + OnModelUpdates: []interface{}{}, // update all fields + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +func UpdateQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // take a local copy; the loop variable is reused across iterations + qos := qoses[i] + opModel := operationModel{ + Model: qos, + OnModelUpdates: []interface{}{}, // update all fields + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) +} + +// AddQoSesToLogicalSwitchOps returns the ops to add the provided QoSes to the switch +func AddQoSesToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + QOSRules: make([]string, 0, len(qoses)), + } + for _, qos := range qoses { + sw.QOSRules = append(sw.QOSRules, qos.UUID) + } + + opModels := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.QOSRules}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels) +} + +// DeleteQoSesOps returns the ops to delete the provided QoSes. +func DeleteQoSesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(qoses)) + for i := range qoses { + // take a local copy; the loop variable is reused across iterations + qos := qoses[i] + opModel := operationModel{ + Model: qos, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels...) +} + +// RemoveQoSesFromLogicalSwitchOps returns the ops to remove the provided QoSes from the provided switch.
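+// A hypothetical call, RemoveQoSesFromLogicalSwitchOps(nbClient, nil, "node-ls", qos), emits a +// delete mutation that removes qos.UUID from the switch's QOSRules column.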
+func RemoveQoSesFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, qoses ...*nbdb.QoS) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + QOSRules: make([]string, 0, len(qoses)), + } + for _, qos := range qoses { + sw.QOSRules = append(sw.QOSRules, qos.UUID) + } + + opModels := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.QOSRules}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModels) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go new file mode 100644 index 000000000..9aeb42123 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/router.go @@ -0,0 +1,1222 @@ +package ops + +import ( + "context" + "errors" + "fmt" + "net" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +// ROUTER OPs + +type logicalRouterPredicate func(*nbdb.LogicalRouter) bool + +// GetLogicalRouter looks up a logical router from the cache +func GetLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) (*nbdb.LogicalRouter, error) { + found := []*nbdb.LogicalRouter{} + opModel := operationModel{ + Model: router, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// FindLogicalRoutersWithPredicate looks up logical routers from the cache based on a +// given predicate +func FindLogicalRoutersWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPredicate) ([]*nbdb.LogicalRouter, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouter{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateLogicalRouter creates or updates the provided logical router +func CreateOrUpdateLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, fields ...interface{}) error { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + opModel := operationModel{ + Model: router, + OnModelUpdates: fields, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +// UpdateLogicalRouterSetExternalIDs sets external IDs on the provided logical +// router adding any missing, removing the ones set to an empty value and +// updating existing +func UpdateLogicalRouterSetExternalIDs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) error { + externalIds := router.ExternalIDs + router, err := GetLogicalRouter(nbClient, router) + if err != nil { + return err + } + + if router.ExternalIDs == nil { + router.ExternalIDs = map[string]string{} + } + + for k, v := range externalIds { + if v == "" { + delete(router.ExternalIDs, k) + } else { + router.ExternalIDs[k] = v + } + } + + opModel := operationModel{ + Model: router, + OnModelUpdates: []interface{}{&router.ExternalIDs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err = m.CreateOrUpdate(opModel) + return 
err +} + +// DeleteLogicalRoutersWithPredicateOps returns the operations to delete the logical routers matching the provided predicate +func DeleteLogicalRoutersWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + p logicalRouterPredicate) ([]libovsdb.Operation, error) { + opModel := operationModel{ + Model: &nbdb.LogicalRouter{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeleteLogicalRouterOps returns the operations to delete the provided logical router +func DeleteLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + router *nbdb.LogicalRouter) ([]libovsdb.Operation, error) { + opModel := operationModel{ + Model: router, + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// DeleteLogicalRouter deletes the provided logical router +func DeleteLogicalRouter(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) error { + ops, err := DeleteLogicalRouterOps(nbClient, nil, router) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// LOGICAL ROUTER PORT OPs + +type logicalRouterPortPredicate func(*nbdb.LogicalRouterPort) bool + +// FindLogicalRouterPortWithPredicate looks up logical router port from +// the cache based on a given predicate +func FindLogicalRouterPortWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPortPredicate) ([]*nbdb.LogicalRouterPort, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouterPort{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// GetLogicalRouterPort looks up a logical router port from the cache +func GetLogicalRouterPort(nbClient libovsdbclient.Client, lrp *nbdb.LogicalRouterPort) (*nbdb.LogicalRouterPort, error) { + found := []*nbdb.LogicalRouterPort{} + opModel := operationModel{ + Model: lrp, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// CreateOrUpdateLogicalRouterPort creates or updates the provided logical +// router port together with the gateway chassis (if not nil), and adds it to the provided logical router +func CreateOrUpdateLogicalRouterPort(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, + lrp *nbdb.LogicalRouterPort, chassis *nbdb.GatewayChassis, fields ...interface{}) error { + opModels := []operationModel{} + if chassis != nil { + opModels = append(opModels, operationModel{ + Model: chassis, + OnModelUpdates: onModelUpdatesAllNonDefault(), + DoAfter: func() { lrp.GatewayChassis = []string{chassis.UUID} }, + ErrNotFound: false, + BulkOp: false, + }) + } + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } else if chassis != nil { + fields = append(fields, &lrp.GatewayChassis) + } + originalPorts := router.Ports + router.Ports = []string{} + opModels = append(opModels, operationModel{ + Model: lrp, + OnModelUpdates: fields, + DoAfter: func() { router.Ports = append(router.Ports, lrp.UUID) }, + ErrNotFound: false, + BulkOp: false, + }) + opModels = append(opModels, operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Ports}, + ErrNotFound: true, + BulkOp: false, + }) + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModels...) 
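+ // restore the caller's Ports slice; it was temporarily replaced above only to build the ports mutation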
+ router.Ports = originalPorts + return err +} + +// DeleteLogicalRouterPorts deletes the provided logical router ports and +// removes them from the provided logical router +func DeleteLogicalRouterPorts(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, lrps ...*nbdb.LogicalRouterPort) error { + originalPorts := router.Ports + router.Ports = make([]string, 0, len(lrps)) + opModels := make([]operationModel, 0, len(lrps)+1) + for i := range lrps { + lrp := lrps[i] + opModel := operationModel{ + Model: lrp, + DoAfter: func() { + if lrp.UUID != "" { + router.Ports = append(router.Ports, lrp.UUID) + } + }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Ports}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + err := m.Delete(opModels...) + router.Ports = originalPorts + return err +} + +// LOGICAL ROUTER POLICY OPs + +type logicalRouterPolicyPredicate func(*nbdb.LogicalRouterPolicy) bool + +// FindLogicalRouterPoliciesWithPredicate looks up logical router policies from +// the cache based on a given predicate +func FindLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, p logicalRouterPolicyPredicate) ([]*nbdb.LogicalRouterPolicy, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouterPolicy{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// GetLogicalRouterPolicy looks up a logical router policy from the cache +func GetLogicalRouterPolicy(nbClient libovsdbclient.Client, policy *nbdb.LogicalRouterPolicy) (*nbdb.LogicalRouterPolicy, error) { + found := []*nbdb.LogicalRouterPolicy{} + opModel := operationModel{ + Model: policy, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// CreateOrUpdateLogicalRouterPolicyWithPredicate looks up a logical router +// policy from the cache based on a given predicate. If it does not exist, it +// creates the provided logical router policy. If it does, it updates it. The +// logical router policy is added to the provided logical router. +// fields determines which columns to update. Passing no fields assumes +// all fields need to be updated. Passing a single nil field indicates no fields should be updated. +// Otherwise, a caller may pass as many individual fields as desired to specify which columns need updating. +func CreateOrUpdateLogicalRouterPolicyWithPredicate(nbClient libovsdbclient.Client, routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate, fields ...interface{}) error { + ops, err := CreateOrUpdateLogicalRouterPolicyWithPredicateOps(nbClient, nil, routerName, lrp, p, fields...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// CreateOrUpdateLogicalRouterPolicyWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate. If it does not +// exist, it creates the provided logical router policy. If it does, it +// updates it. The logical router policy is added to the provided logical +// router.
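+// The policy is appended to the router's policies column via an insert mutation.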
Returns the corresponding ops +func CreateOrUpdateLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate, fields ...interface{}) ([]libovsdb.Operation, error) { + if len(fields) == 0 { + fields = onModelUpdatesAllNonDefault() + } + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + opModels := []operationModel{ + { + Model: lrp, + ModelPredicate: p, + OnModelUpdates: fields, + DoAfter: func() { router.Policies = []string{lrp.UUID} }, + ErrNotFound: false, + BulkOp: false, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: true, + BulkOp: false, + }, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +func UpdateLogicalRouterPoliciesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + lrps ...*nbdb.LogicalRouterPolicy) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(lrps)) + for i := range lrps { + lrp := lrps[i] + opModel := []operationModel{ + { + Model: lrp, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: true, + BulkOp: false, + }, + } + opModels = append(opModels, opModel...) + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels...) +} + +// DeleteLogicalRouterPolicyWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate and returns the +// corresponding ops to delete it and remove it from the provided router. +func DeleteLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterPolicyPredicate) ([]libovsdb.Operation, error) { + router := &nbdb.LogicalRouter{ + Name: routerName, + } + + deleted := []*nbdb.LogicalRouterPolicy{} + opModels := []operationModel{ + { + ModelPredicate: p, + ExistingResult: &deleted, + DoAfter: func() { router.Policies = extractUUIDsFromModels(&deleted) }, + ErrNotFound: false, + BulkOp: true, + }, + { + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: false, + BulkOp: false, + }, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +// DeleteLogicalRouterPoliciesWithPredicate looks up logical router policies +// from the cache based on a given predicate, deletes them and removes them from +// the provided logical router +func DeleteLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate) error { + ops, err := DeleteLogicalRouterPolicyWithPredicateOps(nbClient, nil, routerName, p) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps looks up a logical +// router policy from the cache based on a given predicate. If it doesn't find +// any, it creates the provided logical router policy. If it does, it adds any +// missing Nexthops to the existing logical router policy. The logical router +// policy is added to the provided logical router.
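+// Missing next hops are added with an insert mutation on Nexthops, so existing ones are preserved.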
Returns the corresponding ops
+func CreateOrAddNextHopsToLogicalRouterPolicyWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrp *nbdb.LogicalRouterPolicy, p logicalRouterPolicyPredicate) ([]libovsdb.Operation, error) {
+	router := &nbdb.LogicalRouter{
+		Name: routerName,
+	}
+
+	opModels := []operationModel{
+		{
+			Model:            lrp,
+			ModelPredicate:   p,
+			OnModelMutations: []interface{}{&lrp.Nexthops},
+			DoAfter:          func() { router.Policies = []string{lrp.UUID} },
+			ErrNotFound:      false,
+			BulkOp:           false,
+		},
+		{
+			Model:            router,
+			OnModelMutations: []interface{}{&router.Policies},
+			ErrNotFound:      true,
+			BulkOp:           false,
+		},
+	}
+
+	m := newModelClient(nbClient)
+	return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// DeleteNextHopsFromLogicalRouterPolicyOps removes the Nexthops from the
+// provided logical router policies.
+func DeleteNextHopsFromLogicalRouterPolicyOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrps []*nbdb.LogicalRouterPolicy, nextHops ...string) ([]libovsdb.Operation, error) {
+	nextHopSet := sets.NewString(nextHops...)
+	opModels := []operationModel{}
+	router := &nbdb.LogicalRouter{
+		Name:     routerName,
+		Policies: []string{},
+	}
+
+	for i := range lrps {
+		lrp := lrps[i]
+		if nextHopSet.HasAll(lrp.Nexthops...) {
+			// if no next-hops remain in the policy, remove it altogether
+			router.Policies = append(router.Policies, lrp.UUID)
+			opModel := operationModel{
+				Model:       lrp,
+				BulkOp:      false,
+				ErrNotFound: false,
+			}
+			opModels = append(opModels, opModel)
+		} else {
+			// otherwise just remove the next-hops
+			lrp.Nexthops = nextHops
+			opModel := operationModel{
+				Model:            lrp,
+				OnModelMutations: []interface{}{&lrp.Nexthops},
+				BulkOp:           false,
+				ErrNotFound:      false,
+			}
+			opModels = append(opModels, opModel)
+		}
+	}
+
+	if len(router.Policies) > 0 {
+		opModel := operationModel{
+			Model:            router,
+			OnModelMutations: []interface{}{&router.Policies},
+			BulkOp:           false,
+			ErrNotFound:      false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModels...)
+}
+
+// DeleteNextHopsFromLogicalRouterPolicies removes the Nexthops from the
+// provided logical router policies. If a logical router policy ends up with no
+// Nexthops, it is deleted and removed from the provided logical router.
+func DeleteNextHopsFromLogicalRouterPolicies(nbClient libovsdbclient.Client, routerName string, lrps ...*nbdb.LogicalRouterPolicy) error {
+	ops := []libovsdb.Operation{}
+	for _, lrp := range lrps {
+		nextHops := lrp.Nexthops
+		lrp, err := GetLogicalRouterPolicy(nbClient, lrp)
+		if errors.Is(err, libovsdbclient.ErrNotFound) {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+
+		ops, err = DeleteNextHopsFromLogicalRouterPolicyOps(nbClient, ops, routerName, []*nbdb.LogicalRouterPolicy{lrp}, nextHops...)
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err := TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps looks up a logical
+// router policy from the cache based on a given predicate and removes the
+// provided Nexthop from it. If the logical router policy ends up with no
+// Nexthops, it is deleted and removed from the provided logical router.
Returns +// the corresponding ops +func DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterPolicyPredicate, nextHop string) ([]libovsdb.Operation, error) { + lrps, err := FindLogicalRouterPoliciesWithPredicate(nbClient, p) + if err != nil { + return nil, err + } + + return DeleteNextHopsFromLogicalRouterPolicyOps(nbClient, ops, routerName, lrps, nextHop) +} + +// DeleteNextHopFromLogicalRouterPoliciesWithPredicate looks up a logical router +// policy from the cache based on a given predicate and removes the provided +// Nexthop from it. If the logical router policy ends up with no Nexthops, it is +// deleted and removed from the provided logical router. +func DeleteNextHopFromLogicalRouterPoliciesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterPolicyPredicate, nextHop string) error { + ops, err := DeleteNextHopFromLogicalRouterPoliciesWithPredicateOps(nbClient, nil, routerName, p, nextHop) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteLogicalRouterPolicies deletes the logical router policies and removes +// them from the provided logical router +func DeleteLogicalRouterPolicies(nbClient libovsdbclient.Client, routerName string, lrps ...*nbdb.LogicalRouterPolicy) error { + opModels := getDeleteOpModelsForLogicalRouterPolicies(routerName, lrps...) + + m := newModelClient(nbClient) + return m.Delete(opModels...) +} + +// DeleteLogicalRouterPoliciesOps builds and returns corresponding delete operations for Logical Router +// Policies from the provided logical router. +func DeleteLogicalRouterPoliciesOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, lrps ...*nbdb.LogicalRouterPolicy) ([]libovsdb.Operation, error) { + opModels := getDeleteOpModelsForLogicalRouterPolicies(routerName, lrps...) + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) +} + +func getDeleteOpModelsForLogicalRouterPolicies(routerName string, lrps ...*nbdb.LogicalRouterPolicy) []operationModel { + router := &nbdb.LogicalRouter{ + Name: routerName, + Policies: make([]string, 0, len(lrps)), + } + + opModels := make([]operationModel, 0, len(lrps)+1) + for _, lrp := range lrps { + router.Policies = append(router.Policies, lrp.UUID) + opModel := operationModel{ + Model: lrp, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Policies}, + ErrNotFound: true, + BulkOp: false, + } + + return append(opModels, opModel) +} + +// LOGICAL ROUTER STATIC ROUTES + +type logicalRouterStaticRoutePredicate func(*nbdb.LogicalRouterStaticRoute) bool + +// FindLogicalRouterStaticRoutesWithPredicate looks up logical router static +// routes from the cache based on a given predicate +func FindLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, p logicalRouterStaticRoutePredicate) ([]*nbdb.LogicalRouterStaticRoute, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + found := []*nbdb.LogicalRouterStaticRoute{} + err := nbClient.WhereCache(p).List(ctx, &found) + return found, err +} + +// CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps looks up a logical +// router static route from the cache based on a given predicate. If it does not +// exist, it creates the provided logical router static route. 
If it does, it
+// updates it. The logical router static route is added to the provided logical
+// router. Returns the corresponding ops
+func CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+	routerName string, lrsr *nbdb.LogicalRouterStaticRoute, p logicalRouterStaticRoutePredicate, fields ...interface{}) ([]libovsdb.Operation, error) {
+	if len(fields) == 0 {
+		fields = onModelUpdatesAllNonDefault()
+	}
+	router := &nbdb.LogicalRouter{
+		Name: routerName,
+	}
+
+	opModels := []operationModel{
+		{
+			Model:          lrsr,
+			OnModelUpdates: fields,
+			DoAfter:        func() { router.StaticRoutes = []string{lrsr.UUID} },
+			ErrNotFound:    false,
+			BulkOp:         false,
+		},
+		{
+			Model:            router,
+			OnModelMutations: []interface{}{&router.StaticRoutes},
+			ErrNotFound:      true,
+			BulkOp:           false,
+		},
+	}
+
+	if p != nil {
+		opModels[0].ModelPredicate = p
+	}
+
+	m := newModelClient(nbClient)
+	return m.CreateOrUpdateOps(ops, opModels...)
+}
+
+// PolicyEqualPredicate determines if two static routes have the same routing policy (dst-ip or src-ip).
+// If a policy is nil, OVN considers that as dst-ip
+func PolicyEqualPredicate(p1, p2 *nbdb.LogicalRouterStaticRoutePolicy) bool {
+	if p1 == nil {
+		return p2 == nil || *p2 == nbdb.LogicalRouterStaticRoutePolicyDstIP
+	}
+
+	if p2 == nil {
+		return *p1 == nbdb.LogicalRouterStaticRoutePolicyDstIP
+	}
+
+	return *p1 == *p2
+}
+
+// CreateOrReplaceLogicalRouterStaticRouteWithPredicate looks up a logical
+// router static route from the cache based on a given predicate. If it does not
+// exist, it creates the provided logical router static route. If it does, it
+// updates it. The logical router static route is added to the provided logical
+// router.
+// If more than one route matches the predicate on the router, the additional routes are removed.
+func CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient libovsdbclient.Client, routerName string,
+	lrsr *nbdb.LogicalRouterStaticRoute, p logicalRouterStaticRoutePredicate, fields ...interface{}) error {
+
+	lr := &nbdb.LogicalRouter{Name: routerName}
+	router, err := GetLogicalRouter(nbClient, lr)
+	if err != nil {
+		return fmt.Errorf("unable to get logical router %s: %w", routerName, err)
+	}
+	newPredicate := func(item *nbdb.LogicalRouterStaticRoute) bool {
+		for _, routeUUID := range router.StaticRoutes {
+			if routeUUID == item.UUID && p(item) {
+				return true
+			}
+		}
+		return false
+	}
+	routes, err := FindLogicalRouterStaticRoutesWithPredicate(nbClient, newPredicate)
+	if err != nil {
+		return fmt.Errorf("unable to get logical router static routes with predicate on router %s: %w", routerName, err)
+	}
+
+	var ops []libovsdb.Operation
+	m := newModelClient(nbClient)
+
+	if len(routes) > 0 {
+		lrsr.UUID = routes[0].UUID
+	}
+
+	if len(routes) > 1 {
+		// there should only be a single route; remove all except the first
+		routes = routes[1:]
+		opModels := make([]operationModel, 0, len(routes)+1)
+		router.StaticRoutes = []string{}
+		for _, route := range routes {
+			route := route
+			router.StaticRoutes = append(router.StaticRoutes, route.UUID)
+			opModel := operationModel{
+				Model:       route,
+				ErrNotFound: false,
+				BulkOp:      false,
+			}
+			opModels = append(opModels, opModel)
+		}
+		opModel := operationModel{
+			Model:            router,
+			OnModelMutations: []interface{}{&router.StaticRoutes},
+			ErrNotFound:      true,
+			BulkOp:           false,
+		}
+		opModels = append(opModels, opModel)
+
+		ops, err = m.DeleteOps(nil, opModels...)
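+
+		// Illustrative usage of this helper (editor's sketch; router name,
+		// prefix and nexthop are hypothetical):
+		//
+		//	lrsr := &nbdb.LogicalRouterStaticRoute{IPPrefix: "10.244.0.0/24", Nexthop: "100.64.0.2"}
+		//	err := CreateOrReplaceLogicalRouterStaticRouteWithPredicate(nbClient, "ovn_cluster_router", lrsr,
+		//		func(item *nbdb.LogicalRouterStaticRoute) bool {
+		//			return item.IPPrefix == lrsr.IPPrefix && PolicyEqualPredicate(item.Policy, lrsr.Policy)
+		//		})
+		//
+		// Any duplicate matches beyond the first are deleted by the ops built above.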
+		if err != nil {
+			return err
+		}
+	}
+
+	ops, err = CreateOrUpdateLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, lrsr, nil, fields...)
+	if err != nil {
+		return fmt.Errorf("unable to create or update logical router static routes on router %s: %w", routerName, err)
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// DeleteLogicalRouterStaticRoutesWithPredicate looks up logical router static
+// routes from the cache based on a given predicate, deletes them and removes
+// them from the provided logical router
+func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterStaticRoutePredicate) error {
+	var ops []libovsdb.Operation
+	var err error
+	ops, err = DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient, ops, routerName, p)
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// DeleteLogicalRouterStaticRoutesWithPredicateOps looks up logical router static
+// routes from the cache based on a given predicate, and returns the ops to delete
+// them and remove them from the provided logical router
+func DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, routerName string, p logicalRouterStaticRoutePredicate) ([]libovsdb.Operation, error) {
+	router := &nbdb.LogicalRouter{
+		Name: routerName,
+	}
+
+	deleted := []*nbdb.LogicalRouterStaticRoute{}
+	opModels := []operationModel{
+		{
+			ModelPredicate: p,
+			ExistingResult: &deleted,
+			DoAfter:        func() { router.StaticRoutes = extractUUIDsFromModels(deleted) },
+			ErrNotFound:    false,
+			BulkOp:         true,
+		},
+		{
+			Model:            router,
+			OnModelMutations: []interface{}{&router.StaticRoutes},
+			ErrNotFound:      false,
+			BulkOp:           false,
+		},
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModels...)
+}
+
+// DeleteLogicalRouterStaticRoutes deletes the logical router static routes and
+// removes them from the provided logical router
+func DeleteLogicalRouterStaticRoutes(nbClient libovsdbclient.Client, routerName string, lrsrs ...*nbdb.LogicalRouterStaticRoute) error {
+	router := &nbdb.LogicalRouter{
+		Name:         routerName,
+		StaticRoutes: make([]string, 0, len(lrsrs)),
+	}
+
+	opModels := make([]operationModel, 0, len(lrsrs)+1)
+	for _, lrsr := range lrsrs {
+		lrsr := lrsr
+		router.StaticRoutes = append(router.StaticRoutes, lrsr.UUID)
+		opModel := operationModel{
+			Model:       lrsr,
+			ErrNotFound: false,
+			BulkOp:      false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	opModel := operationModel{
+		Model:            router,
+		OnModelMutations: []interface{}{&router.StaticRoutes},
+		ErrNotFound:      true,
+		BulkOp:           false,
+	}
+	opModels = append(opModels, opModel)
+
+	m := newModelClient(nbClient)
+	return m.Delete(opModels...)
+}
+
+// BFD ops
+
+// CreateOrUpdateBFDOps creates or updates the provided BFDs and returns
+// the corresponding ops
+func CreateOrUpdateBFDOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, bfds ...*nbdb.BFD) ([]libovsdb.Operation, error) {
+	opModels := make([]operationModel, 0, len(bfds))
+	for i := range bfds {
+		bfd := bfds[i]
+		opModel := operationModel{
+			Model:          bfd,
+			OnModelUpdates: onModelUpdatesAllNonDefault(),
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+
+	m := newModelClient(nbClient)
+	return m.CreateOrUpdateOps(ops, opModels...)
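+
+	// Illustrative usage (editor's sketch; the port and IP are hypothetical):
+	//
+	//	bfd := &nbdb.BFD{LogicalPort: "rtoe-GR_node1", DstIP: "172.18.0.5"}
+	//	ops, err := CreateOrUpdateBFDOps(nbClient, nil, bfd)
+	//
+	// LookupBFD below finds the row again using the (LogicalPort, DstIP) pair
+	// as its predicate.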
+} + +// DeleteBFDs deletes the provided BFDs +func DeleteBFDs(nbClient libovsdbclient.Client, bfds ...*nbdb.BFD) error { + opModels := make([]operationModel, 0, len(bfds)) + for i := range bfds { + bfd := bfds[i] + opModel := operationModel{ + Model: bfd, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + m := newModelClient(nbClient) + return m.Delete(opModels...) +} + +func LookupBFD(nbClient libovsdbclient.Client, bfd *nbdb.BFD) (*nbdb.BFD, error) { + found := []*nbdb.BFD{} + opModel := operationModel{ + Model: bfd, + ModelPredicate: func(item *nbdb.BFD) bool { return item.DstIP == bfd.DstIP && item.LogicalPort == bfd.LogicalPort }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + return found[0], nil +} + +// LB OPs + +// AddLoadBalancersToLogicalRouterOps adds the provided load balancers to the +// provided logical router and returns the corresponding ops +func AddLoadBalancersToLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := router.LoadBalancer + router.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + router.LoadBalancer = append(router.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.LoadBalancer}, + ErrNotFound: true, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + ops, err := modelClient.CreateOrUpdateOps(ops, opModel) + router.LoadBalancer = originalLBs + return ops, err +} + +// RemoveLoadBalancersFromLogicalRouterOps removes the provided load balancers from the +// provided logical router and returns the corresponding ops +func RemoveLoadBalancersFromLogicalRouterOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + originalLBs := router.LoadBalancer + router.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + router.LoadBalancer = append(router.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.LoadBalancer}, + // if we want to delete loadbalancer from the router that doesn't exist, that is noop + ErrNotFound: false, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + ops, err := modelClient.DeleteOps(ops, opModel) + router.LoadBalancer = originalLBs + return ops, err +} + +func buildNAT( + natType nbdb.NATType, + externalIP string, + logicalIP string, + logicalPort string, + externalMac string, + externalIDs map[string]string, + match string, +) *nbdb.NAT { + nat := &nbdb.NAT{ + Type: natType, + ExternalIP: externalIP, + LogicalIP: logicalIP, + Options: map[string]string{"stateless": "false"}, + ExternalIDs: externalIDs, + Match: match, + } + + if logicalPort != "" { + nat.LogicalPort = &logicalPort + } + + if externalMac != "" { + nat.ExternalMAC = &externalMac + } + + return nat +} + +// BuildSNAT builds a logical router SNAT +func BuildSNAT( + externalIP *net.IP, + logicalIP *net.IPNet, + logicalPort string, + externalIDs map[string]string, +) *nbdb.NAT { + return BuildSNATWithMatch(externalIP, logicalIP, logicalPort, externalIDs, "") +} + +func BuildSNATWithMatch( + externalIP *net.IP, + logicalIP *net.IPNet, + logicalPort string, + externalIDs map[string]string, + match string, +) 
*nbdb.NAT {
+	externalIPStr := ""
+	if externalIP != nil {
+		externalIPStr = externalIP.String()
+	}
+	// Strip the mask from logicalIP only if it is a host mask
+	logicalIPMask, _ := logicalIP.Mask.Size()
+	logicalIPStr := logicalIP.IP.String()
+	if logicalIPMask != 32 && logicalIPMask != 128 {
+		logicalIPStr = logicalIP.String()
+	}
+	return buildNAT(nbdb.NATTypeSNAT, externalIPStr, logicalIPStr, logicalPort, "", externalIDs, match)
+}
+
+// BuildDNATAndSNAT builds a logical router DNAT/SNAT
+func BuildDNATAndSNAT(
+	externalIP *net.IP,
+	logicalIP *net.IPNet,
+	logicalPort string,
+	externalMac string,
+	externalIDs map[string]string,
+) *nbdb.NAT {
+	return BuildDNATAndSNATWithMatch(externalIP, logicalIP, logicalPort, externalMac, externalIDs, "")
+}
+
+func BuildDNATAndSNATWithMatch(
+	externalIP *net.IP,
+	logicalIP *net.IPNet,
+	logicalPort string,
+	externalMac string,
+	externalIDs map[string]string,
+	match string,
+) *nbdb.NAT {
+	externalIPStr := ""
+	if externalIP != nil {
+		externalIPStr = externalIP.String()
+	}
+	logicalIPStr := ""
+	if logicalIP != nil {
+		logicalIPStr = logicalIP.IP.String()
+	}
+	return buildNAT(
+		nbdb.NATTypeDNATAndSNAT,
+		externalIPStr,
+		logicalIPStr,
+		logicalPort,
+		externalMac,
+		externalIDs,
+		match)
+}
+
+// isEquivalentNAT checks if the `searched` NAT is equivalent to `existing`.
+// Returns true if the UUID is set in `searched` and matches the UUID of `existing`.
+// Otherwise, perform the following checks:
+// - Compare the Type and Match fields.
+// - Compare ExternalIP if it is set in `searched`.
+// - Compare LogicalIP if the Type in `searched` is SNAT.
+// - Compare LogicalPort if it is set in `searched`.
+// - Ensure that all ExternalIDs of `searched` exist and have the same value in `existing`.
+func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool {
+	// Simple case: uuid was provided.
+	if searched.UUID != "" && existing.UUID == searched.UUID {
+		return true
+	}
+
+	if searched.Type != existing.Type {
+		return false
+	}
+
+	if searched.Match != existing.Match {
+		return false
+	}
+
+	// Compare externalIP if it is not empty.
+	if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP {
+		return false
+	}
+
+	// Compare logicalIP only for SNAT, since DNAT types must have unique ExternalIP.
+	if searched.Type == nbdb.NATTypeSNAT && searched.LogicalIP != existing.LogicalIP {
+		return false
+	}
+
+	// When searching based on logicalPort, no need to go any further.
+	if searched.LogicalPort != nil &&
+		(existing.LogicalPort == nil || *searched.LogicalPort != *existing.LogicalPort) {
+		return false
+	}
+
+	// When the searched external IDs are populated, check that each provided
+	// key/value pair exists in the existing row.
+	// A use case is when doing NAT operations where the external ID "name" is provided.
+ for externalIdKey, externalIdValue := range searched.ExternalIDs { + if foundValue, found := existing.ExternalIDs[externalIdKey]; !found || foundValue != externalIdValue { + return false + } + } + + return true +} + +type natPredicate func(*nbdb.NAT) bool + +// GetNAT looks up an NAT from the cache +func GetNAT(nbClient libovsdbclient.Client, nat *nbdb.NAT) (*nbdb.NAT, error) { + found := []*nbdb.NAT{} + opModel := operationModel{ + Model: nat, + ModelPredicate: func(item *nbdb.NAT) bool { return isEquivalentNAT(item, nat) }, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + err := m.Lookup(opModel) + if err != nil { + return nil, err + } + + return found[0], nil +} + +// FindNATsWithPredicate looks up NATs from the cache based on a given predicate +func FindNATsWithPredicate(nbClient libovsdbclient.Client, predicate natPredicate) ([]*nbdb.NAT, error) { + nats := []*nbdb.NAT{} + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + err := nbClient.WhereCache(predicate).List(ctx, &nats) + return nats, err +} + +// GetRouterNATs looks up NATs associated to the provided logical router from +// the cache +func GetRouterNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter) ([]*nbdb.NAT, error) { + r, err := GetLogicalRouter(nbClient, router) + if err != nil { + return nil, fmt.Errorf("failed to get router: %s, error: %w", router.Name, err) + } + + nats := []*nbdb.NAT{} + for _, uuid := range r.Nat { + nat, err := GetNAT(nbClient, &nbdb.NAT{UUID: uuid}) + if errors.Is(err, libovsdbclient.ErrNotFound) { + continue + } + if err != nil { + return nil, fmt.Errorf("failed to lookup NAT entry with uuid: %s, error: %w", uuid, err) + } + nats = append(nats, nat) + } + + return nats, nil +} + +// CreateOrUpdateNATsOps creates or updates the provided NATs, adds them to +// the provided logical router and returns the corresponding ops +func CreateOrUpdateNATsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) ([]libovsdb.Operation, error) { + routerNats, err := GetRouterNATs(nbClient, router) + if err != nil { + return ops, fmt.Errorf("unable to get NAT entries for router %+v: %w", router, err) + } + + originalNats := router.Nat + router.Nat = make([]string, 0, len(nats)) + opModels := make([]operationModel, 0, len(nats)+1) + for i := range nats { + inputNat := nats[i] + for _, routerNat := range routerNats { + if isEquivalentNAT(routerNat, inputNat) { + inputNat.UUID = routerNat.UUID + break + } + } + opModel := operationModel{ + Model: inputNat, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + DoAfter: func() { router.Nat = append(router.Nat, inputNat.UUID) }, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Nat}, + ErrNotFound: true, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err = m.CreateOrUpdateOps(ops, opModels...) + router.Nat = originalNats + return ops, err +} + +// CreateOrUpdateNATs creates or updates the provided NATs and adds them to +// the provided logical router +func CreateOrUpdateNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) error { + ops, err := CreateOrUpdateNATsOps(nbClient, nil, router, nats...) 
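+
+	// Illustrative usage (editor's sketch; names and addresses are hypothetical):
+	//
+	//	extIP := net.ParseIP("172.18.0.10")
+	//	_, podSubnet, _ := net.ParseCIDR("10.244.0.0/24")
+	//	snat := BuildSNAT(&extIP, podSubnet, "", map[string]string{"name": "node1-snat"})
+	//	err := CreateOrUpdateNATs(nbClient, &nbdb.LogicalRouter{Name: "GR_node1"}, snat)
+	//
+	// isEquivalentNAT decides whether each input NAT updates an existing row
+	// or creates a new one.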
+ if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteNATsOps deletes the provided NATs, removes them from the provided +// logical router and returns the corresponding ops +func DeleteNATsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) ([]libovsdb.Operation, error) { + routerNats, err := GetRouterNATs(nbClient, router) + if errors.Is(err, libovsdbclient.ErrNotFound) { + return ops, nil + } + if err != nil { + return ops, fmt.Errorf("unable to get NAT entries for router %+v: %w", router, err) + } + + originalNats := router.Nat + router.Nat = make([]string, 0, len(nats)) + opModels := make([]operationModel, 0, len(routerNats)+1) + for _, routerNat := range routerNats { + for _, inputNat := range nats { + if isEquivalentNAT(routerNat, inputNat) { + router.Nat = append(router.Nat, routerNat.UUID) + opModel := operationModel{ + Model: routerNat, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + break + } + } + } + if len(router.Nat) == 0 { + return ops, nil + } + opModel := operationModel{ + Model: router, + OnModelMutations: []interface{}{&router.Nat}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err = m.DeleteOps(ops, opModels...) + router.Nat = originalNats + return ops, err +} + +// DeleteNATs deletes the provided NATs and removes them from the provided +// logical router +func DeleteNATs(nbClient libovsdbclient.Client, router *nbdb.LogicalRouter, nats ...*nbdb.NAT) error { + ops, err := DeleteNATsOps(nbClient, nil, router, nats...) + if err != nil { + return err + } + + _, err = TransactAndCheck(nbClient, ops) + return err +} + +// DeleteNATsWithPredicateOps looks up NATs from the cache based on a given +// predicate, deletes them, removes them from associated logical routers and +// returns the corresponding ops +func DeleteNATsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, p natPredicate) ([]libovsdb.Operation, error) { + deleted := []*nbdb.NAT{} + router := &nbdb.LogicalRouter{} + natUUIDs := sets.Set[string]{} + opModels := []operationModel{ + { + ModelPredicate: p, + ExistingResult: &deleted, + DoAfter: func() { + router.Nat = extractUUIDsFromModels(&deleted) + natUUIDs.Insert(router.Nat...) + }, + BulkOp: true, + ErrNotFound: false, + }, + { + Model: router, + ModelPredicate: func(lr *nbdb.LogicalRouter) bool { return natUUIDs.HasAny(lr.Nat...) }, + OnModelMutations: []interface{}{&router.Nat}, + ErrNotFound: false, + BulkOp: true, + }, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModels...) 
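+
+	// An illustrative natPredicate (editor's sketch; the external ID key and
+	// value are hypothetical), deleting every NAT labelled with a given name:
+	//
+	//	ops, err := DeleteNATsWithPredicateOps(nbClient, nil, func(nat *nbdb.NAT) bool {
+	//		return nat.ExternalIDs["name"] == "node1-snat"
+	//	})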
+} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go new file mode 100644 index 000000000..7f4f527d1 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sample.go @@ -0,0 +1,221 @@ +package ops + +import ( + "golang.org/x/net/context" + "hash/fnv" + + libovsdbclient "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + libovsdb "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" +) + +func CreateOrUpdateSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +func UpdateSampleCollectorExternalIDs(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + OnModelUpdates: []interface{}{&collector.ExternalIDs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + _, err := m.CreateOrUpdate(opModel) + return err +} + +func DeleteSampleCollector(nbClient libovsdbclient.Client, collector *nbdb.SampleCollector) error { + opModel := operationModel{ + Model: collector, + ErrNotFound: false, + BulkOp: false, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func DeleteSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SampleCollector) bool) error { + opModel := operationModel{ + Model: &nbdb.SampleCollector{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func FindSampleCollectorWithPredicate(nbClient libovsdbclient.Client, p func(*nbdb.SampleCollector) bool) ([]*nbdb.SampleCollector, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + collectors := []*nbdb.SampleCollector{} + err := nbClient.WhereCache(p).List(ctx, &collectors) + return collectors, err +} + +func ListSampleCollectors(nbClient libovsdbclient.Client) ([]*nbdb.SampleCollector, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout) + defer cancel() + collectors := []*nbdb.SampleCollector{} + err := nbClient.List(ctx, &collectors) + return collectors, err +} + +func CreateOrUpdateSamplingAppsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, samplingApps ...*nbdb.SamplingApp) ([]libovsdb.Operation, error) { + opModels := make([]operationModel, 0, len(samplingApps)) + for i := range samplingApps { + // can't use i in the predicate, for loop replaces it in-memory + samplingApp := samplingApps[i] + opModel := operationModel{ + Model: samplingApp, + OnModelUpdates: onModelUpdatesAllNonDefault(), + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + + modelClient := newModelClient(nbClient) + return modelClient.CreateOrUpdateOps(ops, opModels...) 
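+
+	// Editor's note, an illustrative wiring of sampling (the collector UUID is
+	// hypothetical): a SamplingConfig built as below is what addSample consults
+	// when decorating ACLs with samples later in this file:
+	//
+	//	cfg := NewSamplingConfig(map[SampleFeature][]string{
+	//		NetworkPolicySample: {"8bbab2fc-89ae-46fa-8b43-0d4a9bbbb022"},
+	//	})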
+} + +func DeleteSamplingAppsWithPredicate(nbClient libovsdbclient.Client, p func(collector *nbdb.SamplingApp) bool) error { + opModel := operationModel{ + Model: &nbdb.SamplingApp{}, + ModelPredicate: p, + ErrNotFound: false, + BulkOp: true, + } + m := newModelClient(nbClient) + return m.Delete(opModel) +} + +func FindSample(nbClient libovsdbclient.Client, sampleMetadata int) (*nbdb.Sample, error) { + sample := &nbdb.Sample{ + Metadata: sampleMetadata, + } + return GetSample(nbClient, sample) +} + +func GetSample(nbClient libovsdbclient.Client, sample *nbdb.Sample) (*nbdb.Sample, error) { + found := []*nbdb.Sample{} + opModel := operationModel{ + Model: sample, + ExistingResult: &found, + ErrNotFound: true, + BulkOp: false, + } + modelClient := newModelClient(nbClient) + err := modelClient.Lookup(opModel) + if err != nil { + return nil, err + } + return found[0], err +} + +type SampleFeature = string + +const ( + EgressFirewallSample SampleFeature = "EgressFirewall" + NetworkPolicySample SampleFeature = "NetworkPolicy" + AdminNetworkPolicySample SampleFeature = "AdminNetworkPolicy" + MulticastSample SampleFeature = "Multicast" + UDNIsolationSample SampleFeature = "UDNIsolation" +) + +// SamplingConfig is used to configure sampling for different db objects. +type SamplingConfig struct { + featureCollectors map[SampleFeature][]string +} + +func NewSamplingConfig(featureCollectors map[SampleFeature][]string) *SamplingConfig { + return &SamplingConfig{ + featureCollectors: featureCollectors, + } +} + +func addSample(c *SamplingConfig, opModels []operationModel, model model.Model) []operationModel { + switch t := model.(type) { + case *nbdb.ACL: + return createOrUpdateSampleForACL(opModels, c, t) + } + return opModels +} + +// createOrUpdateSampleForACL should be called before acl operationModel is appended to opModels. +func createOrUpdateSampleForACL(opModels []operationModel, c *SamplingConfig, acl *nbdb.ACL) []operationModel { + if c == nil { + acl.SampleEst = nil + acl.SampleNew = nil + return opModels + } + collectors := c.featureCollectors[getACLSampleFeature(acl)] + if len(collectors) == 0 { + acl.SampleEst = nil + acl.SampleNew = nil + return opModels + } + aclID := GetACLSampleID(acl) + sample := &nbdb.Sample{ + Collectors: collectors, + // 32 bits + Metadata: int(aclID), + } + opModel := operationModel{ + Model: sample, + DoAfter: func() { + acl.SampleEst = &sample.UUID + acl.SampleNew = &sample.UUID + }, + OnModelUpdates: []interface{}{&sample.Collectors}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + return opModels +} + +func GetACLSampleID(acl *nbdb.ACL) uint32 { + // primaryID is unique for each ACL, but established connections will keep sampleID that is set on + // connection creation. Here is the situation we want to avoid: + // 1. ACL1 is created with sampleID=1 (e.g. based on ANP namespace+name+...+rule index with action Allow) + // 2. connection A is established with sampleID=1, sample is decoded to say "Allowed by ANP namespace+name" + // 3. ACL1 is updated with sampleID=1 (e.g. now same rule in ANP says Deny, but PrimaryIDKey is the same) + // 4. connection A still generates samples with sampleID=1, but now it is "Denied by ANP namespace+name" + // In reality, connection A is still allowed, as existing connections are not affected by ANP updates. + // To avoid this, we encode Match and Action to the sampleID, to ensure a new sampleID is assigned on Match or action change. 
+	// In that case stale sampleIDs will just report messages like "sampling for this connection was updated or deleted".
+	primaryID := acl.ExternalIDs[PrimaryIDKey.String()] + acl.Match + acl.Action
+	h := fnv.New32a()
+	h.Write([]byte(primaryID))
+	return h.Sum32()
+}
+
+func getACLSampleFeature(acl *nbdb.ACL) SampleFeature {
+	switch acl.ExternalIDs[OwnerTypeKey.String()] {
+	case AdminNetworkPolicyOwnerType, BaselineAdminNetworkPolicyOwnerType:
+		return AdminNetworkPolicySample
+	case MulticastNamespaceOwnerType, MulticastClusterOwnerType:
+		return MulticastSample
+	case NetpolNodeOwnerType, NetworkPolicyOwnerType, NetpolNamespaceOwnerType:
+		return NetworkPolicySample
+	case EgressFirewallOwnerType:
+		return EgressFirewallSample
+	case UDNIsolationOwnerType:
+		return UDNIsolationSample
+	}
+	return ""
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go
new file mode 100644
index 000000000..1c5dde339
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/sb_global.go
@@ -0,0 +1,27 @@
+package ops
+
+import (
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
+)
+
+// GetSBGlobal looks up the SB Global entry from the cache
+func GetSBGlobal(sbClient libovsdbclient.Client, sbGlobal *sbdb.SBGlobal) (*sbdb.SBGlobal, error) {
+	found := []*sbdb.SBGlobal{}
+	opModel := operationModel{
+		Model:          sbGlobal,
+		ModelPredicate: func(item *sbdb.SBGlobal) bool { return true },
+		ExistingResult: &found,
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(sbClient)
+	err := m.Lookup(opModel)
+	if err != nil {
+		return nil, err
+	}
+
+	return found[0], nil
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go
new file mode 100644
index 000000000..964e711bb
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/switch.go
@@ -0,0 +1,484 @@
+package ops
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// LOGICAL_SWITCH OPs
+
+type switchPredicate func(*nbdb.LogicalSwitch) bool
+
+// FindLogicalSwitchesWithPredicate looks up logical switches from the cache
+// based on a given predicate
+func FindLogicalSwitchesWithPredicate(nbClient libovsdbclient.Client, p switchPredicate) ([]*nbdb.LogicalSwitch, error) {
+	found := []*nbdb.LogicalSwitch{}
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+	err := nbClient.WhereCache(p).List(ctx, &found)
+	return found, err
+}
+
+// GetLogicalSwitch looks up a logical switch from the cache
+func GetLogicalSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch) (*nbdb.LogicalSwitch, error) {
+	found := []*nbdb.LogicalSwitch{}
+	opModel := operationModel{
+		Model:          sw,
+		ExistingResult: &found,
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	err := m.Lookup(opModel)
+	if err != nil {
+		return nil, err
+	}
+
+	return found[0], nil
+}
+
+// CreateOrUpdateLogicalSwitch creates or updates the provided logical switch
+func
CreateOrUpdateLogicalSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, fields ...interface{}) error {
+	if len(fields) == 0 {
+		fields = onModelUpdatesAllNonDefault()
+	}
+	opModel := operationModel{
+		Model:          sw,
+		OnModelUpdates: fields,
+		ErrNotFound:    false,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	_, err := m.CreateOrUpdate(opModel)
+	return err
+}
+
+// UpdateLogicalSwitchSetExternalIDs updates the external IDs on the provided logical
+// switch. Empty values mean the corresponding keys are to be deleted.
+func UpdateLogicalSwitchSetExternalIDs(nbClient libovsdbclient.Client, logicalSwitch *nbdb.LogicalSwitch) error {
+	externalIds := logicalSwitch.ExternalIDs
+	logicalSwitch, err := GetLogicalSwitch(nbClient, logicalSwitch)
+	if err != nil {
+		return err
+	}
+
+	if logicalSwitch.ExternalIDs == nil {
+		logicalSwitch.ExternalIDs = map[string]string{}
+	}
+
+	for k, v := range externalIds {
+		if v == "" {
+			delete(logicalSwitch.ExternalIDs, k)
+		} else {
+			logicalSwitch.ExternalIDs[k] = v
+		}
+	}
+
+	opModel := operationModel{
+		Model:          logicalSwitch,
+		OnModelUpdates: []interface{}{&logicalSwitch.ExternalIDs},
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	_, err = m.CreateOrUpdate(opModel)
+	return err
+}
+
+type logicalSwitchPredicate func(*nbdb.LogicalSwitch) bool
+
+// DeleteLogicalSwitchesWithPredicateOps returns the operations to delete the logical switches matching the provided predicate
+func DeleteLogicalSwitchesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+	p logicalSwitchPredicate) ([]libovsdb.Operation, error) {
+	opModel := operationModel{
+		Model:          &nbdb.LogicalSwitch{},
+		ModelPredicate: p,
+		ErrNotFound:    false,
+		BulkOp:         true,
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModel)
+}
+
+// DeleteLogicalSwitchOps returns the operations to delete the provided logical switch
+func DeleteLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation,
+	swName string) ([]libovsdb.Operation, error) {
+	opModel := operationModel{
+		Model:       &nbdb.LogicalSwitch{Name: swName},
+		ErrNotFound: false,
+		BulkOp:      false,
+	}
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModel)
+}
+
+// DeleteLogicalSwitch deletes the provided logical switch
+func DeleteLogicalSwitch(nbClient libovsdbclient.Client, swName string) error {
+	ops, err := DeleteLogicalSwitchOps(nbClient, nil, swName)
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// LB ops
+
+// AddLoadBalancersToLogicalSwitchOps adds the provided load balancers to the
+// provided logical switch and returns the corresponding ops
+func AddLoadBalancersToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lbs ...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) {
+	sw.LoadBalancer = make([]string, 0, len(lbs))
+	for _, lb := range lbs {
+		sw.LoadBalancer = append(sw.LoadBalancer, lb.UUID)
+	}
+	opModel := operationModel{
+		Model:            sw,
+		OnModelMutations: []interface{}{&sw.LoadBalancer},
+		ErrNotFound:      true,
+		BulkOp:           false,
+	}
+
+	modelClient := newModelClient(nbClient)
+	return modelClient.CreateOrUpdateOps(ops, opModel)
+}
+
+// RemoveLoadBalancersFromLogicalSwitchOps removes the provided load balancers from the
+// provided logical switch and returns the corresponding ops
+func RemoveLoadBalancersFromLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lbs
...*nbdb.LoadBalancer) ([]libovsdb.Operation, error) { + sw.LoadBalancer = make([]string, 0, len(lbs)) + for _, lb := range lbs { + sw.LoadBalancer = append(sw.LoadBalancer, lb.UUID) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.LoadBalancer}, + // if we want to delete loadbalancer from the switch that doesn't exist, that is noop + ErrNotFound: false, + BulkOp: false, + } + + modelClient := newModelClient(nbClient) + return modelClient.DeleteOps(ops, opModel) +} + +// ACL ops + +// AddACLsToLogicalSwitchOps adds the provided ACLs to the provided logical +// switch and returns the corresponding ops +func AddACLsToLogicalSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, name string, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + sw := &nbdb.LogicalSwitch{ + Name: name, + ACLs: make([]string, 0, len(acls)), + } + for _, acl := range acls { + sw.ACLs = append(sw.ACLs, acl.UUID) + } + + opModels := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.ACLs}, + ErrNotFound: true, + BulkOp: false, + } + + m := newModelClient(nbClient) + return m.CreateOrUpdateOps(ops, opModels) +} + +// RemoveACLsFromLogicalSwitchesWithPredicateOps looks up logical switches from the cache +// based on a given predicate, removes from them the provided ACLs, and returns the +// corresponding ops +func RemoveACLsFromLogicalSwitchesWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, + p switchPredicate, acls ...*nbdb.ACL) ([]libovsdb.Operation, error) { + sw := nbdb.LogicalSwitch{ + ACLs: make([]string, 0, len(acls)), + } + for _, acl := range acls { + sw.ACLs = append(sw.ACLs, acl.UUID) + } + opModel := operationModel{ + Model: &sw, + ModelPredicate: p, + OnModelMutations: []interface{}{&sw.ACLs}, + ErrNotFound: false, + BulkOp: true, + } + + m := newModelClient(nbClient) + return m.DeleteOps(ops, opModel) +} + +// RemoveACLsFromLogicalSwitchesWithPredicate looks up logical switches from the cache +// based on a given predicate and removes from them the provided ACLs +func RemoveACLsFromLogicalSwitchesWithPredicate(nbClient libovsdbclient.Client, p switchPredicate, acls ...*nbdb.ACL) error { + ops, err := RemoveACLsFromLogicalSwitchesWithPredicateOps(nbClient, nil, p, acls...) 
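+
+	// An illustrative switchPredicate (editor's sketch; the external ID key is
+	// hypothetical), stripping the given ACLs from every switch tagged as a
+	// node switch:
+	//
+	//	err := RemoveACLsFromLogicalSwitchesWithPredicate(nbClient,
+	//		func(item *nbdb.LogicalSwitch) bool { return item.ExternalIDs["node"] != "" },
+	//		acls...)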
+	if err != nil {
+		return err
+	}
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// UpdateLogicalSwitchSetOtherConfig sets other config on the provided logical
+// switch, adding any missing keys, removing keys set to an empty value, and
+// updating existing ones
+func UpdateLogicalSwitchSetOtherConfig(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch) error {
+	otherConfig := sw.OtherConfig
+	sw, err := GetLogicalSwitch(nbClient, sw)
+	if err != nil {
+		return err
+	}
+
+	if sw.OtherConfig == nil {
+		sw.OtherConfig = map[string]string{}
+	}
+
+	for k, v := range otherConfig {
+		if v == "" {
+			delete(sw.OtherConfig, k)
+		} else {
+			sw.OtherConfig[k] = v
+		}
+	}
+
+	opModel := operationModel{
+		Model:          sw,
+		OnModelUpdates: []interface{}{&sw.OtherConfig},
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	_, err = m.CreateOrUpdate(opModel)
+	return err
+}
+
+// LOGICAL SWITCH PORT OPs
+
+// GetLogicalSwitchPort looks up a logical switch port from the cache
+func GetLogicalSwitchPort(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort) (*nbdb.LogicalSwitchPort, error) {
+	found := []*nbdb.LogicalSwitchPort{}
+	opModel := operationModel{
+		Model:          lsp,
+		ExistingResult: &found,
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	err := m.Lookup(opModel)
+	if err != nil {
+		return nil, err
+	}
+
+	return found[0], nil
+}
+
+func createOrUpdateLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) {
+	originalPorts := sw.Ports
+	sw.Ports = make([]string, 0, len(lsps))
+	opModels := make([]operationModel, 0, len(lsps)+1)
+	for i := range lsps {
+		lsp := lsps[i]
+		opModel := operationModel{
+			Model:          lsp,
+			OnModelUpdates: getAllUpdatableFields(lsp),
+			DoAfter:        func() { sw.Ports = append(sw.Ports, lsp.UUID) },
+			ErrNotFound:    false,
+			BulkOp:         false,
+		}
+		opModels = append(opModels, opModel)
+	}
+	opModel := operationModel{
+		Model:            sw,
+		OnModelMutations: []interface{}{&sw.Ports},
+		ErrNotFound:      !createSwitch,
+		BulkOp:           false,
+	}
+	opModels = append(opModels, opModel)
+
+	m := newModelClient(nbClient)
+	ops, err := m.CreateOrUpdateOps(ops, opModels...)
+	sw.Ports = originalPorts
+	if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) && !createSwitch {
+		err = fmt.Errorf("could not find switch: %q, %w", sw.Name, err)
+	}
+	return ops, err
+}
+
+func createOrUpdateLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, createSwitch bool, lsps ...*nbdb.LogicalSwitchPort) error {
+	ops, err := createOrUpdateLogicalSwitchPortsOps(nbClient, nil, sw, createSwitch, lsps...)
+	if err != nil {
+		return err
+	}
+
+	_, err = TransactAndCheckAndSetUUIDs(nbClient, lsps, ops)
+	return err
+}
+
+// CreateOrUpdateLogicalSwitchPortsOnSwitchOps creates or updates the provided
+// logical switch ports, adds them to the provided logical switch and returns
+// the corresponding ops
+func CreateOrUpdateLogicalSwitchPortsOnSwitchOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) {
+	return createOrUpdateLogicalSwitchPortsOps(nbClient, ops, sw, false, lsps...)
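+
+	// Illustrative usage of UpdateLogicalSwitchSetOtherConfig above (editor's
+	// sketch; switch name and keys are hypothetical): an empty value deletes
+	// the key, a non-empty value upserts it:
+	//
+	//	err := UpdateLogicalSwitchSetOtherConfig(nbClient, &nbdb.LogicalSwitch{
+	//		Name:        "node1",
+	//		OtherConfig: map[string]string{"mcast_snoop": "true", "mcast_querier": ""},
+	//	})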
+} + +// CreateOrUpdateLogicalSwitchPortsOnSwitch creates or updates the provided +// logical switch ports and adds them to the provided logical switch +func CreateOrUpdateLogicalSwitchPortsOnSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error { + return createOrUpdateLogicalSwitchPorts(nbClient, sw, false, lsps...) +} + +// CreateOrUpdateLogicalSwitchPortsAndSwitch creates or updates the provided +// logical switch ports and adds them to the provided logical switch creating it +// if it does not exist +func CreateOrUpdateLogicalSwitchPortsAndSwitch(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error { + return createOrUpdateLogicalSwitchPorts(nbClient, sw, true, lsps...) +} + +// DeleteLogicalSwitchPortsOps deletes the provided logical switch ports, removes +// them from the provided logical switch and returns the corresponding ops +func DeleteLogicalSwitchPortsOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) ([]libovsdb.Operation, error) { + originalPorts := sw.Ports + sw.Ports = make([]string, 0, len(lsps)) + opModels := make([]operationModel, 0, len(lsps)+1) + for i := range lsps { + lsp := lsps[i] + opModel := operationModel{ + Model: lsp, + DoAfter: func() { + if lsp.UUID != "" { + sw.Ports = append(sw.Ports, lsp.UUID) + } + }, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + } + opModel := operationModel{ + Model: sw, + OnModelMutations: []interface{}{&sw.Ports}, + ErrNotFound: false, + BulkOp: false, + } + opModels = append(opModels, opModel) + + m := newModelClient(nbClient) + ops, err := m.DeleteOps(ops, opModels...) + sw.Ports = originalPorts + return ops, err +} + +// DeleteLogicalSwitchPorts deletes the provided logical switch ports and +// removes them from the provided logical switch +func DeleteLogicalSwitchPorts(nbClient libovsdbclient.Client, sw *nbdb.LogicalSwitch, lsps ...*nbdb.LogicalSwitchPort) error { + ops, err := DeleteLogicalSwitchPortsOps(nbClient, nil, sw, lsps...) 
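+
+	// Illustrative round trip (editor's sketch; names and address are
+	// hypothetical), creating a port on a switch and deleting it again:
+	//
+	//	sw := &nbdb.LogicalSwitch{Name: "node1"}
+	//	lsp := &nbdb.LogicalSwitchPort{Name: "ns_pod", Addresses: []string{"0a:58:0a:f4:00:05 10.244.0.5"}}
+	//	err := CreateOrUpdateLogicalSwitchPortsOnSwitch(nbClient, sw, lsp)
+	//	// lsp.UUID is populated by TransactAndCheckAndSetUUIDs
+	//	err = DeleteLogicalSwitchPorts(nbClient, sw, lsp)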
+	if err != nil {
+		return err
+	}
+
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+type logicalSwitchPortPredicate func(*nbdb.LogicalSwitchPort) bool
+
+// DeleteLogicalSwitchPortsWithPredicateOps looks up logical switch ports that
+// belong to the provided logical switch and match a given predicate, and
+// returns the ops to delete them and remove them from the switch
+func DeleteLogicalSwitchPortsWithPredicateOps(nbClient libovsdbclient.Client, ops []libovsdb.Operation, sw *nbdb.LogicalSwitch, p logicalSwitchPortPredicate) ([]libovsdb.Operation, error) {
+	swName := sw.Name
+	sw, err := GetLogicalSwitch(nbClient, sw)
+	if err != nil {
+		if errors.Is(err, libovsdbclient.ErrNotFound) {
+			return ops, nil
+		}
+		return nil, fmt.Errorf("error retrieving logical switch %s from libovsdb cache: %w", swName, err)
+	}
+
+	var lsps []*nbdb.LogicalSwitchPort
+	for _, port := range sw.Ports {
+		lsp := &nbdb.LogicalSwitchPort{UUID: port}
+		lsp, err = GetLogicalSwitchPort(nbClient, lsp)
+		if err != nil {
+			if errors.Is(err, libovsdbclient.ErrNotFound) {
+				continue
+			}
+			return nil, fmt.Errorf("error retrieving logical switch port with UUID %s associated with logical"+
+				" switch %s from libovsdb cache: %w", port, swName, err)
+		}
+		if p(lsp) {
+			lsps = append(lsps, lsp)
+		}
+	}
+
+	opModels := make([]operationModel, 0, len(lsps)+1)
+	sw.Ports = make([]string, 0, len(lsps))
+	for _, lsp := range lsps {
+		sw.Ports = append(sw.Ports, lsp.UUID)
+		opModel := operationModel{
+			Model:       lsp,
+			ErrNotFound: false,
+			BulkOp:      false,
+		}
+		opModels = append(opModels, opModel)
+	}
+	opModel := operationModel{
+		Model:            sw,
+		OnModelMutations: []interface{}{&sw.Ports},
+		ErrNotFound:      false,
+		BulkOp:           false,
+	}
+	opModels = append(opModels, opModel)
+
+	m := newModelClient(nbClient)
+	return m.DeleteOps(ops, opModels...)
+}
+
+// UpdateLogicalSwitchPortSetOptions sets options on the provided logical switch
+// port, adding any missing keys, removing keys set to an empty value, and
+// updating existing ones
+func UpdateLogicalSwitchPortSetOptions(nbClient libovsdbclient.Client, lsp *nbdb.LogicalSwitchPort) error {
+	options := lsp.Options
+	lsp, err := GetLogicalSwitchPort(nbClient, lsp)
+	if err != nil {
+		return err
+	}
+
+	if lsp.Options == nil {
+		lsp.Options = map[string]string{}
+	}
+
+	for k, v := range options {
+		if v == "" {
+			delete(lsp.Options, k)
+		} else {
+			lsp.Options[k] = v
+		}
+	}
+
+	opModel := operationModel{
+		// For LSPs, Name is a valid index, so no predicate is needed
+		Model:          lsp,
+		OnModelUpdates: []interface{}{&lsp.Options},
+		ErrNotFound:    true,
+		BulkOp:         false,
+	}
+
+	m := newModelClient(nbClient)
+	_, err = m.CreateOrUpdate(opModel)
+	return err
+}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go
new file mode 100644
index 000000000..4672a5c0e
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/template_var.go
@@ -0,0 +1,112 @@
+package ops
+
+import (
+	"context"
+
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+type chassisTemplateVarPredicate func(*nbdb.ChassisTemplateVar) bool
+
+// ListTemplateVar looks up all chassis template variables.
+func ListTemplateVar(nbClient libovsdbclient.Client) ([]*nbdb.ChassisTemplateVar, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.Default.OVSDBTxnTimeout)
+	defer cancel()
+
+	templatesList := []*nbdb.ChassisTemplateVar{}
+	err := nbClient.List(ctx, &templatesList)
+	return templatesList, err
+}
+
+// CreateOrUpdateChassisTemplateVarOps creates or updates the provided
+// 'template' variable and returns the corresponding ops.
+func CreateOrUpdateChassisTemplateVarOps(nbClient libovsdbclient.Client,
+	ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar) ([]libovsdb.Operation, error) {
+
+	modelClient := newModelClient(nbClient)
+	return modelClient.CreateOrUpdateOps(ops, operationModel{
+		Model:            template,
+		OnModelMutations: []interface{}{&template.Variables},
+		ErrNotFound:      false,
+		BulkOp:           false,
+	})
+}
+
+// deleteChassisTemplateVarVariablesOps removes the variables listed as
+// keys of 'template.Variables' and returns the corresponding ops.
+// It applies the mutation to all records that are selected by 'predicate'.
+func deleteChassisTemplateVarVariablesOps(nbClient libovsdbclient.Client,
+	ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar,
+	predicate chassisTemplateVarPredicate) ([]libovsdb.Operation, error) {
+
+	deleteTemplate := &nbdb.ChassisTemplateVar{
+		Chassis:   template.Chassis,
+		Variables: map[string]string{},
+	}
+	for name := range template.Variables {
+		deleteTemplate.Variables[name] = ""
+	}
+	modelClient := newModelClient(nbClient)
+	return modelClient.DeleteOps(ops, operationModel{
+		Model:            deleteTemplate,
+		ModelPredicate:   predicate,
+		OnModelMutations: []interface{}{&deleteTemplate.Variables},
+		ErrNotFound:      false,
+		BulkOp:           true,
+	})
+}
+
+// DeleteChassisTemplateVarVariablesOps removes all variables listed as
+// keys of 'template.Variables' from the record matching the same chassis
+// as 'template'. It returns the corresponding ops.
+func DeleteChassisTemplateVarVariablesOps(nbClient libovsdbclient.Client,
+	ops []libovsdb.Operation, template *nbdb.ChassisTemplateVar) ([]libovsdb.Operation, error) {
+
+	return deleteChassisTemplateVarVariablesOps(nbClient, ops, template, nil)
+}
+
+// DeleteAllChassisTemplateVarVariables removes the variables listed
+// in 'varNames' and commits the transaction to the database. It applies
+// the mutation to all records that contain these variable names.
+func DeleteAllChassisTemplateVarVariables(nbClient libovsdbclient.Client, varNames []string) error {
+	deleteTemplateVar := &nbdb.ChassisTemplateVar{
+		Variables: make(map[string]string, len(varNames)),
+	}
+	for _, name := range varNames {
+		deleteTemplateVar.Variables[name] = ""
+	}
+	ops, err := deleteChassisTemplateVarVariablesOps(nbClient, nil, deleteTemplateVar,
+		func(item *nbdb.ChassisTemplateVar) bool {
+			for _, name := range varNames {
+				if _, found := item.Variables[name]; found {
+					return true
+				}
+			}
+			return false
+		})
+	if err != nil {
+		return err
+	}
+
+	_, err = TransactAndCheck(nbClient, ops)
+	return err
+}
+
+// DeleteChassisTemplateVar deletes all complete Chassis_Template_Var
+// records matching 'templates'.
+func DeleteChassisTemplateVar(nbClient libovsdbclient.Client, templates ...*nbdb.ChassisTemplateVar) error {
+	opModels := make([]operationModel, 0, len(templates))
+	for i := range templates {
+		template := templates[i]
+		opModels = append(opModels, operationModel{
+			Model:       template,
+			ErrNotFound: false,
+			BulkOp:      false,
+		})
+	}
+	m := newModelClient(nbClient)
+	return m.Delete(opModels...)
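+
+	// Illustrative usage (editor's sketch; the variable name is hypothetical):
+	// remove one template variable from every chassis record that carries it:
+	//
+	//	err := DeleteAllChassisTemplateVarVariables(nbClient, []string{"NODEIP_IPv4_0"})
+	//
+	// DeleteChassisTemplateVar, by contrast, removes whole per-chassis records.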
+} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go new file mode 100644 index 000000000..51fd09cce --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/transact.go @@ -0,0 +1,98 @@ +package ops + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" +) + +// TransactWithRetry will attempt a transaction several times if it receives an error indicating that the client +// was not connected when the transaction occurred. +func TransactWithRetry(ctx context.Context, c client.Client, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) { + var results []ovsdb.OperationResult + resultErr := wait.PollUntilContextCancel(ctx, 200*time.Millisecond, true, func(ctx context.Context) (bool, error) { + var err error + results, err = c.Transact(ctx, ops...) + if err == nil { + return true, nil + } + if err != nil && errors.Is(err, client.ErrNotConnected) { + klog.V(5).Infof("Unable to execute transaction: %+v. Client is disconnected, will retry...", ops) + return false, nil + } + return false, err + }) + return results, resultErr +} + +func TransactAndCheck(c client.Client, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) { + if len(ops) <= 0 { + return []ovsdb.OperationResult{{}}, nil + } + + klog.V(5).Infof("Configuring OVN: %+v", ops) + + ctx, cancel := context.WithTimeout(context.TODO(), config.Default.OVSDBTxnTimeout) + defer cancel() + + results, err := TransactWithRetry(ctx, c, ops) + if err != nil { + return nil, fmt.Errorf("error in transact with ops %+v: %v", ops, err) + } + + opErrors, err := ovsdb.CheckOperationResults(results, ops) + if err != nil { + return nil, fmt.Errorf("error in transact with ops %+v results %+v and errors %+v: %v", ops, results, opErrors, err) + } + + return results, nil +} + +// TransactAndCheckAndSetUUIDs transacts the given ops against client and returns +// results if no error occurred or an error otherwise. 
It sets the real uuids for +// the passed models if they were inserted and have a named-uuid (as built by +// BuildNamedUUID) +func TransactAndCheckAndSetUUIDs(client client.Client, models interface{}, ops []ovsdb.Operation) ([]ovsdb.OperationResult, error) { + results, err := TransactAndCheck(client, ops) + if err != nil { + return nil, err + } + + namedModelMap := map[string]model.Model{} + _ = onModels(models, func(model interface{}) error { + uuid := getUUID(model) + if isNamedUUID(uuid) { + namedModelMap[uuid] = model + } + return nil + }) + + if len(namedModelMap) == 0 { + return results, nil + } + + for i, op := range ops { + if op.Op != ovsdb.OperationInsert { + continue + } + + if !isNamedUUID(op.UUIDName) { + continue + } + + if model, ok := namedModelMap[op.UUIDName]; ok { + setUUID(model, results[i].UUID.GoUUID) + } + } + + return results, nil +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore new file mode 100644 index 000000000..734ba1eff --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go new file mode 100644 index 000000000..0c2840c17 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/acl.go @@ -0,0 +1,303 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ACLTable = "ACL" + +type ( + ACLAction = string + ACLDirection = string + ACLSeverity = string +) + +var ( + ACLActionAllow ACLAction = "allow" + ACLActionAllowRelated ACLAction = "allow-related" + ACLActionAllowStateless ACLAction = "allow-stateless" + ACLActionDrop ACLAction = "drop" + ACLActionReject ACLAction = "reject" + ACLActionPass ACLAction = "pass" + ACLDirectionFromLport ACLDirection = "from-lport" + ACLDirectionToLport ACLDirection = "to-lport" + ACLSeverityAlert ACLSeverity = "alert" + ACLSeverityWarning ACLSeverity = "warning" + ACLSeverityNotice ACLSeverity = "notice" + ACLSeverityInfo ACLSeverity = "info" + ACLSeverityDebug ACLSeverity = "debug" +) + +// ACL defines an object in ACL table +type ACL struct { + UUID string `ovsdb:"_uuid"` + Action ACLAction `ovsdb:"action"` + Direction ACLDirection `ovsdb:"direction"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Label int `ovsdb:"label"` + Log bool `ovsdb:"log"` + Match string `ovsdb:"match"` + Meter *string `ovsdb:"meter"` + Name *string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` + SampleEst *string `ovsdb:"sample_est"` + SampleNew *string `ovsdb:"sample_new"` + Severity *ACLSeverity `ovsdb:"severity"` + Tier int `ovsdb:"tier"` +} + +func (a *ACL) GetUUID() string { + return a.UUID +} + +func (a *ACL) GetAction() ACLAction { + return a.Action +} + +func (a *ACL) GetDirection() ACLDirection { + return a.Direction +} + +func (a *ACL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyACLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalACLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v 
:= range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ACL) GetLabel() int { + return a.Label +} + +func (a *ACL) GetLog() bool { + return a.Log +} + +func (a *ACL) GetMatch() string { + return a.Match +} + +func (a *ACL) GetMeter() *string { + return a.Meter +} + +func copyACLMeter(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLMeter(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetName() *string { + return a.Name +} + +func copyACLName(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLName(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetOptions() map[string]string { + return a.Options +} + +func copyACLOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalACLOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ACL) GetPriority() int { + return a.Priority +} + +func (a *ACL) GetSampleEst() *string { + return a.SampleEst +} + +func copyACLSampleEst(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSampleEst(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetSampleNew() *string { + return a.SampleNew +} + +func copyACLSampleNew(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSampleNew(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetSeverity() *ACLSeverity { + return a.Severity +} + +func copyACLSeverity(a *ACLSeverity) *ACLSeverity { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalACLSeverity(a, b *ACLSeverity) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ACL) GetTier() int { + return a.Tier +} + +func (a *ACL) DeepCopyInto(b *ACL) { + *b = *a + b.ExternalIDs = copyACLExternalIDs(a.ExternalIDs) + b.Meter = copyACLMeter(a.Meter) + b.Name = copyACLName(a.Name) + b.Options = copyACLOptions(a.Options) + b.SampleEst = copyACLSampleEst(a.SampleEst) + b.SampleNew = copyACLSampleNew(a.SampleNew) + b.Severity = copyACLSeverity(a.Severity) +} + +func (a *ACL) DeepCopy() *ACL { + b := new(ACL) + a.DeepCopyInto(b) + return b +} + +func (a *ACL) CloneModelInto(b model.Model) { + c := b.(*ACL) + a.DeepCopyInto(c) +} + +func (a *ACL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ACL) Equals(b *ACL) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + a.Direction == b.Direction && + equalACLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Label == b.Label && + a.Log == b.Log && + a.Match == b.Match && + equalACLMeter(a.Meter, b.Meter) && + equalACLName(a.Name, b.Name) && + equalACLOptions(a.Options, b.Options) && + a.Priority == b.Priority && + equalACLSampleEst(a.SampleEst, b.SampleEst) && + equalACLSampleNew(a.SampleNew, b.SampleNew) && + 
equalACLSeverity(a.Severity, b.Severity) && + a.Tier == b.Tier +} + +func (a *ACL) EqualsModel(b model.Model) bool { + c := b.(*ACL) + return a.Equals(c) +} + +var _ model.CloneableModel = &ACL{} +var _ model.ComparableModel = &ACL{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go new file mode 100644 index 000000000..e8a836e2d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/address_set.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const AddressSetTable = "Address_Set" + +// AddressSet defines an object in Address_Set table +type AddressSet struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` +} + +func (a *AddressSet) GetUUID() string { + return a.UUID +} + +func (a *AddressSet) GetAddresses() []string { + return a.Addresses +} + +func copyAddressSetAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalAddressSetAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *AddressSet) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyAddressSetExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalAddressSetExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *AddressSet) GetName() string { + return a.Name +} + +func (a *AddressSet) DeepCopyInto(b *AddressSet) { + *b = *a + b.Addresses = copyAddressSetAddresses(a.Addresses) + b.ExternalIDs = copyAddressSetExternalIDs(a.ExternalIDs) +} + +func (a *AddressSet) DeepCopy() *AddressSet { + b := new(AddressSet) + a.DeepCopyInto(b) + return b +} + +func (a *AddressSet) CloneModelInto(b model.Model) { + c := b.(*AddressSet) + a.DeepCopyInto(c) +} + +func (a *AddressSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AddressSet) Equals(b *AddressSet) bool { + return a.UUID == b.UUID && + equalAddressSetAddresses(a.Addresses, b.Addresses) && + equalAddressSetExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name +} + +func (a *AddressSet) EqualsModel(b model.Model) bool { + c := b.(*AddressSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &AddressSet{} +var _ model.ComparableModel = &AddressSet{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go new file mode 100644 index 000000000..46646e81a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/bfd.go @@ -0,0 +1,237 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
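For context, the ops helpers at the top of this diff compose in the usual build-ops-then-commit pattern. A minimal sketch of that flow, assuming a connected libovsdb client; the chassis name and variable are hypothetical placeholders:

import (
	libovsdbclient "github.com/ovn-org/libovsdb/client"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

// upsertTemplateVar creates or updates one Chassis_Template_Var record and
// commits it in a single checked transaction.
func upsertTemplateVar(nbClient libovsdbclient.Client) error {
	tv := &nbdb.ChassisTemplateVar{
		Chassis:   "chassis-1",                                      // hypothetical chassis name
		Variables: map[string]string{"MY_TEMPLATE_VAR": "10.0.0.2"}, // hypothetical variable
	}
	// Passing nil starts a fresh op list; the helper mutates Variables on an
	// existing record or inserts a new one.
	txOps, err := ops.CreateOrUpdateChassisTemplateVarOps(nbClient, nil, tv)
	if err != nil {
		return err
	}
	// TransactAndCheck retries while the client is disconnected and verifies
	// every operation result before returning.
	_, err = ops.TransactAndCheck(nbClient, txOps)
	return err
}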
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const BFDTable = "BFD" + +type ( + BFDStatus = string +) + +var ( + BFDStatusDown BFDStatus = "down" + BFDStatusInit BFDStatus = "init" + BFDStatusUp BFDStatus = "up" + BFDStatusAdminDown BFDStatus = "admin_down" +) + +// BFD defines an object in BFD table +type BFD struct { + UUID string `ovsdb:"_uuid"` + DetectMult *int `ovsdb:"detect_mult"` + DstIP string `ovsdb:"dst_ip"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LogicalPort string `ovsdb:"logical_port"` + MinRx *int `ovsdb:"min_rx"` + MinTx *int `ovsdb:"min_tx"` + Options map[string]string `ovsdb:"options"` + Status *BFDStatus `ovsdb:"status"` +} + +func (a *BFD) GetUUID() string { + return a.UUID +} + +func (a *BFD) GetDetectMult() *int { + return a.DetectMult +} + +func copyBFDDetectMult(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDDetectMult(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) GetDstIP() string { + return a.DstIP +} + +func (a *BFD) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBFDExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *BFD) GetMinRx() *int { + return a.MinRx +} + +func copyBFDMinRx(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDMinRx(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) GetMinTx() *int { + return a.MinTx +} + +func copyBFDMinTx(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDMinTx(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) GetOptions() map[string]string { + return a.Options +} + +func copyBFDOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetStatus() *BFDStatus { + return a.Status +} + +func copyBFDStatus(a *BFDStatus) *BFDStatus { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalBFDStatus(a, b *BFDStatus) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *BFD) DeepCopyInto(b *BFD) { + *b = *a + b.DetectMult = copyBFDDetectMult(a.DetectMult) + b.ExternalIDs = copyBFDExternalIDs(a.ExternalIDs) + b.MinRx = copyBFDMinRx(a.MinRx) + b.MinTx = copyBFDMinTx(a.MinTx) + b.Options = copyBFDOptions(a.Options) + b.Status = copyBFDStatus(a.Status) +} + +func (a *BFD) DeepCopy() *BFD { + b := new(BFD) + a.DeepCopyInto(b) + return b +} + +func (a *BFD) CloneModelInto(b 
model.Model) { + c := b.(*BFD) + a.DeepCopyInto(c) +} + +func (a *BFD) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *BFD) Equals(b *BFD) bool { + return a.UUID == b.UUID && + equalBFDDetectMult(a.DetectMult, b.DetectMult) && + a.DstIP == b.DstIP && + equalBFDExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.LogicalPort == b.LogicalPort && + equalBFDMinRx(a.MinRx, b.MinRx) && + equalBFDMinTx(a.MinTx, b.MinTx) && + equalBFDOptions(a.Options, b.Options) && + equalBFDStatus(a.Status, b.Status) +} + +func (a *BFD) EqualsModel(b model.Model) bool { + c := b.(*BFD) + return a.Equals(c) +} + +var _ model.CloneableModel = &BFD{} +var _ model.ComparableModel = &BFD{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go new file mode 100644 index 000000000..602c3f522 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/chassis_template_var.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisTemplateVarTable = "Chassis_Template_Var" + +// ChassisTemplateVar defines an object in Chassis_Template_Var table +type ChassisTemplateVar struct { + UUID string `ovsdb:"_uuid"` + Chassis string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Variables map[string]string `ovsdb:"variables"` +} + +func (a *ChassisTemplateVar) GetUUID() string { + return a.UUID +} + +func (a *ChassisTemplateVar) GetChassis() string { + return a.Chassis +} + +func (a *ChassisTemplateVar) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyChassisTemplateVarExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisTemplateVarExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisTemplateVar) GetVariables() map[string]string { + return a.Variables +} + +func copyChassisTemplateVarVariables(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisTemplateVarVariables(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisTemplateVar) DeepCopyInto(b *ChassisTemplateVar) { + *b = *a + b.ExternalIDs = copyChassisTemplateVarExternalIDs(a.ExternalIDs) + b.Variables = copyChassisTemplateVarVariables(a.Variables) +} + +func (a *ChassisTemplateVar) DeepCopy() *ChassisTemplateVar { + b := new(ChassisTemplateVar) + a.DeepCopyInto(b) + return b +} + +func (a *ChassisTemplateVar) CloneModelInto(b model.Model) { + c := b.(*ChassisTemplateVar) + a.DeepCopyInto(c) +} + +func (a *ChassisTemplateVar) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ChassisTemplateVar) Equals(b *ChassisTemplateVar) bool { + return a.UUID == b.UUID && + a.Chassis == b.Chassis && + equalChassisTemplateVarExternalIDs(a.ExternalIDs, b.ExternalIDs) && + 
equalChassisTemplateVarVariables(a.Variables, b.Variables) +} + +func (a *ChassisTemplateVar) EqualsModel(b model.Model) bool { + c := b.(*ChassisTemplateVar) + return a.Equals(c) +} + +var _ model.CloneableModel = &ChassisTemplateVar{} +var _ model.ComparableModel = &ChassisTemplateVar{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go new file mode 100644 index 000000000..baf6da344 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/connection.go @@ -0,0 +1,209 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ConnectionTable = "Connection" + +// Connection defines an object in Connection table +type Connection struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + InactivityProbe *int `ovsdb:"inactivity_probe"` + IsConnected bool `ovsdb:"is_connected"` + MaxBackoff *int `ovsdb:"max_backoff"` + OtherConfig map[string]string `ovsdb:"other_config"` + Status map[string]string `ovsdb:"status"` + Target string `ovsdb:"target"` +} + +func (a *Connection) GetUUID() string { + return a.UUID +} + +func (a *Connection) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyConnectionExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetInactivityProbe() *int { + return a.InactivityProbe +} + +func copyConnectionInactivityProbe(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionInactivityProbe(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetIsConnected() bool { + return a.IsConnected +} + +func (a *Connection) GetMaxBackoff() *int { + return a.MaxBackoff +} + +func copyConnectionMaxBackoff(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionMaxBackoff(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyConnectionOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetStatus() map[string]string { + return a.Status +} + +func copyConnectionStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return 
false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetTarget() string { + return a.Target +} + +func (a *Connection) DeepCopyInto(b *Connection) { + *b = *a + b.ExternalIDs = copyConnectionExternalIDs(a.ExternalIDs) + b.InactivityProbe = copyConnectionInactivityProbe(a.InactivityProbe) + b.MaxBackoff = copyConnectionMaxBackoff(a.MaxBackoff) + b.OtherConfig = copyConnectionOtherConfig(a.OtherConfig) + b.Status = copyConnectionStatus(a.Status) +} + +func (a *Connection) DeepCopy() *Connection { + b := new(Connection) + a.DeepCopyInto(b) + return b +} + +func (a *Connection) CloneModelInto(b model.Model) { + c := b.(*Connection) + a.DeepCopyInto(c) +} + +func (a *Connection) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Connection) Equals(b *Connection) bool { + return a.UUID == b.UUID && + equalConnectionExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalConnectionInactivityProbe(a.InactivityProbe, b.InactivityProbe) && + a.IsConnected == b.IsConnected && + equalConnectionMaxBackoff(a.MaxBackoff, b.MaxBackoff) && + equalConnectionOtherConfig(a.OtherConfig, b.OtherConfig) && + equalConnectionStatus(a.Status, b.Status) && + a.Target == b.Target +} + +func (a *Connection) EqualsModel(b model.Model) bool { + c := b.(*Connection) + return a.Equals(c) +} + +var _ model.CloneableModel = &Connection{} +var _ model.ComparableModel = &Connection{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go new file mode 100644 index 000000000..1e146b657 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/copp.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
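One subtlety in the generated equal* helpers above: a nil map or slice never equals an empty, non-nil one, because every helper first checks (a == nil) != (b == nil). A small sketch using the Connection model just defined (values hypothetical):

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func main() {
	conn := &nbdb.Connection{UUID: "u1", Target: "ptcp:6641"} // Status is nil
	cp := conn.DeepCopy()
	cp.Status = map[string]string{} // empty but non-nil
	fmt.Println(conn.Equals(cp))    // false: nil vs empty map differ
	cp.Status = nil
	fmt.Println(conn.Equals(cp))    // true: everything else was deep-copied
}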
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const CoppTable = "Copp" + +// Copp defines an object in Copp table +type Copp struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Meters map[string]string `ovsdb:"meters"` + Name string `ovsdb:"name"` +} + +func (a *Copp) GetUUID() string { + return a.UUID +} + +func (a *Copp) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyCoppExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalCoppExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Copp) GetMeters() map[string]string { + return a.Meters +} + +func copyCoppMeters(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalCoppMeters(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Copp) GetName() string { + return a.Name +} + +func (a *Copp) DeepCopyInto(b *Copp) { + *b = *a + b.ExternalIDs = copyCoppExternalIDs(a.ExternalIDs) + b.Meters = copyCoppMeters(a.Meters) +} + +func (a *Copp) DeepCopy() *Copp { + b := new(Copp) + a.DeepCopyInto(b) + return b +} + +func (a *Copp) CloneModelInto(b model.Model) { + c := b.(*Copp) + a.DeepCopyInto(c) +} + +func (a *Copp) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Copp) Equals(b *Copp) bool { + return a.UUID == b.UUID && + equalCoppExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalCoppMeters(a.Meters, b.Meters) && + a.Name == b.Name +} + +func (a *Copp) EqualsModel(b model.Model) bool { + c := b.(*Copp) + return a.Equals(c) +} + +var _ model.CloneableModel = &Copp{} +var _ model.ComparableModel = &Copp{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go new file mode 100644 index 000000000..fd68ebee2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_options.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPOptionsTable = "DHCP_Options" + +// DHCPOptions defines an object in DHCP_Options table +type DHCPOptions struct { + UUID string `ovsdb:"_uuid"` + Cidr string `ovsdb:"cidr"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` +} + +func (a *DHCPOptions) GetUUID() string { + return a.UUID +} + +func (a *DHCPOptions) GetCidr() string { + return a.Cidr +} + +func (a *DHCPOptions) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDHCPOptionsExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPOptionsExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPOptions) GetOptions() map[string]string { + return a.Options +} + +func copyDHCPOptionsOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPOptionsOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPOptions) DeepCopyInto(b *DHCPOptions) { + *b = *a + b.ExternalIDs = copyDHCPOptionsExternalIDs(a.ExternalIDs) + b.Options = copyDHCPOptionsOptions(a.Options) +} + +func (a *DHCPOptions) DeepCopy() *DHCPOptions { + b := new(DHCPOptions) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPOptions) CloneModelInto(b model.Model) { + c := b.(*DHCPOptions) + a.DeepCopyInto(c) +} + +func (a *DHCPOptions) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPOptions) Equals(b *DHCPOptions) bool { + return a.UUID == b.UUID && + a.Cidr == b.Cidr && + equalDHCPOptionsExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDHCPOptionsOptions(a.Options, b.Options) +} + +func (a *DHCPOptions) EqualsModel(b model.Model) bool { + c := b.(*DHCPOptions) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPOptions{} +var _ model.ComparableModel = &DHCPOptions{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go new file mode 100644 index 000000000..f0e973ab7 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dhcp_relay.go @@ -0,0 +1,145 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
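Optional OVSDB columns surface as pointer fields in these models (for example BFD.MinRx above, or DHCPRelay.Servers below), and the copy* helpers duplicate the pointed-to value, so a DeepCopy never aliases its source. A short sketch reusing the BFD model from earlier in this diff, with hypothetical values:

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func main() {
	minRx := 1000 // hypothetical min_rx, in milliseconds
	orig := &nbdb.BFD{LogicalPort: "lrp-node1", DstIP: "10.0.0.1", MinRx: &minRx}
	cp := orig.DeepCopy()
	*cp.MinRx = 2000         // mutates the copy's own int...
	fmt.Println(*orig.MinRx) // ...so the original still prints 1000
}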
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPRelayTable = "DHCP_Relay" + +// DHCPRelay defines an object in DHCP_Relay table +type DHCPRelay struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Servers *string `ovsdb:"servers"` +} + +func (a *DHCPRelay) GetUUID() string { + return a.UUID +} + +func (a *DHCPRelay) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDHCPRelayExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPRelayExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPRelay) GetName() string { + return a.Name +} + +func (a *DHCPRelay) GetOptions() map[string]string { + return a.Options +} + +func copyDHCPRelayOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDHCPRelayOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DHCPRelay) GetServers() *string { + return a.Servers +} + +func copyDHCPRelayServers(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalDHCPRelayServers(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *DHCPRelay) DeepCopyInto(b *DHCPRelay) { + *b = *a + b.ExternalIDs = copyDHCPRelayExternalIDs(a.ExternalIDs) + b.Options = copyDHCPRelayOptions(a.Options) + b.Servers = copyDHCPRelayServers(a.Servers) +} + +func (a *DHCPRelay) DeepCopy() *DHCPRelay { + b := new(DHCPRelay) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPRelay) CloneModelInto(b model.Model) { + c := b.(*DHCPRelay) + a.DeepCopyInto(c) +} + +func (a *DHCPRelay) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPRelay) Equals(b *DHCPRelay) bool { + return a.UUID == b.UUID && + equalDHCPRelayExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalDHCPRelayOptions(a.Options, b.Options) && + equalDHCPRelayServers(a.Servers, b.Servers) +} + +func (a *DHCPRelay) EqualsModel(b model.Model) bool { + c := b.(*DHCPRelay) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPRelay{} +var _ model.ComparableModel = &DHCPRelay{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go new file mode 100644 index 000000000..285d5df28 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/dns.go @@ -0,0 +1,147 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const DNSTable = "DNS" + +// DNS defines an object in DNS table +type DNS struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Records map[string]string `ovsdb:"records"` +} + +func (a *DNS) GetUUID() string { + return a.UUID +} + +func (a *DNS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDNSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetOptions() map[string]string { + return a.Options +} + +func copyDNSOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetRecords() map[string]string { + return a.Records +} + +func copyDNSRecords(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSRecords(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) DeepCopyInto(b *DNS) { + *b = *a + b.ExternalIDs = copyDNSExternalIDs(a.ExternalIDs) + b.Options = copyDNSOptions(a.Options) + b.Records = copyDNSRecords(a.Records) +} + +func (a *DNS) DeepCopy() *DNS { + b := new(DNS) + a.DeepCopyInto(b) + return b +} + +func (a *DNS) CloneModelInto(b model.Model) { + c := b.(*DNS) + a.DeepCopyInto(c) +} + +func (a *DNS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DNS) Equals(b *DNS) bool { + return a.UUID == b.UUID && + equalDNSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDNSOptions(a.Options, b.Options) && + equalDNSRecords(a.Records, b.Records) +} + +func (a *DNS) EqualsModel(b model.Model) bool { + c := b.(*DNS) + return a.Equals(c) +} + +var _ model.CloneableModel = &DNS{} +var _ model.ComparableModel = &DNS{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go new file mode 100644 index 000000000..1a0657559 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/forwarding_group.go @@ -0,0 +1,136 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
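Returning to transact.go from earlier in this diff: TransactAndCheck wraps TransactWithRetry with config.Default.OVSDBTxnTimeout, while TransactWithRetry itself polls every 200 ms for as long as the client reports ErrNotConnected and the context is alive. Callers using it directly therefore own the retry budget. A sketch with a hypothetical 10-second budget:

import (
	"context"
	"time"

	libovsdbclient "github.com/ovn-org/libovsdb/client"
	"github.com/ovn-org/libovsdb/ovsdb"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
)

func transactWithBudget(c libovsdbclient.Client, txOps []ovsdb.Operation) ([]ovsdb.OperationResult, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// Retries only on client.ErrNotConnected; any other error aborts immediately.
	return ops.TransactWithRetry(ctx, c, txOps)
}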
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const ForwardingGroupTable = "Forwarding_Group" + +// ForwardingGroup defines an object in Forwarding_Group table +type ForwardingGroup struct { + UUID string `ovsdb:"_uuid"` + ChildPort []string `ovsdb:"child_port"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Liveness bool `ovsdb:"liveness"` + Name string `ovsdb:"name"` + Vip string `ovsdb:"vip"` + Vmac string `ovsdb:"vmac"` +} + +func (a *ForwardingGroup) GetUUID() string { + return a.UUID +} + +func (a *ForwardingGroup) GetChildPort() []string { + return a.ChildPort +} + +func copyForwardingGroupChildPort(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalForwardingGroupChildPort(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *ForwardingGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyForwardingGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalForwardingGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ForwardingGroup) GetLiveness() bool { + return a.Liveness +} + +func (a *ForwardingGroup) GetName() string { + return a.Name +} + +func (a *ForwardingGroup) GetVip() string { + return a.Vip +} + +func (a *ForwardingGroup) GetVmac() string { + return a.Vmac +} + +func (a *ForwardingGroup) DeepCopyInto(b *ForwardingGroup) { + *b = *a + b.ChildPort = copyForwardingGroupChildPort(a.ChildPort) + b.ExternalIDs = copyForwardingGroupExternalIDs(a.ExternalIDs) +} + +func (a *ForwardingGroup) DeepCopy() *ForwardingGroup { + b := new(ForwardingGroup) + a.DeepCopyInto(b) + return b +} + +func (a *ForwardingGroup) CloneModelInto(b model.Model) { + c := b.(*ForwardingGroup) + a.DeepCopyInto(c) +} + +func (a *ForwardingGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ForwardingGroup) Equals(b *ForwardingGroup) bool { + return a.UUID == b.UUID && + equalForwardingGroupChildPort(a.ChildPort, b.ChildPort) && + equalForwardingGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Liveness == b.Liveness && + a.Name == b.Name && + a.Vip == b.Vip && + a.Vmac == b.Vmac +} + +func (a *ForwardingGroup) EqualsModel(b model.Model) bool { + c := b.(*ForwardingGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &ForwardingGroup{} +var _ model.ComparableModel = &ForwardingGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go new file mode 100644 index 000000000..15935847b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gateway_chassis.go @@ -0,0 +1,132 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
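CloneModelInto, as generated, does an unchecked type assertion (for example c := b.(*ForwardingGroup) above), so passing a mismatched model type panics. Generic code is safer going through model.CloneableModel, which allocates the matching concrete type internally. A brief sketch with hypothetical values:

import (
	"github.com/ovn-org/libovsdb/model"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

// cloneAny copies any generated model without risking the type-assertion
// panic inside CloneModelInto.
func cloneAny(m model.CloneableModel) model.Model {
	return m.CloneModel()
}

var fg = &nbdb.ForwardingGroup{Name: "fg-1", Vip: "10.0.0.10"}
var fgCopy = cloneAny(fg).(*nbdb.ForwardingGroup)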
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const GatewayChassisTable = "Gateway_Chassis" + +// GatewayChassis defines an object in Gateway_Chassis table +type GatewayChassis struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` +} + +func (a *GatewayChassis) GetUUID() string { + return a.UUID +} + +func (a *GatewayChassis) GetChassisName() string { + return a.ChassisName +} + +func (a *GatewayChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyGatewayChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetName() string { + return a.Name +} + +func (a *GatewayChassis) GetOptions() map[string]string { + return a.Options +} + +func copyGatewayChassisOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetPriority() int { + return a.Priority +} + +func (a *GatewayChassis) DeepCopyInto(b *GatewayChassis) { + *b = *a + b.ExternalIDs = copyGatewayChassisExternalIDs(a.ExternalIDs) + b.Options = copyGatewayChassisOptions(a.Options) +} + +func (a *GatewayChassis) DeepCopy() *GatewayChassis { + b := new(GatewayChassis) + a.DeepCopyInto(b) + return b +} + +func (a *GatewayChassis) CloneModelInto(b model.Model) { + c := b.(*GatewayChassis) + a.DeepCopyInto(c) +} + +func (a *GatewayChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *GatewayChassis) Equals(b *GatewayChassis) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + equalGatewayChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalGatewayChassisOptions(a.Options, b.Options) && + a.Priority == b.Priority +} + +func (a *GatewayChassis) EqualsModel(b model.Model) bool { + c := b.(*GatewayChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &GatewayChassis{} +var _ model.ComparableModel = &GatewayChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go new file mode 100644 index 000000000..67f8a84f1 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/gen.go @@ -0,0 +1,3 @@ +package nbdb + +//go:generate modelgen --extended -p nbdb -o . 
ovn-nb.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go new file mode 100644 index 000000000..dc09d1ec9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis.go @@ -0,0 +1,93 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisTable = "HA_Chassis" + +// HAChassis defines an object in HA_Chassis table +type HAChassis struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Priority int `ovsdb:"priority"` +} + +func (a *HAChassis) GetUUID() string { + return a.UUID +} + +func (a *HAChassis) GetChassisName() string { + return a.ChassisName +} + +func (a *HAChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassis) GetPriority() int { + return a.Priority +} + +func (a *HAChassis) DeepCopyInto(b *HAChassis) { + *b = *a + b.ExternalIDs = copyHAChassisExternalIDs(a.ExternalIDs) +} + +func (a *HAChassis) DeepCopy() *HAChassis { + b := new(HAChassis) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassis) CloneModelInto(b model.Model) { + c := b.(*HAChassis) + a.DeepCopyInto(c) +} + +func (a *HAChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassis) Equals(b *HAChassis) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + equalHAChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Priority == b.Priority +} + +func (a *HAChassis) EqualsModel(b model.Model) bool { + c := b.(*HAChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassis{} +var _ model.ComparableModel = &HAChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go new file mode 100644 index 000000000..bdda95aaf --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ha_chassis_group.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
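TransactAndCheckAndSetUUIDs, earlier in this diff, back-fills the server-assigned UUIDs onto models inserted under a named UUID; its doc comment points at a BuildNamedUUID helper for minting the placeholder. A sketch under that assumption (BuildNamedUUID's exact signature is not shown in this diff), using the HA_Chassis model just defined:

import (
	libovsdbclient "github.com/ovn-org/libovsdb/client"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func insertHAChassis(nbClient libovsdbclient.Client) (*nbdb.HAChassis, error) {
	hc := &nbdb.HAChassis{
		UUID:        ops.BuildNamedUUID(), // assumed helper, per the doc comment above
		ChassisName: "node-1",             // hypothetical
		Priority:    1,
	}
	insertOps, err := nbClient.Create(hc) // libovsdb builds the insert op from the model
	if err != nil {
		return nil, err
	}
	if _, err := ops.TransactAndCheckAndSetUUIDs(nbClient, hc, insertOps); err != nil {
		return nil, err
	}
	// hc.UUID now holds the real UUID assigned by the server.
	return hc, nil
}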
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisGroupTable = "HA_Chassis_Group" + +// HAChassisGroup defines an object in HA_Chassis_Group table +type HAChassisGroup struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassis []string `ovsdb:"ha_chassis"` + Name string `ovsdb:"name"` +} + +func (a *HAChassisGroup) GetUUID() string { + return a.UUID +} + +func (a *HAChassisGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetHaChassis() []string { + return a.HaChassis +} + +func copyHAChassisGroupHaChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalHAChassisGroupHaChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetName() string { + return a.Name +} + +func (a *HAChassisGroup) DeepCopyInto(b *HAChassisGroup) { + *b = *a + b.ExternalIDs = copyHAChassisGroupExternalIDs(a.ExternalIDs) + b.HaChassis = copyHAChassisGroupHaChassis(a.HaChassis) +} + +func (a *HAChassisGroup) DeepCopy() *HAChassisGroup { + b := new(HAChassisGroup) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassisGroup) CloneModelInto(b model.Model) { + c := b.(*HAChassisGroup) + a.DeepCopyInto(c) +} + +func (a *HAChassisGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassisGroup) Equals(b *HAChassisGroup) bool { + return a.UUID == b.UUID && + equalHAChassisGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalHAChassisGroupHaChassis(a.HaChassis, b.HaChassis) && + a.Name == b.Name +} + +func (a *HAChassisGroup) EqualsModel(b model.Model) bool { + c := b.(*HAChassisGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassisGroup{} +var _ model.ComparableModel = &HAChassisGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go new file mode 100644 index 000000000..03bcd7601 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer.go @@ -0,0 +1,290 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
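Cross-table references in these generated models are plain UUID string slices: HaChassis on HAChassisGroup above holds the _uuid values of HA_Chassis rows, and the load-balancer tables below follow the same convention. A small sketch (names hypothetical):

import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"

// linkGroup wires existing HA_Chassis rows into a group by UUID reference;
// the rows themselves are never embedded, only their _uuid strings.
func linkGroup(chassis ...*nbdb.HAChassis) *nbdb.HAChassisGroup {
	grp := &nbdb.HAChassisGroup{Name: "gw-group"}
	for _, hc := range chassis {
		grp.HaChassis = append(grp.HaChassis, hc.UUID)
	}
	return grp
}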
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerTable = "Load_Balancer" + +type ( + LoadBalancerProtocol = string + LoadBalancerSelectionFields = string +) + +var ( + LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" + LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" + LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" + LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src" + LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst" + LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields = "ip_src" + LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst" + LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src" + LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst" +) + +// LoadBalancer defines an object in Load_Balancer table +type LoadBalancer struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HealthCheck []string `ovsdb:"health_check"` + IPPortMappings map[string]string `ovsdb:"ip_port_mappings"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Protocol *LoadBalancerProtocol `ovsdb:"protocol"` + SelectionFields []LoadBalancerSelectionFields `ovsdb:"selection_fields"` + Vips map[string]string `ovsdb:"vips"` +} + +func (a *LoadBalancer) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancer) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLoadBalancerExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetHealthCheck() []string { + return a.HealthCheck +} + +func copyLoadBalancerHealthCheck(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerHealthCheck(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancer) GetIPPortMappings() map[string]string { + return a.IPPortMappings +} + +func copyLoadBalancerIPPortMappings(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerIPPortMappings(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetName() string { + return a.Name +} + +func (a *LoadBalancer) GetOptions() map[string]string { + return a.Options +} + +func copyLoadBalancerOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + 
return false + } + } + return true +} + +func (a *LoadBalancer) GetProtocol() *LoadBalancerProtocol { + return a.Protocol +} + +func copyLoadBalancerProtocol(a *LoadBalancerProtocol) *LoadBalancerProtocol { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerProtocol(a, b *LoadBalancerProtocol) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetSelectionFields() []LoadBalancerSelectionFields { + return a.SelectionFields +} + +func copyLoadBalancerSelectionFields(a []LoadBalancerSelectionFields) []LoadBalancerSelectionFields { + if a == nil { + return nil + } + b := make([]LoadBalancerSelectionFields, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerSelectionFields(a, b []LoadBalancerSelectionFields) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancer) GetVips() map[string]string { + return a.Vips +} + +func copyLoadBalancerVips(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerVips(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) DeepCopyInto(b *LoadBalancer) { + *b = *a + b.ExternalIDs = copyLoadBalancerExternalIDs(a.ExternalIDs) + b.HealthCheck = copyLoadBalancerHealthCheck(a.HealthCheck) + b.IPPortMappings = copyLoadBalancerIPPortMappings(a.IPPortMappings) + b.Options = copyLoadBalancerOptions(a.Options) + b.Protocol = copyLoadBalancerProtocol(a.Protocol) + b.SelectionFields = copyLoadBalancerSelectionFields(a.SelectionFields) + b.Vips = copyLoadBalancerVips(a.Vips) +} + +func (a *LoadBalancer) DeepCopy() *LoadBalancer { + b := new(LoadBalancer) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancer) CloneModelInto(b model.Model) { + c := b.(*LoadBalancer) + a.DeepCopyInto(c) +} + +func (a *LoadBalancer) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancer) Equals(b *LoadBalancer) bool { + return a.UUID == b.UUID && + equalLoadBalancerExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLoadBalancerHealthCheck(a.HealthCheck, b.HealthCheck) && + equalLoadBalancerIPPortMappings(a.IPPortMappings, b.IPPortMappings) && + a.Name == b.Name && + equalLoadBalancerOptions(a.Options, b.Options) && + equalLoadBalancerProtocol(a.Protocol, b.Protocol) && + equalLoadBalancerSelectionFields(a.SelectionFields, b.SelectionFields) && + equalLoadBalancerVips(a.Vips, b.Vips) +} + +func (a *LoadBalancer) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancer) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancer{} +var _ model.ComparableModel = &LoadBalancer{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go new file mode 100644 index 000000000..775924967 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_group.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
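The enum-like aliases at the top of load_balancer.go (LoadBalancerProtocolTCP and friends) pair with the pointer-typed optional protocol column, so setting a protocol means taking the address of a copy of the enum value. A sketch with hypothetical VIP and backends:

import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"

func newTCPLoadBalancer() *nbdb.LoadBalancer {
	proto := nbdb.LoadBalancerProtocolTCP // local copy so it is addressable
	return &nbdb.LoadBalancer{
		Name:     "svc-lb",
		Protocol: &proto,
		Vips: map[string]string{
			// VIP -> comma-separated backend endpoints, following OVN convention
			"10.96.0.1:443": "10.244.0.5:6443,10.244.1.7:6443",
		},
	}
}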
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerGroupTable = "Load_Balancer_Group" + +// LoadBalancerGroup defines an object in Load_Balancer_Group table +type LoadBalancerGroup struct { + UUID string `ovsdb:"_uuid"` + LoadBalancer []string `ovsdb:"load_balancer"` + Name string `ovsdb:"name"` +} + +func (a *LoadBalancerGroup) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancerGroup) GetLoadBalancer() []string { + return a.LoadBalancer +} + +func copyLoadBalancerGroupLoadBalancer(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerGroupLoadBalancer(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancerGroup) GetName() string { + return a.Name +} + +func (a *LoadBalancerGroup) DeepCopyInto(b *LoadBalancerGroup) { + *b = *a + b.LoadBalancer = copyLoadBalancerGroupLoadBalancer(a.LoadBalancer) +} + +func (a *LoadBalancerGroup) DeepCopy() *LoadBalancerGroup { + b := new(LoadBalancerGroup) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancerGroup) CloneModelInto(b model.Model) { + c := b.(*LoadBalancerGroup) + a.DeepCopyInto(c) +} + +func (a *LoadBalancerGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancerGroup) Equals(b *LoadBalancerGroup) bool { + return a.UUID == b.UUID && + equalLoadBalancerGroupLoadBalancer(a.LoadBalancer, b.LoadBalancer) && + a.Name == b.Name +} + +func (a *LoadBalancerGroup) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancerGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancerGroup{} +var _ model.ComparableModel = &LoadBalancerGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go new file mode 100644 index 000000000..c8163fa00 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/load_balancer_health_check.go @@ -0,0 +1,120 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerHealthCheckTable = "Load_Balancer_Health_Check" + +// LoadBalancerHealthCheck defines an object in Load_Balancer_Health_Check table +type LoadBalancerHealthCheck struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Vip string `ovsdb:"vip"` +} + +func (a *LoadBalancerHealthCheck) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancerHealthCheck) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLoadBalancerHealthCheckExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerHealthCheckExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancerHealthCheck) GetOptions() map[string]string { + return a.Options +} + +func copyLoadBalancerHealthCheckOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerHealthCheckOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancerHealthCheck) GetVip() string { + return a.Vip +} + +func (a *LoadBalancerHealthCheck) DeepCopyInto(b *LoadBalancerHealthCheck) { + *b = *a + b.ExternalIDs = copyLoadBalancerHealthCheckExternalIDs(a.ExternalIDs) + b.Options = copyLoadBalancerHealthCheckOptions(a.Options) +} + +func (a *LoadBalancerHealthCheck) DeepCopy() *LoadBalancerHealthCheck { + b := new(LoadBalancerHealthCheck) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancerHealthCheck) CloneModelInto(b model.Model) { + c := b.(*LoadBalancerHealthCheck) + a.DeepCopyInto(c) +} + +func (a *LoadBalancerHealthCheck) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancerHealthCheck) Equals(b *LoadBalancerHealthCheck) bool { + return a.UUID == b.UUID && + equalLoadBalancerHealthCheckExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLoadBalancerHealthCheckOptions(a.Options, b.Options) && + a.Vip == b.Vip +} + +func (a *LoadBalancerHealthCheck) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancerHealthCheck) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancerHealthCheck{} +var _ model.ComparableModel = &LoadBalancerHealthCheck{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go new file mode 100644 index 000000000..81c5efaf9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router.go @@ -0,0 +1,356 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
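A Load_Balancer_Health_Check row attaches to a load balancer through the HealthCheck UUID slice on LoadBalancer shown earlier, with its Vip expected to match one of the balancer's VIPs. A hedged sketch; the "interval" option key is assumed from the OVN northbound schema, and the values are hypothetical:

import "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"

func attachHealthCheck(lb *nbdb.LoadBalancer, hc *nbdb.LoadBalancerHealthCheck) {
	hc.Vip = "10.96.0.1:443"                        // should match a key in lb.Vips
	hc.Options = map[string]string{"interval": "5"} // assumed option key (seconds)
	lb.HealthCheck = append(lb.HealthCheck, hc.UUID)
}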
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterTable = "Logical_Router" + +// LogicalRouter defines an object in Logical_Router table +type LogicalRouter struct { + UUID string `ovsdb:"_uuid"` + Copp *string `ovsdb:"copp"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LoadBalancer []string `ovsdb:"load_balancer"` + LoadBalancerGroup []string `ovsdb:"load_balancer_group"` + Name string `ovsdb:"name"` + Nat []string `ovsdb:"nat"` + Options map[string]string `ovsdb:"options"` + Policies []string `ovsdb:"policies"` + Ports []string `ovsdb:"ports"` + StaticRoutes []string `ovsdb:"static_routes"` +} + +func (a *LogicalRouter) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouter) GetCopp() *string { + return a.Copp +} + +func copyLogicalRouterCopp(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterCopp(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouter) GetEnabled() *bool { + return a.Enabled +} + +func copyLogicalRouterEnabled(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouter) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouter) GetLoadBalancer() []string { + return a.LoadBalancer +} + +func copyLogicalRouterLoadBalancer(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterLoadBalancer(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetLoadBalancerGroup() []string { + return a.LoadBalancerGroup +} + +func copyLogicalRouterLoadBalancerGroup(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterLoadBalancerGroup(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetName() string { + return a.Name +} + +func (a *LogicalRouter) GetNat() []string { + return a.Nat +} + +func copyLogicalRouterNat(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterNat(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalRouterOptions(a 
map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouter) GetPolicies() []string { + return a.Policies +} + +func copyLogicalRouterPolicies(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPolicies(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetPorts() []string { + return a.Ports +} + +func copyLogicalRouterPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) GetStaticRoutes() []string { + return a.StaticRoutes +} + +func copyLogicalRouterStaticRoutes(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterStaticRoutes(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouter) DeepCopyInto(b *LogicalRouter) { + *b = *a + b.Copp = copyLogicalRouterCopp(a.Copp) + b.Enabled = copyLogicalRouterEnabled(a.Enabled) + b.ExternalIDs = copyLogicalRouterExternalIDs(a.ExternalIDs) + b.LoadBalancer = copyLogicalRouterLoadBalancer(a.LoadBalancer) + b.LoadBalancerGroup = copyLogicalRouterLoadBalancerGroup(a.LoadBalancerGroup) + b.Nat = copyLogicalRouterNat(a.Nat) + b.Options = copyLogicalRouterOptions(a.Options) + b.Policies = copyLogicalRouterPolicies(a.Policies) + b.Ports = copyLogicalRouterPorts(a.Ports) + b.StaticRoutes = copyLogicalRouterStaticRoutes(a.StaticRoutes) +} + +func (a *LogicalRouter) DeepCopy() *LogicalRouter { + b := new(LogicalRouter) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouter) CloneModelInto(b model.Model) { + c := b.(*LogicalRouter) + a.DeepCopyInto(c) +} + +func (a *LogicalRouter) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouter) Equals(b *LogicalRouter) bool { + return a.UUID == b.UUID && + equalLogicalRouterCopp(a.Copp, b.Copp) && + equalLogicalRouterEnabled(a.Enabled, b.Enabled) && + equalLogicalRouterExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalRouterLoadBalancer(a.LoadBalancer, b.LoadBalancer) && + equalLogicalRouterLoadBalancerGroup(a.LoadBalancerGroup, b.LoadBalancerGroup) && + a.Name == b.Name && + equalLogicalRouterNat(a.Nat, b.Nat) && + equalLogicalRouterOptions(a.Options, b.Options) && + equalLogicalRouterPolicies(a.Policies, b.Policies) && + equalLogicalRouterPorts(a.Ports, b.Ports) && + equalLogicalRouterStaticRoutes(a.StaticRoutes, b.StaticRoutes) +} + +func (a *LogicalRouter) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouter) + return a.Equals(c) +} + +var _ model.CloneableModel = 
&LogicalRouter{} +var _ model.ComparableModel = &LogicalRouter{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go new file mode 100644 index 000000000..7272dbb8a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_policy.go @@ -0,0 +1,229 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterPolicyTable = "Logical_Router_Policy" + +type ( + LogicalRouterPolicyAction = string +) + +var ( + LogicalRouterPolicyActionAllow LogicalRouterPolicyAction = "allow" + LogicalRouterPolicyActionDrop LogicalRouterPolicyAction = "drop" + LogicalRouterPolicyActionReroute LogicalRouterPolicyAction = "reroute" +) + +// LogicalRouterPolicy defines an object in Logical_Router_Policy table +type LogicalRouterPolicy struct { + UUID string `ovsdb:"_uuid"` + Action LogicalRouterPolicyAction `ovsdb:"action"` + BFDSessions []string `ovsdb:"bfd_sessions"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Match string `ovsdb:"match"` + Nexthop *string `ovsdb:"nexthop"` + Nexthops []string `ovsdb:"nexthops"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` +} + +func (a *LogicalRouterPolicy) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouterPolicy) GetAction() LogicalRouterPolicyAction { + return a.Action +} + +func (a *LogicalRouterPolicy) GetBFDSessions() []string { + return a.BFDSessions +} + +func copyLogicalRouterPolicyBFDSessions(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPolicyBFDSessions(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterPolicyExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPolicyExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetMatch() string { + return a.Match +} + +func (a *LogicalRouterPolicy) GetNexthop() *string { + return a.Nexthop +} + +func copyLogicalRouterPolicyNexthop(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPolicyNexthop(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPolicy) GetNexthops() []string { + return a.Nexthops +} + +func copyLogicalRouterPolicyNexthops(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPolicyNexthops(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetOptions() 
map[string]string { + return a.Options +} + +func copyLogicalRouterPolicyOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPolicyOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPolicy) GetPriority() int { + return a.Priority +} + +func (a *LogicalRouterPolicy) DeepCopyInto(b *LogicalRouterPolicy) { + *b = *a + b.BFDSessions = copyLogicalRouterPolicyBFDSessions(a.BFDSessions) + b.ExternalIDs = copyLogicalRouterPolicyExternalIDs(a.ExternalIDs) + b.Nexthop = copyLogicalRouterPolicyNexthop(a.Nexthop) + b.Nexthops = copyLogicalRouterPolicyNexthops(a.Nexthops) + b.Options = copyLogicalRouterPolicyOptions(a.Options) +} + +func (a *LogicalRouterPolicy) DeepCopy() *LogicalRouterPolicy { + b := new(LogicalRouterPolicy) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouterPolicy) CloneModelInto(b model.Model) { + c := b.(*LogicalRouterPolicy) + a.DeepCopyInto(c) +} + +func (a *LogicalRouterPolicy) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouterPolicy) Equals(b *LogicalRouterPolicy) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + equalLogicalRouterPolicyBFDSessions(a.BFDSessions, b.BFDSessions) && + equalLogicalRouterPolicyExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Match == b.Match && + equalLogicalRouterPolicyNexthop(a.Nexthop, b.Nexthop) && + equalLogicalRouterPolicyNexthops(a.Nexthops, b.Nexthops) && + equalLogicalRouterPolicyOptions(a.Options, b.Options) && + a.Priority == b.Priority +} + +func (a *LogicalRouterPolicy) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouterPolicy) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalRouterPolicy{} +var _ model.ComparableModel = &LogicalRouterPolicy{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go new file mode 100644 index 000000000..d39fe0db4 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_port.go @@ -0,0 +1,385 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
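The Logical_Router_Policy model above generates its action enum as a plain string alias plus package-level values, so rows are built with ordinary struct literals. A sketch of a reroute policy (the UUID, priority, match, and next hop are hypothetical):

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func main() {
	nexthop := "100.64.0.2" // hypothetical next hop
	policy := &nbdb.LogicalRouterPolicy{
		UUID:     "policy-uuid", // hypothetical
		Priority: 1004,
		Match:    `inport == "rtos-node1"`, // hypothetical match expression
		Action:   nbdb.LogicalRouterPolicyActionReroute,
		Nexthop:  &nexthop,
	}

	// Because LogicalRouterPolicyAction aliases string, the zero value is
	// "", not a valid action; the OVSDB schema enforces the enum server-side.
	fmt.Println(policy.Action == "reroute") // true
}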
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterPortTable = "Logical_Router_Port" + +// LogicalRouterPort defines an object in Logical_Router_Port table +type LogicalRouterPort struct { + UUID string `ovsdb:"_uuid"` + DhcpRelay *string `ovsdb:"dhcp_relay"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + GatewayChassis []string `ovsdb:"gateway_chassis"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + Ipv6Prefix []string `ovsdb:"ipv6_prefix"` + Ipv6RaConfigs map[string]string `ovsdb:"ipv6_ra_configs"` + MAC string `ovsdb:"mac"` + Name string `ovsdb:"name"` + Networks []string `ovsdb:"networks"` + Options map[string]string `ovsdb:"options"` + Peer *string `ovsdb:"peer"` + Status map[string]string `ovsdb:"status"` +} + +func (a *LogicalRouterPort) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouterPort) GetDhcpRelay() *string { + return a.DhcpRelay +} + +func copyLogicalRouterPortDhcpRelay(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortDhcpRelay(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetEnabled() *bool { + return a.Enabled +} + +func copyLogicalRouterPortEnabled(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterPortExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetGatewayChassis() []string { + return a.GatewayChassis +} + +func copyLogicalRouterPortGatewayChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPortGatewayChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetHaChassisGroup() *string { + return a.HaChassisGroup +} + +func copyLogicalRouterPortHaChassisGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortHaChassisGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetIpv6Prefix() []string { + return a.Ipv6Prefix +} + +func copyLogicalRouterPortIpv6Prefix(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPortIpv6Prefix(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetIpv6RaConfigs() 
map[string]string { + return a.Ipv6RaConfigs +} + +func copyLogicalRouterPortIpv6RaConfigs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortIpv6RaConfigs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetMAC() string { + return a.MAC +} + +func (a *LogicalRouterPort) GetName() string { + return a.Name +} + +func (a *LogicalRouterPort) GetNetworks() []string { + return a.Networks +} + +func copyLogicalRouterPortNetworks(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterPortNetworks(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalRouterPortOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) GetPeer() *string { + return a.Peer +} + +func copyLogicalRouterPortPeer(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPortPeer(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterPort) GetStatus() map[string]string { + return a.Status +} + +func copyLogicalRouterPortStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterPortStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterPort) DeepCopyInto(b *LogicalRouterPort) { + *b = *a + b.DhcpRelay = copyLogicalRouterPortDhcpRelay(a.DhcpRelay) + b.Enabled = copyLogicalRouterPortEnabled(a.Enabled) + b.ExternalIDs = copyLogicalRouterPortExternalIDs(a.ExternalIDs) + b.GatewayChassis = copyLogicalRouterPortGatewayChassis(a.GatewayChassis) + b.HaChassisGroup = copyLogicalRouterPortHaChassisGroup(a.HaChassisGroup) + b.Ipv6Prefix = copyLogicalRouterPortIpv6Prefix(a.Ipv6Prefix) + b.Ipv6RaConfigs = copyLogicalRouterPortIpv6RaConfigs(a.Ipv6RaConfigs) + b.Networks = copyLogicalRouterPortNetworks(a.Networks) + b.Options = copyLogicalRouterPortOptions(a.Options) + b.Peer = copyLogicalRouterPortPeer(a.Peer) + b.Status = copyLogicalRouterPortStatus(a.Status) +} + +func (a *LogicalRouterPort) DeepCopy() *LogicalRouterPort { + b := new(LogicalRouterPort) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouterPort) CloneModelInto(b model.Model) { + c := b.(*LogicalRouterPort) + 
a.DeepCopyInto(c) +} + +func (a *LogicalRouterPort) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouterPort) Equals(b *LogicalRouterPort) bool { + return a.UUID == b.UUID && + equalLogicalRouterPortDhcpRelay(a.DhcpRelay, b.DhcpRelay) && + equalLogicalRouterPortEnabled(a.Enabled, b.Enabled) && + equalLogicalRouterPortExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalRouterPortGatewayChassis(a.GatewayChassis, b.GatewayChassis) && + equalLogicalRouterPortHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) && + equalLogicalRouterPortIpv6Prefix(a.Ipv6Prefix, b.Ipv6Prefix) && + equalLogicalRouterPortIpv6RaConfigs(a.Ipv6RaConfigs, b.Ipv6RaConfigs) && + a.MAC == b.MAC && + a.Name == b.Name && + equalLogicalRouterPortNetworks(a.Networks, b.Networks) && + equalLogicalRouterPortOptions(a.Options, b.Options) && + equalLogicalRouterPortPeer(a.Peer, b.Peer) && + equalLogicalRouterPortStatus(a.Status, b.Status) +} + +func (a *LogicalRouterPort) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouterPort) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalRouterPort{} +var _ model.ComparableModel = &LogicalRouterPort{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go new file mode 100644 index 000000000..ce966e570 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_router_static_route.go @@ -0,0 +1,216 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalRouterStaticRouteTable = "Logical_Router_Static_Route" + +type ( + LogicalRouterStaticRoutePolicy = string +) + +var ( + LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip" + LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip" +) + +// LogicalRouterStaticRoute defines an object in Logical_Router_Static_Route table +type LogicalRouterStaticRoute struct { + UUID string `ovsdb:"_uuid"` + BFD *string `ovsdb:"bfd"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + Nexthop string `ovsdb:"nexthop"` + Options map[string]string `ovsdb:"options"` + OutputPort *string `ovsdb:"output_port"` + Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"` + RouteTable string `ovsdb:"route_table"` +} + +func (a *LogicalRouterStaticRoute) GetUUID() string { + return a.UUID +} + +func (a *LogicalRouterStaticRoute) GetBFD() *string { + return a.BFD +} + +func copyLogicalRouterStaticRouteBFD(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterStaticRouteBFD(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterStaticRoute) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalRouterStaticRouteExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterStaticRouteExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterStaticRoute) 
GetIPPrefix() string { + return a.IPPrefix +} + +func (a *LogicalRouterStaticRoute) GetNexthop() string { + return a.Nexthop +} + +func (a *LogicalRouterStaticRoute) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalRouterStaticRouteOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalRouterStaticRouteOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalRouterStaticRoute) GetOutputPort() *string { + return a.OutputPort +} + +func copyLogicalRouterStaticRouteOutputPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterStaticRouteOutputPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterStaticRoute) GetPolicy() *LogicalRouterStaticRoutePolicy { + return a.Policy +} + +func copyLogicalRouterStaticRoutePolicy(a *LogicalRouterStaticRoutePolicy) *LogicalRouterStaticRoutePolicy { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterStaticRoutePolicy(a, b *LogicalRouterStaticRoutePolicy) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalRouterStaticRoute) GetRouteTable() string { + return a.RouteTable +} + +func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) { + *b = *a + b.BFD = copyLogicalRouterStaticRouteBFD(a.BFD) + b.ExternalIDs = copyLogicalRouterStaticRouteExternalIDs(a.ExternalIDs) + b.Options = copyLogicalRouterStaticRouteOptions(a.Options) + b.OutputPort = copyLogicalRouterStaticRouteOutputPort(a.OutputPort) + b.Policy = copyLogicalRouterStaticRoutePolicy(a.Policy) +} + +func (a *LogicalRouterStaticRoute) DeepCopy() *LogicalRouterStaticRoute { + b := new(LogicalRouterStaticRoute) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalRouterStaticRoute) CloneModelInto(b model.Model) { + c := b.(*LogicalRouterStaticRoute) + a.DeepCopyInto(c) +} + +func (a *LogicalRouterStaticRoute) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalRouterStaticRoute) Equals(b *LogicalRouterStaticRoute) bool { + return a.UUID == b.UUID && + equalLogicalRouterStaticRouteBFD(a.BFD, b.BFD) && + equalLogicalRouterStaticRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IPPrefix == b.IPPrefix && + a.Nexthop == b.Nexthop && + equalLogicalRouterStaticRouteOptions(a.Options, b.Options) && + equalLogicalRouterStaticRouteOutputPort(a.OutputPort, b.OutputPort) && + equalLogicalRouterStaticRoutePolicy(a.Policy, b.Policy) && + a.RouteTable == b.RouteTable +} + +func (a *LogicalRouterStaticRoute) EqualsModel(b model.Model) bool { + c := b.(*LogicalRouterStaticRoute) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalRouterStaticRoute{} +var _ model.ComparableModel = &LogicalRouterStaticRoute{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go new file mode 100644 index 000000000..50b8214ad --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch.go @@ -0,0 +1,362 @@ +// Code 
generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalSwitchTable = "Logical_Switch" + +// LogicalSwitch defines an object in Logical_Switch table +type LogicalSwitch struct { + UUID string `ovsdb:"_uuid"` + ACLs []string `ovsdb:"acls"` + Copp *string `ovsdb:"copp"` + DNSRecords []string `ovsdb:"dns_records"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ForwardingGroups []string `ovsdb:"forwarding_groups"` + LoadBalancer []string `ovsdb:"load_balancer"` + LoadBalancerGroup []string `ovsdb:"load_balancer_group"` + Name string `ovsdb:"name"` + OtherConfig map[string]string `ovsdb:"other_config"` + Ports []string `ovsdb:"ports"` + QOSRules []string `ovsdb:"qos_rules"` +} + +func (a *LogicalSwitch) GetUUID() string { + return a.UUID +} + +func (a *LogicalSwitch) GetACLs() []string { + return a.ACLs +} + +func copyLogicalSwitchACLs(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchACLs(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetCopp() *string { + return a.Copp +} + +func copyLogicalSwitchCopp(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchCopp(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitch) GetDNSRecords() []string { + return a.DNSRecords +} + +func copyLogicalSwitchDNSRecords(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchDNSRecords(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalSwitchExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetForwardingGroups() []string { + return a.ForwardingGroups +} + +func copyLogicalSwitchForwardingGroups(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchForwardingGroups(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetLoadBalancer() []string { + return a.LoadBalancer +} + +func copyLogicalSwitchLoadBalancer(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchLoadBalancer(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } 
+ return true +} + +func (a *LogicalSwitch) GetLoadBalancerGroup() []string { + return a.LoadBalancerGroup +} + +func copyLogicalSwitchLoadBalancerGroup(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchLoadBalancerGroup(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetName() string { + return a.Name +} + +func (a *LogicalSwitch) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyLogicalSwitchOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetPorts() []string { + return a.Ports +} + +func copyLogicalSwitchPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) GetQOSRules() []string { + return a.QOSRules +} + +func copyLogicalSwitchQOSRules(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchQOSRules(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitch) DeepCopyInto(b *LogicalSwitch) { + *b = *a + b.ACLs = copyLogicalSwitchACLs(a.ACLs) + b.Copp = copyLogicalSwitchCopp(a.Copp) + b.DNSRecords = copyLogicalSwitchDNSRecords(a.DNSRecords) + b.ExternalIDs = copyLogicalSwitchExternalIDs(a.ExternalIDs) + b.ForwardingGroups = copyLogicalSwitchForwardingGroups(a.ForwardingGroups) + b.LoadBalancer = copyLogicalSwitchLoadBalancer(a.LoadBalancer) + b.LoadBalancerGroup = copyLogicalSwitchLoadBalancerGroup(a.LoadBalancerGroup) + b.OtherConfig = copyLogicalSwitchOtherConfig(a.OtherConfig) + b.Ports = copyLogicalSwitchPorts(a.Ports) + b.QOSRules = copyLogicalSwitchQOSRules(a.QOSRules) +} + +func (a *LogicalSwitch) DeepCopy() *LogicalSwitch { + b := new(LogicalSwitch) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalSwitch) CloneModelInto(b model.Model) { + c := b.(*LogicalSwitch) + a.DeepCopyInto(c) +} + +func (a *LogicalSwitch) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalSwitch) Equals(b *LogicalSwitch) bool { + return a.UUID == b.UUID && + equalLogicalSwitchACLs(a.ACLs, b.ACLs) && + equalLogicalSwitchCopp(a.Copp, b.Copp) && + equalLogicalSwitchDNSRecords(a.DNSRecords, b.DNSRecords) && + equalLogicalSwitchExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalSwitchForwardingGroups(a.ForwardingGroups, b.ForwardingGroups) && + equalLogicalSwitchLoadBalancer(a.LoadBalancer, b.LoadBalancer) && + equalLogicalSwitchLoadBalancerGroup(a.LoadBalancerGroup, b.LoadBalancerGroup) && + a.Name == b.Name && + 
equalLogicalSwitchOtherConfig(a.OtherConfig, b.OtherConfig) && + equalLogicalSwitchPorts(a.Ports, b.Ports) && + equalLogicalSwitchQOSRules(a.QOSRules, b.QOSRules) +} + +func (a *LogicalSwitch) EqualsModel(b model.Model) bool { + c := b.(*LogicalSwitch) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalSwitch{} +var _ model.ComparableModel = &LogicalSwitch{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go new file mode 100644 index 000000000..c048f7654 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/logical_switch_port.go @@ -0,0 +1,444 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalSwitchPortTable = "Logical_Switch_Port" + +// LogicalSwitchPort defines an object in Logical_Switch_Port table +type LogicalSwitchPort struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + Dhcpv4Options *string `ovsdb:"dhcpv4_options"` + Dhcpv6Options *string `ovsdb:"dhcpv6_options"` + DynamicAddresses *string `ovsdb:"dynamic_addresses"` + Enabled *bool `ovsdb:"enabled"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + MirrorRules []string `ovsdb:"mirror_rules"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + ParentName *string `ovsdb:"parent_name"` + PortSecurity []string `ovsdb:"port_security"` + Tag *int `ovsdb:"tag"` + TagRequest *int `ovsdb:"tag_request"` + Type string `ovsdb:"type"` + Up *bool `ovsdb:"up"` +} + +func (a *LogicalSwitchPort) GetUUID() string { + return a.UUID +} + +func (a *LogicalSwitchPort) GetAddresses() []string { + return a.Addresses +} + +func copyLogicalSwitchPortAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPortAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetDhcpv4Options() *string { + return a.Dhcpv4Options +} + +func copyLogicalSwitchPortDhcpv4Options(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortDhcpv4Options(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetDhcpv6Options() *string { + return a.Dhcpv6Options +} + +func copyLogicalSwitchPortDhcpv6Options(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortDhcpv6Options(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetDynamicAddresses() *string { + return a.DynamicAddresses +} + +func copyLogicalSwitchPortDynamicAddresses(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortDynamicAddresses(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetEnabled() *bool { + return a.Enabled +} + +func copyLogicalSwitchPortEnabled(a *bool) *bool { + if a == nil { + 
return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalSwitchPortExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchPortExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetHaChassisGroup() *string { + return a.HaChassisGroup +} + +func copyLogicalSwitchPortHaChassisGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortHaChassisGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetMirrorRules() []string { + return a.MirrorRules +} + +func copyLogicalSwitchPortMirrorRules(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPortMirrorRules(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetName() string { + return a.Name +} + +func (a *LogicalSwitchPort) GetOptions() map[string]string { + return a.Options +} + +func copyLogicalSwitchPortOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalSwitchPortOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetParentName() *string { + return a.ParentName +} + +func copyLogicalSwitchPortParentName(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortParentName(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetPortSecurity() []string { + return a.PortSecurity +} + +func copyLogicalSwitchPortPortSecurity(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalSwitchPortPortSecurity(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalSwitchPort) GetTag() *int { + return a.Tag +} + +func copyLogicalSwitchPortTag(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortTag(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetTagRequest() *int { + return a.TagRequest +} + +func copyLogicalSwitchPortTagRequest(a *int) 
*int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortTagRequest(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) GetType() string { + return a.Type +} + +func (a *LogicalSwitchPort) GetUp() *bool { + return a.Up +} + +func copyLogicalSwitchPortUp(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortUp(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalSwitchPort) DeepCopyInto(b *LogicalSwitchPort) { + *b = *a + b.Addresses = copyLogicalSwitchPortAddresses(a.Addresses) + b.Dhcpv4Options = copyLogicalSwitchPortDhcpv4Options(a.Dhcpv4Options) + b.Dhcpv6Options = copyLogicalSwitchPortDhcpv6Options(a.Dhcpv6Options) + b.DynamicAddresses = copyLogicalSwitchPortDynamicAddresses(a.DynamicAddresses) + b.Enabled = copyLogicalSwitchPortEnabled(a.Enabled) + b.ExternalIDs = copyLogicalSwitchPortExternalIDs(a.ExternalIDs) + b.HaChassisGroup = copyLogicalSwitchPortHaChassisGroup(a.HaChassisGroup) + b.MirrorRules = copyLogicalSwitchPortMirrorRules(a.MirrorRules) + b.Options = copyLogicalSwitchPortOptions(a.Options) + b.ParentName = copyLogicalSwitchPortParentName(a.ParentName) + b.PortSecurity = copyLogicalSwitchPortPortSecurity(a.PortSecurity) + b.Tag = copyLogicalSwitchPortTag(a.Tag) + b.TagRequest = copyLogicalSwitchPortTagRequest(a.TagRequest) + b.Up = copyLogicalSwitchPortUp(a.Up) +} + +func (a *LogicalSwitchPort) DeepCopy() *LogicalSwitchPort { + b := new(LogicalSwitchPort) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalSwitchPort) CloneModelInto(b model.Model) { + c := b.(*LogicalSwitchPort) + a.DeepCopyInto(c) +} + +func (a *LogicalSwitchPort) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalSwitchPort) Equals(b *LogicalSwitchPort) bool { + return a.UUID == b.UUID && + equalLogicalSwitchPortAddresses(a.Addresses, b.Addresses) && + equalLogicalSwitchPortDhcpv4Options(a.Dhcpv4Options, b.Dhcpv4Options) && + equalLogicalSwitchPortDhcpv6Options(a.Dhcpv6Options, b.Dhcpv6Options) && + equalLogicalSwitchPortDynamicAddresses(a.DynamicAddresses, b.DynamicAddresses) && + equalLogicalSwitchPortEnabled(a.Enabled, b.Enabled) && + equalLogicalSwitchPortExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalSwitchPortHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) && + equalLogicalSwitchPortMirrorRules(a.MirrorRules, b.MirrorRules) && + a.Name == b.Name && + equalLogicalSwitchPortOptions(a.Options, b.Options) && + equalLogicalSwitchPortParentName(a.ParentName, b.ParentName) && + equalLogicalSwitchPortPortSecurity(a.PortSecurity, b.PortSecurity) && + equalLogicalSwitchPortTag(a.Tag, b.Tag) && + equalLogicalSwitchPortTagRequest(a.TagRequest, b.TagRequest) && + a.Type == b.Type && + equalLogicalSwitchPortUp(a.Up, b.Up) +} + +func (a *LogicalSwitchPort) EqualsModel(b model.Model) bool { + c := b.(*LogicalSwitchPort) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalSwitchPort{} +var _ model.ComparableModel = &LogicalSwitchPort{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go new file mode 100644 index 000000000..09b7e9e6a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter.go @@ -0,0 +1,158 @@ +// Code generated by "libovsdb.modelgen" +// 
DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterTable = "Meter" + +type ( + MeterUnit = string +) + +var ( + MeterUnitKbps MeterUnit = "kbps" + MeterUnitPktps MeterUnit = "pktps" +) + +// Meter defines an object in Meter table +type Meter struct { + UUID string `ovsdb:"_uuid"` + Bands []string `ovsdb:"bands"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Fair *bool `ovsdb:"fair"` + Name string `ovsdb:"name"` + Unit MeterUnit `ovsdb:"unit"` +} + +func (a *Meter) GetUUID() string { + return a.UUID +} + +func (a *Meter) GetBands() []string { + return a.Bands +} + +func copyMeterBands(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMeterBands(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Meter) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMeterExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMeterExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Meter) GetFair() *bool { + return a.Fair +} + +func copyMeterFair(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalMeterFair(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Meter) GetName() string { + return a.Name +} + +func (a *Meter) GetUnit() MeterUnit { + return a.Unit +} + +func (a *Meter) DeepCopyInto(b *Meter) { + *b = *a + b.Bands = copyMeterBands(a.Bands) + b.ExternalIDs = copyMeterExternalIDs(a.ExternalIDs) + b.Fair = copyMeterFair(a.Fair) +} + +func (a *Meter) DeepCopy() *Meter { + b := new(Meter) + a.DeepCopyInto(b) + return b +} + +func (a *Meter) CloneModelInto(b model.Model) { + c := b.(*Meter) + a.DeepCopyInto(c) +} + +func (a *Meter) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Meter) Equals(b *Meter) bool { + return a.UUID == b.UUID && + equalMeterBands(a.Bands, b.Bands) && + equalMeterExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalMeterFair(a.Fair, b.Fair) && + a.Name == b.Name && + a.Unit == b.Unit +} + +func (a *Meter) EqualsModel(b model.Model) bool { + c := b.(*Meter) + return a.Equals(c) +} + +var _ model.CloneableModel = &Meter{} +var _ model.ComparableModel = &Meter{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go new file mode 100644 index 000000000..4ef0d901a --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/meter_band.go @@ -0,0 +1,107 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
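Optional OVSDB columns (declared with "min": 0, "max": 1 in the schema) are generated as pointer fields, as with Fair on the Meter above, so an unset column stays distinguishable from an explicit false. A sketch, using a hypothetical ptr helper that is not part of the generated package:

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

// ptr returns the address of a literal value (requires Go 1.18+ generics).
func ptr[T any](v T) *T { return &v }

func main() {
	m := &nbdb.Meter{UUID: "meter-uuid", Name: "example", Unit: nbdb.MeterUnitKbps}
	fmt.Println(m.GetFair() == nil) // true: the column is unset

	m.Fair = ptr(true)
	fmt.Println(*m.GetFair()) // true: the column is now explicitly set
}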
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterBandTable = "Meter_Band" + +type ( + MeterBandAction = string +) + +var ( + MeterBandActionDrop MeterBandAction = "drop" +) + +// MeterBand defines an object in Meter_Band table +type MeterBand struct { + UUID string `ovsdb:"_uuid"` + Action MeterBandAction `ovsdb:"action"` + BurstSize int `ovsdb:"burst_size"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Rate int `ovsdb:"rate"` +} + +func (a *MeterBand) GetUUID() string { + return a.UUID +} + +func (a *MeterBand) GetAction() MeterBandAction { + return a.Action +} + +func (a *MeterBand) GetBurstSize() int { + return a.BurstSize +} + +func (a *MeterBand) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMeterBandExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMeterBandExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *MeterBand) GetRate() int { + return a.Rate +} + +func (a *MeterBand) DeepCopyInto(b *MeterBand) { + *b = *a + b.ExternalIDs = copyMeterBandExternalIDs(a.ExternalIDs) +} + +func (a *MeterBand) DeepCopy() *MeterBand { + b := new(MeterBand) + a.DeepCopyInto(b) + return b +} + +func (a *MeterBand) CloneModelInto(b model.Model) { + c := b.(*MeterBand) + a.DeepCopyInto(c) +} + +func (a *MeterBand) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MeterBand) Equals(b *MeterBand) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + a.BurstSize == b.BurstSize && + equalMeterBandExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Rate == b.Rate +} + +func (a *MeterBand) EqualsModel(b model.Model) bool { + c := b.(*MeterBand) + return a.Equals(c) +} + +var _ model.CloneableModel = &MeterBand{} +var _ model.ComparableModel = &MeterBand{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go new file mode 100644 index 000000000..57e3b01f6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/mirror.go @@ -0,0 +1,125 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
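A Meter references its bands by row UUID through the Bands column, matching the strong reference to Meter_Band in the OVN_Northbound schema. A sketch pairing the two models above (in a real libovsdb transaction the band would get a named UUID; the literals here are hypothetical placeholders):

package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func main() {
	band := &nbdb.MeterBand{
		UUID:   "band-uuid", // hypothetical
		Action: nbdb.MeterBandActionDrop,
		Rate:   25, // packets per second, given the pktps unit below
	}
	meter := &nbdb.Meter{
		UUID:  "meter-uuid",  // hypothetical
		Name:  "acl-logging", // hypothetical meter name
		Unit:  nbdb.MeterUnitPktps,
		Bands: []string{band.UUID},
	}
	fmt.Println(meter.Name, band.Action) // acl-logging drop
}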
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const MirrorTable = "Mirror" + +type ( + MirrorFilter = string + MirrorType = string +) + +var ( + MirrorFilterFromLport MirrorFilter = "from-lport" + MirrorFilterToLport MirrorFilter = "to-lport" + MirrorFilterBoth MirrorFilter = "both" + MirrorTypeGre MirrorType = "gre" + MirrorTypeErspan MirrorType = "erspan" + MirrorTypeLocal MirrorType = "local" +) + +// Mirror defines an object in Mirror table +type Mirror struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Filter MirrorFilter `ovsdb:"filter"` + Index int `ovsdb:"index"` + Name string `ovsdb:"name"` + Sink string `ovsdb:"sink"` + Type MirrorType `ovsdb:"type"` +} + +func (a *Mirror) GetUUID() string { + return a.UUID +} + +func (a *Mirror) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMirrorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMirrorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Mirror) GetFilter() MirrorFilter { + return a.Filter +} + +func (a *Mirror) GetIndex() int { + return a.Index +} + +func (a *Mirror) GetName() string { + return a.Name +} + +func (a *Mirror) GetSink() string { + return a.Sink +} + +func (a *Mirror) GetType() MirrorType { + return a.Type +} + +func (a *Mirror) DeepCopyInto(b *Mirror) { + *b = *a + b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs) +} + +func (a *Mirror) DeepCopy() *Mirror { + b := new(Mirror) + a.DeepCopyInto(b) + return b +} + +func (a *Mirror) CloneModelInto(b model.Model) { + c := b.(*Mirror) + a.DeepCopyInto(c) +} + +func (a *Mirror) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Mirror) Equals(b *Mirror) bool { + return a.UUID == b.UUID && + equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Filter == b.Filter && + a.Index == b.Index && + a.Name == b.Name && + a.Sink == b.Sink && + a.Type == b.Type +} + +func (a *Mirror) EqualsModel(b model.Model) bool { + c := b.(*Mirror) + return a.Equals(c) +} + +var _ model.CloneableModel = &Mirror{} +var _ model.ComparableModel = &Mirror{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go new file mode 100644 index 000000000..daabac453 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/model.go @@ -0,0 +1,2262 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
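model.go, which begins below, registers every table in a ClientDBModel and embeds the OVN_Northbound schema. A hedged sketch of how a consumer might wire FullDatabaseModel into the libovsdb client; the endpoint is hypothetical, and MonitorAll's exact signature has varied across libovsdb versions:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ovn-org/libovsdb/client"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
)

func main() {
	dbModel, err := nbdb.FullDatabaseModel()
	if err != nil {
		log.Fatal(err)
	}

	// "tcp:127.0.0.1:6641" is a hypothetical NB endpoint.
	nb, err := client.NewOVSDBClient(dbModel, client.WithEndpoint("tcp:127.0.0.1:6641"))
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	if err := nb.Connect(ctx); err != nil {
		log.Fatal(err)
	}
	defer nb.Disconnect()

	// Populate the client-side cache before reading from it.
	if _, err := nb.MonitorAll(ctx); err != nil {
		log.Fatal(err)
	}

	var routers []nbdb.LogicalRouter
	if err := nb.List(ctx, &routers); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d logical routers\n", len(routers))
}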
+ +package nbdb + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("OVN_Northbound", map[string]model.Model{ + "ACL": &ACL{}, + "Address_Set": &AddressSet{}, + "BFD": &BFD{}, + "Chassis_Template_Var": &ChassisTemplateVar{}, + "Connection": &Connection{}, + "Copp": &Copp{}, + "DHCP_Options": &DHCPOptions{}, + "DHCP_Relay": &DHCPRelay{}, + "DNS": &DNS{}, + "Forwarding_Group": &ForwardingGroup{}, + "Gateway_Chassis": &GatewayChassis{}, + "HA_Chassis": &HAChassis{}, + "HA_Chassis_Group": &HAChassisGroup{}, + "Load_Balancer": &LoadBalancer{}, + "Load_Balancer_Group": &LoadBalancerGroup{}, + "Load_Balancer_Health_Check": &LoadBalancerHealthCheck{}, + "Logical_Router": &LogicalRouter{}, + "Logical_Router_Policy": &LogicalRouterPolicy{}, + "Logical_Router_Port": &LogicalRouterPort{}, + "Logical_Router_Static_Route": &LogicalRouterStaticRoute{}, + "Logical_Switch": &LogicalSwitch{}, + "Logical_Switch_Port": &LogicalSwitchPort{}, + "Meter": &Meter{}, + "Meter_Band": &MeterBand{}, + "Mirror": &Mirror{}, + "NAT": &NAT{}, + "NB_Global": &NBGlobal{}, + "Port_Group": &PortGroup{}, + "QoS": &QoS{}, + "SSL": &SSL{}, + "Sample": &Sample{}, + "Sample_Collector": &SampleCollector{}, + "Sampling_App": &SamplingApp{}, + "Static_MAC_Binding": &StaticMACBinding{}, + }) +} + +var schema = `{ + "name": "OVN_Northbound", + "version": "7.6.0", + "tables": { + "ACL": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "allow", + "allow-related", + "allow-stateless", + "drop", + "reject", + "pass" + ] + ] + } + } + }, + "direction": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport" + ] + ] + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "label": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + } + } + }, + "log": { + "type": "boolean" + }, + "match": { + "type": "string" + }, + "meter": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": { + "key": { + "type": "string", + "minLength": 63, + "maxLength": 63 + }, + "min": 0, + "max": 1 + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + }, + "sample_est": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "sample_new": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "severity": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "alert", + "warning", + "notice", + "info", + "debug" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "tier": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 3 + } + } + } + } + }, + "Address_Set": { + "columns": { + "addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": 
"string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "BFD": { + "columns": { + "detect_mult": { + "type": { + "key": { + "type": "integer", + "minInteger": 1 + }, + "min": 0, + "max": 1 + } + }, + "dst_ip": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "logical_port": { + "type": "string" + }, + "min_rx": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "min_tx": { + "type": { + "key": { + "type": "integer", + "minInteger": 1 + }, + "min": 0, + "max": 1 + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "status": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "down", + "init", + "up", + "admin_down" + ] + ] + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "logical_port", + "dst_ip" + ] + ], + "isRoot": true + }, + "Chassis_Template_Var": { + "columns": { + "chassis": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "variables": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "chassis" + ] + ], + "isRoot": true + }, + "Connection": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "inactivity_probe": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "is_connected": { + "type": "boolean", + "ephemeral": true + }, + "max_backoff": { + "type": { + "key": { + "type": "integer", + "minInteger": 1000 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "target": { + "type": "string" + } + }, + "indexes": [ + [ + "target" + ] + ] + }, + "Copp": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "meters": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "DHCP_Options": { + "columns": { + "cidr": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "DHCP_Relay": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + 
"max": "unlimited" + } + }, + "servers": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "DNS": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "records": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Forwarding_Group": { + "columns": { + "child_port": { + "type": { + "key": { + "type": "string" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "liveness": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "vip": { + "type": "string" + }, + "vmac": { + "type": "string" + } + } + }, + "Gateway_Chassis": { + "columns": { + "chassis_name": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "HA_Chassis": { + "columns": { + "chassis_name": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + "HA_Chassis_Group": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Load_Balancer": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "health_check": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer_Health_Check", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_port_mappings": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocol": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp", + "udp", + "sctp" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "selection_fields": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "eth_src", + "eth_dst", + "ip_src", + "ip_dst", + "tp_src", + "tp_dst" + ] + ] + }, + "min": 0, + "max": "unlimited" + } + }, + "vips": { + "type": { + "key": { + "type": "string" + }, + "value": 
{ + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Load_Balancer_Group": { + "columns": { + "load_balancer": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Load_Balancer_Health_Check": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "vip": { + "type": "string" + } + } + }, + "Logical_Router": { + "columns": { + "copp": { + "type": { + "key": { + "type": "uuid", + "refTable": "Copp", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "enabled": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer_Group" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "nat": { + "type": { + "key": { + "type": "uuid", + "refTable": "NAT", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "policies": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Policy", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Port", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "static_routes": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Static_Route", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Logical_Router_Policy": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "allow", + "drop", + "reroute" + ] + ] + } + } + }, + "bfd_sessions": { + "type": { + "key": { + "type": "uuid", + "refTable": "BFD", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "match": { + "type": "string" + }, + "nexthop": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "nexthops": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + "Logical_Router_Port": { + "columns": { + "dhcp_relay": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Relay", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "enabled": { + "type": { + "key": { + "type": 
"boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "gateway_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Gateway_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "ipv6_prefix": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ipv6_ra_configs": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mac": { + "type": "string" + }, + "name": { + "type": "string" + }, + "networks": { + "type": { + "key": { + "type": "string" + }, + "min": 1, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "peer": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "Logical_Router_Static_Route": { + "columns": { + "bfd": { + "type": { + "key": { + "type": "uuid", + "refTable": "BFD", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_prefix": { + "type": "string" + }, + "nexthop": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "output_port": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "policy": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "src-ip", + "dst-ip" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "route_table": { + "type": "string" + } + } + }, + "Logical_Switch": { + "columns": { + "acls": { + "type": { + "key": { + "type": "uuid", + "refTable": "ACL", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "copp": { + "type": { + "key": { + "type": "uuid", + "refTable": "Copp", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "dns_records": { + "type": { + "key": { + "type": "uuid", + "refTable": "DNS", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "forwarding_groups": { + "type": { + "key": { + "type": "uuid", + "refTable": "Forwarding_Group", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancer_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Load_Balancer_Group" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": 
"Logical_Switch_Port", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "qos_rules": { + "type": { + "key": { + "type": "uuid", + "refTable": "QoS", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Logical_Switch_Port": { + "columns": { + "addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "dhcpv4_options": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "dhcpv6_options": { + "type": { + "key": { + "type": "uuid", + "refTable": "DHCP_Options", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "dynamic_addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "enabled": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "mirror_rules": { + "type": { + "key": { + "type": "uuid", + "refTable": "Mirror", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "parent_name": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "port_security": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "tag": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "tag_request": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "type": { + "type": "string" + }, + "up": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "Meter": { + "columns": { + "bands": { + "type": { + "key": { + "type": "uuid", + "refTable": "Meter_Band", + "refType": "strong" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "fair": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": "string" + }, + "unit": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "kbps", + "pktps" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Meter_Band": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": "drop" + } + } + }, + "burst_size": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "rate": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + } + }, + "Mirror": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "filter": { + "type": { + "key": 
{ + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport", + "both" + ] + ] + } + } + }, + "index": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "sink": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "gre", + "erspan", + "local" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "NAT": { + "columns": { + "allowed_ext_ips": { + "type": { + "key": { + "type": "uuid", + "refTable": "Address_Set", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "exempted_ext_ips": { + "type": { + "key": { + "type": "uuid", + "refTable": "Address_Set", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ip": { + "type": "string" + }, + "external_mac": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_port_range": { + "type": "string" + }, + "gateway_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Router_Port", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "logical_ip": { + "type": "string" + }, + "logical_port": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "match": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "dnat", + "snat", + "dnat_and_snat" + ] + ] + } + } + } + } + }, + "NB_Global": { + "columns": { + "connections": { + "type": { + "key": { + "type": "uuid", + "refTable": "Connection" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "hv_cfg": { + "type": "integer" + }, + "hv_cfg_timestamp": { + "type": "integer" + }, + "ipsec": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "nb_cfg": { + "type": "integer" + }, + "nb_cfg_timestamp": { + "type": "integer" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "sb_cfg": { + "type": "integer" + }, + "sb_cfg_timestamp": { + "type": "integer" + }, + "ssl": { + "type": { + "key": { + "type": "uuid", + "refTable": "SSL" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "Port_Group": { + "columns": { + "acls": { + "type": { + "key": { + "type": "uuid", + "refTable": "ACL", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_Switch_Port", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "QoS": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "dscp", + "mark" + ] + ] + }, + "value": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + }, + 
"min": 0, + "max": "unlimited" + } + }, + "bandwidth": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "rate", + "burst" + ] + ] + }, + "value": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + }, + "min": 0, + "max": "unlimited" + } + }, + "direction": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport" + ] + ] + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "match": { + "type": "string" + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + "SSL": { + "columns": { + "bootstrap_ca_cert": { + "type": "boolean" + }, + "ca_cert": { + "type": "string" + }, + "certificate": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "private_key": { + "type": "string" + }, + "ssl_ciphers": { + "type": "string" + }, + "ssl_protocols": { + "type": "string" + } + } + }, + "Sample": { + "columns": { + "collectors": { + "type": { + "key": { + "type": "uuid", + "refTable": "Sample_Collector", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "metadata": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + }, + "min": 1, + "max": 1 + } + } + }, + "indexes": [ + [ + "metadata" + ] + ] + }, + "Sample_Collector": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 255 + } + } + }, + "name": { + "type": "string" + }, + "probability": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "set_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + }, + "indexes": [ + [ + "id" + ] + ], + "isRoot": true + }, + "Sampling_App": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 255 + } + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "drop", + "acl-new", + "acl-est" + ] + ] + } + } + } + }, + "indexes": [ + [ + "type" + ] + ], + "isRoot": true + }, + "Static_MAC_Binding": { + "columns": { + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "override_dynamic_mac": { + "type": "boolean" + } + }, + "indexes": [ + [ + "logical_port", + "ip" + ] + ], + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go new file mode 100644 index 000000000..4bd1b7ed4 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nat.go @@ -0,0 +1,285 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const NATTable = "NAT" + +type ( + NATType = string +) + +var ( + NATTypeDNAT NATType = "dnat" + NATTypeSNAT NATType = "snat" + NATTypeDNATAndSNAT NATType = "dnat_and_snat" +) + +// NAT defines an object in NAT table +type NAT struct { + UUID string `ovsdb:"_uuid"` + AllowedExtIPs *string `ovsdb:"allowed_ext_ips"` + ExemptedExtIPs *string `ovsdb:"exempted_ext_ips"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ExternalIP string `ovsdb:"external_ip"` + ExternalMAC *string `ovsdb:"external_mac"` + ExternalPortRange string `ovsdb:"external_port_range"` + GatewayPort *string `ovsdb:"gateway_port"` + LogicalIP string `ovsdb:"logical_ip"` + LogicalPort *string `ovsdb:"logical_port"` + Match string `ovsdb:"match"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` + Type NATType `ovsdb:"type"` +} + +func (a *NAT) GetUUID() string { + return a.UUID +} + +func (a *NAT) GetAllowedExtIPs() *string { + return a.AllowedExtIPs +} + +func copyNATAllowedExtIPs(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATAllowedExtIPs(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetExemptedExtIPs() *string { + return a.ExemptedExtIPs +} + +func copyNATExemptedExtIPs(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATExemptedExtIPs(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyNATExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNATExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NAT) GetExternalIP() string { + return a.ExternalIP +} + +func (a *NAT) GetExternalMAC() *string { + return a.ExternalMAC +} + +func copyNATExternalMAC(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATExternalMAC(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetExternalPortRange() string { + return a.ExternalPortRange +} + +func (a *NAT) GetGatewayPort() *string { + return a.GatewayPort +} + +func copyNATGatewayPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATGatewayPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetLogicalIP() string { + return a.LogicalIP +} + +func (a *NAT) GetLogicalPort() *string { + return a.LogicalPort +} + +func copyNATLogicalPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNATLogicalPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NAT) GetMatch() string { + return a.Match +} + +func (a *NAT) GetOptions() map[string]string { + return a.Options +} + +func copyNATOptions(a map[string]string) 
map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNATOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NAT) GetPriority() int { + return a.Priority +} + +func (a *NAT) GetType() NATType { + return a.Type +} + +func (a *NAT) DeepCopyInto(b *NAT) { + *b = *a + b.AllowedExtIPs = copyNATAllowedExtIPs(a.AllowedExtIPs) + b.ExemptedExtIPs = copyNATExemptedExtIPs(a.ExemptedExtIPs) + b.ExternalIDs = copyNATExternalIDs(a.ExternalIDs) + b.ExternalMAC = copyNATExternalMAC(a.ExternalMAC) + b.GatewayPort = copyNATGatewayPort(a.GatewayPort) + b.LogicalPort = copyNATLogicalPort(a.LogicalPort) + b.Options = copyNATOptions(a.Options) +} + +func (a *NAT) DeepCopy() *NAT { + b := new(NAT) + a.DeepCopyInto(b) + return b +} + +func (a *NAT) CloneModelInto(b model.Model) { + c := b.(*NAT) + a.DeepCopyInto(c) +} + +func (a *NAT) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *NAT) Equals(b *NAT) bool { + return a.UUID == b.UUID && + equalNATAllowedExtIPs(a.AllowedExtIPs, b.AllowedExtIPs) && + equalNATExemptedExtIPs(a.ExemptedExtIPs, b.ExemptedExtIPs) && + equalNATExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ExternalIP == b.ExternalIP && + equalNATExternalMAC(a.ExternalMAC, b.ExternalMAC) && + a.ExternalPortRange == b.ExternalPortRange && + equalNATGatewayPort(a.GatewayPort, b.GatewayPort) && + a.LogicalIP == b.LogicalIP && + equalNATLogicalPort(a.LogicalPort, b.LogicalPort) && + a.Match == b.Match && + equalNATOptions(a.Options, b.Options) && + a.Priority == b.Priority && + a.Type == b.Type +} + +func (a *NAT) EqualsModel(b model.Model) bool { + c := b.(*NAT) + return a.Equals(c) +} + +var _ model.CloneableModel = &NAT{} +var _ model.ComparableModel = &NAT{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go new file mode 100644 index 000000000..bae9e20f2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/nb_global.go @@ -0,0 +1,218 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
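+
+// Illustrative sketch, not generated code: every model in this package follows
+// the same DeepCopy/Equals pattern, shown here on NBGlobal. Map and pointer
+// columns are copied rather than aliased, so mutating a copy never leaks into
+// the original. The option key below is only an example value.
+//
+//	orig := &nbdb.NBGlobal{Options: map[string]string{"mac_prefix": "0a:58"}}
+//	cp := orig.DeepCopy()
+//	cp.Options["mac_prefix"] = "changed"
+//	fmt.Println(orig.Equals(cp)) // false: orig.Options is untouched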
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const NBGlobalTable = "NB_Global" + +// NBGlobal defines an object in NB_Global table +type NBGlobal struct { + UUID string `ovsdb:"_uuid"` + Connections []string `ovsdb:"connections"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HvCfg int `ovsdb:"hv_cfg"` + HvCfgTimestamp int `ovsdb:"hv_cfg_timestamp"` + Ipsec bool `ovsdb:"ipsec"` + Name string `ovsdb:"name"` + NbCfg int `ovsdb:"nb_cfg"` + NbCfgTimestamp int `ovsdb:"nb_cfg_timestamp"` + Options map[string]string `ovsdb:"options"` + SbCfg int `ovsdb:"sb_cfg"` + SbCfgTimestamp int `ovsdb:"sb_cfg_timestamp"` + SSL *string `ovsdb:"ssl"` +} + +func (a *NBGlobal) GetUUID() string { + return a.UUID +} + +func (a *NBGlobal) GetConnections() []string { + return a.Connections +} + +func copyNBGlobalConnections(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalNBGlobalConnections(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *NBGlobal) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyNBGlobalExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNBGlobalExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NBGlobal) GetHvCfg() int { + return a.HvCfg +} + +func (a *NBGlobal) GetHvCfgTimestamp() int { + return a.HvCfgTimestamp +} + +func (a *NBGlobal) GetIpsec() bool { + return a.Ipsec +} + +func (a *NBGlobal) GetName() string { + return a.Name +} + +func (a *NBGlobal) GetNbCfg() int { + return a.NbCfg +} + +func (a *NBGlobal) GetNbCfgTimestamp() int { + return a.NbCfgTimestamp +} + +func (a *NBGlobal) GetOptions() map[string]string { + return a.Options +} + +func copyNBGlobalOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalNBGlobalOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *NBGlobal) GetSbCfg() int { + return a.SbCfg +} + +func (a *NBGlobal) GetSbCfgTimestamp() int { + return a.SbCfgTimestamp +} + +func (a *NBGlobal) GetSSL() *string { + return a.SSL +} + +func copyNBGlobalSSL(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalNBGlobalSSL(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *NBGlobal) DeepCopyInto(b *NBGlobal) { + *b = *a + b.Connections = copyNBGlobalConnections(a.Connections) + b.ExternalIDs = copyNBGlobalExternalIDs(a.ExternalIDs) + b.Options = copyNBGlobalOptions(a.Options) + b.SSL = copyNBGlobalSSL(a.SSL) +} + +func (a *NBGlobal) DeepCopy() *NBGlobal { + b := new(NBGlobal) + a.DeepCopyInto(b) + return b +} + +func (a *NBGlobal) CloneModelInto(b model.Model) { + c := b.(*NBGlobal) + a.DeepCopyInto(c) +} + +func (a 
*NBGlobal) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *NBGlobal) Equals(b *NBGlobal) bool { + return a.UUID == b.UUID && + equalNBGlobalConnections(a.Connections, b.Connections) && + equalNBGlobalExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.HvCfg == b.HvCfg && + a.HvCfgTimestamp == b.HvCfgTimestamp && + a.Ipsec == b.Ipsec && + a.Name == b.Name && + a.NbCfg == b.NbCfg && + a.NbCfgTimestamp == b.NbCfgTimestamp && + equalNBGlobalOptions(a.Options, b.Options) && + a.SbCfg == b.SbCfg && + a.SbCfgTimestamp == b.SbCfgTimestamp && + equalNBGlobalSSL(a.SSL, b.SSL) +} + +func (a *NBGlobal) EqualsModel(b model.Model) bool { + c := b.(*NBGlobal) + return a.Equals(c) +} + +var _ model.CloneableModel = &NBGlobal{} +var _ model.ComparableModel = &NBGlobal{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go new file mode 100644 index 000000000..bf4fa809b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/port_group.go @@ -0,0 +1,149 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const PortGroupTable = "Port_Group" + +// PortGroup defines an object in Port_Group table +type PortGroup struct { + UUID string `ovsdb:"_uuid"` + ACLs []string `ovsdb:"acls"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` +} + +func (a *PortGroup) GetUUID() string { + return a.UUID +} + +func (a *PortGroup) GetACLs() []string { + return a.ACLs +} + +func copyPortGroupACLs(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortGroupACLs(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyPortGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *PortGroup) GetName() string { + return a.Name +} + +func (a *PortGroup) GetPorts() []string { + return a.Ports +} + +func copyPortGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortGroup) DeepCopyInto(b *PortGroup) { + *b = *a + b.ACLs = copyPortGroupACLs(a.ACLs) + b.ExternalIDs = copyPortGroupExternalIDs(a.ExternalIDs) + b.Ports = copyPortGroupPorts(a.Ports) +} + +func (a *PortGroup) DeepCopy() *PortGroup { + b := new(PortGroup) + a.DeepCopyInto(b) + return b +} + +func (a *PortGroup) CloneModelInto(b model.Model) { + c := b.(*PortGroup) + a.DeepCopyInto(c) +} + +func (a *PortGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *PortGroup) 
Equals(b *PortGroup) bool { + return a.UUID == b.UUID && + equalPortGroupACLs(a.ACLs, b.ACLs) && + equalPortGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalPortGroupPorts(a.Ports, b.Ports) +} + +func (a *PortGroup) EqualsModel(b model.Model) bool { + c := b.(*PortGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &PortGroup{} +var _ model.ComparableModel = &PortGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go new file mode 100644 index 000000000..d25322b4b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/qos.go @@ -0,0 +1,180 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const QoSTable = "QoS" + +type ( + QoSAction = string + QoSBandwidth = string + QoSDirection = string +) + +var ( + QoSActionDSCP QoSAction = "dscp" + QoSActionMark QoSAction = "mark" + QoSBandwidthRate QoSBandwidth = "rate" + QoSBandwidthBurst QoSBandwidth = "burst" + QoSDirectionFromLport QoSDirection = "from-lport" + QoSDirectionToLport QoSDirection = "to-lport" +) + +// QoS defines an object in QoS table +type QoS struct { + UUID string `ovsdb:"_uuid"` + Action map[string]int `ovsdb:"action"` + Bandwidth map[string]int `ovsdb:"bandwidth"` + Direction QoSDirection `ovsdb:"direction"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Match string `ovsdb:"match"` + Priority int `ovsdb:"priority"` +} + +func (a *QoS) GetUUID() string { + return a.UUID +} + +func (a *QoS) GetAction() map[string]int { + return a.Action +} + +func copyQoSAction(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSAction(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetBandwidth() map[string]int { + return a.Bandwidth +} + +func copyQoSBandwidth(a map[string]int) map[string]int { + if a == nil { + return nil + } + b := make(map[string]int, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSBandwidth(a, b map[string]int) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetDirection() QoSDirection { + return a.Direction +} + +func (a *QoS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyQoSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalQoSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *QoS) GetMatch() string { + return a.Match +} + +func (a *QoS) GetPriority() int { + return a.Priority +} + +func (a *QoS) DeepCopyInto(b *QoS) { + *b = *a + b.Action = copyQoSAction(a.Action) + b.Bandwidth = copyQoSBandwidth(a.Bandwidth) + b.ExternalIDs = copyQoSExternalIDs(a.ExternalIDs) +} + +func (a *QoS) DeepCopy() *QoS 
{ + b := new(QoS) + a.DeepCopyInto(b) + return b +} + +func (a *QoS) CloneModelInto(b model.Model) { + c := b.(*QoS) + a.DeepCopyInto(c) +} + +func (a *QoS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *QoS) Equals(b *QoS) bool { + return a.UUID == b.UUID && + equalQoSAction(a.Action, b.Action) && + equalQoSBandwidth(a.Bandwidth, b.Bandwidth) && + a.Direction == b.Direction && + equalQoSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Match == b.Match && + a.Priority == b.Priority +} + +func (a *QoS) EqualsModel(b model.Model) bool { + c := b.(*QoS) + return a.Equals(c) +} + +var _ model.CloneableModel = &QoS{} +var _ model.ComparableModel = &QoS{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go new file mode 100644 index 000000000..639393a1e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SampleTable = "Sample" + +// Sample defines an object in Sample table +type Sample struct { + UUID string `ovsdb:"_uuid"` + Collectors []string `ovsdb:"collectors"` + Metadata int `ovsdb:"metadata"` +} + +func (a *Sample) GetUUID() string { + return a.UUID +} + +func (a *Sample) GetCollectors() []string { + return a.Collectors +} + +func copySampleCollectors(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalSampleCollectors(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Sample) GetMetadata() int { + return a.Metadata +} + +func (a *Sample) DeepCopyInto(b *Sample) { + *b = *a + b.Collectors = copySampleCollectors(a.Collectors) +} + +func (a *Sample) DeepCopy() *Sample { + b := new(Sample) + a.DeepCopyInto(b) + return b +} + +func (a *Sample) CloneModelInto(b model.Model) { + c := b.(*Sample) + a.DeepCopyInto(c) +} + +func (a *Sample) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Sample) Equals(b *Sample) bool { + return a.UUID == b.UUID && + equalSampleCollectors(a.Collectors, b.Collectors) && + a.Metadata == b.Metadata +} + +func (a *Sample) EqualsModel(b model.Model) bool { + c := b.(*Sample) + return a.Equals(c) +} + +var _ model.CloneableModel = &Sample{} +var _ model.ComparableModel = &Sample{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go new file mode 100644 index 000000000..50f065904 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sample_collector.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
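+
+// Illustrative sketch, not generated code: per the schema above, Probability
+// is expressed on a 0-65535 scale rather than in percent, so a collector that
+// samples half of the matching traffic would be written as below. The ID and
+// SetID values are arbitrary examples.
+//
+//	collector := &nbdb.SampleCollector{
+//		ID:          1,                // must be unique, 1-255 per the schema
+//		SetID:       42,               // groups collectors referenced together
+//		Probability: 65535 * 50 / 100, // 50%
+//	}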
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SampleCollectorTable = "Sample_Collector" + +// SampleCollector defines an object in Sample_Collector table +type SampleCollector struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + Name string `ovsdb:"name"` + Probability int `ovsdb:"probability"` + SetID int `ovsdb:"set_id"` +} + +func (a *SampleCollector) GetUUID() string { + return a.UUID +} + +func (a *SampleCollector) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySampleCollectorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSampleCollectorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SampleCollector) GetID() int { + return a.ID +} + +func (a *SampleCollector) GetName() string { + return a.Name +} + +func (a *SampleCollector) GetProbability() int { + return a.Probability +} + +func (a *SampleCollector) GetSetID() int { + return a.SetID +} + +func (a *SampleCollector) DeepCopyInto(b *SampleCollector) { + *b = *a + b.ExternalIDs = copySampleCollectorExternalIDs(a.ExternalIDs) +} + +func (a *SampleCollector) DeepCopy() *SampleCollector { + b := new(SampleCollector) + a.DeepCopyInto(b) + return b +} + +func (a *SampleCollector) CloneModelInto(b model.Model) { + c := b.(*SampleCollector) + a.DeepCopyInto(c) +} + +func (a *SampleCollector) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SampleCollector) Equals(b *SampleCollector) bool { + return a.UUID == b.UUID && + equalSampleCollectorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + a.Name == b.Name && + a.Probability == b.Probability && + a.SetID == b.SetID +} + +func (a *SampleCollector) EqualsModel(b model.Model) bool { + c := b.(*SampleCollector) + return a.Equals(c) +} + +var _ model.CloneableModel = &SampleCollector{} +var _ model.ComparableModel = &SampleCollector{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go new file mode 100644 index 000000000..a152b4237 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/sampling_app.go @@ -0,0 +1,103 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
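+
+// Illustrative sketch, not generated code: the Sampling_App table is indexed
+// by type, so it holds at most one row per application type, each binding a
+// sampling application to an ID in the 1-255 range. The ID here is arbitrary.
+//
+//	app := &nbdb.SamplingApp{
+//		ID:   1,
+//		Type: nbdb.SamplingAppTypeDrop,
+//	}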
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SamplingAppTable = "Sampling_App" + +type ( + SamplingAppType = string +) + +var ( + SamplingAppTypeDrop SamplingAppType = "drop" + SamplingAppTypeACLNew SamplingAppType = "acl-new" + SamplingAppTypeACLEst SamplingAppType = "acl-est" +) + +// SamplingApp defines an object in Sampling_App table +type SamplingApp struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + ID int `ovsdb:"id"` + Type SamplingAppType `ovsdb:"type"` +} + +func (a *SamplingApp) GetUUID() string { + return a.UUID +} + +func (a *SamplingApp) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySamplingAppExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSamplingAppExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SamplingApp) GetID() int { + return a.ID +} + +func (a *SamplingApp) GetType() SamplingAppType { + return a.Type +} + +func (a *SamplingApp) DeepCopyInto(b *SamplingApp) { + *b = *a + b.ExternalIDs = copySamplingAppExternalIDs(a.ExternalIDs) +} + +func (a *SamplingApp) DeepCopy() *SamplingApp { + b := new(SamplingApp) + a.DeepCopyInto(b) + return b +} + +func (a *SamplingApp) CloneModelInto(b model.Model) { + c := b.(*SamplingApp) + a.DeepCopyInto(c) +} + +func (a *SamplingApp) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SamplingApp) Equals(b *SamplingApp) bool { + return a.UUID == b.UUID && + equalSamplingAppExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.ID == b.ID && + a.Type == b.Type +} + +func (a *SamplingApp) EqualsModel(b model.Model) bool { + c := b.(*SamplingApp) + return a.Equals(c) +} + +var _ model.CloneableModel = &SamplingApp{} +var _ model.ComparableModel = &SamplingApp{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go new file mode 100644 index 000000000..ddaba5d32 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/ssl.go @@ -0,0 +1,117 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package nbdb + +import "github.com/ovn-org/libovsdb/model" + +const SSLTable = "SSL" + +// SSL defines an object in SSL table +type SSL struct { + UUID string `ovsdb:"_uuid"` + BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"` + CaCert string `ovsdb:"ca_cert"` + Certificate string `ovsdb:"certificate"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + PrivateKey string `ovsdb:"private_key"` + SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLProtocols string `ovsdb:"ssl_protocols"` +} + +func (a *SSL) GetUUID() string { + return a.UUID +} + +func (a *SSL) GetBootstrapCaCert() bool { + return a.BootstrapCaCert +} + +func (a *SSL) GetCaCert() string { + return a.CaCert +} + +func (a *SSL) GetCertificate() string { + return a.Certificate +} + +func (a *SSL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySSLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSSLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SSL) GetPrivateKey() string { + return a.PrivateKey +} + +func (a *SSL) GetSSLCiphers() string { + return a.SSLCiphers +} + +func (a *SSL) GetSSLProtocols() string { + return a.SSLProtocols +} + +func (a *SSL) DeepCopyInto(b *SSL) { + *b = *a + b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs) +} + +func (a *SSL) DeepCopy() *SSL { + b := new(SSL) + a.DeepCopyInto(b) + return b +} + +func (a *SSL) CloneModelInto(b model.Model) { + c := b.(*SSL) + a.DeepCopyInto(c) +} + +func (a *SSL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SSL) Equals(b *SSL) bool { + return a.UUID == b.UUID && + a.BootstrapCaCert == b.BootstrapCaCert && + a.CaCert == b.CaCert && + a.Certificate == b.Certificate && + equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.PrivateKey == b.PrivateKey && + a.SSLCiphers == b.SSLCiphers && + a.SSLProtocols == b.SSLProtocols +} + +func (a *SSL) EqualsModel(b model.Model) bool { + c := b.(*SSL) + return a.Equals(c) +} + +var _ model.CloneableModel = &SSL{} +var _ model.ComparableModel = &SSL{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go new file mode 100644 index 000000000..15207e648 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb/static_mac_binding.go @@ -0,0 +1,72 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+
+package nbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const StaticMACBindingTable = "Static_MAC_Binding"
+
+// StaticMACBinding defines an object in Static_MAC_Binding table
+type StaticMACBinding struct {
+	UUID               string `ovsdb:"_uuid"`
+	IP                 string `ovsdb:"ip"`
+	LogicalPort        string `ovsdb:"logical_port"`
+	MAC                string `ovsdb:"mac"`
+	OverrideDynamicMAC bool   `ovsdb:"override_dynamic_mac"`
+}
+
+func (a *StaticMACBinding) GetUUID() string {
+	return a.UUID
+}
+
+func (a *StaticMACBinding) GetIP() string {
+	return a.IP
+}
+
+func (a *StaticMACBinding) GetLogicalPort() string {
+	return a.LogicalPort
+}
+
+func (a *StaticMACBinding) GetMAC() string {
+	return a.MAC
+}
+
+func (a *StaticMACBinding) GetOverrideDynamicMAC() bool {
+	return a.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) DeepCopyInto(b *StaticMACBinding) {
+	*b = *a
+}
+
+func (a *StaticMACBinding) DeepCopy() *StaticMACBinding {
+	b := new(StaticMACBinding)
+	a.DeepCopyInto(b)
+	return b
+}
+
+func (a *StaticMACBinding) CloneModelInto(b model.Model) {
+	c := b.(*StaticMACBinding)
+	a.DeepCopyInto(c)
+}
+
+func (a *StaticMACBinding) CloneModel() model.Model {
+	return a.DeepCopy()
+}
+
+func (a *StaticMACBinding) Equals(b *StaticMACBinding) bool {
+	return a.UUID == b.UUID &&
+		a.IP == b.IP &&
+		a.LogicalPort == b.LogicalPort &&
+		a.MAC == b.MAC &&
+		a.OverrideDynamicMAC == b.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) EqualsModel(b model.Model) bool {
+	c := b.(*StaticMACBinding)
+	return a.Equals(c)
+}
+
+var _ model.CloneableModel = &StaticMACBinding{}
+var _ model.ComparableModel = &StaticMACBinding{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go
new file mode 100644
index 000000000..6349c1519
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability/observability.go
@@ -0,0 +1,303 @@
+package observability
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+
+	libovsdbclient "github.com/ovn-org/libovsdb/client"
+	libovsdb "github.com/ovn-org/libovsdb/ovsdb"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog/v2"
+
+	libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops"
+	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb"
+)
+
+// OVN observability app IDs. Make sure to always add new apps at the end.
+const (
+	DropSamplingID = iota + 1
+	ACLNewTrafficSamplingID
+	ACLEstTrafficSamplingID
+)
+
+// temporary const, until we have dynamic config
+const DefaultObservabilityCollectorSetID = 42
+
+// this is inferred from the nbdb schema, see Sample_Collector.id
+const maxCollectorID = 255
+const collectorFeaturesExternalID = "sample-features"
+
+// collectorConfig holds the configuration for a collector.
+// A different probability may be set for every feature.
+// collectorSetID is used to set up sampling via OVSDB.
+type collectorConfig struct {
+	collectorSetID int
+	// probability in percent, 0 to 100
+	featuresProbability map[libovsdbops.SampleFeature]int
+}
+
+type Manager struct {
+	nbClient       libovsdbclient.Client
+	sampConfig     *libovsdbops.SamplingConfig
+	collectorsLock sync.Mutex
+	// Every nbdb Collector carries a single probability. To allow different probabilities for different features,
+	// multiple nbdb Collectors are created, one per probability.
+	// getCollectorKey() => collector.UUID
+	dbCollectors map[string]string
+	// Cleaning up unused collectors may take time and multiple retries, as all referencing samples must be removed first.
+	// Therefore, we need to save state between those retries.
+	// getCollectorKey() => collector.ID
+	unusedCollectors              map[string]int
+	unusedCollectorsRetryInterval time.Duration
+	collectorsCleanupRetries      int
+	// Only maxCollectorID collectors are allowed, and each must have a unique ID.
+	// This set tracks the already-assigned IDs.
+	takenCollectorIDs sets.Set[int]
+}
+
+func NewManager(nbClient libovsdbclient.Client) *Manager {
+	return &Manager{
+		nbClient:                      nbClient,
+		collectorsLock:                sync.Mutex{},
+		dbCollectors:                  make(map[string]string),
+		unusedCollectors:              make(map[string]int),
+		unusedCollectorsRetryInterval: time.Minute,
+		takenCollectorIDs:             sets.New[int](),
+	}
+}
+
+func (m *Manager) SamplingConfig() *libovsdbops.SamplingConfig {
+	return m.sampConfig
+}
+
+func (m *Manager) Init() error {
+	// this will be read from the kube-api in the future
+	currentConfig := &collectorConfig{
+		collectorSetID: DefaultObservabilityCollectorSetID,
+		featuresProbability: map[libovsdbops.SampleFeature]int{
+			libovsdbops.EgressFirewallSample:     100,
+			libovsdbops.NetworkPolicySample:      100,
+			libovsdbops.AdminNetworkPolicySample: 100,
+			libovsdbops.MulticastSample:          100,
+			libovsdbops.UDNIsolationSample:       100,
+		},
+	}
+
+	return m.initWithConfig(currentConfig)
+}
+
+func (m *Manager) initWithConfig(config *collectorConfig) error {
+	if err := m.setSamplingAppIDs(); err != nil {
+		return err
+	}
+	if err := m.setDbCollectors(); err != nil {
+		return err
+	}
+
+	featuresConfig, err := m.addCollector(config)
+	if err != nil {
+		return err
+	}
+	m.sampConfig = libovsdbops.NewSamplingConfig(featuresConfig)
+
+	// now clean up stale collectors
+	m.deleteStaleCollectorsWithRetry()
+	return nil
+}
+
+func (m *Manager) setDbCollectors() error {
+	m.collectorsLock.Lock()
+	defer m.collectorsLock.Unlock()
+	clear(m.dbCollectors)
+	collectors, err := libovsdbops.ListSampleCollectors(m.nbClient)
+	if err != nil {
+		return fmt.Errorf("error getting sample collectors: %w", err)
+	}
+	for _, collector := range collectors {
+		collectorKey := getCollectorKey(collector.SetID, collector.Probability)
+		m.dbCollectors[collectorKey] = collector.UUID
+		m.takenCollectorIDs.Insert(collector.ID)
+		// all collectors are considered unused until the existing configs are updated
+		m.unusedCollectors[collectorKey] = collector.ID
+	}
+	return nil
+}
+
+// Stale collectors can't be deleted until all referencing Samples are deleted.
+// Samples are deleted asynchronously by the different controllers on their init with the new Manager.
+// deleteStaleCollectorsWithRetry therefore retries, since deletion should eventually succeed once all
+// controllers have updated their db entries to use the latest observability config.
+func (m *Manager) deleteStaleCollectorsWithRetry() {
+	if err := m.deleteStaleCollectors(); err != nil {
+		m.collectorsCleanupRetries += 1
+		// allow retries for 1 hour, hopefully it will be enough for all handlers to complete their initial sync
+		if m.collectorsCleanupRetries > 60 {
+			m.collectorsCleanupRetries = 0
+			klog.Errorf("Cleanup stale collectors failed after 60 retries: %v", err)
+			return
+		}
+		time.AfterFunc(m.unusedCollectorsRetryInterval, m.deleteStaleCollectorsWithRetry)
+		return
+	}
+	m.collectorsCleanupRetries = 0
+	klog.Infof("Cleanup stale collectors succeeded.")
+}
+
+func (m *Manager) deleteStaleCollectors() error {
+	m.collectorsLock.Lock()
+	defer m.collectorsLock.Unlock()
+	var lastErr error
+	for collectorKey, collectorID := range m.unusedCollectors {
+		collectorUUID := m.dbCollectors[collectorKey]
+		err := libovsdbops.DeleteSampleCollector(m.nbClient, &nbdb.SampleCollector{
+			UUID: collectorUUID,
+		})
+		if err != nil {
+			lastErr = err
+			klog.Infof("Error deleting collector with ID=%d: %v", collectorID, lastErr)
+			continue
+		}
+		delete(m.unusedCollectors, collectorKey)
+		delete(m.dbCollectors, collectorKey)
+		delete(m.takenCollectorIDs, collectorID)
+	}
+	return lastErr
+}
+
+// Cleanup must be called when observability is no longer needed.
+// It will return an error if some samples still exist in the db.
+// This is expected, and Cleanup may be retried on the next restart.
+func Cleanup(nbClient libovsdbclient.Client) error {
+	// Do the opposite of init
+	err := libovsdbops.DeleteSamplingAppsWithPredicate(nbClient, func(app *nbdb.SamplingApp) bool {
+		return true
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting sampling apps: %w", err)
+	}
+
+	err = libovsdbops.DeleteSampleCollectorWithPredicate(nbClient, func(collector *nbdb.SampleCollector) bool {
+		return true
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting sample collectors: %w", err)
+	}
+	return nil
+}
+
+func (m *Manager) setSamplingAppIDs() error {
+	var ops []libovsdb.Operation
+	var err error
+	for _, appConfig := range []struct {
+		id      int
+		appType nbdb.SamplingAppType
+	}{
+		{
+			id:      DropSamplingID,
+			appType: nbdb.SamplingAppTypeDrop,
+		},
+		{
+			id:      ACLNewTrafficSamplingID,
+			appType: nbdb.SamplingAppTypeACLNew,
+		},
+		{
+			id:      ACLEstTrafficSamplingID,
+			appType: nbdb.SamplingAppTypeACLEst,
+		},
+	} {
+		samplingApp := &nbdb.SamplingApp{
+			ID:   appConfig.id,
+			Type: appConfig.appType,
+		}
+		ops, err = libovsdbops.CreateOrUpdateSamplingAppsOps(m.nbClient, ops, samplingApp)
+		if err != nil {
+			return fmt.Errorf("error creating or updating sampling app %s: %w", appConfig.appType, err)
+		}
+	}
+	_, err = libovsdbops.TransactAndCheck(m.nbClient, ops)
+	return err
+}
+
+func groupByProbability(c *collectorConfig) map[int][]libovsdbops.SampleFeature {
+	probabilities := make(map[int][]libovsdbops.SampleFeature)
+	for feature, percentProbability := range c.featuresProbability {
+		probability := percentToProbability(percentProbability)
+		probabilities[probability] = append(probabilities[probability], feature)
+	}
+	return probabilities
+}
+
+func getCollectorKey(collectorSetID int, probability int) string {
+	return fmt.Sprintf("%d-%d", collectorSetID, probability)
+}
+
+func (m *Manager) getFreeCollectorID() (int, error) {
+	for i := 1; i <= maxCollectorID; i++ {
+		if !m.takenCollectorIDs.Has(i) {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("no free collector IDs")
+}
+
+func (m *Manager) addCollector(conf *collectorConfig) (map[libovsdbops.SampleFeature][]string, error) {
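+	// Sketch of the flow (comment added for illustration): features are grouped
+	// by their OVSDB probability (percentToProbability(50) == 32767), an
+	// existing collector is reused per (setID, probability) key and a new one
+	// is created otherwise; the returned map lists, per feature, the collector
+	// UUIDs its samples should reference.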
m.collectorsLock.Lock() + defer m.collectorsLock.Unlock() + sampleFeaturesConfig := make(map[libovsdbops.SampleFeature][]string) + probabilityConfig := groupByProbability(conf) + + for probability, features := range probabilityConfig { + collectorKey := getCollectorKey(conf.collectorSetID, probability) + var collectorUUID string + var ok bool + // ensure predictable externalID + slices.Sort(features) + collectorFeatures := strings.Join(features, ",") + if collectorUUID, ok = m.dbCollectors[collectorKey]; !ok { + collectorID, err := m.getFreeCollectorID() + if err != nil { + return sampleFeaturesConfig, err + } + collector := &nbdb.SampleCollector{ + ID: collectorID, + SetID: conf.collectorSetID, + Probability: probability, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: collectorFeatures, + }, + } + err = libovsdbops.CreateOrUpdateSampleCollector(m.nbClient, collector) + if err != nil { + return sampleFeaturesConfig, err + } + collectorUUID = collector.UUID + m.dbCollectors[collectorKey] = collectorUUID + m.takenCollectorIDs.Insert(collectorID) + } else { + // update collector's features + collector := &nbdb.SampleCollector{ + UUID: collectorUUID, + ExternalIDs: map[string]string{ + collectorFeaturesExternalID: collectorFeatures, + }, + } + err := libovsdbops.UpdateSampleCollectorExternalIDs(m.nbClient, collector) + if err != nil { + return sampleFeaturesConfig, err + } + // collector is used, remove from unused Collectors + delete(m.unusedCollectors, collectorKey) + } + for _, feature := range features { + sampleFeaturesConfig[feature] = append(sampleFeaturesConfig[feature], collectorUUID) + } + } + return sampleFeaturesConfig, nil +} + +func percentToProbability(percent int) int { + return 65535 * percent / 100 +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore new file mode 100644 index 000000000..734ba1eff --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/.gitignore @@ -0,0 +1 @@ +*.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go new file mode 100644 index 000000000..b3b1c3c2d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/address_set.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const AddressSetTable = "Address_Set" + +// AddressSet defines an object in Address_Set table +type AddressSet struct { + UUID string `ovsdb:"_uuid"` + Addresses []string `ovsdb:"addresses"` + Name string `ovsdb:"name"` +} + +func (a *AddressSet) GetUUID() string { + return a.UUID +} + +func (a *AddressSet) GetAddresses() []string { + return a.Addresses +} + +func copyAddressSetAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalAddressSetAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *AddressSet) GetName() string { + return a.Name +} + +func (a *AddressSet) DeepCopyInto(b *AddressSet) { + *b = *a + b.Addresses = copyAddressSetAddresses(a.Addresses) +} + +func (a *AddressSet) DeepCopy() *AddressSet { + b := new(AddressSet) + a.DeepCopyInto(b) + return b +} + +func (a *AddressSet) CloneModelInto(b model.Model) { + c := b.(*AddressSet) + a.DeepCopyInto(c) +} + +func (a *AddressSet) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AddressSet) Equals(b *AddressSet) bool { + return a.UUID == b.UUID && + equalAddressSetAddresses(a.Addresses, b.Addresses) && + a.Name == b.Name +} + +func (a *AddressSet) EqualsModel(b model.Model) bool { + c := b.(*AddressSet) + return a.Equals(c) +} + +var _ model.CloneableModel = &AddressSet{} +var _ model.ComparableModel = &AddressSet{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go new file mode 100644 index 000000000..cf27814b5 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/bfd.go @@ -0,0 +1,179 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const BFDTable = "BFD" + +type ( + BFDStatus = string +) + +var ( + BFDStatusDown BFDStatus = "down" + BFDStatusInit BFDStatus = "init" + BFDStatusUp BFDStatus = "up" + BFDStatusAdminDown BFDStatus = "admin_down" +) + +// BFD defines an object in BFD table +type BFD struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + DetectMult int `ovsdb:"detect_mult"` + Disc int `ovsdb:"disc"` + DstIP string `ovsdb:"dst_ip"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LogicalPort string `ovsdb:"logical_port"` + MinRx int `ovsdb:"min_rx"` + MinTx int `ovsdb:"min_tx"` + Options map[string]string `ovsdb:"options"` + SrcPort int `ovsdb:"src_port"` + Status BFDStatus `ovsdb:"status"` +} + +func (a *BFD) GetUUID() string { + return a.UUID +} + +func (a *BFD) GetChassisName() string { + return a.ChassisName +} + +func (a *BFD) GetDetectMult() int { + return a.DetectMult +} + +func (a *BFD) GetDisc() int { + return a.Disc +} + +func (a *BFD) GetDstIP() string { + return a.DstIP +} + +func (a *BFD) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyBFDExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *BFD) GetMinRx() int { + return a.MinRx +} + +func (a *BFD) GetMinTx() int { + return a.MinTx +} + +func (a *BFD) GetOptions() map[string]string { + return a.Options +} + +func copyBFDOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalBFDOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *BFD) GetSrcPort() int { + return a.SrcPort +} + +func (a *BFD) GetStatus() BFDStatus { + return a.Status +} + +func (a *BFD) DeepCopyInto(b *BFD) { + *b = *a + b.ExternalIDs = copyBFDExternalIDs(a.ExternalIDs) + b.Options = copyBFDOptions(a.Options) +} + +func (a *BFD) DeepCopy() *BFD { + b := new(BFD) + a.DeepCopyInto(b) + return b +} + +func (a *BFD) CloneModelInto(b model.Model) { + c := b.(*BFD) + a.DeepCopyInto(c) +} + +func (a *BFD) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *BFD) Equals(b *BFD) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + a.DetectMult == b.DetectMult && + a.Disc == b.Disc && + a.DstIP == b.DstIP && + equalBFDExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.LogicalPort == b.LogicalPort && + a.MinRx == b.MinRx && + a.MinTx == b.MinTx && + equalBFDOptions(a.Options, b.Options) && + a.SrcPort == b.SrcPort && + a.Status == b.Status +} + +func (a *BFD) EqualsModel(b model.Model) bool { + c := b.(*BFD) + return a.Equals(c) +} + +var _ model.CloneableModel = &BFD{} +var _ model.ComparableModel = &BFD{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go 
b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go new file mode 100644 index 000000000..3526f096f --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis.go @@ -0,0 +1,225 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisTable = "Chassis" + +// Chassis defines an object in Chassis table +type Chassis struct { + UUID string `ovsdb:"_uuid"` + Encaps []string `ovsdb:"encaps"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Hostname string `ovsdb:"hostname"` + Name string `ovsdb:"name"` + NbCfg int `ovsdb:"nb_cfg"` + OtherConfig map[string]string `ovsdb:"other_config"` + TransportZones []string `ovsdb:"transport_zones"` + VtepLogicalSwitches []string `ovsdb:"vtep_logical_switches"` +} + +func (a *Chassis) GetUUID() string { + return a.UUID +} + +func (a *Chassis) GetEncaps() []string { + return a.Encaps +} + +func copyChassisEncaps(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalChassisEncaps(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Chassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Chassis) GetHostname() string { + return a.Hostname +} + +func (a *Chassis) GetName() string { + return a.Name +} + +func (a *Chassis) GetNbCfg() int { + return a.NbCfg +} + +func (a *Chassis) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyChassisOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Chassis) GetTransportZones() []string { + return a.TransportZones +} + +func copyChassisTransportZones(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalChassisTransportZones(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Chassis) GetVtepLogicalSwitches() []string { + return a.VtepLogicalSwitches +} + +func copyChassisVtepLogicalSwitches(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalChassisVtepLogicalSwitches(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a 
*Chassis) DeepCopyInto(b *Chassis) { + *b = *a + b.Encaps = copyChassisEncaps(a.Encaps) + b.ExternalIDs = copyChassisExternalIDs(a.ExternalIDs) + b.OtherConfig = copyChassisOtherConfig(a.OtherConfig) + b.TransportZones = copyChassisTransportZones(a.TransportZones) + b.VtepLogicalSwitches = copyChassisVtepLogicalSwitches(a.VtepLogicalSwitches) +} + +func (a *Chassis) DeepCopy() *Chassis { + b := new(Chassis) + a.DeepCopyInto(b) + return b +} + +func (a *Chassis) CloneModelInto(b model.Model) { + c := b.(*Chassis) + a.DeepCopyInto(c) +} + +func (a *Chassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Chassis) Equals(b *Chassis) bool { + return a.UUID == b.UUID && + equalChassisEncaps(a.Encaps, b.Encaps) && + equalChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Hostname == b.Hostname && + a.Name == b.Name && + a.NbCfg == b.NbCfg && + equalChassisOtherConfig(a.OtherConfig, b.OtherConfig) && + equalChassisTransportZones(a.TransportZones, b.TransportZones) && + equalChassisVtepLogicalSwitches(a.VtepLogicalSwitches, b.VtepLogicalSwitches) +} + +func (a *Chassis) EqualsModel(b model.Model) bool { + c := b.(*Chassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &Chassis{} +var _ model.ComparableModel = &Chassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go new file mode 100644 index 000000000..1e8c3764b --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_private.go @@ -0,0 +1,124 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisPrivateTable = "Chassis_Private" + +// ChassisPrivate defines an object in Chassis_Private table +type ChassisPrivate struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + NbCfg int `ovsdb:"nb_cfg"` + NbCfgTimestamp int `ovsdb:"nb_cfg_timestamp"` +} + +func (a *ChassisPrivate) GetUUID() string { + return a.UUID +} + +func (a *ChassisPrivate) GetChassis() *string { + return a.Chassis +} + +func copyChassisPrivateChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalChassisPrivateChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ChassisPrivate) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyChassisPrivateExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisPrivateExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisPrivate) GetName() string { + return a.Name +} + +func (a *ChassisPrivate) GetNbCfg() int { + return a.NbCfg +} + +func (a *ChassisPrivate) GetNbCfgTimestamp() int { + return a.NbCfgTimestamp +} + +func (a *ChassisPrivate) DeepCopyInto(b *ChassisPrivate) { + *b = *a + b.Chassis = copyChassisPrivateChassis(a.Chassis) + b.ExternalIDs = copyChassisPrivateExternalIDs(a.ExternalIDs) +} + +func (a *ChassisPrivate) DeepCopy() *ChassisPrivate { + b := 
new(ChassisPrivate) + a.DeepCopyInto(b) + return b +} + +func (a *ChassisPrivate) CloneModelInto(b model.Model) { + c := b.(*ChassisPrivate) + a.DeepCopyInto(c) +} + +func (a *ChassisPrivate) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ChassisPrivate) Equals(b *ChassisPrivate) bool { + return a.UUID == b.UUID && + equalChassisPrivateChassis(a.Chassis, b.Chassis) && + equalChassisPrivateExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + a.NbCfg == b.NbCfg && + a.NbCfgTimestamp == b.NbCfgTimestamp +} + +func (a *ChassisPrivate) EqualsModel(b model.Model) bool { + c := b.(*ChassisPrivate) + return a.Equals(c) +} + +var _ model.CloneableModel = &ChassisPrivate{} +var _ model.ComparableModel = &ChassisPrivate{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go new file mode 100644 index 000000000..212e772be --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/chassis_template_var.go @@ -0,0 +1,87 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ChassisTemplateVarTable = "Chassis_Template_Var" + +// ChassisTemplateVar defines an object in Chassis_Template_Var table +type ChassisTemplateVar struct { + UUID string `ovsdb:"_uuid"` + Chassis string `ovsdb:"chassis"` + Variables map[string]string `ovsdb:"variables"` +} + +func (a *ChassisTemplateVar) GetUUID() string { + return a.UUID +} + +func (a *ChassisTemplateVar) GetChassis() string { + return a.Chassis +} + +func (a *ChassisTemplateVar) GetVariables() map[string]string { + return a.Variables +} + +func copyChassisTemplateVarVariables(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalChassisTemplateVarVariables(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ChassisTemplateVar) DeepCopyInto(b *ChassisTemplateVar) { + *b = *a + b.Variables = copyChassisTemplateVarVariables(a.Variables) +} + +func (a *ChassisTemplateVar) DeepCopy() *ChassisTemplateVar { + b := new(ChassisTemplateVar) + a.DeepCopyInto(b) + return b +} + +func (a *ChassisTemplateVar) CloneModelInto(b model.Model) { + c := b.(*ChassisTemplateVar) + a.DeepCopyInto(c) +} + +func (a *ChassisTemplateVar) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ChassisTemplateVar) Equals(b *ChassisTemplateVar) bool { + return a.UUID == b.UUID && + a.Chassis == b.Chassis && + equalChassisTemplateVarVariables(a.Variables, b.Variables) +} + +func (a *ChassisTemplateVar) EqualsModel(b model.Model) bool { + c := b.(*ChassisTemplateVar) + return a.Equals(c) +} + +var _ model.CloneableModel = &ChassisTemplateVar{} +var _ model.ComparableModel = &ChassisTemplateVar{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go new file mode 100644 index 000000000..8f96f5422 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/connection.go @@ -0,0 +1,221 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ConnectionTable = "Connection" + +// Connection defines an object in Connection table +type Connection struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + InactivityProbe *int `ovsdb:"inactivity_probe"` + IsConnected bool `ovsdb:"is_connected"` + MaxBackoff *int `ovsdb:"max_backoff"` + OtherConfig map[string]string `ovsdb:"other_config"` + ReadOnly bool `ovsdb:"read_only"` + Role string `ovsdb:"role"` + Status map[string]string `ovsdb:"status"` + Target string `ovsdb:"target"` +} + +func (a *Connection) GetUUID() string { + return a.UUID +} + +func (a *Connection) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyConnectionExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetInactivityProbe() *int { + return a.InactivityProbe +} + +func copyConnectionInactivityProbe(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionInactivityProbe(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetIsConnected() bool { + return a.IsConnected +} + +func (a *Connection) GetMaxBackoff() *int { + return a.MaxBackoff +} + +func copyConnectionMaxBackoff(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalConnectionMaxBackoff(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *Connection) GetOtherConfig() map[string]string { + return a.OtherConfig +} + +func copyConnectionOtherConfig(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionOtherConfig(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetReadOnly() bool { + return a.ReadOnly +} + +func (a *Connection) GetRole() string { + return a.Role +} + +func (a *Connection) GetStatus() map[string]string { + return a.Status +} + +func copyConnectionStatus(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalConnectionStatus(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Connection) GetTarget() string { + return a.Target +} + +func (a *Connection) DeepCopyInto(b *Connection) { + *b = *a + b.ExternalIDs = copyConnectionExternalIDs(a.ExternalIDs) + b.InactivityProbe = copyConnectionInactivityProbe(a.InactivityProbe) + b.MaxBackoff = copyConnectionMaxBackoff(a.MaxBackoff) + b.OtherConfig = copyConnectionOtherConfig(a.OtherConfig) + 
b.Status = copyConnectionStatus(a.Status) +} + +func (a *Connection) DeepCopy() *Connection { + b := new(Connection) + a.DeepCopyInto(b) + return b +} + +func (a *Connection) CloneModelInto(b model.Model) { + c := b.(*Connection) + a.DeepCopyInto(c) +} + +func (a *Connection) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Connection) Equals(b *Connection) bool { + return a.UUID == b.UUID && + equalConnectionExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalConnectionInactivityProbe(a.InactivityProbe, b.InactivityProbe) && + a.IsConnected == b.IsConnected && + equalConnectionMaxBackoff(a.MaxBackoff, b.MaxBackoff) && + equalConnectionOtherConfig(a.OtherConfig, b.OtherConfig) && + a.ReadOnly == b.ReadOnly && + a.Role == b.Role && + equalConnectionStatus(a.Status, b.Status) && + a.Target == b.Target +} + +func (a *Connection) EqualsModel(b model.Model) bool { + c := b.(*Connection) + return a.Equals(c) +} + +var _ model.CloneableModel = &Connection{} +var _ model.ComparableModel = &Connection{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go new file mode 100644 index 000000000..741ffd028 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/controller_event.go @@ -0,0 +1,126 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ControllerEventTable = "Controller_Event" + +type ( + ControllerEventEventType = string +) + +var ( + ControllerEventEventTypeEmptyLbBackends ControllerEventEventType = "empty_lb_backends" +) + +// ControllerEvent defines an object in Controller_Event table +type ControllerEvent struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + EventInfo map[string]string `ovsdb:"event_info"` + EventType ControllerEventEventType `ovsdb:"event_type"` + SeqNum int `ovsdb:"seq_num"` +} + +func (a *ControllerEvent) GetUUID() string { + return a.UUID +} + +func (a *ControllerEvent) GetChassis() *string { + return a.Chassis +} + +func copyControllerEventChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalControllerEventChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ControllerEvent) GetEventInfo() map[string]string { + return a.EventInfo +} + +func copyControllerEventEventInfo(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalControllerEventEventInfo(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ControllerEvent) GetEventType() ControllerEventEventType { + return a.EventType +} + +func (a *ControllerEvent) GetSeqNum() int { + return a.SeqNum +} + +func (a *ControllerEvent) DeepCopyInto(b *ControllerEvent) { + *b = *a + b.Chassis = copyControllerEventChassis(a.Chassis) + b.EventInfo = copyControllerEventEventInfo(a.EventInfo) +} + +func (a *ControllerEvent) DeepCopy() *ControllerEvent { + b := new(ControllerEvent) + a.DeepCopyInto(b) + return b +} + +func (a *ControllerEvent) CloneModelInto(b model.Model) { + c := b.(*ControllerEvent) + 
a.DeepCopyInto(c) +} + +func (a *ControllerEvent) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ControllerEvent) Equals(b *ControllerEvent) bool { + return a.UUID == b.UUID && + equalControllerEventChassis(a.Chassis, b.Chassis) && + equalControllerEventEventInfo(a.EventInfo, b.EventInfo) && + a.EventType == b.EventType && + a.SeqNum == b.SeqNum +} + +func (a *ControllerEvent) EqualsModel(b model.Model) bool { + c := b.(*ControllerEvent) + return a.Equals(c) +} + +var _ model.CloneableModel = &ControllerEvent{} +var _ model.ComparableModel = &ControllerEvent{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go new file mode 100644 index 000000000..10247286f --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/datapath_binding.go @@ -0,0 +1,118 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DatapathBindingTable = "Datapath_Binding" + +// DatapathBinding defines an object in Datapath_Binding table +type DatapathBinding struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LoadBalancers []string `ovsdb:"load_balancers"` + TunnelKey int `ovsdb:"tunnel_key"` +} + +func (a *DatapathBinding) GetUUID() string { + return a.UUID +} + +func (a *DatapathBinding) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDatapathBindingExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDatapathBindingExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DatapathBinding) GetLoadBalancers() []string { + return a.LoadBalancers +} + +func copyDatapathBindingLoadBalancers(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalDatapathBindingLoadBalancers(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *DatapathBinding) GetTunnelKey() int { + return a.TunnelKey +} + +func (a *DatapathBinding) DeepCopyInto(b *DatapathBinding) { + *b = *a + b.ExternalIDs = copyDatapathBindingExternalIDs(a.ExternalIDs) + b.LoadBalancers = copyDatapathBindingLoadBalancers(a.LoadBalancers) +} + +func (a *DatapathBinding) DeepCopy() *DatapathBinding { + b := new(DatapathBinding) + a.DeepCopyInto(b) + return b +} + +func (a *DatapathBinding) CloneModelInto(b model.Model) { + c := b.(*DatapathBinding) + a.DeepCopyInto(c) +} + +func (a *DatapathBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DatapathBinding) Equals(b *DatapathBinding) bool { + return a.UUID == b.UUID && + equalDatapathBindingExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDatapathBindingLoadBalancers(a.LoadBalancers, b.LoadBalancers) && + a.TunnelKey == b.TunnelKey +} + +func (a *DatapathBinding) EqualsModel(b model.Model) bool { + c := b.(*DatapathBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &DatapathBinding{} +var _ model.ComparableModel = 
&DatapathBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go new file mode 100644 index 000000000..e9ec44ce2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcp_options.go @@ -0,0 +1,82 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPOptionsTable = "DHCP_Options" + +type ( + DHCPOptionsType = string +) + +var ( + DHCPOptionsTypeBool DHCPOptionsType = "bool" + DHCPOptionsTypeUint8 DHCPOptionsType = "uint8" + DHCPOptionsTypeUint16 DHCPOptionsType = "uint16" + DHCPOptionsTypeUint32 DHCPOptionsType = "uint32" + DHCPOptionsTypeIpv4 DHCPOptionsType = "ipv4" + DHCPOptionsTypeStaticRoutes DHCPOptionsType = "static_routes" + DHCPOptionsTypeStr DHCPOptionsType = "str" + DHCPOptionsTypeHostID DHCPOptionsType = "host_id" + DHCPOptionsTypeDomains DHCPOptionsType = "domains" +) + +// DHCPOptions defines an object in DHCP_Options table +type DHCPOptions struct { + UUID string `ovsdb:"_uuid"` + Code int `ovsdb:"code"` + Name string `ovsdb:"name"` + Type DHCPOptionsType `ovsdb:"type"` +} + +func (a *DHCPOptions) GetUUID() string { + return a.UUID +} + +func (a *DHCPOptions) GetCode() int { + return a.Code +} + +func (a *DHCPOptions) GetName() string { + return a.Name +} + +func (a *DHCPOptions) GetType() DHCPOptionsType { + return a.Type +} + +func (a *DHCPOptions) DeepCopyInto(b *DHCPOptions) { + *b = *a +} + +func (a *DHCPOptions) DeepCopy() *DHCPOptions { + b := new(DHCPOptions) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPOptions) CloneModelInto(b model.Model) { + c := b.(*DHCPOptions) + a.DeepCopyInto(c) +} + +func (a *DHCPOptions) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPOptions) Equals(b *DHCPOptions) bool { + return a.UUID == b.UUID && + a.Code == b.Code && + a.Name == b.Name && + a.Type == b.Type +} + +func (a *DHCPOptions) EqualsModel(b model.Model) bool { + c := b.(*DHCPOptions) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPOptions{} +var _ model.ComparableModel = &DHCPOptions{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go new file mode 100644 index 000000000..908d1e0ad --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dhcpv6_options.go @@ -0,0 +1,77 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DHCPv6OptionsTable = "DHCPv6_Options" + +type ( + DHCPv6OptionsType = string +) + +var ( + DHCPv6OptionsTypeIpv6 DHCPv6OptionsType = "ipv6" + DHCPv6OptionsTypeStr DHCPv6OptionsType = "str" + DHCPv6OptionsTypeMAC DHCPv6OptionsType = "mac" + DHCPv6OptionsTypeDomain DHCPv6OptionsType = "domain" +) + +// DHCPv6Options defines an object in DHCPv6_Options table +type DHCPv6Options struct { + UUID string `ovsdb:"_uuid"` + Code int `ovsdb:"code"` + Name string `ovsdb:"name"` + Type DHCPv6OptionsType `ovsdb:"type"` +} + +func (a *DHCPv6Options) GetUUID() string { + return a.UUID +} + +func (a *DHCPv6Options) GetCode() int { + return a.Code +} + +func (a *DHCPv6Options) GetName() string { + return a.Name +} + +func (a *DHCPv6Options) GetType() DHCPv6OptionsType { + return a.Type +} + +func (a *DHCPv6Options) DeepCopyInto(b *DHCPv6Options) { + *b = *a +} + +func (a *DHCPv6Options) DeepCopy() *DHCPv6Options { + b := new(DHCPv6Options) + a.DeepCopyInto(b) + return b +} + +func (a *DHCPv6Options) CloneModelInto(b model.Model) { + c := b.(*DHCPv6Options) + a.DeepCopyInto(c) +} + +func (a *DHCPv6Options) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DHCPv6Options) Equals(b *DHCPv6Options) bool { + return a.UUID == b.UUID && + a.Code == b.Code && + a.Name == b.Name && + a.Type == b.Type +} + +func (a *DHCPv6Options) EqualsModel(b model.Model) bool { + c := b.(*DHCPv6Options) + return a.Equals(c) +} + +var _ model.CloneableModel = &DHCPv6Options{} +var _ model.ComparableModel = &DHCPv6Options{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go new file mode 100644 index 000000000..95c0a52d1 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/dns.go @@ -0,0 +1,178 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const DNSTable = "DNS" + +// DNS defines an object in DNS table +type DNS struct { + UUID string `ovsdb:"_uuid"` + Datapaths []string `ovsdb:"datapaths"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Options map[string]string `ovsdb:"options"` + Records map[string]string `ovsdb:"records"` +} + +func (a *DNS) GetUUID() string { + return a.UUID +} + +func (a *DNS) GetDatapaths() []string { + return a.Datapaths +} + +func copyDNSDatapaths(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalDNSDatapaths(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *DNS) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyDNSExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetOptions() map[string]string { + return a.Options +} + +func copyDNSOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) GetRecords() map[string]string { + return a.Records +} + +func copyDNSRecords(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalDNSRecords(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *DNS) DeepCopyInto(b *DNS) { + *b = *a + b.Datapaths = copyDNSDatapaths(a.Datapaths) + b.ExternalIDs = copyDNSExternalIDs(a.ExternalIDs) + b.Options = copyDNSOptions(a.Options) + b.Records = copyDNSRecords(a.Records) +} + +func (a *DNS) DeepCopy() *DNS { + b := new(DNS) + a.DeepCopyInto(b) + return b +} + +func (a *DNS) CloneModelInto(b model.Model) { + c := b.(*DNS) + a.DeepCopyInto(c) +} + +func (a *DNS) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *DNS) Equals(b *DNS) bool { + return a.UUID == b.UUID && + equalDNSDatapaths(a.Datapaths, b.Datapaths) && + equalDNSExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalDNSOptions(a.Options, b.Options) && + equalDNSRecords(a.Records, b.Records) +} + +func (a *DNS) EqualsModel(b model.Model) bool { + c := b.(*DNS) + return a.Equals(c) +} + +var _ model.CloneableModel = &DNS{} +var _ model.ComparableModel = &DNS{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go new file mode 100644 index 000000000..9a2f17fba --- /dev/null +++ 
b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/encap.go @@ -0,0 +1,109 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const EncapTable = "Encap" + +type ( + EncapType = string +) + +var ( + EncapTypeGeneve EncapType = "geneve" + EncapTypeSTT EncapType = "stt" + EncapTypeVxlan EncapType = "vxlan" +) + +// Encap defines an object in Encap table +type Encap struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + IP string `ovsdb:"ip"` + Options map[string]string `ovsdb:"options"` + Type EncapType `ovsdb:"type"` +} + +func (a *Encap) GetUUID() string { + return a.UUID +} + +func (a *Encap) GetChassisName() string { + return a.ChassisName +} + +func (a *Encap) GetIP() string { + return a.IP +} + +func (a *Encap) GetOptions() map[string]string { + return a.Options +} + +func copyEncapOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalEncapOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Encap) GetType() EncapType { + return a.Type +} + +func (a *Encap) DeepCopyInto(b *Encap) { + *b = *a + b.Options = copyEncapOptions(a.Options) +} + +func (a *Encap) DeepCopy() *Encap { + b := new(Encap) + a.DeepCopyInto(b) + return b +} + +func (a *Encap) CloneModelInto(b model.Model) { + c := b.(*Encap) + a.DeepCopyInto(c) +} + +func (a *Encap) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Encap) Equals(b *Encap) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + a.IP == b.IP && + equalEncapOptions(a.Options, b.Options) && + a.Type == b.Type +} + +func (a *Encap) EqualsModel(b model.Model) bool { + c := b.(*Encap) + return a.Equals(c) +} + +var _ model.CloneableModel = &Encap{} +var _ model.ComparableModel = &Encap{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go new file mode 100644 index 000000000..8253e7059 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/fdb.go @@ -0,0 +1,72 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const FDBTable = "FDB" + +// FDB defines an object in FDB table +type FDB struct { + UUID string `ovsdb:"_uuid"` + DpKey int `ovsdb:"dp_key"` + MAC string `ovsdb:"mac"` + PortKey int `ovsdb:"port_key"` + Timestamp int `ovsdb:"timestamp"` +} + +func (a *FDB) GetUUID() string { + return a.UUID +} + +func (a *FDB) GetDpKey() int { + return a.DpKey +} + +func (a *FDB) GetMAC() string { + return a.MAC +} + +func (a *FDB) GetPortKey() int { + return a.PortKey +} + +func (a *FDB) GetTimestamp() int { + return a.Timestamp +} + +func (a *FDB) DeepCopyInto(b *FDB) { + *b = *a +} + +func (a *FDB) DeepCopy() *FDB { + b := new(FDB) + a.DeepCopyInto(b) + return b +} + +func (a *FDB) CloneModelInto(b model.Model) { + c := b.(*FDB) + a.DeepCopyInto(c) +} + +func (a *FDB) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *FDB) Equals(b *FDB) bool { + return a.UUID == b.UUID && + a.DpKey == b.DpKey && + a.MAC == b.MAC && + a.PortKey == b.PortKey && + a.Timestamp == b.Timestamp +} + +func (a *FDB) EqualsModel(b model.Model) bool { + c := b.(*FDB) + return a.Equals(c) +} + +var _ model.CloneableModel = &FDB{} +var _ model.ComparableModel = &FDB{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go new file mode 100644 index 000000000..a84ad7fc4 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gateway_chassis.go @@ -0,0 +1,151 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const GatewayChassisTable = "Gateway_Chassis" + +// GatewayChassis defines an object in Gateway_Chassis table +type GatewayChassis struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Priority int `ovsdb:"priority"` +} + +func (a *GatewayChassis) GetUUID() string { + return a.UUID +} + +func (a *GatewayChassis) GetChassis() *string { + return a.Chassis +} + +func copyGatewayChassisChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalGatewayChassisChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *GatewayChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyGatewayChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetName() string { + return a.Name +} + +func (a *GatewayChassis) GetOptions() map[string]string { + return a.Options +} + +func copyGatewayChassisOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalGatewayChassisOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + 
for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *GatewayChassis) GetPriority() int { + return a.Priority +} + +func (a *GatewayChassis) DeepCopyInto(b *GatewayChassis) { + *b = *a + b.Chassis = copyGatewayChassisChassis(a.Chassis) + b.ExternalIDs = copyGatewayChassisExternalIDs(a.ExternalIDs) + b.Options = copyGatewayChassisOptions(a.Options) +} + +func (a *GatewayChassis) DeepCopy() *GatewayChassis { + b := new(GatewayChassis) + a.DeepCopyInto(b) + return b +} + +func (a *GatewayChassis) CloneModelInto(b model.Model) { + c := b.(*GatewayChassis) + a.DeepCopyInto(c) +} + +func (a *GatewayChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *GatewayChassis) Equals(b *GatewayChassis) bool { + return a.UUID == b.UUID && + equalGatewayChassisChassis(a.Chassis, b.Chassis) && + equalGatewayChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Name == b.Name && + equalGatewayChassisOptions(a.Options, b.Options) && + a.Priority == b.Priority +} + +func (a *GatewayChassis) EqualsModel(b model.Model) bool { + c := b.(*GatewayChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &GatewayChassis{} +var _ model.ComparableModel = &GatewayChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go new file mode 100644 index 000000000..6507d071e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/gen.go @@ -0,0 +1,3 @@ +package sbdb + +//go:generate modelgen --extended -p sbdb -o . ovn-sb.ovsschema diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go new file mode 100644 index 000000000..b0b3cebbb --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis.go @@ -0,0 +1,112 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisTable = "HA_Chassis" + +// HAChassis defines an object in HA_Chassis table +type HAChassis struct { + UUID string `ovsdb:"_uuid"` + Chassis *string `ovsdb:"chassis"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Priority int `ovsdb:"priority"` +} + +func (a *HAChassis) GetUUID() string { + return a.UUID +} + +func (a *HAChassis) GetChassis() *string { + return a.Chassis +} + +func copyHAChassisChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalHAChassisChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *HAChassis) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassis) GetPriority() int { + return a.Priority +} + +func (a *HAChassis) DeepCopyInto(b *HAChassis) { + *b = *a + b.Chassis = copyHAChassisChassis(a.Chassis) + b.ExternalIDs = copyHAChassisExternalIDs(a.ExternalIDs) +} + +func (a *HAChassis) DeepCopy() *HAChassis { + b := new(HAChassis) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassis) CloneModelInto(b model.Model) { + c := b.(*HAChassis) + a.DeepCopyInto(c) +} + +func (a *HAChassis) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassis) Equals(b *HAChassis) bool { + return a.UUID == b.UUID && + equalHAChassisChassis(a.Chassis, b.Chassis) && + equalHAChassisExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Priority == b.Priority +} + +func (a *HAChassis) EqualsModel(b model.Model) bool { + c := b.(*HAChassis) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassis{} +var _ model.ComparableModel = &HAChassis{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go new file mode 100644 index 000000000..1cc013c70 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ha_chassis_group.go @@ -0,0 +1,149 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const HAChassisGroupTable = "HA_Chassis_Group" + +// HAChassisGroup defines an object in HA_Chassis_Group table +type HAChassisGroup struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + HaChassis []string `ovsdb:"ha_chassis"` + Name string `ovsdb:"name"` + RefChassis []string `ovsdb:"ref_chassis"` +} + +func (a *HAChassisGroup) GetUUID() string { + return a.UUID +} + +func (a *HAChassisGroup) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyHAChassisGroupExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalHAChassisGroupExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetHaChassis() []string { + return a.HaChassis +} + +func copyHAChassisGroupHaChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalHAChassisGroupHaChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *HAChassisGroup) GetName() string { + return a.Name +} + +func (a *HAChassisGroup) GetRefChassis() []string { + return a.RefChassis +} + +func copyHAChassisGroupRefChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalHAChassisGroupRefChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *HAChassisGroup) DeepCopyInto(b *HAChassisGroup) { + *b = *a + b.ExternalIDs = copyHAChassisGroupExternalIDs(a.ExternalIDs) + b.HaChassis = copyHAChassisGroupHaChassis(a.HaChassis) + b.RefChassis = copyHAChassisGroupRefChassis(a.RefChassis) +} + +func (a *HAChassisGroup) DeepCopy() *HAChassisGroup { + b := new(HAChassisGroup) + a.DeepCopyInto(b) + return b +} + +func (a *HAChassisGroup) CloneModelInto(b model.Model) { + c := b.(*HAChassisGroup) + a.DeepCopyInto(c) +} + +func (a *HAChassisGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *HAChassisGroup) Equals(b *HAChassisGroup) bool { + return a.UUID == b.UUID && + equalHAChassisGroupExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalHAChassisGroupHaChassis(a.HaChassis, b.HaChassis) && + a.Name == b.Name && + equalHAChassisGroupRefChassis(a.RefChassis, b.RefChassis) +} + +func (a *HAChassisGroup) EqualsModel(b model.Model) bool { + c := b.(*HAChassisGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &HAChassisGroup{} +var _ model.ComparableModel = &HAChassisGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go new file mode 100644 index 000000000..73a0bb943 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/igmp_group.go @@ -0,0 +1,147 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const IGMPGroupTable = "IGMP_Group" + +// IGMPGroup defines an object in IGMP_Group table +type IGMPGroup struct { + UUID string `ovsdb:"_uuid"` + Address string `ovsdb:"address"` + Chassis *string `ovsdb:"chassis"` + ChassisName string `ovsdb:"chassis_name"` + Datapath *string `ovsdb:"datapath"` + Ports []string `ovsdb:"ports"` + Protocol string `ovsdb:"protocol"` +} + +func (a *IGMPGroup) GetUUID() string { + return a.UUID +} + +func (a *IGMPGroup) GetAddress() string { + return a.Address +} + +func (a *IGMPGroup) GetChassis() *string { + return a.Chassis +} + +func copyIGMPGroupChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIGMPGroupChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IGMPGroup) GetChassisName() string { + return a.ChassisName +} + +func (a *IGMPGroup) GetDatapath() *string { + return a.Datapath +} + +func copyIGMPGroupDatapath(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIGMPGroupDatapath(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IGMPGroup) GetPorts() []string { + return a.Ports +} + +func copyIGMPGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalIGMPGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *IGMPGroup) GetProtocol() string { + return a.Protocol +} + +func (a *IGMPGroup) DeepCopyInto(b *IGMPGroup) { + *b = *a + b.Chassis = copyIGMPGroupChassis(a.Chassis) + b.Datapath = copyIGMPGroupDatapath(a.Datapath) + b.Ports = copyIGMPGroupPorts(a.Ports) +} + +func (a *IGMPGroup) DeepCopy() *IGMPGroup { + b := new(IGMPGroup) + a.DeepCopyInto(b) + return b +} + +func (a *IGMPGroup) CloneModelInto(b model.Model) { + c := b.(*IGMPGroup) + a.DeepCopyInto(c) +} + +func (a *IGMPGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *IGMPGroup) Equals(b *IGMPGroup) bool { + return a.UUID == b.UUID && + a.Address == b.Address && + equalIGMPGroupChassis(a.Chassis, b.Chassis) && + a.ChassisName == b.ChassisName && + equalIGMPGroupDatapath(a.Datapath, b.Datapath) && + equalIGMPGroupPorts(a.Ports, b.Ports) && + a.Protocol == b.Protocol +} + +func (a *IGMPGroup) EqualsModel(b model.Model) bool { + c := b.(*IGMPGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &IGMPGroup{} +var _ model.ComparableModel = &IGMPGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go new file mode 100644 index 000000000..493cd342d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ip_multicast.go @@ -0,0 +1,228 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const IPMulticastTable = "IP_Multicast" + +// IPMulticast defines an object in IP_Multicast table +type IPMulticast struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + Enabled *bool `ovsdb:"enabled"` + EthSrc string `ovsdb:"eth_src"` + IdleTimeout *int `ovsdb:"idle_timeout"` + Ip4Src string `ovsdb:"ip4_src"` + Ip6Src string `ovsdb:"ip6_src"` + Querier *bool `ovsdb:"querier"` + QueryInterval *int `ovsdb:"query_interval"` + QueryMaxResp *int `ovsdb:"query_max_resp"` + SeqNo int `ovsdb:"seq_no"` + TableSize *int `ovsdb:"table_size"` +} + +func (a *IPMulticast) GetUUID() string { + return a.UUID +} + +func (a *IPMulticast) GetDatapath() string { + return a.Datapath +} + +func (a *IPMulticast) GetEnabled() *bool { + return a.Enabled +} + +func copyIPMulticastEnabled(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastEnabled(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetEthSrc() string { + return a.EthSrc +} + +func (a *IPMulticast) GetIdleTimeout() *int { + return a.IdleTimeout +} + +func copyIPMulticastIdleTimeout(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastIdleTimeout(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetIp4Src() string { + return a.Ip4Src +} + +func (a *IPMulticast) GetIp6Src() string { + return a.Ip6Src +} + +func (a *IPMulticast) GetQuerier() *bool { + return a.Querier +} + +func copyIPMulticastQuerier(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastQuerier(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetQueryInterval() *int { + return a.QueryInterval +} + +func copyIPMulticastQueryInterval(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastQueryInterval(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetQueryMaxResp() *int { + return a.QueryMaxResp +} + +func copyIPMulticastQueryMaxResp(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastQueryMaxResp(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) GetSeqNo() int { + return a.SeqNo +} + +func (a *IPMulticast) GetTableSize() *int { + return a.TableSize +} + +func copyIPMulticastTableSize(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalIPMulticastTableSize(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *IPMulticast) DeepCopyInto(b *IPMulticast) { + *b = *a + b.Enabled = copyIPMulticastEnabled(a.Enabled) + b.IdleTimeout = copyIPMulticastIdleTimeout(a.IdleTimeout) + b.Querier = copyIPMulticastQuerier(a.Querier) + b.QueryInterval = copyIPMulticastQueryInterval(a.QueryInterval) + b.QueryMaxResp = copyIPMulticastQueryMaxResp(a.QueryMaxResp) + b.TableSize = copyIPMulticastTableSize(a.TableSize) +} + +func (a *IPMulticast) DeepCopy() *IPMulticast { + b := new(IPMulticast) + 
a.DeepCopyInto(b) + return b +} + +func (a *IPMulticast) CloneModelInto(b model.Model) { + c := b.(*IPMulticast) + a.DeepCopyInto(c) +} + +func (a *IPMulticast) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *IPMulticast) Equals(b *IPMulticast) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalIPMulticastEnabled(a.Enabled, b.Enabled) && + a.EthSrc == b.EthSrc && + equalIPMulticastIdleTimeout(a.IdleTimeout, b.IdleTimeout) && + a.Ip4Src == b.Ip4Src && + a.Ip6Src == b.Ip6Src && + equalIPMulticastQuerier(a.Querier, b.Querier) && + equalIPMulticastQueryInterval(a.QueryInterval, b.QueryInterval) && + equalIPMulticastQueryMaxResp(a.QueryMaxResp, b.QueryMaxResp) && + a.SeqNo == b.SeqNo && + equalIPMulticastTableSize(a.TableSize, b.TableSize) +} + +func (a *IPMulticast) EqualsModel(b model.Model) bool { + c := b.(*IPMulticast) + return a.Equals(c) +} + +var _ model.CloneableModel = &IPMulticast{} +var _ model.ComparableModel = &IPMulticast{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go new file mode 100644 index 000000000..bc341807e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/load_balancer.go @@ -0,0 +1,294 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const LoadBalancerTable = "Load_Balancer" + +type ( + LoadBalancerProtocol = string +) + +var ( + LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" + LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" + LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" +) + +// LoadBalancer defines an object in Load_Balancer table +type LoadBalancer struct { + UUID string `ovsdb:"_uuid"` + DatapathGroup *string `ovsdb:"datapath_group"` + Datapaths []string `ovsdb:"datapaths"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + LrDatapathGroup *string `ovsdb:"lr_datapath_group"` + LsDatapathGroup *string `ovsdb:"ls_datapath_group"` + Name string `ovsdb:"name"` + Options map[string]string `ovsdb:"options"` + Protocol *LoadBalancerProtocol `ovsdb:"protocol"` + Vips map[string]string `ovsdb:"vips"` +} + +func (a *LoadBalancer) GetUUID() string { + return a.UUID +} + +func (a *LoadBalancer) GetDatapathGroup() *string { + return a.DatapathGroup +} + +func copyLoadBalancerDatapathGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerDatapathGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetDatapaths() []string { + return a.Datapaths +} + +func copyLoadBalancerDatapaths(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLoadBalancerDatapaths(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LoadBalancer) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLoadBalancerExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } 
+ if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetLrDatapathGroup() *string { + return a.LrDatapathGroup +} + +func copyLoadBalancerLrDatapathGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerLrDatapathGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetLsDatapathGroup() *string { + return a.LsDatapathGroup +} + +func copyLoadBalancerLsDatapathGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerLsDatapathGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetName() string { + return a.Name +} + +func (a *LoadBalancer) GetOptions() map[string]string { + return a.Options +} + +func copyLoadBalancerOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) GetProtocol() *LoadBalancerProtocol { + return a.Protocol +} + +func copyLoadBalancerProtocol(a *LoadBalancerProtocol) *LoadBalancerProtocol { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLoadBalancerProtocol(a, b *LoadBalancerProtocol) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LoadBalancer) GetVips() map[string]string { + return a.Vips +} + +func copyLoadBalancerVips(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLoadBalancerVips(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LoadBalancer) DeepCopyInto(b *LoadBalancer) { + *b = *a + b.DatapathGroup = copyLoadBalancerDatapathGroup(a.DatapathGroup) + b.Datapaths = copyLoadBalancerDatapaths(a.Datapaths) + b.ExternalIDs = copyLoadBalancerExternalIDs(a.ExternalIDs) + b.LrDatapathGroup = copyLoadBalancerLrDatapathGroup(a.LrDatapathGroup) + b.LsDatapathGroup = copyLoadBalancerLsDatapathGroup(a.LsDatapathGroup) + b.Options = copyLoadBalancerOptions(a.Options) + b.Protocol = copyLoadBalancerProtocol(a.Protocol) + b.Vips = copyLoadBalancerVips(a.Vips) +} + +func (a *LoadBalancer) DeepCopy() *LoadBalancer { + b := new(LoadBalancer) + a.DeepCopyInto(b) + return b +} + +func (a *LoadBalancer) CloneModelInto(b model.Model) { + c := b.(*LoadBalancer) + a.DeepCopyInto(c) +} + +func (a *LoadBalancer) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LoadBalancer) Equals(b *LoadBalancer) bool { + return a.UUID == b.UUID && + equalLoadBalancerDatapathGroup(a.DatapathGroup, b.DatapathGroup) && + equalLoadBalancerDatapaths(a.Datapaths, b.Datapaths) && + equalLoadBalancerExternalIDs(a.ExternalIDs, b.ExternalIDs) && + 
equalLoadBalancerLrDatapathGroup(a.LrDatapathGroup, b.LrDatapathGroup) && + equalLoadBalancerLsDatapathGroup(a.LsDatapathGroup, b.LsDatapathGroup) && + a.Name == b.Name && + equalLoadBalancerOptions(a.Options, b.Options) && + equalLoadBalancerProtocol(a.Protocol, b.Protocol) && + equalLoadBalancerVips(a.Vips, b.Vips) +} + +func (a *LoadBalancer) EqualsModel(b model.Model) bool { + c := b.(*LoadBalancer) + return a.Equals(c) +} + +var _ model.CloneableModel = &LoadBalancer{} +var _ model.ComparableModel = &LoadBalancer{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go new file mode 100644 index 000000000..911de2eed --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_dp_group.go @@ -0,0 +1,79 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalDPGroupTable = "Logical_DP_Group" + +// LogicalDPGroup defines an object in Logical_DP_Group table +type LogicalDPGroup struct { + UUID string `ovsdb:"_uuid"` + Datapaths []string `ovsdb:"datapaths"` +} + +func (a *LogicalDPGroup) GetUUID() string { + return a.UUID +} + +func (a *LogicalDPGroup) GetDatapaths() []string { + return a.Datapaths +} + +func copyLogicalDPGroupDatapaths(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalLogicalDPGroupDatapaths(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *LogicalDPGroup) DeepCopyInto(b *LogicalDPGroup) { + *b = *a + b.Datapaths = copyLogicalDPGroupDatapaths(a.Datapaths) +} + +func (a *LogicalDPGroup) DeepCopy() *LogicalDPGroup { + b := new(LogicalDPGroup) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalDPGroup) CloneModelInto(b model.Model) { + c := b.(*LogicalDPGroup) + a.DeepCopyInto(c) +} + +func (a *LogicalDPGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalDPGroup) Equals(b *LogicalDPGroup) bool { + return a.UUID == b.UUID && + equalLogicalDPGroupDatapaths(a.Datapaths, b.Datapaths) +} + +func (a *LogicalDPGroup) EqualsModel(b model.Model) bool { + c := b.(*LogicalDPGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalDPGroup{} +var _ model.ComparableModel = &LogicalDPGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go new file mode 100644 index 000000000..42af1cdf5 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/logical_flow.go @@ -0,0 +1,253 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const LogicalFlowTable = "Logical_Flow" + +type ( + LogicalFlowPipeline = string +) + +var ( + LogicalFlowPipelineIngress LogicalFlowPipeline = "ingress" + LogicalFlowPipelineEgress LogicalFlowPipeline = "egress" +) + +// LogicalFlow defines an object in Logical_Flow table +type LogicalFlow struct { + UUID string `ovsdb:"_uuid"` + Actions string `ovsdb:"actions"` + ControllerMeter *string `ovsdb:"controller_meter"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + FlowDesc *string `ovsdb:"flow_desc"` + LogicalDatapath *string `ovsdb:"logical_datapath"` + LogicalDpGroup *string `ovsdb:"logical_dp_group"` + Match string `ovsdb:"match"` + Pipeline LogicalFlowPipeline `ovsdb:"pipeline"` + Priority int `ovsdb:"priority"` + TableID int `ovsdb:"table_id"` + Tags map[string]string `ovsdb:"tags"` +} + +func (a *LogicalFlow) GetUUID() string { + return a.UUID +} + +func (a *LogicalFlow) GetActions() string { + return a.Actions +} + +func (a *LogicalFlow) GetControllerMeter() *string { + return a.ControllerMeter +} + +func copyLogicalFlowControllerMeter(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowControllerMeter(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLogicalFlowExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLogicalFlowExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalFlow) GetFlowDesc() *string { + return a.FlowDesc +} + +func copyLogicalFlowFlowDesc(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowFlowDesc(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetLogicalDatapath() *string { + return a.LogicalDatapath +} + +func copyLogicalFlowLogicalDatapath(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowLogicalDatapath(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetLogicalDpGroup() *string { + return a.LogicalDpGroup +} + +func copyLogicalFlowLogicalDpGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalFlowLogicalDpGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *LogicalFlow) GetMatch() string { + return a.Match +} + +func (a *LogicalFlow) GetPipeline() LogicalFlowPipeline { + return a.Pipeline +} + +func (a *LogicalFlow) GetPriority() int { + return a.Priority +} + +func (a *LogicalFlow) GetTableID() int { + return a.TableID +} + +func (a *LogicalFlow) GetTags() map[string]string { + return a.Tags +} + +func copyLogicalFlowTags(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + 
+func equalLogicalFlowTags(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LogicalFlow) DeepCopyInto(b *LogicalFlow) { + *b = *a + b.ControllerMeter = copyLogicalFlowControllerMeter(a.ControllerMeter) + b.ExternalIDs = copyLogicalFlowExternalIDs(a.ExternalIDs) + b.FlowDesc = copyLogicalFlowFlowDesc(a.FlowDesc) + b.LogicalDatapath = copyLogicalFlowLogicalDatapath(a.LogicalDatapath) + b.LogicalDpGroup = copyLogicalFlowLogicalDpGroup(a.LogicalDpGroup) + b.Tags = copyLogicalFlowTags(a.Tags) +} + +func (a *LogicalFlow) DeepCopy() *LogicalFlow { + b := new(LogicalFlow) + a.DeepCopyInto(b) + return b +} + +func (a *LogicalFlow) CloneModelInto(b model.Model) { + c := b.(*LogicalFlow) + a.DeepCopyInto(c) +} + +func (a *LogicalFlow) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LogicalFlow) Equals(b *LogicalFlow) bool { + return a.UUID == b.UUID && + a.Actions == b.Actions && + equalLogicalFlowControllerMeter(a.ControllerMeter, b.ControllerMeter) && + equalLogicalFlowExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalFlowFlowDesc(a.FlowDesc, b.FlowDesc) && + equalLogicalFlowLogicalDatapath(a.LogicalDatapath, b.LogicalDatapath) && + equalLogicalFlowLogicalDpGroup(a.LogicalDpGroup, b.LogicalDpGroup) && + a.Match == b.Match && + a.Pipeline == b.Pipeline && + a.Priority == b.Priority && + a.TableID == b.TableID && + equalLogicalFlowTags(a.Tags, b.Tags) +} + +func (a *LogicalFlow) EqualsModel(b model.Model) bool { + c := b.(*LogicalFlow) + return a.Equals(c) +} + +var _ model.CloneableModel = &LogicalFlow{} +var _ model.ComparableModel = &LogicalFlow{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go new file mode 100644 index 000000000..705431f1d --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mac_binding.go @@ -0,0 +1,78 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MACBindingTable = "MAC_Binding" + +// MACBinding defines an object in MAC_Binding table +type MACBinding struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + IP string `ovsdb:"ip"` + LogicalPort string `ovsdb:"logical_port"` + MAC string `ovsdb:"mac"` + Timestamp int `ovsdb:"timestamp"` +} + +func (a *MACBinding) GetUUID() string { + return a.UUID +} + +func (a *MACBinding) GetDatapath() string { + return a.Datapath +} + +func (a *MACBinding) GetIP() string { + return a.IP +} + +func (a *MACBinding) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *MACBinding) GetMAC() string { + return a.MAC +} + +func (a *MACBinding) GetTimestamp() int { + return a.Timestamp +} + +func (a *MACBinding) DeepCopyInto(b *MACBinding) { + *b = *a +} + +func (a *MACBinding) DeepCopy() *MACBinding { + b := new(MACBinding) + a.DeepCopyInto(b) + return b +} + +func (a *MACBinding) CloneModelInto(b model.Model) { + c := b.(*MACBinding) + a.DeepCopyInto(c) +} + +func (a *MACBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MACBinding) Equals(b *MACBinding) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + a.IP == b.IP && + a.LogicalPort == b.LogicalPort && + a.MAC == b.MAC && + a.Timestamp == b.Timestamp +} + +func (a *MACBinding) EqualsModel(b model.Model) bool { + c := b.(*MACBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &MACBinding{} +var _ model.ComparableModel = &MACBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go new file mode 100644 index 000000000..95c4daec2 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter.go @@ -0,0 +1,100 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterTable = "Meter" + +type ( + MeterUnit = string +) + +var ( + MeterUnitKbps MeterUnit = "kbps" + MeterUnitPktps MeterUnit = "pktps" +) + +// Meter defines an object in Meter table +type Meter struct { + UUID string `ovsdb:"_uuid"` + Bands []string `ovsdb:"bands"` + Name string `ovsdb:"name"` + Unit MeterUnit `ovsdb:"unit"` +} + +func (a *Meter) GetUUID() string { + return a.UUID +} + +func (a *Meter) GetBands() []string { + return a.Bands +} + +func copyMeterBands(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMeterBands(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *Meter) GetName() string { + return a.Name +} + +func (a *Meter) GetUnit() MeterUnit { + return a.Unit +} + +func (a *Meter) DeepCopyInto(b *Meter) { + *b = *a + b.Bands = copyMeterBands(a.Bands) +} + +func (a *Meter) DeepCopy() *Meter { + b := new(Meter) + a.DeepCopyInto(b) + return b +} + +func (a *Meter) CloneModelInto(b model.Model) { + c := b.(*Meter) + a.DeepCopyInto(c) +} + +func (a *Meter) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Meter) Equals(b *Meter) bool { + return a.UUID == b.UUID && + equalMeterBands(a.Bands, b.Bands) && + a.Name == b.Name && + a.Unit == b.Unit +} + +func (a *Meter) EqualsModel(b model.Model) bool { + c := b.(*Meter) + return a.Equals(c) +} + +var _ model.CloneableModel = &Meter{} +var _ model.ComparableModel = &Meter{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go new file mode 100644 index 000000000..addb01b64 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/meter_band.go @@ -0,0 +1,74 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MeterBandTable = "Meter_Band" + +type ( + MeterBandAction = string +) + +var ( + MeterBandActionDrop MeterBandAction = "drop" +) + +// MeterBand defines an object in Meter_Band table +type MeterBand struct { + UUID string `ovsdb:"_uuid"` + Action MeterBandAction `ovsdb:"action"` + BurstSize int `ovsdb:"burst_size"` + Rate int `ovsdb:"rate"` +} + +func (a *MeterBand) GetUUID() string { + return a.UUID +} + +func (a *MeterBand) GetAction() MeterBandAction { + return a.Action +} + +func (a *MeterBand) GetBurstSize() int { + return a.BurstSize +} + +func (a *MeterBand) GetRate() int { + return a.Rate +} + +func (a *MeterBand) DeepCopyInto(b *MeterBand) { + *b = *a +} + +func (a *MeterBand) DeepCopy() *MeterBand { + b := new(MeterBand) + a.DeepCopyInto(b) + return b +} + +func (a *MeterBand) CloneModelInto(b model.Model) { + c := b.(*MeterBand) + a.DeepCopyInto(c) +} + +func (a *MeterBand) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MeterBand) Equals(b *MeterBand) bool { + return a.UUID == b.UUID && + a.Action == b.Action && + a.BurstSize == b.BurstSize && + a.Rate == b.Rate +} + +func (a *MeterBand) EqualsModel(b model.Model) bool { + c := b.(*MeterBand) + return a.Equals(c) +} + +var _ model.CloneableModel = &MeterBand{} +var _ model.ComparableModel = &MeterBand{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go new file mode 100644 index 000000000..69444ea73 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/mirror.go @@ -0,0 +1,125 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MirrorTable = "Mirror" + +type ( + MirrorFilter = string + MirrorType = string +) + +var ( + MirrorFilterFromLport MirrorFilter = "from-lport" + MirrorFilterToLport MirrorFilter = "to-lport" + MirrorFilterBoth MirrorFilter = "both" + MirrorTypeGre MirrorType = "gre" + MirrorTypeErspan MirrorType = "erspan" + MirrorTypeLocal MirrorType = "local" +) + +// Mirror defines an object in Mirror table +type Mirror struct { + UUID string `ovsdb:"_uuid"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Filter MirrorFilter `ovsdb:"filter"` + Index int `ovsdb:"index"` + Name string `ovsdb:"name"` + Sink string `ovsdb:"sink"` + Type MirrorType `ovsdb:"type"` +} + +func (a *Mirror) GetUUID() string { + return a.UUID +} + +func (a *Mirror) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyMirrorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalMirrorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *Mirror) GetFilter() MirrorFilter { + return a.Filter +} + +func (a *Mirror) GetIndex() int { + return a.Index +} + +func (a *Mirror) GetName() string { + return a.Name +} + +func (a *Mirror) GetSink() string { + return a.Sink +} + +func (a *Mirror) GetType() MirrorType { + return a.Type +} + +func (a *Mirror) DeepCopyInto(b *Mirror) { + *b = *a + b.ExternalIDs = copyMirrorExternalIDs(a.ExternalIDs) +} + +func (a *Mirror) DeepCopy() *Mirror { + b := 
new(Mirror) + a.DeepCopyInto(b) + return b +} + +func (a *Mirror) CloneModelInto(b model.Model) { + c := b.(*Mirror) + a.DeepCopyInto(c) +} + +func (a *Mirror) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *Mirror) Equals(b *Mirror) bool { + return a.UUID == b.UUID && + equalMirrorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Filter == b.Filter && + a.Index == b.Index && + a.Name == b.Name && + a.Sink == b.Sink && + a.Type == b.Type +} + +func (a *Mirror) EqualsModel(b model.Model) bool { + c := b.(*Mirror) + return a.Equals(c) +} + +var _ model.CloneableModel = &Mirror{} +var _ model.ComparableModel = &Mirror{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go new file mode 100644 index 000000000..bc838fe49 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/model.go @@ -0,0 +1,1884 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import ( + "encoding/json" + + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" +) + +// FullDatabaseModel returns the DatabaseModel object to be used in libovsdb +func FullDatabaseModel() (model.ClientDBModel, error) { + return model.NewClientDBModel("OVN_Southbound", map[string]model.Model{ + "Address_Set": &AddressSet{}, + "BFD": &BFD{}, + "Chassis": &Chassis{}, + "Chassis_Private": &ChassisPrivate{}, + "Chassis_Template_Var": &ChassisTemplateVar{}, + "Connection": &Connection{}, + "Controller_Event": &ControllerEvent{}, + "DHCP_Options": &DHCPOptions{}, + "DHCPv6_Options": &DHCPv6Options{}, + "DNS": &DNS{}, + "Datapath_Binding": &DatapathBinding{}, + "Encap": &Encap{}, + "FDB": &FDB{}, + "Gateway_Chassis": &GatewayChassis{}, + "HA_Chassis": &HAChassis{}, + "HA_Chassis_Group": &HAChassisGroup{}, + "IGMP_Group": &IGMPGroup{}, + "IP_Multicast": &IPMulticast{}, + "Load_Balancer": &LoadBalancer{}, + "Logical_DP_Group": &LogicalDPGroup{}, + "Logical_Flow": &LogicalFlow{}, + "MAC_Binding": &MACBinding{}, + "Meter": &Meter{}, + "Meter_Band": &MeterBand{}, + "Mirror": &Mirror{}, + "Multicast_Group": &MulticastGroup{}, + "Port_Binding": &PortBinding{}, + "Port_Group": &PortGroup{}, + "RBAC_Permission": &RBACPermission{}, + "RBAC_Role": &RBACRole{}, + "SB_Global": &SBGlobal{}, + "SSL": &SSL{}, + "Service_Monitor": &ServiceMonitor{}, + "Static_MAC_Binding": &StaticMACBinding{}, + }) +} + +var schema = `{ + "name": "OVN_Southbound", + "version": "20.37.0", + "tables": { + "Address_Set": { + "columns": { + "addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "BFD": { + "columns": { + "chassis_name": { + "type": "string" + }, + "detect_mult": { + "type": "integer" + }, + "disc": { + "type": "integer" + }, + "dst_ip": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "logical_port": { + "type": "string" + }, + "min_rx": { + "type": "integer" + }, + "min_tx": { + "type": "integer" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "src_port": { + "type": { + "key": { + "type": "integer", + "minInteger": 49152, + "maxInteger": 65535 + } + } + }, + "status": { + "type": { + "key": { + "type": 
"string", + "enum": [ + "set", + [ + "down", + "init", + "up", + "admin_down" + ] + ] + } + } + } + }, + "indexes": [ + [ + "logical_port", + "dst_ip", + "src_port", + "disc" + ] + ], + "isRoot": true + }, + "Chassis": { + "columns": { + "encaps": { + "type": { + "key": { + "type": "uuid", + "refTable": "Encap" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "hostname": { + "type": "string" + }, + "name": { + "type": "string" + }, + "nb_cfg": { + "type": "integer" + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "transport_zones": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "vtep_logical_switches": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Chassis_Private": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "nb_cfg": { + "type": "integer" + }, + "nb_cfg_timestamp": { + "type": "integer" + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Chassis_Template_Var": { + "columns": { + "chassis": { + "type": "string" + }, + "variables": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "chassis" + ] + ], + "isRoot": true + }, + "Connection": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "inactivity_probe": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "is_connected": { + "type": "boolean", + "ephemeral": true + }, + "max_backoff": { + "type": { + "key": { + "type": "integer", + "minInteger": 1000 + }, + "min": 0, + "max": 1 + } + }, + "other_config": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "read_only": { + "type": "boolean" + }, + "role": { + "type": "string" + }, + "status": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + }, + "ephemeral": true + }, + "target": { + "type": "string" + } + }, + "indexes": [ + [ + "target" + ] + ] + }, + "Controller_Event": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "event_info": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "event_type": { + "type": { + "key": { + "type": "string", + "enum": "empty_lb_backends" + } + } + }, + "seq_num": { + "type": "integer" + } + }, + "isRoot": true + }, + "DHCP_Options": { + "columns": { + "code": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 254 + } + } + }, + "name": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + 
"bool", + "uint8", + "uint16", + "uint32", + "ipv4", + "static_routes", + "str", + "host_id", + "domains" + ] + ] + } + } + } + }, + "isRoot": true + }, + "DHCPv6_Options": { + "columns": { + "code": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 254 + } + } + }, + "name": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "ipv6", + "str", + "mac", + "domain" + ] + ] + } + } + } + }, + "isRoot": true + }, + "DNS": { + "columns": { + "datapaths": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + }, + "min": 1, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "records": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Datapath_Binding": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "load_balancers": { + "type": { + "key": { + "type": "uuid" + }, + "min": 0, + "max": "unlimited" + } + }, + "tunnel_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 16777215 + } + } + } + }, + "indexes": [ + [ + "tunnel_key" + ] + ], + "isRoot": true + }, + "Encap": { + "columns": { + "chassis_name": { + "type": "string" + }, + "ip": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "geneve", + "stt", + "vxlan" + ] + ] + } + } + } + }, + "indexes": [ + [ + "type", + "ip" + ] + ] + }, + "FDB": { + "columns": { + "dp_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 16777215 + } + } + }, + "mac": { + "type": "string" + }, + "port_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 16777215 + } + } + }, + "timestamp": { + "type": "integer" + } + }, + "indexes": [ + [ + "mac", + "dp_key" + ] + ], + "isRoot": true + }, + "Gateway_Chassis": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + }, + "indexes": [ + [ + "name" + ] + ] + }, + "HA_Chassis": { + "columns": { + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + } + }, + 
"HA_Chassis_Group": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "ref_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "IGMP_Group": { + "columns": { + "address": { + "type": "string" + }, + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "chassis_name": { + "type": "string" + }, + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocol": { + "type": "string" + } + }, + "indexes": [ + [ + "address", + "datapath", + "chassis" + ] + ], + "isRoot": true + }, + "IP_Multicast": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "weak" + } + } + }, + "enabled": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "eth_src": { + "type": "string" + }, + "idle_timeout": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "ip4_src": { + "type": "string" + }, + "ip6_src": { + "type": "string" + }, + "querier": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "query_interval": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "query_max_resp": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + }, + "seq_no": { + "type": "integer" + }, + "table_size": { + "type": { + "key": { + "type": "integer" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "datapath" + ] + ], + "isRoot": true + }, + "Load_Balancer": { + "columns": { + "datapath_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "datapaths": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "lr_datapath_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "ls_datapath_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "name": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "protocol": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp", + "udp", + "sctp" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "vips": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "Logical_DP_Group": { + "columns": { + "datapaths": { + "type": { + "key": { + 
"type": "uuid", + "refTable": "Datapath_Binding", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + } + }, + "Logical_Flow": { + "columns": { + "actions": { + "type": "string" + }, + "controller_meter": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "flow_desc": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "logical_datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + }, + "min": 0, + "max": 1 + } + }, + "logical_dp_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "Logical_DP_Group" + }, + "min": 0, + "max": 1 + } + }, + "match": { + "type": "string" + }, + "pipeline": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "ingress", + "egress" + ] + ] + } + } + }, + "priority": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "table_id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32 + } + } + }, + "tags": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "MAC_Binding": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "timestamp": { + "type": "integer" + } + }, + "indexes": [ + [ + "logical_port", + "ip" + ] + ], + "isRoot": true + }, + "Meter": { + "columns": { + "bands": { + "type": { + "key": { + "type": "uuid", + "refTable": "Meter_Band", + "refType": "strong" + }, + "min": 1, + "max": "unlimited" + } + }, + "name": { + "type": "string" + }, + "unit": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "kbps", + "pktps" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Meter_Band": { + "columns": { + "action": { + "type": { + "key": { + "type": "string", + "enum": "drop" + } + } + }, + "burst_size": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 4294967295 + } + } + }, + "rate": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4294967295 + } + } + } + } + }, + "Mirror": { + "columns": { + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "filter": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "from-lport", + "to-lport", + "both" + ] + ] + } + } + }, + "index": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "sink": { + "type": "string" + }, + "type": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "gre", + "erspan", + "local" + ] + ] + } + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "Multicast_Group": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "name": { + "type": "string" + }, + "ports": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "tunnel_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 32768, + "maxInteger": 
65535 + } + } + } + }, + "indexes": [ + [ + "datapath", + "tunnel_key" + ], + [ + "datapath", + "name" + ] + ], + "isRoot": true + }, + "Port_Binding": { + "columns": { + "additional_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "additional_encap": { + "type": { + "key": { + "type": "uuid", + "refTable": "Encap", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "encap": { + "type": { + "key": { + "type": "uuid", + "refTable": "Encap", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "gateway_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Gateway_Chassis", + "refType": "strong" + }, + "min": 0, + "max": "unlimited" + } + }, + "ha_chassis_group": { + "type": { + "key": { + "type": "uuid", + "refTable": "HA_Chassis_Group", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mirror_rules": { + "type": { + "key": { + "type": "uuid", + "refTable": "Mirror", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "nat_addresses": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "parent_port": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, + "port_security": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "requested_additional_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + }, + "requested_chassis": { + "type": { + "key": { + "type": "uuid", + "refTable": "Chassis", + "refType": "weak" + }, + "min": 0, + "max": 1 + } + }, + "tag": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 4095 + }, + "min": 0, + "max": 1 + } + }, + "tunnel_key": { + "type": { + "key": { + "type": "integer", + "minInteger": 1, + "maxInteger": 32767 + } + } + }, + "type": { + "type": "string" + }, + "up": { + "type": { + "key": { + "type": "boolean" + }, + "min": 0, + "max": 1 + } + }, + "virtual_parent": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "datapath", + "tunnel_key" + ], + [ + "logical_port" + ] + ], + "isRoot": true + }, + "Port_Group": { + "columns": { + "name": { + "type": "string" + }, + "ports": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "indexes": [ + [ + "name" + ] + ], + "isRoot": true + }, + "RBAC_Permission": { + "columns": { + "authorization": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "insert_delete": { + "type": "boolean" + }, + "table": { + "type": "string" + }, + "update": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + } + 
}, + "isRoot": true + }, + "RBAC_Role": { + "columns": { + "name": { + "type": "string" + }, + "permissions": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "uuid", + "refTable": "RBAC_Permission", + "refType": "weak" + }, + "min": 0, + "max": "unlimited" + } + } + }, + "isRoot": true + }, + "SB_Global": { + "columns": { + "connections": { + "type": { + "key": { + "type": "uuid", + "refTable": "Connection" + }, + "min": 0, + "max": "unlimited" + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ipsec": { + "type": "boolean" + }, + "nb_cfg": { + "type": "integer" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ssl": { + "type": { + "key": { + "type": "uuid", + "refTable": "SSL" + }, + "min": 0, + "max": 1 + } + } + }, + "isRoot": true + }, + "SSL": { + "columns": { + "bootstrap_ca_cert": { + "type": "boolean" + }, + "ca_cert": { + "type": "string" + }, + "certificate": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "private_key": { + "type": "string" + }, + "ssl_ciphers": { + "type": "string" + }, + "ssl_protocols": { + "type": "string" + } + } + }, + "Service_Monitor": { + "columns": { + "chassis_name": { + "type": "string" + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "options": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "port": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 65535 + } + } + }, + "protocol": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "tcp", + "udp" + ] + ] + }, + "min": 0, + "max": 1 + } + }, + "src_ip": { + "type": "string" + }, + "src_mac": { + "type": "string" + }, + "status": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "online", + "offline", + "error" + ] + ] + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "logical_port", + "ip", + "port", + "protocol" + ] + ], + "isRoot": true + }, + "Static_MAC_Binding": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding" + } + } + }, + "ip": { + "type": "string" + }, + "logical_port": { + "type": "string" + }, + "mac": { + "type": "string" + }, + "override_dynamic_mac": { + "type": "boolean" + } + }, + "indexes": [ + [ + "logical_port", + "ip" + ] + ], + "isRoot": true + } + } +}` + +func Schema() ovsdb.DatabaseSchema { + var s ovsdb.DatabaseSchema + err := json.Unmarshal([]byte(schema), &s) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go new file mode 100644 index 000000000..1af933ea6 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/multicast_group.go @@ -0,0 +1,97 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const MulticastGroupTable = "Multicast_Group" + +// MulticastGroup defines an object in Multicast_Group table +type MulticastGroup struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` + TunnelKey int `ovsdb:"tunnel_key"` +} + +func (a *MulticastGroup) GetUUID() string { + return a.UUID +} + +func (a *MulticastGroup) GetDatapath() string { + return a.Datapath +} + +func (a *MulticastGroup) GetName() string { + return a.Name +} + +func (a *MulticastGroup) GetPorts() []string { + return a.Ports +} + +func copyMulticastGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalMulticastGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *MulticastGroup) GetTunnelKey() int { + return a.TunnelKey +} + +func (a *MulticastGroup) DeepCopyInto(b *MulticastGroup) { + *b = *a + b.Ports = copyMulticastGroupPorts(a.Ports) +} + +func (a *MulticastGroup) DeepCopy() *MulticastGroup { + b := new(MulticastGroup) + a.DeepCopyInto(b) + return b +} + +func (a *MulticastGroup) CloneModelInto(b model.Model) { + c := b.(*MulticastGroup) + a.DeepCopyInto(c) +} + +func (a *MulticastGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *MulticastGroup) Equals(b *MulticastGroup) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + a.Name == b.Name && + equalMulticastGroupPorts(a.Ports, b.Ports) && + a.TunnelKey == b.TunnelKey +} + +func (a *MulticastGroup) EqualsModel(b model.Model) bool { + c := b.(*MulticastGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &MulticastGroup{} +var _ model.ComparableModel = &MulticastGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go new file mode 100644 index 000000000..b3d30f843 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_binding.go @@ -0,0 +1,586 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const PortBindingTable = "Port_Binding" + +// PortBinding defines an object in Port_Binding table +type PortBinding struct { + UUID string `ovsdb:"_uuid"` + AdditionalChassis []string `ovsdb:"additional_chassis"` + AdditionalEncap []string `ovsdb:"additional_encap"` + Chassis *string `ovsdb:"chassis"` + Datapath string `ovsdb:"datapath"` + Encap *string `ovsdb:"encap"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + GatewayChassis []string `ovsdb:"gateway_chassis"` + HaChassisGroup *string `ovsdb:"ha_chassis_group"` + LogicalPort string `ovsdb:"logical_port"` + MAC []string `ovsdb:"mac"` + MirrorRules []string `ovsdb:"mirror_rules"` + NatAddresses []string `ovsdb:"nat_addresses"` + Options map[string]string `ovsdb:"options"` + ParentPort *string `ovsdb:"parent_port"` + PortSecurity []string `ovsdb:"port_security"` + RequestedAdditionalChassis []string `ovsdb:"requested_additional_chassis"` + RequestedChassis *string `ovsdb:"requested_chassis"` + Tag *int `ovsdb:"tag"` + TunnelKey int `ovsdb:"tunnel_key"` + Type string `ovsdb:"type"` + Up *bool `ovsdb:"up"` + VirtualParent *string `ovsdb:"virtual_parent"` +} + +func (a *PortBinding) GetUUID() string { + return a.UUID +} + +func (a *PortBinding) GetAdditionalChassis() []string { + return a.AdditionalChassis +} + +func copyPortBindingAdditionalChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingAdditionalChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetAdditionalEncap() []string { + return a.AdditionalEncap +} + +func copyPortBindingAdditionalEncap(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingAdditionalEncap(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetChassis() *string { + return a.Chassis +} + +func copyPortBindingChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetDatapath() string { + return a.Datapath +} + +func (a *PortBinding) GetEncap() *string { + return a.Encap +} + +func copyPortBindingEncap(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingEncap(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyPortBindingExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortBindingExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *PortBinding) GetGatewayChassis() 
[]string { + return a.GatewayChassis +} + +func copyPortBindingGatewayChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingGatewayChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetHaChassisGroup() *string { + return a.HaChassisGroup +} + +func copyPortBindingHaChassisGroup(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingHaChassisGroup(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *PortBinding) GetMAC() []string { + return a.MAC +} + +func copyPortBindingMAC(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingMAC(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetMirrorRules() []string { + return a.MirrorRules +} + +func copyPortBindingMirrorRules(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingMirrorRules(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetNatAddresses() []string { + return a.NatAddresses +} + +func copyPortBindingNatAddresses(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingNatAddresses(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetOptions() map[string]string { + return a.Options +} + +func copyPortBindingOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalPortBindingOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *PortBinding) GetParentPort() *string { + return a.ParentPort +} + +func copyPortBindingParentPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingParentPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetPortSecurity() []string { + return a.PortSecurity +} + +func copyPortBindingPortSecurity(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingPortSecurity(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + 
return true +} + +func (a *PortBinding) GetRequestedAdditionalChassis() []string { + return a.RequestedAdditionalChassis +} + +func copyPortBindingRequestedAdditionalChassis(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortBindingRequestedAdditionalChassis(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortBinding) GetRequestedChassis() *string { + return a.RequestedChassis +} + +func copyPortBindingRequestedChassis(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingRequestedChassis(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetTag() *int { + return a.Tag +} + +func copyPortBindingTag(a *int) *int { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingTag(a, b *int) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetTunnelKey() int { + return a.TunnelKey +} + +func (a *PortBinding) GetType() string { + return a.Type +} + +func (a *PortBinding) GetUp() *bool { + return a.Up +} + +func copyPortBindingUp(a *bool) *bool { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingUp(a, b *bool) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) GetVirtualParent() *string { + return a.VirtualParent +} + +func copyPortBindingVirtualParent(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalPortBindingVirtualParent(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *PortBinding) DeepCopyInto(b *PortBinding) { + *b = *a + b.AdditionalChassis = copyPortBindingAdditionalChassis(a.AdditionalChassis) + b.AdditionalEncap = copyPortBindingAdditionalEncap(a.AdditionalEncap) + b.Chassis = copyPortBindingChassis(a.Chassis) + b.Encap = copyPortBindingEncap(a.Encap) + b.ExternalIDs = copyPortBindingExternalIDs(a.ExternalIDs) + b.GatewayChassis = copyPortBindingGatewayChassis(a.GatewayChassis) + b.HaChassisGroup = copyPortBindingHaChassisGroup(a.HaChassisGroup) + b.MAC = copyPortBindingMAC(a.MAC) + b.MirrorRules = copyPortBindingMirrorRules(a.MirrorRules) + b.NatAddresses = copyPortBindingNatAddresses(a.NatAddresses) + b.Options = copyPortBindingOptions(a.Options) + b.ParentPort = copyPortBindingParentPort(a.ParentPort) + b.PortSecurity = copyPortBindingPortSecurity(a.PortSecurity) + b.RequestedAdditionalChassis = copyPortBindingRequestedAdditionalChassis(a.RequestedAdditionalChassis) + b.RequestedChassis = copyPortBindingRequestedChassis(a.RequestedChassis) + b.Tag = copyPortBindingTag(a.Tag) + b.Up = copyPortBindingUp(a.Up) + b.VirtualParent = copyPortBindingVirtualParent(a.VirtualParent) +} + +func (a *PortBinding) DeepCopy() *PortBinding { + b := new(PortBinding) + a.DeepCopyInto(b) + return b +} + +func (a *PortBinding) CloneModelInto(b model.Model) { + c := b.(*PortBinding) + a.DeepCopyInto(c) +} + +func (a *PortBinding) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *PortBinding) Equals(b *PortBinding) bool { + return a.UUID == b.UUID && + 
equalPortBindingAdditionalChassis(a.AdditionalChassis, b.AdditionalChassis) && + equalPortBindingAdditionalEncap(a.AdditionalEncap, b.AdditionalEncap) && + equalPortBindingChassis(a.Chassis, b.Chassis) && + a.Datapath == b.Datapath && + equalPortBindingEncap(a.Encap, b.Encap) && + equalPortBindingExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalPortBindingGatewayChassis(a.GatewayChassis, b.GatewayChassis) && + equalPortBindingHaChassisGroup(a.HaChassisGroup, b.HaChassisGroup) && + a.LogicalPort == b.LogicalPort && + equalPortBindingMAC(a.MAC, b.MAC) && + equalPortBindingMirrorRules(a.MirrorRules, b.MirrorRules) && + equalPortBindingNatAddresses(a.NatAddresses, b.NatAddresses) && + equalPortBindingOptions(a.Options, b.Options) && + equalPortBindingParentPort(a.ParentPort, b.ParentPort) && + equalPortBindingPortSecurity(a.PortSecurity, b.PortSecurity) && + equalPortBindingRequestedAdditionalChassis(a.RequestedAdditionalChassis, b.RequestedAdditionalChassis) && + equalPortBindingRequestedChassis(a.RequestedChassis, b.RequestedChassis) && + equalPortBindingTag(a.Tag, b.Tag) && + a.TunnelKey == b.TunnelKey && + a.Type == b.Type && + equalPortBindingUp(a.Up, b.Up) && + equalPortBindingVirtualParent(a.VirtualParent, b.VirtualParent) +} + +func (a *PortBinding) EqualsModel(b model.Model) bool { + c := b.(*PortBinding) + return a.Equals(c) +} + +var _ model.CloneableModel = &PortBinding{} +var _ model.ComparableModel = &PortBinding{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go new file mode 100644 index 000000000..358e26b33 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/port_group.go @@ -0,0 +1,85 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
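(Aside: taken together, the generated copy and equality helpers above give PortBinding full value semantics: DeepCopy duplicates every slice, map, and pointer column, so a clone can be mutated without aliasing the original row. A minimal sketch of that guarantee; all field values below are hypothetical.)

```go
package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
	orig := &sbdb.PortBinding{
		UUID:        "pb-uuid-1", // hypothetical values throughout
		LogicalPort: "ns_pod",
		MAC:         []string{"0a:58:0a:80:00:05 10.128.0.5"},
		ExternalIDs: map[string]string{"pod": "true"},
	}

	clone := orig.DeepCopy()
	clone.MAC[0] = "0a:58:0a:80:00:06 10.128.0.6" // mutates the clone's own slice
	clone.ExternalIDs["pod"] = "false"            // and its own map

	fmt.Println(orig.Equals(clone))                   // false: MAC and ExternalIDs now differ
	fmt.Println(orig.MAC[0], orig.ExternalIDs["pod"]) // original values are intact
}
```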
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const PortGroupTable = "Port_Group" + +// PortGroup defines an object in Port_Group table +type PortGroup struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Ports []string `ovsdb:"ports"` +} + +func (a *PortGroup) GetUUID() string { + return a.UUID +} + +func (a *PortGroup) GetName() string { + return a.Name +} + +func (a *PortGroup) GetPorts() []string { + return a.Ports +} + +func copyPortGroupPorts(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalPortGroupPorts(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *PortGroup) DeepCopyInto(b *PortGroup) { + *b = *a + b.Ports = copyPortGroupPorts(a.Ports) +} + +func (a *PortGroup) DeepCopy() *PortGroup { + b := new(PortGroup) + a.DeepCopyInto(b) + return b +} + +func (a *PortGroup) CloneModelInto(b model.Model) { + c := b.(*PortGroup) + a.DeepCopyInto(c) +} + +func (a *PortGroup) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *PortGroup) Equals(b *PortGroup) bool { + return a.UUID == b.UUID && + a.Name == b.Name && + equalPortGroupPorts(a.Ports, b.Ports) +} + +func (a *PortGroup) EqualsModel(b model.Model) bool { + c := b.(*PortGroup) + return a.Equals(c) +} + +var _ model.CloneableModel = &PortGroup{} +var _ model.ComparableModel = &PortGroup{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go new file mode 100644 index 000000000..9d760527e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_permission.go @@ -0,0 +1,122 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
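(Aside: one subtlety in the generated equal* helpers is the `(a == nil) != (b == nil)` guard, which makes a nil slice and an empty, non-nil slice compare as different even though both hold zero elements. A small hedged illustration with PortGroup, using made-up row values.)

```go
package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
	a := &sbdb.PortGroup{UUID: "pg-1", Name: "pg", Ports: nil}        // ports column never set
	b := &sbdb.PortGroup{UUID: "pg-1", Name: "pg", Ports: []string{}} // ports column set, but empty

	// equalPortGroupPorts rejects nil-vs-non-nil before comparing lengths,
	// so these two rows are not Equals despite both having zero ports.
	fmt.Println(a.Equals(b)) // false
}
```

Callers constructing expected rows for comparison therefore need to be consistent about nil versus empty collections.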
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const RBACPermissionTable = "RBAC_Permission" + +// RBACPermission defines an object in RBAC_Permission table +type RBACPermission struct { + UUID string `ovsdb:"_uuid"` + Authorization []string `ovsdb:"authorization"` + InsertDelete bool `ovsdb:"insert_delete"` + Table string `ovsdb:"table"` + Update []string `ovsdb:"update"` +} + +func (a *RBACPermission) GetUUID() string { + return a.UUID +} + +func (a *RBACPermission) GetAuthorization() []string { + return a.Authorization +} + +func copyRBACPermissionAuthorization(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalRBACPermissionAuthorization(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *RBACPermission) GetInsertDelete() bool { + return a.InsertDelete +} + +func (a *RBACPermission) GetTable() string { + return a.Table +} + +func (a *RBACPermission) GetUpdate() []string { + return a.Update +} + +func copyRBACPermissionUpdate(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalRBACPermissionUpdate(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *RBACPermission) DeepCopyInto(b *RBACPermission) { + *b = *a + b.Authorization = copyRBACPermissionAuthorization(a.Authorization) + b.Update = copyRBACPermissionUpdate(a.Update) +} + +func (a *RBACPermission) DeepCopy() *RBACPermission { + b := new(RBACPermission) + a.DeepCopyInto(b) + return b +} + +func (a *RBACPermission) CloneModelInto(b model.Model) { + c := b.(*RBACPermission) + a.DeepCopyInto(c) +} + +func (a *RBACPermission) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *RBACPermission) Equals(b *RBACPermission) bool { + return a.UUID == b.UUID && + equalRBACPermissionAuthorization(a.Authorization, b.Authorization) && + a.InsertDelete == b.InsertDelete && + a.Table == b.Table && + equalRBACPermissionUpdate(a.Update, b.Update) +} + +func (a *RBACPermission) EqualsModel(b model.Model) bool { + c := b.(*RBACPermission) + return a.Equals(c) +} + +var _ model.CloneableModel = &RBACPermission{} +var _ model.ComparableModel = &RBACPermission{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go new file mode 100644 index 000000000..ce8798645 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/rbac_role.go @@ -0,0 +1,87 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
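(Aside: CloneModel and EqualsModel adapt the typed helpers to libovsdb's generic model.Model interface, so cache and comparison code can handle rows of any table uniformly. A sketch of that indirection, assuming only the interface assertions visible in the generated files; the row values are hypothetical.)

```go
package main

import (
	"fmt"

	"github.com/ovn-org/libovsdb/model"
	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

// snapshot works for any generated sbdb row, since every model asserts
// that it satisfies model.CloneableModel.
func snapshot(m model.CloneableModel) model.Model {
	return m.CloneModel()
}

func main() {
	perm := &sbdb.RBACPermission{UUID: "perm-1", Table: "Chassis", InsertDelete: true}
	saved := snapshot(perm)

	perm.InsertDelete = false
	// EqualsModel type-asserts back to *RBACPermission internally, so both
	// values must come from the same table; mixing tables would panic.
	fmt.Println(perm.EqualsModel(saved)) // false: the snapshot kept the old value
}
```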
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const RBACRoleTable = "RBAC_Role" + +// RBACRole defines an object in RBAC_Role table +type RBACRole struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Permissions map[string]string `ovsdb:"permissions"` +} + +func (a *RBACRole) GetUUID() string { + return a.UUID +} + +func (a *RBACRole) GetName() string { + return a.Name +} + +func (a *RBACRole) GetPermissions() map[string]string { + return a.Permissions +} + +func copyRBACRolePermissions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalRBACRolePermissions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *RBACRole) DeepCopyInto(b *RBACRole) { + *b = *a + b.Permissions = copyRBACRolePermissions(a.Permissions) +} + +func (a *RBACRole) DeepCopy() *RBACRole { + b := new(RBACRole) + a.DeepCopyInto(b) + return b +} + +func (a *RBACRole) CloneModelInto(b model.Model) { + c := b.(*RBACRole) + a.DeepCopyInto(c) +} + +func (a *RBACRole) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *RBACRole) Equals(b *RBACRole) bool { + return a.UUID == b.UUID && + a.Name == b.Name && + equalRBACRolePermissions(a.Permissions, b.Permissions) +} + +func (a *RBACRole) EqualsModel(b model.Model) bool { + c := b.(*RBACRole) + return a.Equals(c) +} + +var _ model.CloneableModel = &RBACRole{} +var _ model.ComparableModel = &RBACRole{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go new file mode 100644 index 000000000..2374478db --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/sb_global.go @@ -0,0 +1,182 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
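(Aside: CloneModelInto is the allocation-reusing variant of CloneModel; it deep-copies into a struct the caller already owns, which cache layers can use to refresh a row in place. A short sketch with RBACRole, values made up, showing that map columns are copied rather than shared.)

```go
package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
	src := &sbdb.RBACRole{
		UUID:        "role-1", // hypothetical values
		Name:        "ovn-controller",
		Permissions: map[string]string{"Chassis": "perm-uuid-1"},
	}

	dst := &sbdb.RBACRole{}
	src.CloneModelInto(dst) // deep copy into caller-owned memory

	src.Permissions["Chassis"] = "perm-uuid-2"
	fmt.Println(dst.Permissions["Chassis"]) // "perm-uuid-1": the map was copied, not shared
}
```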
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const SBGlobalTable = "SB_Global" + +// SBGlobal defines an object in SB_Global table +type SBGlobal struct { + UUID string `ovsdb:"_uuid"` + Connections []string `ovsdb:"connections"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + Ipsec bool `ovsdb:"ipsec"` + NbCfg int `ovsdb:"nb_cfg"` + Options map[string]string `ovsdb:"options"` + SSL *string `ovsdb:"ssl"` +} + +func (a *SBGlobal) GetUUID() string { + return a.UUID +} + +func (a *SBGlobal) GetConnections() []string { + return a.Connections +} + +func copySBGlobalConnections(a []string) []string { + if a == nil { + return nil + } + b := make([]string, len(a)) + copy(b, a) + return b +} + +func equalSBGlobalConnections(a, b []string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return true +} + +func (a *SBGlobal) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySBGlobalExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSBGlobalExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SBGlobal) GetIpsec() bool { + return a.Ipsec +} + +func (a *SBGlobal) GetNbCfg() int { + return a.NbCfg +} + +func (a *SBGlobal) GetOptions() map[string]string { + return a.Options +} + +func copySBGlobalOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSBGlobalOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SBGlobal) GetSSL() *string { + return a.SSL +} + +func copySBGlobalSSL(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalSBGlobalSSL(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *SBGlobal) DeepCopyInto(b *SBGlobal) { + *b = *a + b.Connections = copySBGlobalConnections(a.Connections) + b.ExternalIDs = copySBGlobalExternalIDs(a.ExternalIDs) + b.Options = copySBGlobalOptions(a.Options) + b.SSL = copySBGlobalSSL(a.SSL) +} + +func (a *SBGlobal) DeepCopy() *SBGlobal { + b := new(SBGlobal) + a.DeepCopyInto(b) + return b +} + +func (a *SBGlobal) CloneModelInto(b model.Model) { + c := b.(*SBGlobal) + a.DeepCopyInto(c) +} + +func (a *SBGlobal) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SBGlobal) Equals(b *SBGlobal) bool { + return a.UUID == b.UUID && + equalSBGlobalConnections(a.Connections, b.Connections) && + equalSBGlobalExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.Ipsec == b.Ipsec && + a.NbCfg == b.NbCfg && + equalSBGlobalOptions(a.Options, b.Options) && + equalSBGlobalSSL(a.SSL, b.SSL) +} + +func (a *SBGlobal) EqualsModel(b model.Model) bool { + c := b.(*SBGlobal) + return a.Equals(c) +} + +var _ model.CloneableModel = &SBGlobal{} +var _ model.ComparableModel = &SBGlobal{} diff --git 
a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go new file mode 100644 index 000000000..d3e118868 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/service_monitor.go @@ -0,0 +1,213 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const ServiceMonitorTable = "Service_Monitor" + +type ( + ServiceMonitorProtocol = string + ServiceMonitorStatus = string +) + +var ( + ServiceMonitorProtocolTCP ServiceMonitorProtocol = "tcp" + ServiceMonitorProtocolUDP ServiceMonitorProtocol = "udp" + ServiceMonitorStatusOnline ServiceMonitorStatus = "online" + ServiceMonitorStatusOffline ServiceMonitorStatus = "offline" + ServiceMonitorStatusError ServiceMonitorStatus = "error" +) + +// ServiceMonitor defines an object in Service_Monitor table +type ServiceMonitor struct { + UUID string `ovsdb:"_uuid"` + ChassisName string `ovsdb:"chassis_name"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IP string `ovsdb:"ip"` + LogicalPort string `ovsdb:"logical_port"` + Options map[string]string `ovsdb:"options"` + Port int `ovsdb:"port"` + Protocol *ServiceMonitorProtocol `ovsdb:"protocol"` + SrcIP string `ovsdb:"src_ip"` + SrcMAC string `ovsdb:"src_mac"` + Status *ServiceMonitorStatus `ovsdb:"status"` +} + +func (a *ServiceMonitor) GetUUID() string { + return a.UUID +} + +func (a *ServiceMonitor) GetChassisName() string { + return a.ChassisName +} + +func (a *ServiceMonitor) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyServiceMonitorExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalServiceMonitorExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ServiceMonitor) GetIP() string { + return a.IP +} + +func (a *ServiceMonitor) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *ServiceMonitor) GetOptions() map[string]string { + return a.Options +} + +func copyServiceMonitorOptions(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalServiceMonitorOptions(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ServiceMonitor) GetPort() int { + return a.Port +} + +func (a *ServiceMonitor) GetProtocol() *ServiceMonitorProtocol { + return a.Protocol +} + +func copyServiceMonitorProtocol(a *ServiceMonitorProtocol) *ServiceMonitorProtocol { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalServiceMonitorProtocol(a, b *ServiceMonitorProtocol) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ServiceMonitor) GetSrcIP() string { + return a.SrcIP +} + +func (a *ServiceMonitor) GetSrcMAC() string { + return a.SrcMAC +} + +func (a *ServiceMonitor) GetStatus() *ServiceMonitorStatus { + return a.Status +} + +func 
copyServiceMonitorStatus(a *ServiceMonitorStatus) *ServiceMonitorStatus { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalServiceMonitorStatus(a, b *ServiceMonitorStatus) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *ServiceMonitor) DeepCopyInto(b *ServiceMonitor) { + *b = *a + b.ExternalIDs = copyServiceMonitorExternalIDs(a.ExternalIDs) + b.Options = copyServiceMonitorOptions(a.Options) + b.Protocol = copyServiceMonitorProtocol(a.Protocol) + b.Status = copyServiceMonitorStatus(a.Status) +} + +func (a *ServiceMonitor) DeepCopy() *ServiceMonitor { + b := new(ServiceMonitor) + a.DeepCopyInto(b) + return b +} + +func (a *ServiceMonitor) CloneModelInto(b model.Model) { + c := b.(*ServiceMonitor) + a.DeepCopyInto(c) +} + +func (a *ServiceMonitor) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ServiceMonitor) Equals(b *ServiceMonitor) bool { + return a.UUID == b.UUID && + a.ChassisName == b.ChassisName && + equalServiceMonitorExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IP == b.IP && + a.LogicalPort == b.LogicalPort && + equalServiceMonitorOptions(a.Options, b.Options) && + a.Port == b.Port && + equalServiceMonitorProtocol(a.Protocol, b.Protocol) && + a.SrcIP == b.SrcIP && + a.SrcMAC == b.SrcMAC && + equalServiceMonitorStatus(a.Status, b.Status) +} + +func (a *ServiceMonitor) EqualsModel(b model.Model) bool { + c := b.(*ServiceMonitor) + return a.Equals(c) +} + +var _ model.CloneableModel = &ServiceMonitor{} +var _ model.ComparableModel = &ServiceMonitor{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go new file mode 100644 index 000000000..3fab5fd1e --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/ssl.go @@ -0,0 +1,117 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
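(Aside: because the protocol and status columns are optional enums in the schema, modelgen emits them as pointers to string aliases with the legal values predeclared above. Setting one means pointing at a copy of the package-level value; a hedged sketch with hypothetical field values follows.)

```go
package main

import (
	"fmt"

	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb"
)

func main() {
	proto := sbdb.ServiceMonitorProtocolTCP // local copy of the package-level value
	sm := &sbdb.ServiceMonitor{
		UUID:        "sm-1", // hypothetical values
		IP:          "10.96.0.10",
		Port:        53,
		LogicalPort: "ns_pod",
		Protocol:    &proto, // optional column: nil would mean "unset"
	}
	fmt.Println(*sm.GetProtocol()) // tcp
}
```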
+ +package sbdb + +import "github.com/ovn-org/libovsdb/model" + +const SSLTable = "SSL" + +// SSL defines an object in SSL table +type SSL struct { + UUID string `ovsdb:"_uuid"` + BootstrapCaCert bool `ovsdb:"bootstrap_ca_cert"` + CaCert string `ovsdb:"ca_cert"` + Certificate string `ovsdb:"certificate"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + PrivateKey string `ovsdb:"private_key"` + SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLProtocols string `ovsdb:"ssl_protocols"` +} + +func (a *SSL) GetUUID() string { + return a.UUID +} + +func (a *SSL) GetBootstrapCaCert() bool { + return a.BootstrapCaCert +} + +func (a *SSL) GetCaCert() string { + return a.CaCert +} + +func (a *SSL) GetCertificate() string { + return a.Certificate +} + +func (a *SSL) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copySSLExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalSSLExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *SSL) GetPrivateKey() string { + return a.PrivateKey +} + +func (a *SSL) GetSSLCiphers() string { + return a.SSLCiphers +} + +func (a *SSL) GetSSLProtocols() string { + return a.SSLProtocols +} + +func (a *SSL) DeepCopyInto(b *SSL) { + *b = *a + b.ExternalIDs = copySSLExternalIDs(a.ExternalIDs) +} + +func (a *SSL) DeepCopy() *SSL { + b := new(SSL) + a.DeepCopyInto(b) + return b +} + +func (a *SSL) CloneModelInto(b model.Model) { + c := b.(*SSL) + a.DeepCopyInto(c) +} + +func (a *SSL) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *SSL) Equals(b *SSL) bool { + return a.UUID == b.UUID && + a.BootstrapCaCert == b.BootstrapCaCert && + a.CaCert == b.CaCert && + a.Certificate == b.Certificate && + equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.PrivateKey == b.PrivateKey && + a.SSLCiphers == b.SSLCiphers && + a.SSLProtocols == b.SSLProtocols +} + +func (a *SSL) EqualsModel(b model.Model) bool { + c := b.(*SSL) + return a.Equals(c) +} + +var _ model.CloneableModel = &SSL{} +var _ model.ComparableModel = &SSL{} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go new file mode 100644 index 000000000..370968f60 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb/static_mac_binding.go @@ -0,0 +1,78 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+
+package sbdb
+
+import "github.com/ovn-org/libovsdb/model"
+
+const StaticMACBindingTable = "Static_MAC_Binding"
+
+// StaticMACBinding defines an object in Static_MAC_Binding table
+type StaticMACBinding struct {
+	UUID               string `ovsdb:"_uuid"`
+	Datapath           string `ovsdb:"datapath"`
+	IP                 string `ovsdb:"ip"`
+	LogicalPort        string `ovsdb:"logical_port"`
+	MAC                string `ovsdb:"mac"`
+	OverrideDynamicMAC bool   `ovsdb:"override_dynamic_mac"`
+}
+
+func (a *StaticMACBinding) GetUUID() string {
+	return a.UUID
+}
+
+func (a *StaticMACBinding) GetDatapath() string {
+	return a.Datapath
+}
+
+func (a *StaticMACBinding) GetIP() string {
+	return a.IP
+}
+
+func (a *StaticMACBinding) GetLogicalPort() string {
+	return a.LogicalPort
+}
+
+func (a *StaticMACBinding) GetMAC() string {
+	return a.MAC
+}
+
+func (a *StaticMACBinding) GetOverrideDynamicMAC() bool {
+	return a.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) DeepCopyInto(b *StaticMACBinding) {
+	*b = *a
+}
+
+func (a *StaticMACBinding) DeepCopy() *StaticMACBinding {
+	b := new(StaticMACBinding)
+	a.DeepCopyInto(b)
+	return b
+}
+
+func (a *StaticMACBinding) CloneModelInto(b model.Model) {
+	c := b.(*StaticMACBinding)
+	a.DeepCopyInto(c)
+}
+
+func (a *StaticMACBinding) CloneModel() model.Model {
+	return a.DeepCopy()
+}
+
+func (a *StaticMACBinding) Equals(b *StaticMACBinding) bool {
+	return a.UUID == b.UUID &&
+		a.Datapath == b.Datapath &&
+		a.IP == b.IP &&
+		a.LogicalPort == b.LogicalPort &&
+		a.MAC == b.MAC &&
+		a.OverrideDynamicMAC == b.OverrideDynamicMAC
+}
+
+func (a *StaticMACBinding) EqualsModel(b model.Model) bool {
+	c := b.(*StaticMACBinding)
+	return a.Equals(c)
+}
+
+var _ model.CloneableModel = &StaticMACBinding{}
+var _ model.ComparableModel = &StaticMACBinding{}
diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go
new file mode 100644
index 000000000..d014dc3a3
--- /dev/null
+++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/const.go
@@ -0,0 +1,250 @@
+package types
+
+import "time"
+
+const (
+	// Default network name
+	DefaultNetworkName    = "default"
+	K8sPrefix             = "k8s-"
+	HybridOverlayPrefix   = "int-"
+	HybridOverlayGRSubfix = "-gr"
+
+	// K8sMgmtIntfNamePrefix name to be used as an OVS internal port on the node as prefix for networks
+	K8sMgmtIntfNamePrefix = "ovn-k8s-mp"
+
+	// UDNVRFDeviceSuffix vrf device suffix associated with every user defined primary network.
+	UDNVRFDeviceSuffix = "-udn-vrf"
+	// UDNVRFDevicePrefix vrf device prefix associated with every user defined primary network.
+	UDNVRFDevicePrefix = "mp"
+
+	// K8sMgmtIntfName name to be used as an OVS internal port on the node
+	K8sMgmtIntfName = K8sMgmtIntfNamePrefix + "0"
+
+	// PhysicalNetworkName is the name that maps to an OVS bridge that provides
+	// access to physical/external network
+	PhysicalNetworkName     = "physnet"
+	PhysicalNetworkExGwName = "exgwphysnet"
+
+	// LocalNetworkName is the name that maps to an OVS bridge that provides
+	// access to local service
+	LocalNetworkName = "locnet"
+
+	// Local Bridge used for DGP access
+	LocalBridgeName            = "br-local"
+	LocalnetGatewayNextHopPort = "ovn-k8s-gw0"
+
+	// OVS Bridge Datapath types
+	DatapathUserspace = "netdev"
+
+	// types.OVNClusterRouter is the name of the distributed router
+	OVNClusterRouter = "ovn_cluster_router"
+	OVNJoinSwitch    = "join"
+
+	JoinSwitchPrefix             = "join_"
+	ExternalSwitchPrefix         = "ext_"
+	GWRouterPrefix               = "GR_"
+	GWRouterLocalLBPostfix       = "_local"
+	RouterToSwitchPrefix         = "rtos-"
+	InterPrefix                  = "inter-"
+	HybridSubnetPrefix           = "hybrid-subnet-"
+	SwitchToRouterPrefix         = "stor-"
+	JoinSwitchToGWRouterPrefix   = "jtor-"
+	GWRouterToJoinSwitchPrefix   = "rtoj-"
+	DistRouterToJoinSwitchPrefix = "dtoj-"
+	JoinSwitchToDistRouterPrefix = "jtod-"
+	EXTSwitchToGWRouterPrefix    = "etor-"
+	GWRouterToExtSwitchPrefix    = "rtoe-"
+	EgressGWSwitchPrefix         = "exgw-"
+	PatchPortPrefix              = "patch-"
+	PatchPortSuffix              = "-to-br-int"
+
+	NodeLocalSwitch = "node_local_switch"
+
+	// types.OVNLayer2Switch is the name of layer2 topology switch
+	OVNLayer2Switch = "ovn_layer2_switch"
+	// types.OVNLocalnetSwitch is the name of localnet topology switch
+	OVNLocalnetSwitch = "ovn_localnet_switch"
+	// types.OVNLocalnetPort is the name of localnet topology localnet port
+	OVNLocalnetPort = "ovn_localnet_port"
+
+	TransitSwitch               = "transit_switch"
+	TransitSwitchToRouterPrefix = "tstor-"
+	RouterToTransitSwitchPrefix = "rtots-"
+
+	// ACL Default Tier Priorities
+
+	// Default routed multicast allow acl rule priority
+	DefaultRoutedMcastAllowPriority = 1013
+	// Default multicast allow acl rule priority
+	DefaultMcastAllowPriority = 1012
+	// Default multicast deny acl rule priority
+	DefaultMcastDenyPriority = 1011
+	// Default allow acl rule priority
+	DefaultAllowPriority = 1001
+	// Default deny acl rule priority
+	DefaultDenyPriority = 1000
+
+	// ACL PlaceHolderACL Tier Priorities
+	PrimaryUDNAllowPriority = 1001
+	// Default deny acl rule priority
+	PrimaryUDNDenyPriority = 1000
+
+	// ACL Tiers
+	// Tier 0 is called Primary as it is evaluated before any other feature-related Tiers.
+	// Currently used for User Defined Network Feature.
+	// NOTE: When we upgrade from an OVN version without tiers to the new version with
+	// tiers, all values in the new ACL.Tier column will be set to 0.
+ PrimaryACLTier = 0 + // Default Tier for all ACLs + DefaultACLTier = 2 + // Default Tier for all ACLs belonging to Admin Network Policy + DefaultANPACLTier = 1 + // Default Tier for all ACLs belonging to Baseline Admin Network Policy + DefaultBANPACLTier = 3 + + // priority of logical router policies on the OVNClusterRouter + EgressFirewallStartPriority = 10000 + MinimumReservedEgressFirewallPriority = 2000 + MGMTPortPolicyPriority = "1005" + NodeSubnetPolicyPriority = "1004" + InterNodePolicyPriority = "1003" + UDNHostCIDRPolicyPriority = "99" + HybridOverlaySubnetPriority = 1002 + HybridOverlayReroutePriority = 501 + DefaultNoRereoutePriority = 102 + EgressSVCReroutePriority = 101 + EgressIPReroutePriority = 100 + EgressIPRerouteQoSRulePriority = 103 + EgressLiveMigrationReroutePiority = 10 + + // EndpointSliceMirrorControllerName mirror EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label) + EndpointSliceMirrorControllerName = "endpointslice-mirror-controller.k8s.ovn.org" + // EndpointSliceDefaultControllerName default kubernetes EndpointSlice controller name (used as a value for the "endpointslice.kubernetes.io/managed-by" label) + EndpointSliceDefaultControllerName = "endpointslice-controller.k8s.io" + // LabelSourceEndpointSlice label key used in mirrored EndpointSlice + // that has the value of the default EndpointSlice name + LabelSourceEndpointSlice = "k8s.ovn.org/source-endpointslice" + // LabelSourceEndpointSliceVersion label key used in mirrored EndpointSlice + // that has the value of the last known default EndpointSlice ResourceVersion + LabelSourceEndpointSliceVersion = "k8s.ovn.org/source-endpointslice-version" + // LabelUserDefinedEndpointSliceNetwork label key used in mirrored EndpointSlices that contains the current primary user defined network name + LabelUserDefinedEndpointSliceNetwork = "k8s.ovn.org/endpointslice-network" + // LabelUserDefinedServiceName label key used in mirrored EndpointSlices that contains the service name matching the EndpointSlice + LabelUserDefinedServiceName = "k8s.ovn.org/service-name" + + // Packet marking + EgressIPNodeConnectionMark = "1008" + EgressIPReplyTrafficConnectionMark = 42 + + // primary user defined network's default join subnet value + // users can configure custom values using NADs + UserDefinedPrimaryNetworkJoinSubnetV4 = "100.65.0.0/16" + UserDefinedPrimaryNetworkJoinSubnetV6 = "fd99::/64" + + // OpenFlow and Networking constants + RouteAdvertisementICMPType = 134 + NeighborAdvertisementICMPType = 136 + + // Meter constants + OvnACLLoggingMeter = "acl-logging" + OvnRateLimitingMeter = "rate-limiter" + PacketsPerSecond = "pktps" + MeterAction = "drop" + + // OVN-K8S annotation & taint constants + OvnK8sPrefix = "k8s.ovn.org" + // Deprecated: we used to set topology version as an annotation on the node. We don't do this anymore. + OvnK8sTopoAnno = OvnK8sPrefix + "/" + "topology-version" + OvnK8sSmallMTUTaintKey = OvnK8sPrefix + "/" + "mtu-too-small" + + // name of the configmap used to synchronize status (e.g. 
watch for topology changes)
+	OvnK8sStatusCMName         = "control-plane-status"
+	OvnK8sStatusKeyTopoVersion = "topology-version"
+
+	// Monitoring constants
+	SFlowAgent = "ovn-k8s-mp0"
+
+	// OVNKube-Node Node types
+	NodeModeFull    = "full"
+	NodeModeDPU     = "dpu"
+	NodeModeDPUHost = "dpu-host"
+
+	// Geneve header length for IPv4 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823)
+	GeneveHeaderLengthIPv4 = 58
+	// Geneve header length for IPv6 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823)
+	GeneveHeaderLengthIPv6 = GeneveHeaderLengthIPv4 + 20
+
+	ClusterPortGroupNameBase    = "clusterPortGroup"
+	ClusterRtrPortGroupNameBase = "clusterRtrPortGroup"
+
+	OVSDBTimeout     = 10 * time.Second
+	OVSDBWaitTimeout = 0
+
+	ClusterLBGroupName       = "clusterLBGroup"
+	ClusterSwitchLBGroupName = "clusterSwitchLBGroup"
+	ClusterRouterLBGroupName = "clusterRouterLBGroup"
+
+	// key for network name external-id
+	NetworkExternalID = OvnK8sPrefix + "/" + "network"
+	// key for node name external-id
+	NodeExternalID = OvnK8sPrefix + "/" + "node"
+	// key for network role external-id: possible values are "default", "primary", "secondary"
+	NetworkRoleExternalID = OvnK8sPrefix + "/" + "role"
+	// key for NAD name external-id, only used for secondary logical switch port of a pod
+	NADExternalID = OvnK8sPrefix + "/" + "nad"
+	// key for topology type external-id, only used for secondary network logical entities
+	TopologyExternalID = OvnK8sPrefix + "/" + "topology"
+	// key for load_balancer kind external-id
+	LoadBalancerKindExternalID = OvnK8sPrefix + "/" + "kind"
+	// key for load_balancer service external-id
+	LoadBalancerOwnerExternalID = OvnK8sPrefix + "/" + "owner"
+	// key for UDN enabled services routes
+	UDNEnabledServiceExternalID = OvnK8sPrefix + "/" + "udn-enabled-default-service"
+
+	// different secondary network topology type defined in CNI netconf
+	Layer3Topology   = "layer3"
+	Layer2Topology   = "layer2"
+	LocalnetTopology = "localnet"
+
+	// different types of network roles
+	// defined in CNI netconf as a user defined network
+	NetworkRolePrimary   = "primary"
+	NetworkRoleSecondary = "secondary"
+	NetworkRoleDefault   = "default"
+	// defined internally by ovnkube to recognize "default"
+	// network's role as an "infrastructure-locked" network
+	// when a user defined network is the primary network for
+	// the pod, which makes the "default" network neither primary
+	// nor secondary
+	NetworkRoleInfrastructure = "infrastructure-locked"
+
+	// db index keys
+	// PrimaryIDKey is used as a primary client index
+	PrimaryIDKey = OvnK8sPrefix + "/id"
+
+	OvnDefaultZone = "global"
+
+	// EgressService "reserved" hosts - when set on an EgressService they have a special meaning
+
+	EgressServiceNoHost     = ""    // set on services with no allocated node
+	EgressServiceNoSNATHost = "ALL" // set on services with sourceIPBy=Network
+
+	// MaxLogicalPortTunnelKey is the maximum tunnel key that can be requested for a
+	// Logical Switch or Router Port
+	MaxLogicalPortTunnelKey = 32767
+
+	// InformerSyncTimeout is used when waiting for the initial informer cache sync
+	// (i.e. all existing objects should be listed by the informer).
+	// It allows ~4 list() retries with the default reflector exponential backoff config
+	InformerSyncTimeout = 20 * time.Second
+
+	// HandlerSyncTimeout is used when waiting for initial object handler sync.
+	// (i.e.
all the ADD events should be processed for the existing objects by the event handler) + HandlerSyncTimeout = 20 * time.Second + + // GRMACBindingAgeThreshold is the lifetime in seconds of each MAC binding + // entry for the gateway routers. After this time, the entry is removed and + // may be refreshed with a new ARP request. + GRMACBindingAgeThreshold = "300" +) diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go new file mode 100644 index 000000000..566f03fa9 --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/errors.go @@ -0,0 +1,44 @@ +package types + +import ( + "errors" + "fmt" + + kerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type SuppressedError struct { + Inner error +} + +func (e *SuppressedError) Error() string { + return fmt.Sprintf("suppressed error logged: %v", e.Inner.Error()) +} + +func (e *SuppressedError) Unwrap() error { + return e.Inner +} + +func NewSuppressedError(err error) error { + return &SuppressedError{ + Inner: err, + } +} + +func IsSuppressedError(err error) bool { + var suppressedError *SuppressedError + // errors.As() is not supported with Aggregate type error. Aggregate.Errors() converts an + // Aggregate type error into a slice of builtin error and then errors.As() can be used + if agg, ok := err.(kerrors.Aggregate); ok && err != nil { + suppress := false + for _, err := range agg.Errors() { + if errors.As(err, &suppressedError) { + suppress = true + } else { + return false + } + } + return suppress + } + return errors.As(err, &suppressedError) +} diff --git a/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go new file mode 100644 index 000000000..2a69fd57c --- /dev/null +++ b/vendor/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types/resource_status.go @@ -0,0 +1,21 @@ +package types + +import ( + "fmt" + "strings" +) + +// this file defines error messages that are used to figure out if a resource reconciliation failed +const ( + APBRouteErrorMsg = "failed to apply policy" + EgressFirewallErrorMsg = "EgressFirewall Rules not correctly applied" + EgressQoSErrorMsg = "EgressQoS Rules not correctly applied" +) + +func GetZoneStatus(zoneID, message string) string { + return fmt.Sprintf("%s: %s", zoneID, message) +} + +func GetZoneFromStatus(status string) string { + return strings.Split(status, ":")[0] +} diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore new file mode 100644 index 000000000..5d7e88de0 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/.gitignore @@ -0,0 +1,36 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea + +fuzz/*.zip diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE new file mode 100644 index 000000000..bd899d835 --- /dev/null +++ 
b/vendor/github.com/pierrec/lz4/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 000000000..4629c9d0e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,92 @@
+# lz4 : LZ4 compression in pure Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4)
+[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4/v4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/v4/cmd/lz4c
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors + +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go new file mode 100644 index 000000000..fec8adb03 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go @@ -0,0 +1,481 @@ +package lz4block + +import ( + "encoding/binary" + "math/bits" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +const ( + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + *e = lz4errors.ErrInvalidSourceShortBuffer + } +} + +// blockHash hashes the lower 6 bytes into a value < htSize. +func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) +} + +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +func UncompressBlock(src, dst, dict []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src, dict); di >= 0 { + return di, nil + } + return 0, lz4errors.ErrInvalidSourceShortBuffer +} + +type Compressor struct { + // Offsets are at most 64kiB, so we can store only the lower 16 bits of + // match positions: effectively, an offset from some 64kiB block boundary. + // + // When we retrieve such an offset, we interpret it as relative to the last + // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000, + // depending on which of these is inside the current window. If a table + // entry was generated more than 64kiB back in the input, we find out by + // inspecting the input stream. + table [htSize]uint16 + + // Bitmap indicating which positions in the table are in use. + // This allows us to quickly reset the table for reuse, + // without having to zero everything. + inUse [htSize / 32]uint32 +} + +// Get returns the position of a presumptive match for the hash h. 
+// The match may be a false positive due to a hash collision or an old entry. +// If si < winSize, the return value may be negative. +func (c *Compressor) get(h uint32, si int) int { + h &= htSize - 1 + i := 0 + if c.inUse[h/32]&(1<<(h%32)) != 0 { + i = int(c.table[h]) + } + i += si &^ winMask + if i >= si { + // Try previous 64kiB block (negative when in first block). + i -= winSize + } + return i +} + +func (c *Compressor) put(h uint32, si int) { + h &= htSize - 1 + c.table[h] = uint16(si) + c.inUse[h/32] |= 1 << (h % 32) +} + +func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} } + +var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }} + +func CompressBlock(src, dst []byte) (int, error) { + c := compressorPool.Get().(*Compressor) + n, err := c.CompressBlock(src, dst) + compressorPool.Put(c) + return n, err +} + +func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.reset() + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. + ref := c.get(h, si) + ref2 := c.get(h2, si+1) + c.put(h, si) + c.put(h2, si+1) + + offset := si - ref + + if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref3 := c.get(h, si+2) + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref3 + c.put(h, si) + + if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. + mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 <= sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. 
+ si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF && di < len(dst); l -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(l) + } + di++ + + // Literals. + if di+lLen > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + if di > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + c.put(h, si-2) + } + +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + if di >= len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + if di+len(src)-anchor > len(dst) { + return 0, lz4errors.ErrInvalidSourceShortBuffer + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + +type CompressorHC struct { + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + hashTable, chainTable [htSize]int + needsReset bool +} + +var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }} + +func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) { + c := compressorHCPool.Get().(*CompressorHC) + n, err := c.CompressBlock(src, dst, depth) + compressorHCPool.Put(c) + return n, err +} + +func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) { + if c.needsReset { + // Zero out reused table to avoid non-deterministic output (issue #65). + c.hashTable = [htSize]int{} + c.chainTable = [htSize]int{} + } + c.needsReset = true // Only false on first call. + + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. 
+	// bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	if depth == 0 {
+		depth = winSize
+	}
+
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+			// must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous one with the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or no better than the current one.
+				continue
+			}
+			// Keep the longest match so far and try another previous position.
+			mLen = ml
+			offset = si - next
+		}
+
+		// No match found: skip ahead, faster and faster on incompressible data.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			c.chainTable[si&winMask] = c.hashTable[h]
+			c.hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+lastLiterals:
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 000000000..a1bfa99e4
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,90 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+	Block64Kb uint32 = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+)
+
+// In legacy mode all blocks are compressed regardless
+// of the compressed size: use the bound size.
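
Both compressors above emit the same sequence layout: a token byte holding two 4-bit lengths, each extended with 0xFF continuation bytes when it saturates. The following is a standalone sketch of just that length encoding; the function and variable names are mine, not part of the package, and the real encoders write directly into `dst` as shown in the diff.

```go
package main

import "fmt"

// encodeSequenceHeader returns the token byte and extra length bytes for a
// sequence with lLen literal bytes and mLen match bytes beyond minMatch.
// In the real format the match's extra bytes follow the 2-byte offset; they
// are appended directly here only to show the shared 0xFF run-length scheme.
func encodeSequenceHeader(lLen, mLen int) []byte {
	out := []byte{0}
	if lLen < 0xF {
		out[0] |= byte(lLen) << 4
	} else {
		out[0] |= 0xF0
		for l := lLen - 0xF; ; l -= 0xFF {
			if l < 0xFF {
				out = append(out, byte(l))
				break
			}
			out = append(out, 0xFF)
		}
	}
	if mLen < 0xF {
		out[0] |= byte(mLen)
	} else {
		out[0] |= 0xF
		for m := mLen - 0xF; ; m -= 0xFF {
			if m < 0xFF {
				out = append(out, byte(m))
				break
			}
			out = append(out, 0xFF)
		}
	}
	return out
}

func main() {
	fmt.Printf("% x\n", encodeSequenceHeader(20, 300)) // ff 05 ff 1e
}
```
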
+var Block8Mb = uint32(CompressBlockBound(8 << 20)) + +var ( + BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} + BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} + BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }} + BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }} + BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }} +) + +func Index(b uint32) BlockSizeIndex { + switch b { + case Block64Kb: + return 4 + case Block256Kb: + return 5 + case Block1Mb: + return 6 + case Block4Mb: + return 7 + case Block8Mb: // only valid in legacy mode + return 3 + } + return 0 +} + +func IsValid(b uint32) bool { + return Index(b) > 0 +} + +type BlockSizeIndex uint8 + +func (b BlockSizeIndex) IsValid() bool { + switch b { + case 4, 5, 6, 7: + return true + } + return false +} + +func (b BlockSizeIndex) Get() []byte { + var buf interface{} + switch b { + case 4: + buf = BlockPool64K.Get() + case 5: + buf = BlockPool256K.Get() + case 6: + buf = BlockPool1M.Get() + case 7: + buf = BlockPool4M.Get() + case 3: + buf = BlockPool8M.Get() + } + return buf.([]byte) +} + +func Put(buf []byte) { + // Safeguard: do not allow invalid buffers. + switch c := cap(buf); uint32(c) { + case Block64Kb: + BlockPool64K.Put(buf[:c]) + case Block256Kb: + BlockPool256K.Put(buf[:c]) + case Block1Mb: + BlockPool1M.Put(buf[:c]) + case Block4Mb: + BlockPool4M.Put(buf[:c]) + case Block8Mb: + BlockPool8M.Put(buf[:c]) + } +} + +type CompressionLevel uint32 + +const Fast CompressionLevel = 0 diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s new file mode 100644 index 000000000..1d00133fa --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s @@ -0,0 +1,448 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// AX scratch +// BX scratch +// CX literal and match lengths +// DX token, match offset +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// R14 &dict +// R15 len(dict) + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOSPLIT, $48-80 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + CMPQ R9, $0 + JE err_corrupt + ADDQ SI, R9 + + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + + XORL CX, CX + +loop: + // token := uint32(src[si]) + MOVBLZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVL DX, CX + SHRL $4, CX + + // if lit_len != 0xF + CMPL CX, $0xF + JEQ lit_len_loop + CMPQ DI, R12 + JAE copy_literal + CMPQ SI, R13 + JAE copy_literal + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). 
+ // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVL DX, CX + ANDL $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. + // offset := uint16(data[:2]) + MOVWLZX (SI), DX + TESTL DX, DX + JE err_corrupt + ADDQ $2, SI + JC err_short_buf + + MOVQ DI, AX + SUBQ DX, AX + JC err_corrupt + CMPQ AX, DI + JA err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPL CX, $0xF + JEQ match_len_loop_pre + CMPL DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JB match_len_loop_pre + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + LEAQ const_minMatch(DI)(CX*1), DI + + // shortcut complete, load next token + JMP loopcheck + + // Read the rest of the literal length: + // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF). +lit_len_loop: + CMPQ SI, R9 + JAE err_short_buf + + MOVBLZX (SI), BX + INCQ SI + ADDQ BX, CX + + CMPB BX, $0xFF + JE lit_len_loop + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + JC err_short_buf + CMPQ AX, R9 + JA err_short_buf + + MOVQ DI, BX + ADDQ CX, BX + JC err_short_buf + CMPQ BX, R8 + JA err_short_buf + + // Copy literals of <=48 bytes through the XMM registers. + CMPQ CX, $48 + JGT memmove_lit + + // if len(dst[di:]) < 48 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $48 + JLT memmove_lit + + // if len(src[si:]) < 48 + MOVQ R9, BX + SUBQ SI, BX + CMPQ BX, $48 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU 32(SI), X2 + MOVOU X0, (DI) + MOVOU X1, 16(DI) + MOVOU X2, 32(DI) + + ADDQ CX, SI + ADDQ CX, DI + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + + // Spill registers. Increment SI, DI now so we don't need to save CX. + ADDQ CX, DI + ADDQ CX, SI + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVL DX, 40(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVL 40(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + // CX := mLen + // free up DX to use for offset + MOVL DX, CX + ANDL $0xF, CX + + CMPQ SI, R9 + JAE end + + // offset + // si += 2 + // DX := int(src[si-2]) | int(src[si-1])<<8 + ADDQ $2, SI + JC err_short_buf + CMPQ SI, R9 + JA err_short_buf + MOVWQZX -2(SI), DX + + // 0 offset is invalid + TESTL DX, DX + JEQ err_corrupt + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + + // do { BX = src[si++]; mlen += BX } while (BX == 0xFF). 
+match_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE match_len_loop
+
+copy_match:
+	ADDQ $const_minMatch, CX
+
+	// check we have match_len bytes left in dst
+	// di+match_len < len(dst)
+	MOVQ DI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R8
+	JA err_short_buf
+
+	// DX = offset
+	// CX = match_len
+	// BX = &dst + (di - offset)
+	MOVQ DI, BX
+	SUBQ DX, BX
+
+	// check BX is within dst
+	// if BX < &dst
+	JC copy_match_from_dict
+	CMPQ BX, R11
+	JBE copy_match_from_dict
+
+	// if (di-offset)+match_len < di, the match ends before the current
+	// output position, so it does not overlap the bytes being written.
+	LEAQ (BX)(CX*1), AX
+	CMPQ DI, AX
+	JA copy_interior_match
+
+	// AX := len(dst[:di])
+	// MOVQ DI, AX
+	// SUBQ R11, AX
+
+	// copy 16 bytes at a time
+	// if di-offset < 16 copy 16-(di-offset) bytes to di
+	// then do the remaining
+
+copy_match_loop:
+	// for match_len > 0
+	// dst[di] = dst[i]
+	// di++
+	// i++
+	MOVB (BX), AX
+	MOVB AX, (DI)
+	INCQ DI
+	INCQ BX
+	DECQ CX
+	JNZ copy_match_loop
+
+	JMP loopcheck
+
+copy_interior_match:
+	CMPQ CX, $16
+	JGT memmove_match
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_match
+
+	MOVOU (BX), X0
+	MOVOU X0, (DI)
+
+	ADDQ CX, DI
+	XORL CX, CX
+	JMP loopcheck
+
+copy_match_from_dict:
+	// CX = match_len
+	// BX = &dst + (di - offset)
+
+	// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
+	MOVQ R11, AX
+	SUBQ BX, AX
+
+	// BX = len(dict) - dict_bytes_available
+	MOVQ R15, BX
+	SUBQ AX, BX
+	JS err_short_dict
+
+	ADDQ R14, BX
+
+	// if match_len < dict_bytes_available, the match fits entirely within the external dictionary: just copy
+	CMPQ CX, AX
+	JLT memmove_match
+
+	// The match stretches over the dictionary and our block
+	// 1) copy what comes from the dictionary
+	// AX = dict_bytes_available = copy_size
+	// BX = &dict_end - copy_size
+	// CX = match_len
+
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ AX, 16(SP)
+	// store extra stuff we want to recover
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 16(SP), AX // copy_size
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX // match_len
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	// di+=copy_size
+	ADDQ AX, DI
+
+	// 2) copy the rest from the current block
+	// CX = match_len - copy_size = rest_size
+	SUBQ AX, CX
+	MOVQ R11, BX
+
+	// check if we have a copy overlap
+	// AX = &dst + rest_size
+	MOVQ CX, AX
+	ADDQ BX, AX
+	// if &dst + rest_size > di, copy byte by byte
+	CMPQ AX, DI
+
+	JA copy_match_loop
+
+memmove_match:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment DI now so we don't need to save CX.
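+	// Note: runtime·memmove may clobber any general-purpose register, which is
+	// why live values are spilled to the stack around each CALL and the
+	// FP-derived values are recomputed afterwards.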
+ ADDQ CX, DI + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + MOVQ dict_base+48(FP), R14 + MOVQ dict_len+56(FP), R15 + XORL CX, CX + +loopcheck: + // for si < len(src) + CMPQ SI, R9 + JB loop + +end: + // Remaining length must be zero. + TESTQ CX, CX + JNE err_corrupt + + SUBQ R11, DI + MOVQ DI, ret+72(FP) + RET + +err_corrupt: + MOVQ $-1, ret+72(FP) + RET + +err_short_buf: + MOVQ $-2, ret+72(FP) + RET + +err_short_dict: + MOVQ $-3, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s new file mode 100644 index 000000000..20b21fcf1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s @@ -0,0 +1,231 @@ +// +build gc +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define srcend R4 +#define match R5 // Match address. +#define dictend R6 +#define token R7 +#define len R8 // Literal and match lengths. +#define offset R7 // Match offset; overlaps with token. +#define tmp1 R9 +#define tmp2 R11 +#define tmp3 R12 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40 + MOVW dst_base +0(FP), dst + MOVW dst_len +4(FP), dstend + MOVW src_base +12(FP), src + MOVW src_len +16(FP), srcend + + CMP $0, srcend + BEQ shortSrc + + ADD dst, dstend + ADD src, srcend + + MOVW dst, dstorig + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + MOVW token >> 4, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CMP $0, len + BEQ copyLiteralDone + + // Bounds check dst+len and src+len. + ADD.S dst, len, tmp1 + ADD.CC.S src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + //BHI shortDst // Uncomment for distinct error codes. + CMP.LS srcend, tmp2 + BHI shortSrc + + // Copy literal. + CMP $4, len + BLO copyLiteralFinish + + // Copy 0-3 bytes until src is aligned. + TST $1, src + MOVBU.NE.P 1(src), tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $1, len + + TST $2, src + MOVHU.NE.P 2(src), tmp2 + MOVB.NE.P tmp2, 1(dst) + MOVW.NE tmp2 >> 8, tmp1 + MOVB.NE.P tmp1, 1(dst) + SUB.NE $2, len + + B copyLiteralLoopCond + +copyLiteralLoop: + // Aligned load, unaligned write. + MOVW.P 4(src), tmp1 + MOVW tmp1 >> 8, tmp2 + MOVB tmp2, 1(dst) + MOVW tmp1 >> 16, tmp3 + MOVB tmp3, 2(dst) + MOVW tmp1 >> 24, tmp2 + MOVB tmp2, 3(dst) + MOVB.P tmp1, 4(dst) +copyLiteralLoopCond: + // Loop until len-4 < 0. + SUB.S $4, len + BPL copyLiteralLoop + +copyLiteralFinish: + // Copy remaining 0-3 bytes. + // At this point, len may be < 0, but len&3 is still accurate. + TST $1, len + MOVB.NE.P 1(src), tmp3 + MOVB.NE.P tmp3, 1(dst) + TST $2, len + MOVB.NE.P 2(src), tmp1 + MOVB.NE.P tmp1, 2(dst) + MOVB.NE -1(src), tmp2 + MOVB.NE tmp2, -1(dst) + +copyLiteralDone: + // Initial part of match length. + // This frees up the token register for reuse as offset. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. 
+ ADD.S $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVBU -2(src), offset + MOVBU -1(src), tmp1 + ORR.S tmp1 << 8, offset + BEQ corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADD.S tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + // Bounds check dst+len+minMatch. + ADD.S dst, len, tmp1 + ADD.CC.S $const_minMatch, tmp1 + BCS shortDst + CMP dstend, tmp1 + BHI shortDst + + RSB dst, offset, match + CMP dstorig, match + BGE copyMatch4 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). + MOVW dict_base+24(FP), match + MOVW dict_len +28(FP), dictend + + ADD $const_minMatch, len + + RSB dst, dstorig, tmp1 + RSB dictend, offset, tmp2 + ADD.S tmp2, tmp1 + BMI shortDict + ADD match, dictend + ADD tmp1, match + +copyDict: + MOVBU.P 1(match), tmp1 + MOVB.P tmp1, 1(dst) + SUB.S $1, len + CMP.NE match, dictend + BNE copyDict + + // If the match extends beyond the dictionary, the rest is at dstorig. + CMP $0, len + BEQ copyMatchDone + MOVW dstorig, match + B copyMatch + + // Copy a regular match. + // Since len+minMatch is at least four, we can do a 4× unrolled + // byte copy loop. Using MOVW instead of four byte loads is faster, + // but to remain portable we'd have to align match first, which is + // too expensive. By alternating loads and stores, we also handle + // the case offset < 4. +copyMatch4: + SUB.S $4, len + MOVBU.P 4(match), tmp1 + MOVB.P tmp1, 4(dst) + MOVBU -3(match), tmp2 + MOVB tmp2, -3(dst) + MOVBU -2(match), tmp3 + MOVB tmp3, -2(dst) + MOVBU -1(match), tmp1 + MOVB tmp1, -1(dst) + BPL copyMatch4 + + // Restore len, which is now negative. + ADD.S $4, len + BEQ copyMatchDone + +copyMatch: + // Finish with a byte-at-a-time copy. + SUB.S $1, len + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + BNE copyMatch + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CMP $0, len + BNE corrupt + SUB dstorig, dst, tmp1 + MOVW tmp1, ret+36(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. +shortDict: +shortDst: +shortSrc: +corrupt: + MOVW $-1, tmp1 + MOVW tmp1, ret+36(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s new file mode 100644 index 000000000..c43e8a8d2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -0,0 +1,230 @@ +// +build gc +// +build !noasm + +// This implementation assumes that strict alignment checking is turned off. +// The Go compiler makes the same assumption. + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define dst R0 +#define dstorig R1 +#define src R2 +#define dstend R3 +#define dstend16 R4 // dstend - 16 +#define srcend R5 +#define srcend16 R6 // srcend - 16 +#define match R7 // Match address. +#define dict R8 +#define dictlen R9 +#define dictend R10 +#define token R11 +#define len R12 // Literal and match lengths. +#define lenRem R13 +#define offset R14 // Match offset. 
+#define tmp1 R15 +#define tmp2 R16 +#define tmp3 R17 +#define tmp4 R19 + +// func decodeBlock(dst, src, dict []byte) int +TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 + LDP dst_base+0(FP), (dst, dstend) + ADD dst, dstend + MOVD dst, dstorig + + LDP src_base+24(FP), (src, srcend) + CBZ srcend, shortSrc + ADD src, srcend + + // dstend16 = max(dstend-16, 0) and similarly for srcend16. + SUBS $16, dstend, dstend16 + CSEL LO, ZR, dstend16, dstend16 + SUBS $16, srcend, srcend16 + CSEL LO, ZR, srcend16, srcend16 + + LDP dict_base+48(FP), (dict, dictlen) + ADD dict, dictlen, dictend + +loop: + // Read token. Extract literal length. + MOVBU.P 1(src), token + LSR $4, token, len + CMP $15, len + BNE readLitlenDone + +readLitlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readLitlenLoop + +readLitlenDone: + CBZ len, copyLiteralDone + + // Bounds check dst+len and src+len. + ADDS dst, len, tmp1 + BCS shortSrc + ADDS src, len, tmp2 + BCS shortSrc + CMP dstend, tmp1 + BHI shortDst + CMP srcend, tmp2 + BHI shortSrc + + // Copy literal. + SUBS $16, len + BLO copyLiteralShort + +copyLiteralLoop: + LDP.P 16(src), (tmp1, tmp2) + STP.P (tmp1, tmp2), 16(dst) + SUBS $16, len + BPL copyLiteralLoop + + // Copy (final part of) literal of length 0-15. + // If we have >=16 bytes left in src and dst, just copy 16 bytes. +copyLiteralShort: + CMP dstend16, dst + CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO). + BHS copyLiteralShortEnd + + AND $15, len + + LDP (src), (tmp1, tmp2) + ADD len, src + STP (tmp1, tmp2), (dst) + ADD len, dst + + B copyLiteralDone + + // Safe but slow copy near the end of src, dst. +copyLiteralShortEnd: + TBZ $3, len, 3(PC) + MOVD.P 8(src), tmp1 + MOVD.P tmp1, 8(dst) + TBZ $2, len, 3(PC) + MOVW.P 4(src), tmp2 + MOVW.P tmp2, 4(dst) + TBZ $1, len, 3(PC) + MOVH.P 2(src), tmp3 + MOVH.P tmp3, 2(dst) + TBZ $0, len, 3(PC) + MOVBU.P 1(src), tmp4 + MOVB.P tmp4, 1(dst) + +copyLiteralDone: + // Initial part of match length. + AND $15, token, len + + CMP src, srcend + BEQ end + + // Read offset. + ADDS $2, src + BCS shortSrc + CMP srcend, src + BHI shortSrc + MOVHU -2(src), offset + CBZ offset, corrupt + + // Read rest of match length. + CMP $15, len + BNE readMatchlenDone + +readMatchlenLoop: + CMP src, srcend + BEQ shortSrc + MOVBU.P 1(src), tmp1 + ADDS tmp1, len + BVS shortDst + CMP $255, tmp1 + BEQ readMatchlenLoop + +readMatchlenDone: + ADD $const_minMatch, len + + // Bounds check dst+len. + ADDS dst, len, tmp2 + BCS shortDst + CMP dstend, tmp2 + BHI shortDst + + SUB offset, dst, match + CMP dstorig, match + BHS copyMatchTry8 + + // match < dstorig means the match starts in the dictionary, + // at len(dict) - offset + (dst - dstorig). + SUB dstorig, dst, tmp1 + SUB offset, dictlen, tmp2 + ADDS tmp2, tmp1 + BMI shortDict + ADD dict, tmp1, match + +copyDict: + MOVBU.P 1(match), tmp3 + MOVB.P tmp3, 1(dst) + SUBS $1, len + CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag. + BNE copyDict + + CBZ len, copyMatchDone + + // If the match extends beyond the dictionary, the rest is at dstorig. + // Recompute the offset for the next check. + MOVD dstorig, match + SUB dstorig, dst, offset + +copyMatchTry8: + // Copy doublewords if both len and offset are at least eight. + // A 16-at-a-time loop doesn't provide a further speedup. 
+ CMP $8, len + CCMP HS, offset, $8, $0 + BLO copyMatchLoop1 + + AND $7, len, lenRem + SUB $8, len +copyMatchLoop8: + MOVD.P 8(match), tmp1 + MOVD.P tmp1, 8(dst) + SUBS $8, len + BPL copyMatchLoop8 + + MOVD (match)(len), tmp2 // match+len == match+lenRem-8. + ADD lenRem, dst + MOVD $0, len + MOVD tmp2, -8(dst) + B copyMatchDone + +copyMatchLoop1: + // Byte-at-a-time copy for small offsets. + MOVBU.P 1(match), tmp2 + MOVB.P tmp2, 1(dst) + SUBS $1, len + BNE copyMatchLoop1 + +copyMatchDone: + CMP src, srcend + BNE loop + +end: + CBNZ len, corrupt + SUB dstorig, dst, tmp1 + MOVD tmp1, ret+72(FP) + RET + + // The error cases have distinct labels so we can put different + // return codes here when debugging, or if the error returns need to + // be changed. +shortDict: +shortDst: +shortSrc: +corrupt: + MOVD $-1, tmp1 + MOVD tmp1, ret+72(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go new file mode 100644 index 000000000..8d9023d10 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go @@ -0,0 +1,10 @@ +//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm +// +build amd64 arm arm64 +// +build !appengine +// +build gc +// +build !noasm + +package lz4block + +//go:noescape +func decodeBlock(dst, src, dict []byte) int diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go new file mode 100644 index 000000000..9f568fbb1 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go @@ -0,0 +1,139 @@ +//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm,!arm64 appengine !gc noasm + +package lz4block + +import ( + "encoding/binary" +) + +func decodeBlock(dst, src, dict []byte) (ret int) { + // Restrict capacities so we don't read or write out of bounds. + dst = dst[:len(dst):len(dst)] + src = src[:len(src):len(src)] + + const hasError = -2 + + if len(src) == 0 { + return hasError + } + + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di uint + for si < uint(len(src)) { + // Literals and match lengths (token). + b := uint(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < uint(len(src)): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := u16(src[si:]); mLen <= offset && offset < di { + i := di - offset + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. 
+ if end := i + 18; end <= uint(len(dst)) { + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + } + case lLen == 0xF: + for { + x := uint(src[si]) + if lLen += x; int(lLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + + mLen := b & 0xF + if si == uint(len(src)) && mLen == 0 { + break + } else if si >= uint(len(src)) { + return hasError + } + + offset := u16(src[si:]) + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen += minMatch + if mLen == minMatch+0xF { + for { + x := uint(src[si]) + if mLen += x; int(mLen) < 0 { + return hasError + } + si++ + if x != 0xFF { + break + } + } + } + + // Copy the match. + if di < offset { + // The match is beyond our block, meaning the first part + // is in the dictionary. + fromDict := dict[uint(len(dict))+di-offset:] + n := uint(copy(dst[di:di+mLen], fromDict)) + di += n + if mLen -= n; mLen == 0 { + continue + } + // We copied n = offset-di bytes from the dictionary, + // then set di = di+n = offset, so the following code + // copies from dst[di-offset:] = dst[0:]. + } + + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. + bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += uint(copy(dst[di:di+mLen], expanded[:mLen])) + } + + return int(di) +} + +func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go new file mode 100644 index 000000000..710ea4281 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go @@ -0,0 +1,19 @@ +package lz4errors + +type Error string + +func (e Error) Error() string { return string(e) } + +const ( + ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short" + ErrInvalidFrame Error = "lz4: bad magic number" + ErrInternalUnhandledState Error = "lz4: unhandled state" + ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum" + ErrInvalidBlockChecksum Error = "lz4: invalid block checksum" + ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum" + ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level" + ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object" + ErrOptionInvalidBlockSize Error = "lz4: invalid block size" + ErrOptionNotApplicable Error = "lz4: option not applicable" + ErrWriterNotClosed Error = "lz4: writer not closed" +) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go new file mode 100644 index 000000000..459086f09 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -0,0 +1,350 @@ +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "sync" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +type Blocks struct { + Block *FrameDataBlock + Blocks chan chan *FrameDataBlock + mu sync.Mutex + err error +} + +func (b *Blocks) initW(f *Frame, dst io.Writer, num int) { + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return + } + b.Block = nil 
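+	// A channel of per-block result channels preserves output ordering: result
+	// channels are queued in submission order, so compressed blocks are written
+	// in the order they were submitted even though they compress concurrently.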
+ if cap(b.Blocks) != num { + b.Blocks = make(chan chan *FrameDataBlock, num) + } + // goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range b.Blocks { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. + block := <-c + if block == nil { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Do not attempt to write the block upon any previous failure. + if b.err == nil { + // Write the block. + if err := block.Write(f, dst); err != nil { + // Keep the first error. + b.err = err + // All pending compression goroutines need to shut down, so we need to keep going. + } + } + close(c) + } + }() +} + +func (b *Blocks) close(f *Frame, num int) error { + if num == 1 { + if b.Block != nil { + b.Block.Close(f) + } + err := b.err + b.err = nil + return err + } + if b.Blocks == nil { + err := b.err + b.err = nil + return err + } + c := make(chan *FrameDataBlock) + b.Blocks <- c + c <- nil + <-c + err := b.err + b.err = nil + return err +} + +// ErrorR returns any error set while uncompressing a stream. +func (b *Blocks) ErrorR() error { + b.mu.Lock() + defer b.mu.Unlock() + return b.err +} + +// initR returns a channel that streams the uncompressed blocks if in concurrent +// mode and no error. When the channel is closed, check for any error with b.ErrorR. +// +// If not in concurrent mode, the uncompressed block is b.Block and the returned error +// needs to be checked. +func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) { + size := f.Descriptor.Flags.BlockSizeIndex() + if num == 1 { + b.Blocks = nil + b.Block = NewFrameDataBlock(f) + return nil, nil + } + b.Block = nil + blocks := make(chan chan []byte, num) + // data receives the uncompressed blocks. + data := make(chan []byte) + // Read blocks from the source sequentially + // and uncompress them concurrently. + + // In legacy mode, accrue the uncompress sizes in cum. + var cum uint32 + go func() { + var cumx uint32 + var err error + for b.ErrorR() == nil { + block := NewFrameDataBlock(f) + cumx, err = block.Read(f, src, 0) + if err != nil { + block.Close(f) + break + } + // Recheck for an error as reading may be slow and uncompressing is expensive. + if b.ErrorR() != nil { + block.Close(f) + break + } + c := make(chan []byte) + blocks <- c + go func() { + defer block.Close(f) + data, err := block.Uncompress(f, size.Get(), nil, false) + if err != nil { + b.closeR(err) + // Close the block channel to indicate an error. + close(c) + } else { + c <- data + } + }() + } + // End the collection loop and the data channel. + c := make(chan []byte) + blocks <- c + c <- nil // signal the collection loop that we are done + <-c // wait for the collect loop to complete + if f.isLegacy() && cum == cumx { + err = io.EOF + } + b.closeR(err) + close(data) + }() + // Collect the uncompressed blocks and make them available + // on the returned channel. + go func(leg bool) { + defer close(blocks) + skipBlocks := false + for c := range blocks { + buf, ok := <-c + if !ok { + // A closed channel indicates an error. + // All remaining channels should be discarded. + skipBlocks = true + continue + } + if buf == nil { + // Signal to end the loop. 
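+				// The nil buffer is the sentinel sent by the reading goroutine;
+				// closing c unblocks its final receive and ends both loops.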
+ close(c) + return + } + if skipBlocks { + // A previous error has occurred, skipping remaining channels. + continue + } + // Perform checksum now as the blocks are received in order. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(buf) + } + if leg { + cum += uint32(len(buf)) + } + data <- buf + close(c) + } + }(f.isLegacy()) + return data, nil +} + +// closeR safely sets the error on b if not already set. +func (b *Blocks) closeR(err error) { + b.mu.Lock() + if b.err == nil { + b.err = err + } + b.mu.Unlock() +} + +func NewFrameDataBlock(f *Frame) *FrameDataBlock { + buf := f.Descriptor.Flags.BlockSizeIndex().Get() + return &FrameDataBlock{Data: buf, data: buf} +} + +type FrameDataBlock struct { + Size DataBlockSize + Data []byte // compressed or uncompressed data (.data or .src) + Checksum uint32 + data []byte // buffer for compressed data + src []byte // uncompressed data + err error // used in concurrent mode +} + +func (b *FrameDataBlock) Close(f *Frame) { + b.Size = 0 + b.Checksum = 0 + b.err = nil + if b.data != nil { + // Block was not already closed. + lz4block.Put(b.data) + b.Data = nil + b.data = nil + b.src = nil + } +} + +// Block compression errors are ignored since the buffer is sized appropriately. +func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { + data := b.data + if f.isLegacy() { + // In legacy mode, the buffer is sized according to CompressBlockBound, + // but only 8Mb is buffered for compression. + src = src[:8<<20] + } else { + data = data[:len(src)] // trigger the incompressible flag in CompressBlock + } + var n int + switch level { + case lz4block.Fast: + n, _ = lz4block.CompressBlock(src, data) + default: + n, _ = lz4block.CompressBlockHC(src, data, level) + } + if n == 0 { + b.Size.UncompressedSet(true) + b.Data = src + } else { + b.Size.UncompressedSet(false) + b.Data = data[:n] + } + b.Size.sizeSet(len(b.Data)) + b.src = src // keep track of the source for content checksum + + if f.Descriptor.Flags.BlockChecksum() { + b.Checksum = xxh32.ChecksumZero(src) + } + return b +} + +func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error { + // Write is called in the same order as blocks are compressed, + // so content checksum must be done here. + if f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(b.src) + } + buf := f.buf[:] + binary.LittleEndian.PutUint32(buf, uint32(b.Size)) + if _, err := dst.Write(buf[:4]); err != nil { + return err + } + + if _, err := dst.Write(b.Data); err != nil { + return err + } + + if b.Checksum == 0 { + return nil + } + binary.LittleEndian.PutUint32(buf, b.Checksum) + _, err := dst.Write(buf[:4]) + return err +} + +// Read updates b with the next block data, size and checksum if available. +func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) { + x, err := f.readUint32(src) + if err != nil { + return 0, err + } + if f.isLegacy() { + switch x { + case frameMagicLegacy: + // Concatenated legacy frame. + return b.Read(f, src, cum) + case cum: + // Only works in non concurrent mode, for concurrent mode + // it is handled separately. + // Linux kernel format appends the total uncompressed size at the end. + return 0, io.EOF + } + } else if x == 0 { + // Marker for end of stream. 
+ return 0, io.EOF + } + b.Size = DataBlockSize(x) + + size := b.Size.size() + if size > cap(b.data) { + return x, lz4errors.ErrOptionInvalidBlockSize + } + b.data = b.data[:size] + if _, err := io.ReadFull(src, b.data); err != nil { + return x, err + } + if f.Descriptor.Flags.BlockChecksum() { + sum, err := f.readUint32(src) + if err != nil { + return 0, err + } + b.Checksum = sum + } + return x, nil +} + +func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) { + if b.Size.Uncompressed() { + n := copy(dst, b.data) + dst = dst[:n] + } else { + n, err := lz4block.UncompressBlock(b.data, dst, dict) + if err != nil { + return nil, err + } + dst = dst[:n] + } + if f.Descriptor.Flags.BlockChecksum() { + if c := xxh32.ChecksumZero(dst); c != b.Checksum { + err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) + return nil, err + } + } + if sum && f.Descriptor.Flags.ContentChecksum() { + _, _ = f.checksum.Write(dst) + } + return dst, nil +} + +func (f *Frame) readUint32(r io.Reader) (x uint32, err error) { + if _, err = io.ReadFull(r, f.buf[:4]); err != nil { + return + } + x = binary.LittleEndian.Uint32(f.buf[:4]) + return +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go new file mode 100644 index 000000000..18192a943 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go @@ -0,0 +1,204 @@ +// Package lz4stream provides the types that support reading and writing LZ4 data streams. +package lz4stream + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/xxh32" +) + +//go:generate go run gen.go + +const ( + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + frameMagicLegacy uint32 = 0x184C2102 +) + +func NewFrame() *Frame { + return &Frame{} +} + +type Frame struct { + buf [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes + Magic uint32 + Descriptor FrameDescriptor + Blocks Blocks + Checksum uint32 + checksum xxh32.XXHZero +} + +// Reset allows reusing the Frame. +// The Descriptor configuration is not modified. +func (f *Frame) Reset(num int) { + f.Magic = 0 + f.Descriptor.Checksum = 0 + f.Descriptor.ContentSize = 0 + _ = f.Blocks.close(f, num) + f.Checksum = 0 +} + +func (f *Frame) InitW(dst io.Writer, num int, legacy bool) { + if legacy { + f.Magic = frameMagicLegacy + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + } else { + f.Magic = frameMagic + f.Descriptor.initW() + } + f.Blocks.initW(f, dst, num) + f.checksum.Reset() +} + +func (f *Frame) CloseW(dst io.Writer, num int) error { + if err := f.Blocks.close(f, num); err != nil { + return err + } + if f.isLegacy() { + return nil + } + buf := f.buf[:0] + // End mark (data block size of uint32(0)). + buf = append(buf, 0, 0, 0, 0) + if f.Descriptor.Flags.ContentChecksum() { + buf = f.checksum.Sum(buf) + } + _, err := dst.Write(buf) + return err +} + +func (f *Frame) isLegacy() bool { + return f.Magic == frameMagicLegacy +} + +func (f *Frame) ParseHeaders(src io.Reader) error { + if f.Magic > 0 { + // Header already read. 
+ return nil + } + +newFrame: + var err error + if f.Magic, err = f.readUint32(src); err != nil { + return err + } + switch m := f.Magic; { + case m == frameMagic || m == frameMagicLegacy: + // All 16 values of frameSkipMagic are valid. + case m>>8 == frameSkipMagic>>8: + skip, err := f.readUint32(src) + if err != nil { + return err + } + if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil { + return err + } + goto newFrame + default: + return lz4errors.ErrInvalidFrame + } + if err := f.Descriptor.initR(f, src); err != nil { + return err + } + f.checksum.Reset() + return nil +} + +func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) { + return f.Blocks.initR(f, num, src) +} + +func (f *Frame) CloseR(src io.Reader) (err error) { + if f.isLegacy() { + return nil + } + if !f.Descriptor.Flags.ContentChecksum() { + return nil + } + if f.Checksum, err = f.readUint32(src); err != nil { + return err + } + if c := f.checksum.Sum32(); c != f.Checksum { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum) + } + return nil +} + +type FrameDescriptor struct { + Flags DescriptorFlags + ContentSize uint64 + Checksum uint8 +} + +func (fd *FrameDescriptor) initW() { + fd.Flags.VersionSet(1) + fd.Flags.BlockIndependenceSet(true) +} + +func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error { + if fd.Checksum > 0 { + // Header already written. + return nil + } + + buf := f.buf[:4] + // Write the magic number here even though it belongs to the Frame. + binary.LittleEndian.PutUint32(buf, f.Magic) + if !f.isLegacy() { + buf = buf[:4+2] + binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags)) + + if fd.Flags.Size() { + buf = buf[:4+2+8] + binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize) + } + fd.Checksum = descriptorChecksum(buf[4:]) + buf = append(buf, fd.Checksum) + } + + _, err := dst.Write(buf) + return err +} + +func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error { + if f.isLegacy() { + idx := lz4block.Index(lz4block.Block8Mb) + f.Descriptor.Flags.BlockSizeIndexSet(idx) + return nil + } + // Read the flags and the checksum, hoping that there is not content size. + buf := f.buf[:3] + if _, err := io.ReadFull(src, buf); err != nil { + return err + } + descr := binary.LittleEndian.Uint16(buf) + fd.Flags = DescriptorFlags(descr) + if fd.Flags.Size() { + // Append the 8 missing bytes. + buf = buf[:3+8] + if _, err := io.ReadFull(src, buf[3:]); err != nil { + return err + } + fd.ContentSize = binary.LittleEndian.Uint64(buf[2:]) + } + fd.Checksum = buf[len(buf)-1] // the checksum is the last byte + buf = buf[:len(buf)-1] // all descriptor fields except checksum + if c := descriptorChecksum(buf); fd.Checksum != c { + return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum) + } + // Validate the elements that can be. + if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() { + return lz4errors.ErrOptionInvalidBlockSize + } + return nil +} + +func descriptorChecksum(buf []byte) byte { + return byte(xxh32.ChecksumZero(buf) >> 8) +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go new file mode 100644 index 000000000..d33a6be95 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go @@ -0,0 +1,103 @@ +// Code generated by `gen.exe`. DO NOT EDIT. 
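
The fixed part of a frame header can be decoded by hand using the flag layout that the generated getters below encode. Here is a minimal sketch mirroring what `ParseHeaders` and `FrameDescriptor.initR` above do; content size and header-checksum verification are omitted, and the sample bytes are hand-built for illustration rather than taken from any particular tool.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const frameMagic uint32 = 0x184D2204

// parseHeader decodes the magic number, FLG and BD bytes of an LZ4 frame
// header, using the same bit positions as DescriptorFlags below.
func parseHeader(b []byte) error {
	if len(b) < 7 {
		return fmt.Errorf("short header")
	}
	if m := binary.LittleEndian.Uint32(b); m != frameMagic {
		return fmt.Errorf("bad magic %#x", m)
	}
	flags := binary.LittleEndian.Uint16(b[4:]) // FLG in the low byte, BD in the high byte
	version := flags >> 6 & 0x3
	blockIndep := flags>>5&1 != 0
	contentChecksum := flags>>2&1 != 0
	blockSizeIndex := flags >> 12 & 0x7
	fmt.Println(version, blockIndep, contentChecksum, blockSizeIndex)
	return nil
}

func main() {
	// Magic, FLG=0x64 (version 1, independent blocks, content checksum),
	// BD=0x70 (block size index 7 = 4 MiB), then the header checksum byte,
	// which this sketch does not verify.
	hdr := []byte{0x04, 0x22, 0x4D, 0x18, 0x64, 0x70, 0xDF}
	if err := parseHeader(hdr); err != nil {
		panic(err)
	}
}
```
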
+ +package lz4stream + +import "github.com/pierrec/lz4/v4/internal/lz4block" + +// DescriptorFlags is defined as follow: +// field bits +// ----- ---- +// _ 2 +// ContentChecksum 1 +// Size 1 +// BlockChecksum 1 +// BlockIndependence 1 +// Version 2 +// _ 4 +// BlockSizeIndex 3 +// _ 1 +type DescriptorFlags uint16 + +// Getters. +func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 } +func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 } +func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 } +func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 } +func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) } +func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex { + return lz4block.BlockSizeIndex(x >> 12 & 0x7) +} + +// Setters. +func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 2 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags { + const b = 1 << 3 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { + const b = 1 << 4 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { + const b = 1 << 5 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} +func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { + *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6) + return x +} +func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags { + *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12) + return x +} + +// Code generated by `gen.exe`. DO NOT EDIT. + +// DataBlockSize is defined as follow: +// field bits +// ----- ---- +// size 31 +// Uncompressed 1 +type DataBlockSize uint32 + +// Getters. +func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) } +func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 } + +// Setters. +func (x *DataBlockSize) sizeSet(v int) *DataBlockSize { + *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF + return x +} +func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize { + const b = 1 << 31 + if v { + *x = *x&^b | b + } else { + *x &^= b + } + return x +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go new file mode 100644 index 000000000..651d10c10 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go @@ -0,0 +1,212 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime1 uint32 = 2654435761 + prime2 uint32 = 2246822519 + prime3 uint32 = 3266489917 + prime4 uint32 = 668265263 + prime5 uint32 = 374761393 + + primeMask = 0xFFFFFFFF + prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 + prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v [4]uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. 
+func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v[0] = prime1plus2 + xxh.v[1] = prime2 + xxh.v[2] = 0 + xxh.v[3] = prime1minus + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSizeIndex gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. +func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + var buf *[16]byte + if m != 0 { + // some data left from previous update + buf = &xxh.buf + c := copy(buf[m:], input) + n -= c + input = input[c:] + } + update(&xxh.v, buf, input) + xxh.bufused = copy(xxh.buf[:], input[n-n%16:]) + + return n, nil +} + +// Portable version of update. This updates v by processing all of buf +// (if not nil) and all full 16-byte blocks of input. +func updateGo(v *[4]uint32, buf *[16]byte, input []byte) { + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := v[0], v[1], v[2], v[3] + + if buf != nil { + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + } + + for ; len(input) >= 16; input = input[16:] { + sub := input[:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + v[0], v[1], v[2], v[3] = v1, v2, v3, v4 +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3]) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Portable version of ChecksumZero. 
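
The streaming `XXHZero` path above and the one-shot `ChecksumZero` path below must agree on the same input. A quick sketch of that property (note that `xxh32` is an internal package, so this only compiles from inside the module):

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4/v4/internal/xxh32"
)

func main() {
	data := []byte("hello, lz4 frame")

	// One-shot hash.
	sum := xxh32.ChecksumZero(data)

	// Incremental hash over the same bytes, split across two writes.
	var h xxh32.XXHZero
	h.Reset()
	_, _ = h.Write(data[:7])
	_, _ = h.Write(data[7:])

	fmt.Println(sum == h.Sum32()) // true
}
```
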
+func checksumZeroGo(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go new file mode 100644 index 000000000..0978b2665 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go @@ -0,0 +1,11 @@ +// +build !noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +// +//go:noescape +func ChecksumZero(input []byte) uint32 + +//go:noescape +func update(v *[4]uint32, buf *[16]byte, input []byte) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s new file mode 100644 index 000000000..c18ffd574 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s @@ -0,0 +1,251 @@ +// +build !noasm + +#include "go_asm.h" +#include "textflag.h" + +// Register allocation. +#define p R0 +#define n R1 +#define h R2 +#define v1 R2 // Alias for h. +#define v2 R3 +#define v3 R4 +#define v4 R5 +#define x1 R6 +#define x2 R7 +#define x3 R8 +#define x4 R9 + +// We need the primes in registers. The 16-byte loop only uses prime{1,2}. +#define prime1r R11 +#define prime2r R12 +#define prime3r R3 // The rest can alias v{2-4}. +#define prime4r R4 +#define prime5r R5 + +// Update round macros. These read from and increment p. 
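
Each MULA / rotate / MUL triple in the macros below is one accumulator round; `@> 19` is a rotate right by 19, which on 32-bit words is the same operation as the rotate left by 13 (`rol13`) used in the portable code above. A tiny Go rendering of the per-lane step, for orientation only:

```go
package main

import "fmt"

const (
	prime1 uint32 = 2654435761
	prime2 uint32 = 2246822519
)

// round is the per-lane accumulator step: v = rol13(v + x*prime2) * prime1.
func round(v, x uint32) uint32 {
	v += x * prime2
	v = v<<13 | v>>19 // equivalently, a rotate right by 19
	return v * prime1
}

func main() {
	fmt.Printf("%#x\n", round(prime1+prime2, 0x04030201))
}
```
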
+ +#define round16aligned \ + MOVM.IA.W (p), [x1, x2, x3, x4] \ + \ + MULA x1, prime2r, v1, v1 \ + MULA x2, prime2r, v2, v2 \ + MULA x3, prime2r, v3, v3 \ + MULA x4, prime2r, v4, v4 \ + \ + MOVW v1 @> 19, v1 \ + MOVW v2 @> 19, v2 \ + MOVW v3 @> 19, v3 \ + MOVW v4 @> 19, v4 \ + \ + MUL prime1r, v1 \ + MUL prime1r, v2 \ + MUL prime1r, v3 \ + MUL prime1r, v4 \ + +#define round16unaligned \ + MOVBU.P 16(p), x1 \ + MOVBU -15(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -14(p), x3 \ + MOVBU -13(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v1, v1 \ + MOVW v1 @> 19, v1 \ + MUL prime1r, v1 \ + \ + MOVBU -12(p), x1 \ + MOVBU -11(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -10(p), x3 \ + MOVBU -9(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v2, v2 \ + MOVW v2 @> 19, v2 \ + MUL prime1r, v2 \ + \ + MOVBU -8(p), x1 \ + MOVBU -7(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -6(p), x3 \ + MOVBU -5(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v3, v3 \ + MOVW v3 @> 19, v3 \ + MUL prime1r, v3 \ + \ + MOVBU -4(p), x1 \ + MOVBU -3(p), x2 \ + ORR x2 << 8, x1 \ + MOVBU -2(p), x3 \ + MOVBU -1(p), x4 \ + ORR x4 << 8, x3 \ + ORR x3 << 16, x1 \ + \ + MULA x1, prime2r, v4, v4 \ + MOVW v4 @> 19, v4 \ + MUL prime1r, v4 \ + + +// func ChecksumZero([]byte) uint32 +TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16 + MOVW input_base+0(FP), p + MOVW input_len+4(FP), n + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Set up h for n < 16. It's tempting to say {ADD prime5, n, h} + // here, but that's a pseudo-op that generates a load through R11. + MOVW $const_prime5, prime5r + ADD prime5r, n, h + CMP $0, n + BEQ end + + // We let n go negative so we can do comparisons with SUB.S + // instead of separate CMP. + SUB.S $16, n + BMI loop16done + + ADD prime1r, prime2r, v1 + MOVW prime2r, v2 + MOVW $0, v3 + RSB $0, prime1r, v4 + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B loop16finish + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +loop16finish: + MOVW v1 @> 31, h + ADD v2 @> 25, h + ADD v3 @> 20, h + ADD v4 @> 14, h + + // h += len(input) with v2 as temporary. + MOVW input_len+4(FP), v2 + ADD v2, h + +loop16done: + ADD $16, n // Restore number of bytes left. + + SUB.S $4, n + MOVW $const_prime3, prime3r + BMI loop4done + MOVW $const_prime4, prime4r + + TST $3, p + BNE loop4unaligned + +loop4aligned: + SUB.S $4, n + + MOVW.P 4(p), x1 + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4aligned + B loop4done + +loop4unaligned: + SUB.S $4, n + + MOVBU.P 4(p), x1 + MOVBU -3(p), x2 + ORR x2 << 8, x1 + MOVBU -2(p), x3 + ORR x3 << 16, x1 + MOVBU -1(p), x4 + ORR x4 << 24, x1 + + MULA prime3r, x1, h, h + MOVW h @> 15, h + MUL prime4r, h + + BPL loop4unaligned + +loop4done: + ADD.S $4, n // Restore number of bytes left. + BEQ end + + MOVW $const_prime5, prime5r + +loop1: + SUB.S $1, n + + MOVBU.P 1(p), x1 + MULA prime5r, x1, h, h + MOVW h @> 21, h + MUL prime1r, h + + BNE loop1 + +end: + MOVW $const_prime3, prime3r + EOR h >> 15, h + MUL prime2r, h + EOR h >> 13, h + MUL prime3r, h + EOR h >> 16, h + + MOVW h, ret+12(FP) + RET + + +// func update(v *[4]uint64, buf *[16]byte, p []byte) +TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20 + MOVW v+0(FP), p + MOVM.IA (p), [v1, v2, v3, v4] + + MOVW $const_prime1, prime1r + MOVW $const_prime2, prime2r + + // Process buf, if not nil. 
+ MOVW buf+4(FP), p + CMP $0, p + BEQ noBuffered + + round16aligned + +noBuffered: + MOVW input_base +8(FP), p + MOVW input_len +12(FP), n + + SUB.S $16, n + BMI end + + TST $3, p + BNE loop16unaligned + +loop16aligned: + SUB.S $16, n + round16aligned + BPL loop16aligned + B end + +loop16unaligned: + SUB.S $16, n + round16unaligned + BPL loop16unaligned + +end: + MOVW v+0(FP), p + MOVM.IA [v1, v2, v3, v4], (p) + RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go new file mode 100644 index 000000000..c96b59b8c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go @@ -0,0 +1,10 @@ +// +build !arm noasm + +package xxh32 + +// ChecksumZero returns the 32-bit hash of input. +func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) } + +func update(v *[4]uint32, buf *[16]byte, input []byte) { + updateGo(v, buf, input) +} diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go new file mode 100644 index 000000000..a62022e08 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/lz4.go @@ -0,0 +1,157 @@ +// Package lz4 implements reading and writing lz4 compressed data. +// +// The package supports both the LZ4 stream format, +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, +// and the LZ4 block format, defined at +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html. +// +// See https://github.com/lz4/lz4 for the reference C implementation. +package lz4 + +import ( + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +func _() { + // Safety checks for duplicated elements. + var x [1]struct{} + _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast] + _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)] + _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)] + _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)] + _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)] +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return lz4block.CompressBlockBound(n) +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, nil) +} + +// UncompressBlockWithDict uncompresses the source buffer into the destination one using a +// dictionary, and returns the uncompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlockWithDict(src, dst, dict []byte) (int, error) { + return lz4block.UncompressBlock(src, dst, dict) +} + +// A Compressor compresses data into the LZ4 block format. +// It uses a fast compression algorithm. +// +// A Compressor is not safe for concurrent use by multiple goroutines. +// +// Use a Writer to compress into the LZ4 stream format. +type Compressor struct{ c lz4block.Compressor } + +// CompressBlock compresses the source buffer src into the destination dst. +// +// If compression is successful, the first return value is the size of the +// compressed data, which is always >0. 
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+//
+// CompressBlock is equivalent to Compressor.CompressBlock.
+// The final argument was once scratch space for a hash table; it is now
+// ignored and should be set to nil.
+//
+// This function is deprecated. Use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+	return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A CompressorHC is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+	// Level is the maximum search depth for compression.
+	// Values <= 0 mean no maximum.
+	Level CompressionLevel
+	c     lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// This function is deprecated. Use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
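+	// For example, a stream that does not begin with the LZ4 frame magic number.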
+	ErrInvalidFrame = lz4errors.ErrInvalidFrame
+	// ErrInternalUnhandledState is an internal error.
+	ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+	// ErrInvalidHeaderChecksum is returned when reading a frame.
+	ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+	// ErrInvalidBlockChecksum is returned when reading a frame.
+	ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+	// ErrInvalidFrameChecksum is returned when reading a frame.
+	ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+	ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+	ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+	// ErrOptionInvalidBlockSize is returned when an invalid block size is provided.
+	ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+	ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+	ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 000000000..46a873803
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,214 @@
+package lz4
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+	applier interface {
+		Apply(...Option) error
+		private()
+	}
+	// Option defines the parameters to set up an LZ4 Writer or Reader.
+	Option func(applier) error
+)
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+	return o(nil).Error()
+}
+
+// Default options.
+var (
+	DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+	DefaultChecksumOption  = ChecksumOption(true)
+	DefaultConcurrency     = ConcurrencyOption(1)
+	defaultOnBlockDone     = OnBlockDoneOption(nil)
+)
+
+const (
+	Block64Kb BlockSize = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockSizeOption(%s)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
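+// Each block checksum covers one compressed block, independently of the
+// whole-frame content checksum controlled by ChecksumOption.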
+func BlockChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ChecksumOption enables or disables the frame content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0).
+// It lets readers know the size of the whole uncompressed data stream.
+func SizeOption(size uint64) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("SizeOption(%d)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+	if n <= 0 {
+		n = runtime.GOMAXPROCS(0)
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.num = n
+			return nil
+		case *Reader:
+			rw.num = n
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// CompressionLevel defines the level of compression to use.
+// Higher levels compress better but are slower.
+type CompressionLevel uint32
+
+const (
+	Fast   CompressionLevel = 0
+	Level1 CompressionLevel = 1 << (8 + iota)
+	Level2
+	Level3
+	Level4
+	Level5
+	Level6
+	Level7
+	Level8
+	Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+			return lz4errors.Error(s)
+		case *Writer:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption is triggered when a block has been processed. For a Writer,
+// that is when it has been compressed; for a Reader, when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+	if handler == nil {
+		handler = onBlockDone
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.handler = handler
+			return nil
+		case *Reader:
+			rw.handler = handler
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("LegacyOption(%v)", legacy)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.legacy = legacy
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 000000000..2de814909
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Block64Kb-65536]
+	_ = x[Block256Kb-262144]
+	_ = x[Block1Mb-1048576]
+	_ = x[Block4Mb-4194304]
+}
+
+const (
+	_BlockSize_name_0 = "Block64Kb"
+	_BlockSize_name_1 = "Block256Kb"
+	_BlockSize_name_2 = "Block1Mb"
+	_BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+	switch {
+	case i == 65536:
+		return _BlockSize_name_0
+	case i == 262144:
+		return _BlockSize_name_1
+	case i == 1048576:
+		return _BlockSize_name_2
+	case i == 4194304:
+		return _BlockSize_name_3
+	default:
+		return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
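+	// (Run go generate to invoke the stringer directive declared in options.go.)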
+ var x [1]struct{} + _ = x[Fast-0] + _ = x[Level1-512] + _ = x[Level2-1024] + _ = x[Level3-2048] + _ = x[Level4-4096] + _ = x[Level5-8192] + _ = x[Level6-16384] + _ = x[Level7-32768] + _ = x[Level8-65536] + _ = x[Level9-131072] +} + +const ( + _CompressionLevel_name_0 = "Fast" + _CompressionLevel_name_1 = "Level1" + _CompressionLevel_name_2 = "Level2" + _CompressionLevel_name_3 = "Level3" + _CompressionLevel_name_4 = "Level4" + _CompressionLevel_name_5 = "Level5" + _CompressionLevel_name_6 = "Level6" + _CompressionLevel_name_7 = "Level7" + _CompressionLevel_name_8 = "Level8" + _CompressionLevel_name_9 = "Level9" +) + +func (i CompressionLevel) String() string { + switch { + case i == 0: + return _CompressionLevel_name_0 + case i == 512: + return _CompressionLevel_name_1 + case i == 1024: + return _CompressionLevel_name_2 + case i == 2048: + return _CompressionLevel_name_3 + case i == 4096: + return _CompressionLevel_name_4 + case i == 8192: + return _CompressionLevel_name_5 + case i == 16384: + return _CompressionLevel_name_6 + case i == 32768: + return _CompressionLevel_name_7 + case i == 65536: + return _CompressionLevel_name_8 + case i == 131072: + return _CompressionLevel_name_9 + default: + return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go new file mode 100644 index 000000000..275daad7c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/reader.go @@ -0,0 +1,275 @@ +package lz4 + +import ( + "bytes" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var readerStates = []aState{ + noState: newState, + errorState: newState, + newState: readState, + readState: closedState, + closedState: newState, +} + +// NewReader returns a new LZ4 frame decoder. +func NewReader(r io.Reader) *Reader { + return newReader(r, false) +} + +func newReader(r io.Reader, legacy bool) *Reader { + zr := &Reader{frame: lz4stream.NewFrame()} + zr.state.init(readerStates) + _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone) + zr.Reset(r) + return zr +} + +// Reader allows reading an LZ4 stream. +type Reader struct { + state _State + src io.Reader // source reader + num int // concurrency level + frame *lz4stream.Frame // frame being read + data []byte // block buffer allocated in non concurrent mode + reads chan []byte // pending data + idx int // size of pending data + handler func(int) + cum uint32 + dict []byte +} + +func (*Reader) private() {} + +func (r *Reader) Apply(options ...Option) (err error) { + defer r.state.check(&err) + switch r.state.state { + case newState: + case errorState: + return r.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + for _, o := range options { + if err = o(r); err != nil { + return + } + } + return +} + +// Size returns the size of the underlying uncompressed data, if set in the stream. +func (r *Reader) Size() int { + switch r.state.state { + case readState, closedState: + if r.frame.Descriptor.Flags.Size() { + return int(r.frame.Descriptor.ContentSize) + } + } + return 0 +} + +func (r *Reader) isNotConcurrent() bool { + return r.num == 1 +} + +func (r *Reader) init() error { + err := r.frame.ParseHeaders(r.src) + if err != nil { + return err + } + if !r.frame.Descriptor.Flags.BlockIndependence() { + // We can't decompress dependent blocks concurrently. 
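+		// (a dependent block may back-reference bytes produced by the blocks before it)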
+		// Instead of throwing an error to the user, silently drop concurrency.
+		r.num = 1
+	}
+	data, err := r.frame.InitR(r.src, r.num)
+	if err != nil {
+		return err
+	}
+	r.reads = data
+	r.idx = 0
+	size := r.frame.Descriptor.Flags.BlockSizeIndex()
+	r.data = size.Get()
+	r.cum = 0
+	return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case readState:
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		// First initialization.
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	for len(buf) > 0 {
+		var bn int
+		if r.idx == 0 {
+			if r.isNotConcurrent() {
+				bn, err = r.read(buf)
+			} else {
+				lz4block.Put(r.data)
+				r.data = <-r.reads
+				if len(r.data) == 0 {
+					// No uncompressed data: something went wrong or we are done.
+					err = r.frame.Blocks.ErrorR()
+				}
+			}
+			switch err {
+			case nil:
+			case io.EOF:
+				if er := r.frame.CloseR(r.src); er != nil {
+					err = er
+				}
+				lz4block.Put(r.data)
+				r.data = nil
+				return
+			default:
+				return
+			}
+		}
+		if bn == 0 {
+			// Fill buf with buffered data.
+			bn = copy(buf, r.data[r.idx:])
+			r.idx += bn
+			if r.idx == len(r.data) {
+				// All data read, get ready for the next Read.
+				r.idx = 0
+			}
+		}
+		buf = buf[bn:]
+		n += bn
+		r.handler(bn)
+	}
+	return
+}
+
+// read uncompresses the next block as follows:
+//   - if buf has enough room, the block is uncompressed into it directly
+//     and the length of the used space is returned
+//   - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+	block := r.frame.Blocks.Block
+	_, err := block.Read(r.frame, r.src, r.cum)
+	if err != nil {
+		return 0, err
+	}
+	var direct bool
+	dst := r.data[:cap(r.data)]
+	if len(buf) >= len(dst) {
+		// Uncompress directly into buf.
+		direct = true
+		dst = buf
+	}
+	dst, err = block.Uncompress(r.frame, dst, r.dict, true)
+	if err != nil {
+		return 0, err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		if len(r.dict)+len(dst) > 128*1024 {
+			preserveSize := 64*1024 - len(dst)
+			if preserveSize < 0 {
+				preserveSize = 0
+			}
+			r.dict = r.dict[len(r.dict)-preserveSize:]
+		}
+		r.dict = append(r.dict, dst...)
+	}
+	r.cum += uint32(len(dst))
+	if direct {
+		return len(dst), nil
+	}
+	r.data = dst
+	return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but reading from reader instead.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+	if r.data != nil {
+		lz4block.Put(r.data)
+		r.data = nil
+	}
+	r.frame.Reset(r.num)
+	r.state.reset()
+	r.src = reader
+	r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	switch r.state.state {
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	defer r.state.nextd(&err)
+
+	var data []byte
+	if r.isNotConcurrent() {
+		size := r.frame.Descriptor.Flags.BlockSizeIndex()
+		data = size.Get()
+		defer lz4block.Put(data)
+	}
+	for {
+		var bn int
+		var dst []byte
+		if r.isNotConcurrent() {
+			bn, err = r.read(data)
+			dst = data[:bn]
+		} else {
+			lz4block.Put(dst)
+			dst = <-r.reads
+			bn = len(dst)
+			if bn == 0 {
+				// No uncompressed data: something went wrong or we are done.
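+				// ErrorR is expected to yield io.EOF on a clean end of frame,
+				// or the first block error otherwise.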
+ err = r.frame.Blocks.ErrorR() + } + } + switch err { + case nil: + case io.EOF: + err = r.frame.CloseR(r.src) + return + default: + return + } + r.handler(bn) + bn, err = w.Write(dst) + n += int64(bn) + if err != nil { + return + } + } +} + +// ValidFrameHeader returns a bool indicating if the given bytes slice matches a LZ4 header. +func ValidFrameHeader(in []byte) (bool, error) { + f := lz4stream.NewFrame() + err := f.ParseHeaders(bytes.NewReader(in)) + if err == nil { + return true, nil + } + if err == lz4errors.ErrInvalidFrame { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go new file mode 100644 index 000000000..d94f04d05 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state.go @@ -0,0 +1,75 @@ +package lz4 + +import ( + "errors" + "fmt" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4errors" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go + +const ( + noState aState = iota // uninitialized reader + errorState // unrecoverable error encountered + newState // instantiated object + readState // reading data + writeState // writing data + closedState // all done +) + +type ( + aState uint8 + _State struct { + states []aState + state aState + err error + } +) + +func (s *_State) init(states []aState) { + s.states = states + s.state = states[0] +} + +func (s *_State) reset() { + s.state = s.states[0] + s.err = nil +} + +// next sets the state to the next one unless it is passed a non nil error. +// It returns whether or not it is in error. +func (s *_State) next(err error) bool { + if err != nil { + s.err = fmt.Errorf("%s: %w", s.state, err) + s.state = errorState + return true + } + s.state = s.states[s.state] + return false +} + +// nextd is like next but for defers. +func (s *_State) nextd(errp *error) bool { + return errp != nil && s.next(*errp) +} + +// check sets s in error if not already in error and if the error is not nil or io.EOF, +func (s *_State) check(errp *error) { + if s.state == errorState || errp == nil { + return + } + if err := *errp; err != nil { + s.err = fmt.Errorf("%w[%s]", err, s.state) + if !errors.Is(err, io.EOF) { + s.state = errorState + } + } +} + +func (s *_State) fail() error { + s.state = errorState + s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state) + return s.err +} diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go new file mode 100644 index 000000000..75fb82892 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT. + +package lz4 + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
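+	// (Run go generate to invoke the stringer directive declared in state.go.)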
+ var x [1]struct{} + _ = x[noState-0] + _ = x[errorState-1] + _ = x[newState-2] + _ = x[readState-3] + _ = x[writeState-4] + _ = x[closedState-5] +} + +const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState" + +var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55} + +func (i aState) String() string { + if i >= aState(len(_aState_index)-1) { + return "aState(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _aState_name[_aState_index[i]:_aState_index[i+1]] +} diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go new file mode 100644 index 000000000..77699f2b5 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -0,0 +1,238 @@ +package lz4 + +import ( + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +var writerStates = []aState{ + noState: newState, + newState: writeState, + writeState: closedState, + closedState: newState, + errorState: newState, +} + +// NewWriter returns a new LZ4 frame encoder. +func NewWriter(w io.Writer) *Writer { + zw := &Writer{frame: lz4stream.NewFrame()} + zw.state.init(writerStates) + _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone) + zw.Reset(w) + return zw +} + +// Writer allows writing an LZ4 stream. +type Writer struct { + state _State + src io.Writer // destination writer + level lz4block.CompressionLevel // how hard to try + num int // concurrency level + frame *lz4stream.Frame // frame being built + data []byte // pending data + idx int // size of pending data + handler func(int) + legacy bool +} + +func (*Writer) private() {} + +func (w *Writer) Apply(options ...Option) (err error) { + defer w.state.check(&err) + switch w.state.state { + case newState: + case errorState: + return w.state.err + default: + return lz4errors.ErrOptionClosedOrError + } + w.Reset(w.src) + for _, o := range options { + if err = o(w); err != nil { + return + } + } + return +} + +func (w *Writer) isNotConcurrent() bool { + return w.num == 1 +} + +// init sets up the Writer when in newState. It does not change the Writer state. +func (w *Writer) init() error { + w.frame.InitW(w.src, w.num, w.legacy) + size := w.frame.Descriptor.Flags.BlockSizeIndex() + w.data = size.Get() + w.idx = 0 + return w.frame.Descriptor.Write(w.frame, w.src) +} + +func (w *Writer) Write(buf []byte) (n int, err error) { + defer w.state.check(&err) + switch w.state.state { + case writeState: + case closedState, errorState: + return 0, w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } + default: + return 0, w.state.fail() + } + + zn := len(w.data) + for len(buf) > 0 { + if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. + if err = w.write(buf[:zn], false); err != nil { + return + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(w.data[w.idx:], buf) + n += m + w.idx += m + buf = buf[m:] + + if w.idx < len(w.data) { + // Buffer not filled. + return + } + + // Buffer full. 
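+		// Compress and emit it; safe=true lets the concurrent path return
+		// w.data to the buffer pool once the block has been written.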
+		if err = w.write(w.data, true); err != nil {
+			return
+		}
+		if !w.isNotConcurrent() {
+			size := w.frame.Descriptor.Flags.BlockSizeIndex()
+			w.data = size.Get()
+		}
+		w.idx = 0
+	}
+	return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+	if w.isNotConcurrent() {
+		block := w.frame.Blocks.Block
+		err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+		w.handler(len(block.Data))
+		return err
+	}
+	c := make(chan *lz4stream.FrameDataBlock)
+	w.frame.Blocks.Blocks <- c
+	go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+		b := lz4stream.NewFrameDataBlock(w.frame)
+		c <- b.Compress(w.frame, data, w.level)
+		<-c
+		w.handler(len(b.Data))
+		b.Close(w.frame)
+		if safe {
+			// safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed
+			lz4block.Put(data)
+		}
+	}(c, data, safe)
+
+	return nil
+}
+
+// Flush any buffered data to the underlying writer immediately.
+func (w *Writer) Flush() (err error) {
+	switch w.state.state {
+	case writeState:
+	case errorState:
+		return w.state.err
+	default:
+		return nil
+	}
+
+	if w.idx > 0 {
+		// Flush pending data, disable w.data freeing as it is done later on.
+		if err = w.write(w.data[:w.idx], false); err != nil {
+			return err
+		}
+		w.idx = 0
+	}
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying writer
+// without closing it.
+func (w *Writer) Close() error {
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	err := w.frame.CloseW(w.src, w.num)
+	// It is now safe to free the buffer.
+	if w.data != nil {
+		lz4block.Put(w.data)
+		w.data = nil
+	}
+	return err
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but writing to writer instead.
+// Reset keeps the previously applied options.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+	w.frame.Reset(w.num)
+	w.state.reset()
+	w.src = writer
+}
+
+// ReadFrom efficiently reads from r and compresses it into the Writer destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	switch w.state.state {
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+	defer w.state.check(&err)
+
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	var done bool
+	var rn int
+	data := size.Get()
+	if w.isNotConcurrent() {
+		// Keep the same buffer for the whole process.
+		defer lz4block.Put(data)
+	}
+	for !done {
+		rn, err = io.ReadFull(r, data)
+		switch err {
+		case nil:
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			done = true
+		default:
+			return
+		}
+		n += int64(rn)
+		err = w.write(data[:rn], true)
+		if err != nil {
+			return
+		}
+		w.handler(rn)
+		if !done && !w.isNotConcurrent() {
+			// The buffer will be returned automatically by goroutines (safe=true),
+			// so get a new one for the next round.
+ data = size.Get() + } + } + return +} diff --git a/vendor/github.com/pion/dtls/v2/.editorconfig b/vendor/github.com/pion/dtls/v2/.editorconfig new file mode 100644 index 000000000..d2b32061a --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/.editorconfig @@ -0,0 +1,21 @@ +# http://editorconfig.org/ + +root = true + +[*] +charset = utf-8 +insert_final_newline = true +trim_trailing_whitespace = true +end_of_line = lf + +[*.go] +indent_style = tab +indent_size = 4 + +[{*.yml,*.yaml}] +indent_style = space +indent_size = 2 + +# Makefiles always use tabs for indentation +[Makefile] +indent_style = tab diff --git a/vendor/github.com/pion/dtls/v2/.gitignore b/vendor/github.com/pion/dtls/v2/.gitignore new file mode 100644 index 000000000..f977e7485 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/.gitignore @@ -0,0 +1,25 @@ +### JetBrains IDE ### +##################### +.idea/ + +### Emacs Temporary Files ### +############################# +*~ + +### Folders ### +############### +bin/ +vendor/ +node_modules/ + +### Files ### +############# +*.ivf +*.ogg +tags +cover.out +*.sw[poe] +*.wasm +examples/sfu-ws/cert.pem +examples/sfu-ws/key.pem +wasm_exec.js diff --git a/vendor/github.com/pion/dtls/v2/.golangci.yml b/vendor/github.com/pion/dtls/v2/.golangci.yml new file mode 100644 index 000000000..48696f16b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/.golangci.yml @@ -0,0 +1,116 @@ +linters-settings: + govet: + check-shadowing: true + misspell: + locale: US + exhaustive: + default-signifies-exhaustive: true + gomodguard: + blocked: + modules: + - github.com/pkg/errors: + recommendations: + - errors + +linters: + enable: + - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + - bidichk # Checks for dangerous unicode character sequences + - bodyclose # checks whether HTTP response body is closed successfully + - contextcheck # check the function whether use a non-inherited context + - decorder # check declaration order and count of types, constants, variables and functions + - depguard # Go linter that checks if package imports are in a list of acceptable packages + - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + - dupl # Tool for code clone detection + - durationcheck # check for two durations multiplied together + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted. + - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - exhaustive # check exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - forcetypeassert # finds forced type assertions + - gci # Gci control golang package import order and make it always deterministic. 
+ - gochecknoglobals # Checks that no globals are present in Go code + - gochecknoinits # Checks that no init functions are present in Go code + - gocognit # Computes and checks the cognitive complexity of functions + - goconst # Finds repeated strings that could be replaced by a constant + - gocritic # The most opinionated Go source code linter + - godox # Tool for detection of FIXME, TODO and other comment keywords + - goerr113 # Golang linter to check the errors handling expressions + - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + - gofumpt # Gofumpt checks whether code was gofumpt-ed. + - goheader # Checks is file header matches to pattern + - goimports # Goimports does everything that gofmt does. Additionally it checks unused imports + - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. + - goprintffuncname # Checks that printf-like functions are named with `f` at the end + - gosec # Inspects source code for security problems + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - grouper # An analyzer to analyze expression groups. + - importas # Enforces consistent import aliases + - ineffassign # Detects when assignments to existing variables are not used + - misspell # Finds commonly misspelled English words in comments + - nakedret # Finds naked returns in functions greater than a specified function length + - nilerr # Finds the code that returns nil even if it checks that the error is not nil. + - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. + - noctx # noctx finds sending http request without context.Context + - predeclared # find code that shadows one of Go's predeclared identifiers + - revive # golint replacement, finds style mistakes + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - stylecheck # Stylecheck is a replacement for golint + - tagliatelle # Checks the struct tags. + - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unconvert # Remove unnecessary type conversions + - unparam # Reports unused function parameters + - unused # Checks Go code for unused constants, variables, functions and types + - wastedassign # wastedassign finds wasted assignment statements + - whitespace # Tool for detection of leading and trailing whitespace + disable: + - containedctx # containedctx is a linter that detects struct contained context.Context field + - cyclop # checks function and package cyclomatic complexity + - exhaustivestruct # Checks if all struct's fields are initialized + - forbidigo # Forbids identifiers + - funlen # Tool for detection of long functions + - gocyclo # Computes and checks the cyclomatic complexity of functions + - godot # Check if comments end in a period + - gomnd # An analyzer to detect magic numbers. 
+ - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - ireturn # Accept Interfaces, Return Concrete Types + - lll # Reports long lines + - maintidx # maintidx measures the maintainability index of each function. + - makezero # Finds slice declarations with non-zero initial length + - maligned # Tool to detect Go structs that would take less memory if their fields were sorted + - nestif # Reports deeply nested if statements + - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity + - nolintlint # Reports ill-formed or insufficient nolint directives + - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - prealloc # Finds slice declarations that could potentially be preallocated + - promlinter # Check Prometheus metrics naming via promlint + - rowserrcheck # checks whether Err of rows is checked successfully + - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. + - testpackage # linter that makes you use a separate _test package + - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - varnamelen # checks that the length of a variable's name matches its scope + - wrapcheck # Checks that errors returned from external packages are wrapped + - wsl # Whitespace Linter - Forces you to use empty lines! + +issues: + exclude-use-default: false + exclude-rules: + # Allow complex tests, better to be self contained + - path: _test\.go + linters: + - gocognit + + # Allow complex main function in examples + - path: examples + text: "of func `main` is high" + linters: + - gocognit + +run: + skip-dirs-use-default: false diff --git a/vendor/github.com/pion/dtls/v2/.goreleaser.yml b/vendor/github.com/pion/dtls/v2/.goreleaser.yml new file mode 100644 index 000000000..2caa5fbd3 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/.goreleaser.yml @@ -0,0 +1,2 @@ +builds: +- skip: true diff --git a/vendor/github.com/pion/dtls/v2/AUTHORS.txt b/vendor/github.com/pion/dtls/v2/AUTHORS.txt new file mode 100644 index 000000000..2f5448e7f --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/AUTHORS.txt @@ -0,0 +1,55 @@ +# Thank you to everyone that made Pion possible. If you are interested in contributing +# we would love to have you https://github.com/pion/webrtc/wiki/Contributing +# +# This file is auto generated, using git to list all individuals contributors. 
+# see https://github.com/pion/.goassets/blob/master/scripts/generate-authors.sh for the scripting +Aleksandr Razumov +alvarowolfx +Arlo Breault +Atsushi Watanabe +backkem +bjdgyc +boks1971 +Bragadeesh +Carson Hoffman +Cecylia Bocovich +Chris Hiszpanski +Daniele Sluijters +folbrich +Hayden James +Hugo Arregui +Hugo Arregui +igolaizola <11333576+igolaizola@users.noreply.github.com> +Jeffrey Stoke +Jeroen de Bruijn +Jeroen de Bruijn +Jim Wert +jinleileiking +Jozef Kralik +Julien Salleyron +Juliusz Chroboczek +Kegan Dougal +Kevin Wang +Lander Noterman +Len +Lukas Lihotzki +ManuelBk <26275612+ManuelBk@users.noreply.github.com> +Michael Zabka +Michiel De Backker +Rachel Chen +Robert Eperjesi +Ryan Gordon +Sam Lancia +Sean DuBois +Sean DuBois +Shelikhoo +Stefan Tatschner +Steffen Vogel +Vadim +Vadim Filimonov +wmiao +ZHENK +吕海涛 + +# List of contributors not appearing in Git history + diff --git a/vendor/github.com/pion/dtls/v2/LICENSE b/vendor/github.com/pion/dtls/v2/LICENSE new file mode 100644 index 000000000..ab602974d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pion/dtls/v2/README.md b/vendor/github.com/pion/dtls/v2/README.md new file mode 100644 index 000000000..01631a5fc --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/README.md @@ -0,0 +1,139 @@ +

+
+# Pion DTLS
+
+A Go implementation of DTLS
+ +Native [DTLS 1.2][rfc6347] implementation in the Go programming language. + +A long term goal is a professional security review, and maybe an inclusion in stdlib. + +[rfc6347]: https://tools.ietf.org/html/rfc6347 + +### Goals/Progress +This will only be targeting DTLS 1.2, and the most modern/common cipher suites. +We would love contributions that fall under the 'Planned Features' and any bug fixes! + +#### Current features +* DTLS 1.2 Client/Server +* Key Exchange via ECDHE(curve25519, nistp256, nistp384) and PSK +* Packet loss and re-ordering is handled during handshaking +* Key export ([RFC 5705][rfc5705]) +* Serialization and Resumption of sessions +* Extended Master Secret extension ([RFC 7627][rfc7627]) +* ALPN extension ([RFC 7301][rfc7301]) + +[rfc5705]: https://tools.ietf.org/html/rfc5705 +[rfc7627]: https://tools.ietf.org/html/rfc7627 +[rfc7301]: https://tools.ietf.org/html/rfc7301 + +#### Supported ciphers + +##### ECDHE + +* TLS_ECDHE_ECDSA_WITH_AES_128_CCM ([RFC 6655][rfc6655]) +* TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 ([RFC 6655][rfc6655]) +* TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ([RFC 5289][rfc5289]) +* TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ([RFC 5289][rfc5289]) +* TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ([RFC 5289][rfc5289]) +* TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ([RFC 5289][rfc5289]) +* TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA ([RFC 8422][rfc8422]) +* TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA ([RFC 8422][rfc8422]) + +##### PSK + +* TLS_PSK_WITH_AES_128_CCM ([RFC 6655][rfc6655]) +* TLS_PSK_WITH_AES_128_CCM_8 ([RFC 6655][rfc6655]) +* TLS_PSK_WITH_AES_256_CCM_8 ([RFC 6655][rfc6655]) +* TLS_PSK_WITH_AES_128_GCM_SHA256 ([RFC 5487][rfc5487]) +* TLS_PSK_WITH_AES_128_CBC_SHA256 ([RFC 5487][rfc5487]) + +##### ECDHE & PSK + +* TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 ([RFC 5489][rfc5489]) + +[rfc5289]: https://tools.ietf.org/html/rfc5289 +[rfc8422]: https://tools.ietf.org/html/rfc8422 +[rfc6655]: https://tools.ietf.org/html/rfc6655 +[rfc5487]: https://tools.ietf.org/html/rfc5487 +[rfc5489]: https://tools.ietf.org/html/rfc5489 + +#### Planned Features +* Chacha20Poly1305 + +#### Excluded Features +* DTLS 1.0 +* Renegotiation +* Compression + +### Using + +This library needs at least Go 1.13, and you should have [Go modules +enabled](https://github.com/golang/go/wiki/Modules). + +#### Pion DTLS +For a DTLS 1.2 Server that listens on 127.0.0.1:4444 +```sh +go run examples/listen/selfsign/main.go +``` + +For a DTLS 1.2 Client that connects to 127.0.0.1:4444 +```sh +go run examples/dial/selfsign/main.go +``` + +#### OpenSSL +Pion DTLS can connect to itself and OpenSSL. 
+``` + // Generate a certificate + openssl ecparam -out key.pem -name prime256v1 -genkey + openssl req -new -sha256 -key key.pem -out server.csr + openssl x509 -req -sha256 -days 365 -in server.csr -signkey key.pem -out cert.pem + + // Use with examples/dial/selfsign/main.go + openssl s_server -dtls1_2 -cert cert.pem -key key.pem -accept 4444 + + // Use with examples/listen/selfsign/main.go + openssl s_client -dtls1_2 -connect 127.0.0.1:4444 -debug -cert cert.pem -key key.pem +``` + +### Using with PSK +Pion DTLS also comes with examples that do key exchange via PSK + + +#### Pion DTLS +```sh +go run examples/listen/psk/main.go +``` + +```sh +go run examples/dial/psk/main.go +``` + +#### OpenSSL +``` + // Use with examples/dial/psk/main.go + openssl s_server -dtls1_2 -accept 4444 -nocert -psk abc123 -cipher PSK-AES128-CCM8 + + // Use with examples/listen/psk/main.go + openssl s_client -dtls1_2 -connect 127.0.0.1:4444 -psk abc123 -cipher PSK-AES128-CCM8 +``` + +### Contributing +Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible: + +### License +MIT License - see [LICENSE](LICENSE) for full text diff --git a/vendor/github.com/pion/dtls/v2/certificate.go b/vendor/github.com/pion/dtls/v2/certificate.go new file mode 100644 index 000000000..7211ae092 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/certificate.go @@ -0,0 +1,154 @@ +package dtls + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "fmt" + "strings" +) + +// ClientHelloInfo contains information from a ClientHello message in order to +// guide application logic in the GetCertificate. +type ClientHelloInfo struct { + // ServerName indicates the name of the server requested by the client + // in order to support virtual hosting. ServerName is only set if the + // client is using SNI (see RFC 4366, Section 3.1). + ServerName string + + // CipherSuites lists the CipherSuites supported by the client (e.g. + // TLS_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256). + CipherSuites []CipherSuiteID +} + +// CertificateRequestInfo contains information from a server's +// CertificateRequest message, which is used to demand a certificate and proof +// of control from a client. +type CertificateRequestInfo struct { + // AcceptableCAs contains zero or more, DER-encoded, X.501 + // Distinguished Names. These are the names of root or intermediate CAs + // that the server wishes the returned certificate to be signed by. An + // empty slice indicates that the server has no preference. + AcceptableCAs [][]byte +} + +// SupportsCertificate returns nil if the provided certificate is supported by +// the server that sent the CertificateRequest. Otherwise, it returns an error +// describing the reason for the incompatibility. +// NOTE: original src: https://github.com/golang/go/blob/29b9a328d268d53833d2cc063d1d8b4bf6852675/src/crypto/tls/common.go#L1273 +func (cri *CertificateRequestInfo) SupportsCertificate(c *tls.Certificate) error { + if len(cri.AcceptableCAs) == 0 { + return nil + } + + for j, cert := range c.Certificate { + x509Cert := c.Leaf + // Parse the certificate if this isn't the leaf node, or if + // chain.Leaf was nil. 
+ if j != 0 || x509Cert == nil { + var err error + if x509Cert, err = x509.ParseCertificate(cert); err != nil { + return fmt.Errorf("failed to parse certificate #%d in the chain: %w", j, err) + } + } + + for _, ca := range cri.AcceptableCAs { + if bytes.Equal(x509Cert.RawIssuer, ca) { + return nil + } + } + } + return errNotAcceptableCertificateChain +} + +func (c *handshakeConfig) setNameToCertificateLocked() { + nameToCertificate := make(map[string]*tls.Certificate) + for i := range c.localCertificates { + cert := &c.localCertificates[i] + x509Cert := cert.Leaf + if x509Cert == nil { + var parseErr error + x509Cert, parseErr = x509.ParseCertificate(cert.Certificate[0]) + if parseErr != nil { + continue + } + } + if len(x509Cert.Subject.CommonName) > 0 { + nameToCertificate[strings.ToLower(x509Cert.Subject.CommonName)] = cert + } + for _, san := range x509Cert.DNSNames { + nameToCertificate[strings.ToLower(san)] = cert + } + } + c.nameToCertificate = nameToCertificate +} + +func (c *handshakeConfig) getCertificate(clientHelloInfo *ClientHelloInfo) (*tls.Certificate, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.localGetCertificate != nil && + (len(c.localCertificates) == 0 || len(clientHelloInfo.ServerName) > 0) { + cert, err := c.localGetCertificate(clientHelloInfo) + if cert != nil || err != nil { + return cert, err + } + } + + if c.nameToCertificate == nil { + c.setNameToCertificateLocked() + } + + if len(c.localCertificates) == 0 { + return nil, errNoCertificates + } + + if len(c.localCertificates) == 1 { + // There's only one choice, so no point doing any work. + return &c.localCertificates[0], nil + } + + if len(clientHelloInfo.ServerName) == 0 { + return &c.localCertificates[0], nil + } + + name := strings.TrimRight(strings.ToLower(clientHelloInfo.ServerName), ".") + + if cert, ok := c.nameToCertificate[name]; ok { + return cert, nil + } + + // try replacing labels in the name with wildcards until we get a + // match. + labels := strings.Split(name, ".") + for i := range labels { + labels[i] = "*" + candidate := strings.Join(labels, ".") + if cert, ok := c.nameToCertificate[candidate]; ok { + return cert, nil + } + } + + // If nothing matches, return the first certificate. + return &c.localCertificates[0], nil +} + +// NOTE: original src: https://github.com/golang/go/blob/29b9a328d268d53833d2cc063d1d8b4bf6852675/src/crypto/tls/handshake_client.go#L974 +func (c *handshakeConfig) getClientCertificate(cri *CertificateRequestInfo) (*tls.Certificate, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.localGetClientCertificate != nil { + return c.localGetClientCertificate(cri) + } + + for i := range c.localCertificates { + chain := c.localCertificates[i] + if err := cri.SupportsCertificate(&chain); err != nil { + continue + } + return &chain, nil + } + + // No acceptable certificate found. Don't send a certificate. 
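+	// (Returning an empty tls.Certificate sends an empty certificate list, mirroring crypto/tls.)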
+	return new(tls.Certificate), nil
+}
diff --git a/vendor/github.com/pion/dtls/v2/cipher_suite.go b/vendor/github.com/pion/dtls/v2/cipher_suite.go
new file mode 100644
index 000000000..ddea8f6bb
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/cipher_suite.go
@@ -0,0 +1,273 @@
+package dtls
+
+import (
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/tls"
+	"fmt"
+	"hash"
+
+	"github.com/pion/dtls/v2/internal/ciphersuite"
+	"github.com/pion/dtls/v2/pkg/crypto/clientcertificate"
+	"github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+)
+
+// CipherSuiteID is an ID for our supported CipherSuites
+type CipherSuiteID = ciphersuite.ID
+
+// Supported Cipher Suites
+const (
+	// AES-128-CCM
+	TLS_ECDHE_ECDSA_WITH_AES_128_CCM   CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_CCM   //nolint:revive,stylecheck
+	TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 //nolint:revive,stylecheck
+
+	// AES-128-GCM-SHA256
+	TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck
+	TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256   CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256   //nolint:revive,stylecheck
+
+	TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 //nolint:revive,stylecheck
+	TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384   CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384   //nolint:revive,stylecheck
+
+	// AES-256-CBC-SHA
+	TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA CipherSuiteID = ciphersuite.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA //nolint:revive,stylecheck
+	TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA   CipherSuiteID = ciphersuite.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA   //nolint:revive,stylecheck
+
+	TLS_PSK_WITH_AES_128_CCM        CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CCM        //nolint:revive,stylecheck
+	TLS_PSK_WITH_AES_128_CCM_8      CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CCM_8      //nolint:revive,stylecheck
+	TLS_PSK_WITH_AES_256_CCM_8      CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_256_CCM_8      //nolint:revive,stylecheck
+	TLS_PSK_WITH_AES_128_GCM_SHA256 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_GCM_SHA256 //nolint:revive,stylecheck
+	TLS_PSK_WITH_AES_128_CBC_SHA256 CipherSuiteID = ciphersuite.TLS_PSK_WITH_AES_128_CBC_SHA256 //nolint:revive,stylecheck
+
+	TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 CipherSuiteID = ciphersuite.TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 //nolint:revive,stylecheck
+)
+
+// CipherSuiteAuthenticationType controls what authentication method is used during the handshake for a CipherSuite
+type CipherSuiteAuthenticationType = ciphersuite.AuthenticationType
+
+// AuthenticationType Enums
+const (
+	CipherSuiteAuthenticationTypeCertificate  CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypeCertificate
+	CipherSuiteAuthenticationTypePreSharedKey CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypePreSharedKey
+	CipherSuiteAuthenticationTypeAnonymous    CipherSuiteAuthenticationType = ciphersuite.AuthenticationTypeAnonymous
+)
+
+// CipherSuiteKeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake for a CipherSuite
+type CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithm
+
+// CipherSuiteKeyExchangeAlgorithm Bitmask
+const (
+	CipherSuiteKeyExchangeAlgorithmNone  CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmNone
+	CipherSuiteKeyExchangeAlgorithmPsk   CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmPsk
+	CipherSuiteKeyExchangeAlgorithmEcdhe CipherSuiteKeyExchangeAlgorithm = ciphersuite.KeyExchangeAlgorithmEcdhe
+)
+
+var _ = allCipherSuites() // Keeps allCipherSuites referenced; it is otherwise only used by Go 1.14+ build-tagged code.
+
+// CipherSuite is an interface that all DTLS CipherSuites must satisfy
+type CipherSuite interface {
+	// String of CipherSuite, only used for logging
+	String() string
+
+	// ID of CipherSuite.
+	ID() CipherSuiteID
+
+	// What type of Certificate does this CipherSuite use
+	CertificateType() clientcertificate.Type
+
+	// What Hash function is used during verification
+	HashFunc() func() hash.Hash
+
+	// AuthenticationType controls what authentication method is used during the handshake
+	AuthenticationType() CipherSuiteAuthenticationType
+
+	// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake
+	KeyExchangeAlgorithm() CipherSuiteKeyExchangeAlgorithm
+
+	// ECC (Elliptic Curve Cryptography) determines whether ECC extensions will be sent during the handshake.
+	// https://datatracker.ietf.org/doc/html/rfc4492#page-10
+	ECC() bool
+
+	// Called when keying material has been generated, should initialize the internal cipher
+	Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error
+	IsInitialized() bool
+	Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error)
+	Decrypt(in []byte) ([]byte, error)
+}
+
+// CipherSuiteName provides the same functionality as tls.CipherSuiteName
+// that appeared first in Go 1.14.
+//
+// Our implementation differs slightly in that it takes in a CipherSuiteID,
+// like the rest of our library, instead of a uint16 like crypto/tls.
+func CipherSuiteName(id CipherSuiteID) string {
+	suite := cipherSuiteForID(id, nil)
+	if suite != nil {
+		return suite.String()
+	}
+	return fmt.Sprintf("0x%04X", uint16(id))
+}
+
+// Taken from https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
+// A cipherSuite is a specific combination of key agreement, cipher and MAC
+// function.
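+// cipherSuiteForID returns nil if the ID matches neither a built-in suite nor one of the customCiphers.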
+func cipherSuiteForID(id CipherSuiteID, customCiphers func() []CipherSuite) CipherSuite { + switch id { //nolint:exhaustive + case TLS_ECDHE_ECDSA_WITH_AES_128_CCM: + return ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm() + case TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8: + return ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm8() + case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{} + case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{} + case TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: + return &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{} + case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + return &ciphersuite.TLSEcdheRsaWithAes256CbcSha{} + case TLS_PSK_WITH_AES_128_CCM: + return ciphersuite.NewTLSPskWithAes128Ccm() + case TLS_PSK_WITH_AES_128_CCM_8: + return ciphersuite.NewTLSPskWithAes128Ccm8() + case TLS_PSK_WITH_AES_256_CCM_8: + return ciphersuite.NewTLSPskWithAes256Ccm8() + case TLS_PSK_WITH_AES_128_GCM_SHA256: + return &ciphersuite.TLSPskWithAes128GcmSha256{} + case TLS_PSK_WITH_AES_128_CBC_SHA256: + return &ciphersuite.TLSPskWithAes128CbcSha256{} + case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{} + case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{} + case TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256: + return ciphersuite.NewTLSEcdhePskWithAes128CbcSha256() + } + + if customCiphers != nil { + for _, c := range customCiphers() { + if c.ID() == id { + return c + } + } + } + + return nil +} + +// CipherSuites we support in order of preference +func defaultCipherSuites() []CipherSuite { + return []CipherSuite{ + &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{}, + &ciphersuite.TLSEcdheRsaWithAes256CbcSha{}, + &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{}, + &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{}, + } +} + +func allCipherSuites() []CipherSuite { + return []CipherSuite{ + ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm(), + ciphersuite.NewTLSEcdheEcdsaWithAes128Ccm8(), + &ciphersuite.TLSEcdheEcdsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheRsaWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheEcdsaWithAes256CbcSha{}, + &ciphersuite.TLSEcdheRsaWithAes256CbcSha{}, + ciphersuite.NewTLSPskWithAes128Ccm(), + ciphersuite.NewTLSPskWithAes128Ccm8(), + ciphersuite.NewTLSPskWithAes256Ccm8(), + &ciphersuite.TLSPskWithAes128GcmSha256{}, + &ciphersuite.TLSEcdheEcdsaWithAes256GcmSha384{}, + &ciphersuite.TLSEcdheRsaWithAes256GcmSha384{}, + } +} + +func cipherSuiteIDs(cipherSuites []CipherSuite) []uint16 { + rtrn := []uint16{} + for _, c := range cipherSuites { + rtrn = append(rtrn, uint16(c.ID())) + } + return rtrn +} + +func parseCipherSuites(userSelectedSuites []CipherSuiteID, customCipherSuites func() []CipherSuite, includeCertificateSuites, includePSKSuites bool) ([]CipherSuite, error) { + cipherSuitesForIDs := func(ids []CipherSuiteID) ([]CipherSuite, error) { + cipherSuites := []CipherSuite{} + for _, id := range ids { + c := cipherSuiteForID(id, nil) + if c == nil { + return nil, &invalidCipherSuiteError{id} + } + cipherSuites = append(cipherSuites, c) + } + return cipherSuites, nil + } + + var ( + cipherSuites []CipherSuite + err error + i int + ) + if userSelectedSuites != nil { + cipherSuites, err = cipherSuitesForIDs(userSelectedSuites) + if err != nil { + return nil, err + } + } else { + cipherSuites = defaultCipherSuites() + } + + // Put 
CustomCipherSuites before ID selected suites + if customCipherSuites != nil { + cipherSuites = append(customCipherSuites(), cipherSuites...) + } + + var foundCertificateSuite, foundPSKSuite, foundAnonymousSuite bool + for _, c := range cipherSuites { + switch { + case includeCertificateSuites && c.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate: + foundCertificateSuite = true + case includePSKSuites && c.AuthenticationType() == CipherSuiteAuthenticationTypePreSharedKey: + foundPSKSuite = true + case c.AuthenticationType() == CipherSuiteAuthenticationTypeAnonymous: + foundAnonymousSuite = true + default: + continue + } + cipherSuites[i] = c + i++ + } + + switch { + case includeCertificateSuites && !foundCertificateSuite && !foundAnonymousSuite: + return nil, errNoAvailableCertificateCipherSuite + case includePSKSuites && !foundPSKSuite: + return nil, errNoAvailablePSKCipherSuite + case i == 0: + return nil, errNoAvailableCipherSuites + } + + return cipherSuites[:i], nil +} + +func filterCipherSuitesForCertificate(cert *tls.Certificate, cipherSuites []CipherSuite) []CipherSuite { + if cert == nil || cert.PrivateKey == nil { + return cipherSuites + } + var certType clientcertificate.Type + switch cert.PrivateKey.(type) { + case ed25519.PrivateKey, *ecdsa.PrivateKey: + certType = clientcertificate.ECDSASign + case *rsa.PrivateKey: + certType = clientcertificate.RSASign + } + + filtered := []CipherSuite{} + for _, c := range cipherSuites { + if c.AuthenticationType() != CipherSuiteAuthenticationTypeCertificate || certType == c.CertificateType() { + filtered = append(filtered, c) + } + } + return filtered +} diff --git a/vendor/github.com/pion/dtls/v2/cipher_suite_go114.go b/vendor/github.com/pion/dtls/v2/cipher_suite_go114.go new file mode 100644 index 000000000..5c63c0913 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/cipher_suite_go114.go @@ -0,0 +1,41 @@ +//go:build go1.14 +// +build go1.14 + +package dtls + +import ( + "crypto/tls" +) + +// VersionDTLS12 is the DTLS version in the same style as +// VersionTLSXX from crypto/tls +const VersionDTLS12 = 0xfefd + +// Convert from our cipherSuite interface to a tls.CipherSuite struct +func toTLSCipherSuite(c CipherSuite) *tls.CipherSuite { + return &tls.CipherSuite{ + ID: uint16(c.ID()), + Name: c.String(), + SupportedVersions: []uint16{VersionDTLS12}, + Insecure: false, + } +} + +// CipherSuites returns a list of cipher suites currently implemented by this +// package, excluding those with security issues, which are returned by +// InsecureCipherSuites. +func CipherSuites() []*tls.CipherSuite { + suites := allCipherSuites() + res := make([]*tls.CipherSuite, len(suites)) + for i, c := range suites { + res[i] = toTLSCipherSuite(c) + } + return res +} + +// InsecureCipherSuites returns a list of cipher suites currently implemented by +// this package and which have security issues. +func InsecureCipherSuites() []*tls.CipherSuite { + var res []*tls.CipherSuite + return res +} diff --git a/vendor/github.com/pion/dtls/v2/codecov.yml b/vendor/github.com/pion/dtls/v2/codecov.yml new file mode 100644 index 000000000..085200a48 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/codecov.yml @@ -0,0 +1,20 @@ +# +# DO NOT EDIT THIS FILE +# +# It is automatically copied from https://github.com/pion/.goassets repository. +# + +coverage: + status: + project: + default: + # Allow decreasing 2% of total coverage to avoid noise. 
+        threshold: 2%
+    patch:
+      default:
+        target: 70%
+        only_pulls: true
+
+ignore:
+  - "examples/*"
+  - "examples/**/*"
diff --git a/vendor/github.com/pion/dtls/v2/compression_method.go b/vendor/github.com/pion/dtls/v2/compression_method.go
new file mode 100644
index 000000000..693eb7a52
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/compression_method.go
@@ -0,0 +1,9 @@
+package dtls
+
+import "github.com/pion/dtls/v2/pkg/protocol"
+
+func defaultCompressionMethods() []*protocol.CompressionMethod {
+	return []*protocol.CompressionMethod{
+		{},
+	}
+}
diff --git a/vendor/github.com/pion/dtls/v2/config.go b/vendor/github.com/pion/dtls/v2/config.go
new file mode 100644
index 000000000..13a172251
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/config.go
@@ -0,0 +1,250 @@
+package dtls
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"crypto/ed25519"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"io"
+	"time"
+
+	"github.com/pion/dtls/v2/pkg/crypto/elliptic"
+	"github.com/pion/logging"
+)
+
+const keyLogLabelTLS12 = "CLIENT_RANDOM"
+
+// Config is used to configure a DTLS client or server.
+// After a Config is passed to a DTLS function, it must not be modified.
+type Config struct {
+	// Certificates contains the certificate chain to present to the other side of the connection.
+	// A server MUST set this if PSK is non-nil;
+	// a client SHOULD set this so CertificateRequests can be handled if PSK is non-nil.
+	Certificates []tls.Certificate
+
+	// CipherSuites is a list of supported cipher suites.
+	// If CipherSuites is nil, a default list is used.
+	CipherSuites []CipherSuiteID
+
+	// CustomCipherSuites is a list of CipherSuites that can be
+	// provided by the user. This allows users to use cipher suites that
+	// are reserved for private usage.
+	CustomCipherSuites func() []CipherSuite
+
+	// SignatureSchemes contains the signature and hash schemes that the peer requests to verify.
+	SignatureSchemes []tls.SignatureScheme
+
+	// SRTPProtectionProfiles are the supported protection profiles.
+	// Clients will send this via use_srtp and assert that the server properly responds.
+	// Servers will assert that clients send one of these profiles and will respond as needed.
+	SRTPProtectionProfiles []SRTPProtectionProfile
+
+	// ClientAuth determines the server's policy for
+	// TLS Client Authentication. The default is NoClientCert.
+	ClientAuth ClientAuthType
+
+	// ExtendedMasterSecret determines whether the "Extended Master Secret" extension
+	// should be disabled, requested, or required (default: requested).
+	ExtendedMasterSecret ExtendedMasterSecretType
+
+	// FlightInterval controls how often we send outbound handshake messages
+	// (default: time.Second).
+	FlightInterval time.Duration
+
+	// PSK sets the pre-shared key used by this DTLS connection.
+	// If PSK is non-nil, only PSK CipherSuites will be used.
+	PSK             PSKCallback
+	PSKIdentityHint []byte
+
+	// InsecureSkipVerify controls whether a client verifies the
+	// server's certificate chain and host name.
+	// If InsecureSkipVerify is true, TLS accepts any certificate
+	// presented by the server and any host name in that certificate.
+	// In this mode, TLS is susceptible to man-in-the-middle attacks.
+	// This should be used only for testing.
+	InsecureSkipVerify bool
+
+	// InsecureHashes allows the use of hashing algorithms that are known
+	// to be vulnerable.
+	InsecureHashes bool
+
+	// VerifyPeerCertificate, if not nil, is called after normal
+	// certificate verification by either a client or server. It
+	// receives the certificate provided by the peer and also a flag
+	// that tells if normal verification has succeeded. If it returns a
+	// non-nil error, the handshake is aborted and that error results.
+	//
+	// If normal verification fails then the handshake will abort before
+	// considering this callback. If normal verification is disabled by
+	// setting InsecureSkipVerify, or (for a server) when ClientAuth is
+	// RequestClientCert or RequireAnyClientCert, then this callback will
+	// be considered but the verifiedChains will always be nil.
+	VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
+
+	// VerifyConnection, if not nil, is called after normal certificate
+	// verification/PSK and after VerifyPeerCertificate by either a TLS client
+	// or server. If it returns a non-nil error, the handshake is aborted
+	// and that error results.
+	//
+	// If normal verification fails then the handshake will abort before
+	// considering this callback. This callback will run for all connections
+	// regardless of InsecureSkipVerify or ClientAuth settings.
+	VerifyConnection func(*State) error
+
+	// RootCAs defines the set of root certificate authorities
+	// that one peer uses when verifying the other peer's certificates.
+	// If RootCAs is nil, TLS uses the host's root CA set.
+	RootCAs *x509.CertPool
+
+	// ClientCAs defines the set of root certificate authorities
+	// that servers use if required to verify a client certificate
+	// by the policy in ClientAuth.
+	ClientCAs *x509.CertPool
+
+	// ServerName is used to verify the hostname on the returned
+	// certificates unless InsecureSkipVerify is given.
+	ServerName string
+
+	LoggerFactory logging.LoggerFactory
+
+	// ConnectContextMaker is a function to make a context used in Dial(),
+	// Client(), Server(), and Accept(). If nil, the default ConnectContextMaker
+	// is used. It can be implemented as follows.
+	//
+	//   func ConnectContextMaker() (context.Context, func()) {
+	//       return context.WithTimeout(context.Background(), 30*time.Second)
+	//   }
+	ConnectContextMaker func() (context.Context, func())
+
+	// MTU is the length at which handshake messages will be fragmented to
+	// fit within the maximum transmission unit (default: 1200 bytes).
+	MTU int
+
+	// ReplayProtectionWindow is the size of the replay attack protection window.
+	// Duplication of the sequence number is checked in this window size.
+	// A packet with a sequence number older than this value, compared to the
+	// latest accepted packet, will be discarded (default: 64).
+	ReplayProtectionWindow int
+
+	// KeyLogWriter optionally specifies a destination for TLS master secrets
+	// in NSS key log format that can be used to allow external programs
+	// such as Wireshark to decrypt TLS connections.
+	// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
+	// Use of KeyLogWriter compromises security and should only be
+	// used for debugging.
+	KeyLogWriter io.Writer
+
+	// SessionStore is the container to store sessions for resumption.
+	SessionStore SessionStore
+
+	// SupportedProtocols is the list of application protocols the peer supports, for ALPN.
+	SupportedProtocols []string
+
+	// EllipticCurves is the list of elliptic curves to use.
+	//
+	// If an ECC ciphersuite is configured and EllipticCurves is empty,
+	// it defaults to X25519, P-256, and P-384, in this specific order.
+	EllipticCurves []elliptic.Curve
+
+	// GetCertificate returns a Certificate based on the given
+	// ClientHelloInfo.
+	// It will only be called if the client supplies SNI
+	// information or if Certificates is empty.
+	//
+	// If GetCertificate is nil or returns nil, then the certificate is
+	// retrieved from NameToCertificate. If NameToCertificate is nil, the
+	// best element of Certificates will be used.
+	GetCertificate func(*ClientHelloInfo) (*tls.Certificate, error)
+
+	// GetClientCertificate, if not nil, is called when a server requests a
+	// certificate from a client. If set, the contents of Certificates will
+	// be ignored.
+	//
+	// If GetClientCertificate returns an error, the handshake will be
+	// aborted and that error will be returned. Otherwise
+	// GetClientCertificate must return a non-nil Certificate. If
+	// Certificate.Certificate is empty then no certificate will be sent to
+	// the server. If this is unacceptable to the server then it may abort
+	// the handshake.
+	GetClientCertificate func(*CertificateRequestInfo) (*tls.Certificate, error)
+
+	// InsecureSkipVerifyHello, if true and when acting as server, allows the
+	// client to skip the hello-verify phase and receive a ServerHello after the
+	// initial ClientHello. This has implications for DoS attack resistance.
+	InsecureSkipVerifyHello bool
+}
+
+func defaultConnectContextMaker() (context.Context, func()) {
+	return context.WithTimeout(context.Background(), 30*time.Second)
+}
+
+func (c *Config) connectContextMaker() (context.Context, func()) {
+	if c.ConnectContextMaker == nil {
+		return defaultConnectContextMaker()
+	}
+	return c.ConnectContextMaker()
+}
+
+func (c *Config) includeCertificateSuites() bool {
+	return c.PSK == nil || len(c.Certificates) > 0 || c.GetCertificate != nil || c.GetClientCertificate != nil
+}
+
+const defaultMTU = 1200 // bytes
+
+var defaultCurves = []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384} //nolint:gochecknoglobals
+
+// PSKCallback is called once we have the remote's PSKIdentityHint.
+// If the remote provided none, it will be nil.
+type PSKCallback func([]byte) ([]byte, error)
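Taken together, the fields above are enough to stand up a PSK-only endpoint. As a hedged sketch (an editorial illustration, not part of this patch; the key bytes, identity hint, and address are made up), a minimal client could look like this:

```go
package main

import (
	"fmt"
	"net"

	"github.com/pion/dtls/v2"
)

func main() {
	config := &dtls.Config{
		// With PSK set and no Certificates, only PSK cipher suites are offered.
		PSK: func(hint []byte) ([]byte, error) {
			fmt.Printf("server hint: %s\n", hint)
			return []byte{0xAB, 0xC1, 0x23, 0x45}, nil // hypothetical shared key
		},
		PSKIdentityHint: []byte("example client"),
		CipherSuites:    []dtls.CipherSuiteID{dtls.TLS_PSK_WITH_AES_128_CCM_8},
	}

	addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4444}
	conn, err := dtls.Dial("udp", addr, config)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```

Because `PSK` is non-nil and no certificates are supplied, `includeCertificateSuites()` returns false and only PSK suites survive `parseCipherSuites`.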
+// ClientAuthType declares the policy the server will follow for
+// TLS Client Authentication.
+type ClientAuthType int
+
+// ClientAuthType enums
+const (
+	NoClientCert ClientAuthType = iota
+	RequestClientCert
+	RequireAnyClientCert
+	VerifyClientCertIfGiven
+	RequireAndVerifyClientCert
+)
+
+// ExtendedMasterSecretType declares the policy the client and server
+// will follow for the Extended Master Secret extension.
+type ExtendedMasterSecretType int
+
+// ExtendedMasterSecretType enums
+const (
+	RequestExtendedMasterSecret ExtendedMasterSecretType = iota
+	RequireExtendedMasterSecret
+	DisableExtendedMasterSecret
+)
+
+func validateConfig(config *Config) error {
+	switch {
+	case config == nil:
+		return errNoConfigProvided
+	case config.PSKIdentityHint != nil && config.PSK == nil:
+		return errIdentityNoPSK
+	}
+
+	for _, cert := range config.Certificates {
+		if cert.Certificate == nil {
+			return errInvalidCertificate
+		}
+		if cert.PrivateKey != nil {
+			switch cert.PrivateKey.(type) {
+			case ed25519.PrivateKey:
+			case *ecdsa.PrivateKey:
+			case *rsa.PrivateKey:
+			default:
+				return errInvalidPrivateKey
+			}
+		}
+	}
+
+	_, err := parseCipherSuites(config.CipherSuites, config.CustomCipherSuites, config.includeCertificateSuites(), config.PSK != nil)
+	return err
+}
diff --git a/vendor/github.com/pion/dtls/v2/conn.go b/vendor/github.com/pion/dtls/v2/conn.go
new file mode 100644
index 000000000..bea31c2b6
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/conn.go
@@ -0,0 +1,1024 @@
+package dtls
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/pion/dtls/v2/internal/closer"
+	"github.com/pion/dtls/v2/pkg/crypto/elliptic"
+	"github.com/pion/dtls/v2/pkg/crypto/signaturehash"
+	"github.com/pion/dtls/v2/pkg/protocol"
+	"github.com/pion/dtls/v2/pkg/protocol/alert"
+	"github.com/pion/dtls/v2/pkg/protocol/handshake"
+	"github.com/pion/dtls/v2/pkg/protocol/recordlayer"
+	"github.com/pion/logging"
+	"github.com/pion/transport/v2/connctx"
+	"github.com/pion/transport/v2/deadline"
+	"github.com/pion/transport/v2/replaydetector"
+)
+
+const (
+	initialTickerInterval = time.Second
+	cookieLength          = 20
+	sessionLength         = 32
+	defaultNamedCurve     = elliptic.X25519
+	inboundBufferSize     = 8192
+	// Default replay protection window is specified by RFC 6347 Section 4.1.2.6
+	defaultReplayProtectionWindow = 64
+)
+
+func invalidKeyingLabels() map[string]bool {
+	return map[string]bool{
+		"client finished": true,
+		"server finished": true,
+		"master secret":   true,
+		"key expansion":   true,
+	}
+}
+
+// Conn represents a DTLS connection
+type Conn struct {
+	lock           sync.RWMutex     // Internal lock (must not be public)
+	nextConn       connctx.ConnCtx  // Embedded Conn, typically a udpconn we read/write from
+	fragmentBuffer *fragmentBuffer  // out-of-order and missing fragment handling
+	handshakeCache *handshakeCache  // caching of handshake messages for verifyData generation
+	decrypted      chan interface{} // Decrypted Application Data or error, pulled by calling `Read`
+
+	state State // Internal state
+
+	maximumTransmissionUnit int
+
+	handshakeCompletedSuccessfully atomic.Value
+
+	encryptedPackets [][]byte
+
+	connectionClosedByUser bool
+	closeLock              sync.Mutex
+	closed                 *closer.Closer
+	handshakeLoopsFinished sync.WaitGroup
+
+	readDeadline  *deadline.Deadline
+	writeDeadline *deadline.Deadline
+
+	log logging.LeveledLogger
+
+	reading               chan struct{}
+	handshakeRecv         chan chan struct{}
+	cancelHandshaker      func()
+	cancelHandshakeReader func()
+
+	fsm *handshakeFSM
+
+	replayProtectionWindow uint
+}
+
+func createConn(ctx context.Context, nextConn net.Conn, config *Config,
isClient bool, initialState *State) (*Conn, error) { + err := validateConfig(config) + if err != nil { + return nil, err + } + + if nextConn == nil { + return nil, errNilNextConn + } + + cipherSuites, err := parseCipherSuites(config.CipherSuites, config.CustomCipherSuites, config.includeCertificateSuites(), config.PSK != nil) + if err != nil { + return nil, err + } + + signatureSchemes, err := signaturehash.ParseSignatureSchemes(config.SignatureSchemes, config.InsecureHashes) + if err != nil { + return nil, err + } + + workerInterval := initialTickerInterval + if config.FlightInterval != 0 { + workerInterval = config.FlightInterval + } + + loggerFactory := config.LoggerFactory + if loggerFactory == nil { + loggerFactory = logging.NewDefaultLoggerFactory() + } + + logger := loggerFactory.NewLogger("dtls") + + mtu := config.MTU + if mtu <= 0 { + mtu = defaultMTU + } + + replayProtectionWindow := config.ReplayProtectionWindow + if replayProtectionWindow <= 0 { + replayProtectionWindow = defaultReplayProtectionWindow + } + + c := &Conn{ + nextConn: connctx.New(nextConn), + fragmentBuffer: newFragmentBuffer(), + handshakeCache: newHandshakeCache(), + maximumTransmissionUnit: mtu, + + decrypted: make(chan interface{}, 1), + log: logger, + + readDeadline: deadline.New(), + writeDeadline: deadline.New(), + + reading: make(chan struct{}, 1), + handshakeRecv: make(chan chan struct{}), + closed: closer.NewCloser(), + cancelHandshaker: func() {}, + + replayProtectionWindow: uint(replayProtectionWindow), + + state: State{ + isClient: isClient, + }, + } + + c.setRemoteEpoch(0) + c.setLocalEpoch(0) + + serverName := config.ServerName + // Do not allow the use of an IP address literal as an SNI value. + // See RFC 6066, Section 3. + if net.ParseIP(serverName) != nil { + serverName = "" + } + + curves := config.EllipticCurves + if len(curves) == 0 { + curves = defaultCurves + } + + hsCfg := &handshakeConfig{ + localPSKCallback: config.PSK, + localPSKIdentityHint: config.PSKIdentityHint, + localCipherSuites: cipherSuites, + localSignatureSchemes: signatureSchemes, + extendedMasterSecret: config.ExtendedMasterSecret, + localSRTPProtectionProfiles: config.SRTPProtectionProfiles, + serverName: serverName, + supportedProtocols: config.SupportedProtocols, + clientAuth: config.ClientAuth, + localCertificates: config.Certificates, + insecureSkipVerify: config.InsecureSkipVerify, + verifyPeerCertificate: config.VerifyPeerCertificate, + verifyConnection: config.VerifyConnection, + rootCAs: config.RootCAs, + clientCAs: config.ClientCAs, + customCipherSuites: config.CustomCipherSuites, + retransmitInterval: workerInterval, + log: logger, + initialEpoch: 0, + keyLogWriter: config.KeyLogWriter, + sessionStore: config.SessionStore, + ellipticCurves: curves, + localGetCertificate: config.GetCertificate, + localGetClientCertificate: config.GetClientCertificate, + insecureSkipHelloVerify: config.InsecureSkipVerifyHello, + } + + // rfc5246#section-7.4.3 + // In addition, the hash and signature algorithms MUST be compatible + // with the key in the server's end-entity certificate. 
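The hunk that follows implements that requirement by filtering the server's suite list against its certificate key type. A hedged sketch of the effect (editorial, not part of the patch; `selfsign` is the library's throwaway-certificate helper, and the address is made up): a server whose only key is Ed25519 or ECDSA (both count as ECDSA-sign for this filter) ends up offering only ECDSA-authenticated suites.

```go
package main

import (
	"crypto/tls"
	"net"

	"github.com/pion/dtls/v2"
	"github.com/pion/dtls/v2/pkg/crypto/selfsign"
)

func main() {
	// Self-signed throwaway certificate; its private-key type decides which
	// certificate-authenticated suites survive filterCipherSuitesForCertificate.
	certificate, err := selfsign.GenerateSelfSigned()
	if err != nil {
		panic(err)
	}

	config := &dtls.Config{
		Certificates:         []tls.Certificate{certificate},
		ExtendedMasterSecret: dtls.RequireExtendedMasterSecret,
	}

	addr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4444} // hypothetical
	listener, err := dtls.Listen("udp", addr, config)
	if err != nil {
		panic(err)
	}
	defer listener.Close()
}
```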
+	if !isClient {
+		cert, err := hsCfg.getCertificate(&ClientHelloInfo{})
+		if err != nil && !errors.Is(err, errNoCertificates) {
+			return nil, err
+		}
+		hsCfg.localCipherSuites = filterCipherSuitesForCertificate(cert, cipherSuites)
+	}
+
+	var initialFlight flightVal
+	var initialFSMState handshakeState
+
+	if initialState != nil {
+		if c.state.isClient {
+			initialFlight = flight5
+		} else {
+			initialFlight = flight6
+		}
+		initialFSMState = handshakeFinished
+
+		c.state = *initialState
+	} else {
+		if c.state.isClient {
+			initialFlight = flight1
+		} else {
+			initialFlight = flight0
+		}
+		initialFSMState = handshakePreparing
+	}
+	// Do handshake
+	if err := c.handshake(ctx, hsCfg, initialFlight, initialFSMState); err != nil {
+		return nil, err
+	}
+
+	c.log.Trace("Handshake Completed")
+
+	return c, nil
+}
+
+// Dial connects to the given network address and establishes a DTLS connection on top.
+// The connection handshake will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, use DialWithContext() instead.
+func Dial(network string, raddr *net.UDPAddr, config *Config) (*Conn, error) {
+	ctx, cancel := config.connectContextMaker()
+	defer cancel()
+
+	return DialWithContext(ctx, network, raddr, config)
+}
+
+// Client establishes a DTLS connection over an existing connection.
+// The connection handshake will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, use ClientWithContext() instead.
+func Client(conn net.Conn, config *Config) (*Conn, error) {
+	ctx, cancel := config.connectContextMaker()
+	defer cancel()
+
+	return ClientWithContext(ctx, conn, config)
+}
+
+// Server listens for incoming DTLS connections.
+// The connection handshake will time out using ConnectContextMaker in the Config.
+// If you want to specify the timeout duration, use ServerWithContext() instead.
+func Server(conn net.Conn, config *Config) (*Conn, error) {
+	ctx, cancel := config.connectContextMaker()
+	defer cancel()
+
+	return ServerWithContext(ctx, conn, config)
+}
+
+// DialWithContext connects to the given network address and establishes a DTLS connection on top.
+func DialWithContext(ctx context.Context, network string, raddr *net.UDPAddr, config *Config) (*Conn, error) {
+	pConn, err := net.DialUDP(network, nil, raddr)
+	if err != nil {
+		return nil, err
+	}
+	return ClientWithContext(ctx, pConn, config)
+}
+
+// ClientWithContext establishes a DTLS connection over an existing connection.
+func ClientWithContext(ctx context.Context, conn net.Conn, config *Config) (*Conn, error) {
+	switch {
+	case config == nil:
+		return nil, errNoConfigProvided
+	case config.PSK != nil && config.PSKIdentityHint == nil:
+		return nil, errPSKAndIdentityMustBeSetForClient
+	}
+
+	return createConn(ctx, conn, config, true, nil)
+}
+
+// ServerWithContext listens for incoming DTLS connections.
+func ServerWithContext(ctx context.Context, conn net.Conn, config *Config) (*Conn, error) {
+	if config == nil {
+		return nil, errNoConfigProvided
+	}
+
+	return createConn(ctx, conn, config, false, nil)
+}
+
+// Read reads data from the connection.
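Before the I/O methods, a note on the connect paths just defined: all three wrappers delegate their timeout to `connectContextMaker()`, while the `*WithContext` variants accept a context directly. A hedged sketch of bounding one handshake explicitly (editorial; `raddr` and `config` stand in for values built as in the earlier sketches):

```go
package example

import (
	"context"
	"net"
	"time"

	"github.com/pion/dtls/v2"
)

// dialWithDeadline bounds a single handshake to 5 seconds instead of the
// Config-wide 30-second default supplied by defaultConnectContextMaker.
func dialWithDeadline(raddr *net.UDPAddr, config *dtls.Config) (*dtls.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return dtls.DialWithContext(ctx, "udp", raddr, config)
}
```

A handshake that cannot finish before the context expires surfaces to the caller as a `HandshakeError` via `translateHandshakeCtxError` below.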
+func (c *Conn) Read(p []byte) (n int, err error) { + if !c.isHandshakeCompletedSuccessfully() { + return 0, errHandshakeInProgress + } + + select { + case <-c.readDeadline.Done(): + return 0, errDeadlineExceeded + default: + } + + for { + select { + case <-c.readDeadline.Done(): + return 0, errDeadlineExceeded + case out, ok := <-c.decrypted: + if !ok { + return 0, io.EOF + } + switch val := out.(type) { + case ([]byte): + if len(p) < len(val) { + return 0, errBufferTooSmall + } + copy(p, val) + return len(val), nil + case (error): + return 0, val + } + } + } +} + +// Write writes len(p) bytes from p to the DTLS connection +func (c *Conn) Write(p []byte) (int, error) { + if c.isConnectionClosed() { + return 0, ErrConnClosed + } + + select { + case <-c.writeDeadline.Done(): + return 0, errDeadlineExceeded + default: + } + + if !c.isHandshakeCompletedSuccessfully() { + return 0, errHandshakeInProgress + } + + return len(p), c.writePackets(c.writeDeadline, []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Epoch: c.state.getLocalEpoch(), + Version: protocol.Version1_2, + }, + Content: &protocol.ApplicationData{ + Data: p, + }, + }, + shouldEncrypt: true, + }, + }) +} + +// Close closes the connection. +func (c *Conn) Close() error { + err := c.close(true) //nolint:contextcheck + c.handshakeLoopsFinished.Wait() + return err +} + +// ConnectionState returns basic DTLS details about the connection. +// Note that this replaced the `Export` function of v1. +func (c *Conn) ConnectionState() State { + c.lock.RLock() + defer c.lock.RUnlock() + return *c.state.clone() +} + +// SelectedSRTPProtectionProfile returns the selected SRTPProtectionProfile +func (c *Conn) SelectedSRTPProtectionProfile() (SRTPProtectionProfile, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + + if c.state.srtpProtectionProfile == 0 { + return 0, false + } + + return c.state.srtpProtectionProfile, true +} + +func (c *Conn) writePackets(ctx context.Context, pkts []*packet) error { + c.lock.Lock() + defer c.lock.Unlock() + + var rawPackets [][]byte + + for _, p := range pkts { + if h, ok := p.record.Content.(*handshake.Handshake); ok { + handshakeRaw, err := p.record.Marshal() + if err != nil { + return err + } + + c.log.Tracef("[handshake:%v] -> %s (epoch: %d, seq: %d)", + srvCliStr(c.state.isClient), h.Header.Type.String(), + p.record.Header.Epoch, h.Header.MessageSequence) + c.handshakeCache.push(handshakeRaw[recordlayer.HeaderSize:], p.record.Header.Epoch, h.Header.MessageSequence, h.Header.Type, c.state.isClient) + + rawHandshakePackets, err := c.processHandshakePacket(p, h) + if err != nil { + return err + } + rawPackets = append(rawPackets, rawHandshakePackets...) 
+		} else {
+			rawPacket, err := c.processPacket(p)
+			if err != nil {
+				return err
+			}
+			rawPackets = append(rawPackets, rawPacket)
+		}
+	}
+	if len(rawPackets) == 0 {
+		return nil
+	}
+	compactedRawPackets := c.compactRawPackets(rawPackets)
+
+	for _, compactedRawPacket := range compactedRawPackets {
+		if _, err := c.nextConn.WriteContext(ctx, compactedRawPacket); err != nil {
+			return netError(err)
+		}
+	}
+
+	return nil
+}
+
+func (c *Conn) compactRawPackets(rawPackets [][]byte) [][]byte {
+	combinedRawPackets := make([][]byte, 0)
+	currentCombinedRawPacket := make([]byte, 0)
+
+	for _, rawPacket := range rawPackets {
+		if len(currentCombinedRawPacket) > 0 && len(currentCombinedRawPacket)+len(rawPacket) >= c.maximumTransmissionUnit {
+			combinedRawPackets = append(combinedRawPackets, currentCombinedRawPacket)
+			currentCombinedRawPacket = []byte{}
+		}
+		currentCombinedRawPacket = append(currentCombinedRawPacket, rawPacket...)
+	}
+
+	combinedRawPackets = append(combinedRawPackets, currentCombinedRawPacket)
+
+	return combinedRawPackets
+}
+
+func (c *Conn) processPacket(p *packet) ([]byte, error) {
+	epoch := p.record.Header.Epoch
+	for len(c.state.localSequenceNumber) <= int(epoch) {
+		c.state.localSequenceNumber = append(c.state.localSequenceNumber, uint64(0))
+	}
+	seq := atomic.AddUint64(&c.state.localSequenceNumber[epoch], 1) - 1
+	if seq > recordlayer.MaxSequenceNumber {
+		// RFC 6347 Section 4.1:
+		// The implementation must either abandon an association or rehandshake
+		// prior to allowing the sequence number to wrap.
+		return nil, errSequenceNumberOverflow
+	}
+	p.record.Header.SequenceNumber = seq
+
+	rawPacket, err := p.record.Marshal()
+	if err != nil {
+		return nil, err
+	}
+
+	if p.shouldEncrypt {
+		var err error
+		rawPacket, err = c.state.cipherSuite.Encrypt(p.record, rawPacket)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rawPacket, nil
+}
+
+func (c *Conn) processHandshakePacket(p *packet, h *handshake.Handshake) ([][]byte, error) {
+	rawPackets := make([][]byte, 0)
+
+	handshakeFragments, err := c.fragmentHandshake(h)
+	if err != nil {
+		return nil, err
+	}
+	epoch := p.record.Header.Epoch
+	for len(c.state.localSequenceNumber) <= int(epoch) {
+		c.state.localSequenceNumber = append(c.state.localSequenceNumber, uint64(0))
+	}
+
+	for _, handshakeFragment := range handshakeFragments {
+		seq := atomic.AddUint64(&c.state.localSequenceNumber[epoch], 1) - 1
+		if seq > recordlayer.MaxSequenceNumber {
+			return nil, errSequenceNumberOverflow
+		}
+
+		recordlayerHeader := &recordlayer.Header{
+			Version:        p.record.Header.Version,
+			ContentType:    p.record.Header.ContentType,
+			ContentLen:     uint16(len(handshakeFragment)),
+			Epoch:          p.record.Header.Epoch,
+			SequenceNumber: seq,
+		}
+
+		rawPacket, err := recordlayerHeader.Marshal()
+		if err != nil {
+			return nil, err
+		}
+
+		p.record.Header = *recordlayerHeader
+
+		rawPacket = append(rawPacket, handshakeFragment...)
+		if p.shouldEncrypt {
+			var err error
+			rawPacket, err = c.state.cipherSuite.Encrypt(p.record, rawPacket)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		rawPackets = append(rawPackets, rawPacket)
+	}
+
+	return rawPackets, nil
+}
+
+func (c *Conn) fragmentHandshake(h *handshake.Handshake) ([][]byte, error) {
+	content, err := h.Message.Marshal()
+	if err != nil {
+		return nil, err
+	}
+
+	fragmentedHandshakes := make([][]byte, 0)
+
+	contentFragments := splitBytes(content, c.maximumTransmissionUnit)
+	if len(contentFragments) == 0 {
+		contentFragments = [][]byte{
+			{},
+		}
+	}
+
+	offset := 0
+	for _, contentFragment := range contentFragments {
+		contentFragmentLen := len(contentFragment)
+
+		headerFragment := &handshake.Header{
+			Type:            h.Header.Type,
+			Length:          h.Header.Length,
+			MessageSequence: h.Header.MessageSequence,
+			FragmentOffset:  uint32(offset),
+			FragmentLength:  uint32(contentFragmentLen),
+		}
+
+		offset += contentFragmentLen
+
+		fragmentedHandshake, err := headerFragment.Marshal()
+		if err != nil {
+			return nil, err
+		}
+
+		fragmentedHandshake = append(fragmentedHandshake, contentFragment...)
+		fragmentedHandshakes = append(fragmentedHandshakes, fragmentedHandshake)
+	}
+
+	return fragmentedHandshakes, nil
+}
+
+var poolReadBuffer = sync.Pool{ //nolint:gochecknoglobals
+	New: func() interface{} {
+		b := make([]byte, inboundBufferSize)
+		return &b
+	},
+}
+
+func (c *Conn) readAndBuffer(ctx context.Context) error {
+	bufptr, ok := poolReadBuffer.Get().(*[]byte)
+	if !ok {
+		return errFailedToAccessPoolReadBuffer
+	}
+	defer poolReadBuffer.Put(bufptr)
+
+	b := *bufptr
+	i, err := c.nextConn.ReadContext(ctx, b)
+	if err != nil {
+		return netError(err)
+	}
+
+	pkts, err := recordlayer.UnpackDatagram(b[:i])
+	if err != nil {
+		return err
+	}
+
+	var hasHandshake bool
+	for _, p := range pkts {
+		hs, alert, err := c.handleIncomingPacket(ctx, p, true)
+		if alert != nil {
+			if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil {
+				if err == nil {
+					err = alertErr
+				}
+			}
+		}
+		if hs {
+			hasHandshake = true
+		}
+
+		var e *alertError
+		if errors.As(err, &e) {
+			if e.IsFatalOrCloseNotify() {
+				return e
+			}
+		} else if err != nil {
+			return err
+		}
+	}
+	if hasHandshake {
+		done := make(chan struct{})
+		select {
+		case c.handshakeRecv <- done:
+			// The other party may retransmit the flight,
+			// so we should respond even if it is not a new message.
+			<-done
+		case <-c.fsm.Done():
+		}
+	}
+	return nil
+}
+
+func (c *Conn) handleQueuedPackets(ctx context.Context) error {
+	pkts := c.encryptedPackets
+	c.encryptedPackets = nil
+
+	for _, p := range pkts {
+		_, alert, err := c.handleIncomingPacket(ctx, p, false) // don't re-enqueue
+		if alert != nil {
+			if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil {
+				if err == nil {
+					err = alertErr
+				}
+			}
+		}
+		var e *alertError
+		if errors.As(err, &e) {
+			if e.IsFatalOrCloseNotify() {
+				return e
+			}
+		} else if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *Conn) handleIncomingPacket(ctx context.Context, buf []byte, enqueue bool) (bool, *alert.Alert, error) { //nolint:gocognit
+	h := &recordlayer.Header{}
+	if err := h.Unmarshal(buf); err != nil {
+		// Decode errors must be silently discarded
+		// [RFC 6347 Section 4.1.2.7]
+		c.log.Debugf("discarded broken packet: %v", err)
+		return false, nil, nil
+	}
+
+	// Validate epoch
+	remoteEpoch := c.state.getRemoteEpoch()
+	if h.Epoch > remoteEpoch {
+		if h.Epoch > remoteEpoch+1 {
+			c.log.Debugf("discarded future packet (epoch: %d, seq: %d)",
+				h.Epoch, h.SequenceNumber,
+			)
+			return false, nil, nil
+		}
+		if enqueue {
+			c.log.Debug("received packet of next epoch, queuing packet")
+			c.encryptedPackets = append(c.encryptedPackets, buf)
+		}
+		return false, nil, nil
+	}
+
+	// Anti-replay protection
+	for len(c.state.replayDetector) <= int(h.Epoch) {
+		c.state.replayDetector = append(c.state.replayDetector,
+			replaydetector.New(c.replayProtectionWindow, recordlayer.MaxSequenceNumber),
+		)
+	}
+	markPacketAsValid, ok := c.state.replayDetector[int(h.Epoch)].Check(h.SequenceNumber)
+	if !ok {
+		c.log.Debugf("discarded duplicated packet (epoch: %d, seq: %d)",
+			h.Epoch, h.SequenceNumber,
+		)
+		return false, nil, nil
+	}
+
+	// Decrypt
+	if h.Epoch != 0 {
+		if c.state.cipherSuite == nil || !c.state.cipherSuite.IsInitialized() {
+			if enqueue {
+				c.encryptedPackets = append(c.encryptedPackets, buf)
+				c.log.Debug("handshake not finished, queuing packet")
+			}
+			return false, nil, nil
+		}
+
+		var err error
+		buf, err = c.state.cipherSuite.Decrypt(buf)
+		if err != nil {
+			c.log.Debugf("%s: decrypt failed: %s", srvCliStr(c.state.isClient), err)
+			return false, nil, nil
+		}
+	}
+
+	isHandshake, err := c.fragmentBuffer.push(append([]byte{}, buf...))
+	if err != nil {
+		// Decode errors must be silently discarded
+		// [RFC 6347 Section 4.1.2.7]
+		c.log.Debugf("defragment failed: %s", err)
+		return false, nil, nil
+	} else if isHandshake {
+		markPacketAsValid()
+		for out, epoch := c.fragmentBuffer.pop(); out != nil; out, epoch = c.fragmentBuffer.pop() {
+			header := &handshake.Header{}
+			if err := header.Unmarshal(out); err != nil {
+				c.log.Debugf("%s: handshake parse failed: %s", srvCliStr(c.state.isClient), err)
+				continue
+			}
+			c.handshakeCache.push(out, epoch, header.MessageSequence, header.Type, !c.state.isClient)
+		}
+
+		return true, nil, nil
+	}
+
+	r := &recordlayer.RecordLayer{}
+	if err := r.Unmarshal(buf); err != nil {
+		return false, &alert.Alert{Level: alert.Fatal, Description: alert.DecodeError}, err
+	}
+
+	switch content := r.Content.(type) {
+	case *alert.Alert:
+		c.log.Tracef("%s: <- %s", srvCliStr(c.state.isClient), content.String())
+		var a *alert.Alert
+		if content.Description == alert.CloseNotify {
+			// Respond with a close_notify [RFC 5246 Section 7.2.1]
+			a = &alert.Alert{Level: alert.Warning, Description: alert.CloseNotify}
+		}
+		markPacketAsValid()
+		return false, a, &alertError{content}
+	case *protocol.ChangeCipherSpec:
+		if c.state.cipherSuite == nil || !c.state.cipherSuite.IsInitialized() {
+			if enqueue {
+				c.encryptedPackets = append(c.encryptedPackets, buf)
+				c.log.Debugf("CipherSuite not initialized, queuing packet")
+			}
+			return false, nil, nil
+		}
+
+		newRemoteEpoch := h.Epoch + 1
+		c.log.Tracef("%s: <- ChangeCipherSpec (epoch: %d)", srvCliStr(c.state.isClient), newRemoteEpoch)
+
+		if c.state.getRemoteEpoch()+1 == newRemoteEpoch {
+			c.setRemoteEpoch(newRemoteEpoch)
+			markPacketAsValid()
+		}
+	case *protocol.ApplicationData:
+		if h.Epoch == 0 {
+			return false, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, errApplicationDataEpochZero
+		}
+
+		markPacketAsValid()
+
+		select {
+		case c.decrypted <- content.Data:
+		case <-c.closed.Done():
+		case <-ctx.Done():
+		}
+
+	default:
+		return false, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, fmt.Errorf("%w: %d", errUnhandledContextType, content.ContentType())
+	}
+	return false, nil, nil
+}
+
+func (c *Conn) recvHandshake() <-chan chan struct{} {
+	return c.handshakeRecv
+}
+
+func (c *Conn) notify(ctx context.Context, level alert.Level, desc alert.Description) error {
+	if level == alert.Fatal && len(c.state.SessionID) > 0 {
+		// According to the RFC, we need to delete the stored session.
+		// https://datatracker.ietf.org/doc/html/rfc5246#section-7.2
+		if ss := c.fsm.cfg.sessionStore; ss != nil {
+			c.log.Tracef("clean invalid session: %s", c.state.SessionID)
+			if err := ss.Del(c.sessionKey()); err != nil {
+				return err
+			}
+		}
+	}
+	return c.writePackets(ctx, []*packet{
+		{
+			record: &recordlayer.RecordLayer{
+				Header: recordlayer.Header{
+					Epoch:   c.state.getLocalEpoch(),
+					Version: protocol.Version1_2,
+				},
+				Content: &alert.Alert{
+					Level:       level,
+					Description: desc,
+				},
+			},
+			shouldEncrypt: c.isHandshakeCompletedSuccessfully(),
+		},
+	})
+}
+
+func (c *Conn) setHandshakeCompletedSuccessfully() {
+	c.handshakeCompletedSuccessfully.Store(struct{ bool }{true})
+}
+
+func (c *Conn) isHandshakeCompletedSuccessfully() bool {
+	boolean, _ := c.handshakeCompletedSuccessfully.Load().(struct{ bool })
+	return boolean.bool
+}
+
+func (c *Conn) handshake(ctx context.Context, cfg *handshakeConfig, initialFlight flightVal, initialState handshakeState) error { //nolint:gocognit
+	c.fsm = newHandshakeFSM(&c.state, c.handshakeCache, cfg, initialFlight)
+
+	done := make(chan struct{})
+	ctxRead, cancelRead := context.WithCancel(context.Background())
+	c.cancelHandshakeReader = cancelRead
+	cfg.onFlightState = func(f flightVal, s handshakeState) {
+		if s == handshakeFinished && !c.isHandshakeCompletedSuccessfully() {
+			c.setHandshakeCompletedSuccessfully()
+			close(done)
+		}
+	}
+
+	ctxHs, cancel := context.WithCancel(context.Background())
+	c.cancelHandshaker = cancel
+
+	firstErr := make(chan error, 1)
+
+	c.handshakeLoopsFinished.Add(2)
+
+	// The handshake routine should stay live until close.
+	// The other party may request retransmission of the last flight to cope with packet drop.
+	go func() {
+		defer c.handshakeLoopsFinished.Done()
+		err := c.fsm.Run(ctxHs, c, initialState)
+		if !errors.Is(err, context.Canceled) {
+			select {
+			case firstErr <- err:
+			default:
+			}
+		}
+	}()
+	go func() {
+		defer func() {
+			// Escaping the read loop.
+			// It's safe to close the decrypted channel now.
+			close(c.decrypted)
+
+			// Force-stop the handshaker when the underlying connection is closed.
+			cancel()
+		}()
+		defer c.handshakeLoopsFinished.Done()
+		for {
+			if err := c.readAndBuffer(ctxRead); err != nil {
+				var e *alertError
+				if errors.As(err, &e) {
+					if !e.IsFatalOrCloseNotify() {
+						if c.isHandshakeCompletedSuccessfully() {
+							// Pass the error to Read()
+							select {
+							case c.decrypted <- err:
+							case <-c.closed.Done():
+							case <-ctxRead.Done():
+							}
+						}
+						continue // a non-fatal alert must not stop the read loop
+					}
+				} else {
+					switch {
+					case errors.Is(err, context.DeadlineExceeded), errors.Is(err, context.Canceled), errors.Is(err, io.EOF):
+					default:
+						if c.isHandshakeCompletedSuccessfully() {
+							// Keep the read loop running and pass the read error to Read()
+							select {
+							case c.decrypted <- err:
+							case <-c.closed.Done():
+							case <-ctxRead.Done():
+							}
+							continue // a non-fatal alert must not stop the read loop
+						}
+					}
+				}
+
+				select {
+				case firstErr <- err:
+				default:
+				}
+
+				if e != nil {
+					if e.IsFatalOrCloseNotify() {
+						_ = c.close(false) //nolint:contextcheck
+					}
+				}
+				if !c.isConnectionClosed() && errors.Is(err, context.Canceled) {
+					c.log.Trace("handshake timed out - closing underlying connection")
+					_ = c.close(false) //nolint:contextcheck
+				}
+				return
+			}
+		}
+	}()
+
+	select {
+	case err := <-firstErr:
+		cancelRead()
+		cancel()
+		c.handshakeLoopsFinished.Wait()
+		return c.translateHandshakeCtxError(err)
+	case <-ctx.Done():
+		cancelRead()
+		cancel()
+		c.handshakeLoopsFinished.Wait()
+		return c.translateHandshakeCtxError(ctx.Err())
+	case <-done:
+		return nil
+	}
+}
+
+func (c *Conn) translateHandshakeCtxError(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, context.Canceled) && c.isHandshakeCompletedSuccessfully() {
+		return nil
+	}
+	return &HandshakeError{Err: err}
+}
+
+func (c *Conn) close(byUser bool) error {
+	c.cancelHandshaker()
+	c.cancelHandshakeReader()
+
+	if c.isHandshakeCompletedSuccessfully() && byUser {
+		// Discard the error from notify() so the first user call of Close()
+		// returns no error, even if the underlying connection is already closed.
+		_ = c.notify(context.Background(), alert.Warning, alert.CloseNotify)
+	}
+
+	c.closeLock.Lock()
+	// Don't return ErrConnClosed on the first call from the user.
+	closedByUser := c.connectionClosedByUser
+	if byUser {
+		c.connectionClosedByUser = true
+	}
+	isClosed := c.isConnectionClosed()
+	c.closed.Close()
+	c.closeLock.Unlock()
+
+	if closedByUser {
+		return ErrConnClosed
+	}
+
+	if isClosed {
+		return nil
+	}
+
+	return c.nextConn.Close()
+}
+
+func (c *Conn) isConnectionClosed() bool {
+	select {
+	case <-c.closed.Done():
+		return true
+	default:
+		return false
+	}
+}
+
+func (c *Conn) setLocalEpoch(epoch uint16) {
+	c.state.localEpoch.Store(epoch)
+}
+
+func (c *Conn) setRemoteEpoch(epoch uint16) {
+	c.state.remoteEpoch.Store(epoch)
+}
+
+// LocalAddr implements net.Conn.LocalAddr
+func (c *Conn) LocalAddr() net.Addr {
+	return c.nextConn.LocalAddr()
+}
+
+// RemoteAddr implements net.Conn.RemoteAddr
+func (c *Conn) RemoteAddr() net.Addr {
+	return c.nextConn.RemoteAddr()
+}
+
+func (c *Conn) sessionKey() []byte {
+	if c.state.isClient {
+		// Since ServerName can look like 0.example.com, it's better to add a
+		// delimiter character that is not allowed in either an address
+		// or a domain name.
+ return []byte(c.nextConn.RemoteAddr().String() + "_" + c.fsm.cfg.serverName) + } + return c.state.SessionID +} + +// SetDeadline implements net.Conn.SetDeadline +func (c *Conn) SetDeadline(t time.Time) error { + c.readDeadline.Set(t) + return c.SetWriteDeadline(t) +} + +// SetReadDeadline implements net.Conn.SetReadDeadline +func (c *Conn) SetReadDeadline(t time.Time) error { + c.readDeadline.Set(t) + // Read deadline is fully managed by this layer. + // Don't set read deadline to underlying connection. + return nil +} + +// SetWriteDeadline implements net.Conn.SetWriteDeadline +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline.Set(t) + // Write deadline is also fully managed by this layer. + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/crypto.go b/vendor/github.com/pion/dtls/v2/crypto.go new file mode 100644 index 000000000..885a9a56b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/crypto.go @@ -0,0 +1,225 @@ +package dtls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/binary" + "math/big" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/hash" +) + +type ecdsaSignature struct { + R, S *big.Int +} + +func valueKeyMessage(clientRandom, serverRandom, publicKey []byte, namedCurve elliptic.Curve) []byte { + serverECDHParams := make([]byte, 4) + serverECDHParams[0] = 3 // named curve + binary.BigEndian.PutUint16(serverECDHParams[1:], uint16(namedCurve)) + serverECDHParams[3] = byte(len(publicKey)) + + plaintext := []byte{} + plaintext = append(plaintext, clientRandom...) + plaintext = append(plaintext, serverRandom...) + plaintext = append(plaintext, serverECDHParams...) + plaintext = append(plaintext, publicKey...) 
+ + return plaintext +} + +// If the client provided a "signature_algorithms" extension, then all +// certificates provided by the server MUST be signed by a +// hash/signature algorithm pair that appears in that extension +// +// https://tools.ietf.org/html/rfc5246#section-7.4.2 +func generateKeySignature(clientRandom, serverRandom, publicKey []byte, namedCurve elliptic.Curve, privateKey crypto.PrivateKey, hashAlgorithm hash.Algorithm) ([]byte, error) { + msg := valueKeyMessage(clientRandom, serverRandom, publicKey, namedCurve) + switch p := privateKey.(type) { + case ed25519.PrivateKey: + // https://crypto.stackexchange.com/a/55483 + return p.Sign(rand.Reader, msg, crypto.Hash(0)) + case *ecdsa.PrivateKey: + hashed := hashAlgorithm.Digest(msg) + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + case *rsa.PrivateKey: + hashed := hashAlgorithm.Digest(msg) + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + } + + return nil, errKeySignatureGenerateUnimplemented +} + +func verifyKeySignature(message, remoteKeySignature []byte, hashAlgorithm hash.Algorithm, rawCertificates [][]byte) error { //nolint:dupl + if len(rawCertificates) == 0 { + return errLengthMismatch + } + certificate, err := x509.ParseCertificate(rawCertificates[0]) + if err != nil { + return err + } + + switch p := certificate.PublicKey.(type) { + case ed25519.PublicKey: + if ok := ed25519.Verify(p, message, remoteKeySignature); !ok { + return errKeySignatureMismatch + } + return nil + case *ecdsa.PublicKey: + ecdsaSig := &ecdsaSignature{} + if _, err := asn1.Unmarshal(remoteKeySignature, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errInvalidECDSASignature + } + hashed := hashAlgorithm.Digest(message) + if !ecdsa.Verify(p, hashed, ecdsaSig.R, ecdsaSig.S) { + return errKeySignatureMismatch + } + return nil + case *rsa.PublicKey: + switch certificate.SignatureAlgorithm { + case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA: + hashed := hashAlgorithm.Digest(message) + return rsa.VerifyPKCS1v15(p, hashAlgorithm.CryptoHash(), hashed, remoteKeySignature) + default: + return errKeySignatureVerifyUnimplemented + } + } + + return errKeySignatureVerifyUnimplemented +} + +// If the server has sent a CertificateRequest message, the client MUST send the Certificate +// message. The ClientKeyExchange message is now sent, and the content +// of that message will depend on the public key algorithm selected +// between the ClientHello and the ServerHello. If the client has sent +// a certificate with signing ability, a digitally-signed +// CertificateVerify message is sent to explicitly verify possession of +// the private key in the certificate. +// https://tools.ietf.org/html/rfc5246#section-7.3 +func generateCertificateVerify(handshakeBodies []byte, privateKey crypto.PrivateKey, hashAlgorithm hash.Algorithm) ([]byte, error) { + if p, ok := privateKey.(ed25519.PrivateKey); ok { + // https://pkg.go.dev/crypto/ed25519#PrivateKey.Sign + // Sign signs the given message with priv. Ed25519 performs two passes over + // messages to be signed and therefore cannot handle pre-hashed messages. 
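That Ed25519 restriction is easy to confirm outside the library; a standalone sketch using only the standard library (illustrative, not part of the patch):

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)
	msg := []byte("handshake bodies")

	// crypto.Hash(0) signals "no pre-hashing"; passing crypto.SHA256 here
	// would make Sign return an error, which is why the code above
	// special-cases Ed25519 before computing a digest.
	sig, err := priv.Sign(rand.Reader, msg, crypto.Hash(0))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", ed25519.Verify(pub, msg, sig))
}
```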
+ return p.Sign(rand.Reader, handshakeBodies, crypto.Hash(0)) + } + + h := sha256.New() + if _, err := h.Write(handshakeBodies); err != nil { + return nil, err + } + hashed := h.Sum(nil) + + switch p := privateKey.(type) { + case *ecdsa.PrivateKey: + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + case *rsa.PrivateKey: + return p.Sign(rand.Reader, hashed, hashAlgorithm.CryptoHash()) + } + + return nil, errInvalidSignatureAlgorithm +} + +func verifyCertificateVerify(handshakeBodies []byte, hashAlgorithm hash.Algorithm, remoteKeySignature []byte, rawCertificates [][]byte) error { //nolint:dupl + if len(rawCertificates) == 0 { + return errLengthMismatch + } + certificate, err := x509.ParseCertificate(rawCertificates[0]) + if err != nil { + return err + } + + switch p := certificate.PublicKey.(type) { + case ed25519.PublicKey: + if ok := ed25519.Verify(p, handshakeBodies, remoteKeySignature); !ok { + return errKeySignatureMismatch + } + return nil + case *ecdsa.PublicKey: + ecdsaSig := &ecdsaSignature{} + if _, err := asn1.Unmarshal(remoteKeySignature, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errInvalidECDSASignature + } + hash := hashAlgorithm.Digest(handshakeBodies) + if !ecdsa.Verify(p, hash, ecdsaSig.R, ecdsaSig.S) { + return errKeySignatureMismatch + } + return nil + case *rsa.PublicKey: + switch certificate.SignatureAlgorithm { + case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA: + hash := hashAlgorithm.Digest(handshakeBodies) + return rsa.VerifyPKCS1v15(p, hashAlgorithm.CryptoHash(), hash, remoteKeySignature) + default: + return errKeySignatureVerifyUnimplemented + } + } + + return errKeySignatureVerifyUnimplemented +} + +func loadCerts(rawCertificates [][]byte) ([]*x509.Certificate, error) { + if len(rawCertificates) == 0 { + return nil, errLengthMismatch + } + + certs := make([]*x509.Certificate, 0, len(rawCertificates)) + for _, rawCert := range rawCertificates { + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + return certs, nil +} + +func verifyClientCert(rawCertificates [][]byte, roots *x509.CertPool) (chains [][]*x509.Certificate, err error) { + certificate, err := loadCerts(rawCertificates) + if err != nil { + return nil, err + } + intermediateCAPool := x509.NewCertPool() + for _, cert := range certificate[1:] { + intermediateCAPool.AddCert(cert) + } + opts := x509.VerifyOptions{ + Roots: roots, + CurrentTime: time.Now(), + Intermediates: intermediateCAPool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + return certificate[0].Verify(opts) +} + +func verifyServerCert(rawCertificates [][]byte, roots *x509.CertPool, serverName string) (chains [][]*x509.Certificate, err error) { + certificate, err := loadCerts(rawCertificates) + if err != nil { + return nil, err + } + intermediateCAPool := x509.NewCertPool() + for _, cert := range certificate[1:] { + intermediateCAPool.AddCert(cert) + } + opts := x509.VerifyOptions{ + Roots: roots, + CurrentTime: time.Now(), + DNSName: serverName, + Intermediates: intermediateCAPool, + } + return certificate[0].Verify(opts) +} diff --git a/vendor/github.com/pion/dtls/v2/dtls.go b/vendor/github.com/pion/dtls/v2/dtls.go new file mode 100644 index 000000000..125b904e5 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/dtls.go @@ -0,0 +1,2 @@ +// Package dtls implements Datagram Transport Layer Security (DTLS) 1.2 +package dtls diff --git 
a/vendor/github.com/pion/dtls/v2/errors.go b/vendor/github.com/pion/dtls/v2/errors.go new file mode 100644 index 000000000..df16a21de --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/errors.go @@ -0,0 +1,154 @@ +package dtls + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" +) + +// Typed errors +var ( + ErrConnClosed = &FatalError{Err: errors.New("conn is closed")} //nolint:goerr113 + + errDeadlineExceeded = &TimeoutError{Err: fmt.Errorf("read/write timeout: %w", context.DeadlineExceeded)} + errInvalidContentType = &TemporaryError{Err: errors.New("invalid content type")} //nolint:goerr113 + + errBufferTooSmall = &TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + errContextUnsupported = &TemporaryError{Err: errors.New("context is not supported for ExportKeyingMaterial")} //nolint:goerr113 + errHandshakeInProgress = &TemporaryError{Err: errors.New("handshake is in progress")} //nolint:goerr113 + errReservedExportKeyingMaterial = &TemporaryError{Err: errors.New("ExportKeyingMaterial can not be used with a reserved label")} //nolint:goerr113 + errApplicationDataEpochZero = &TemporaryError{Err: errors.New("ApplicationData with epoch of 0")} //nolint:goerr113 + errUnhandledContextType = &TemporaryError{Err: errors.New("unhandled contentType")} //nolint:goerr113 + + errCertificateVerifyNoCertificate = &FatalError{Err: errors.New("client sent certificate verify but we have no certificate to verify")} //nolint:goerr113 + errCipherSuiteNoIntersection = &FatalError{Err: errors.New("client+server do not support any shared cipher suites")} //nolint:goerr113 + errClientCertificateNotVerified = &FatalError{Err: errors.New("client sent certificate but did not verify it")} //nolint:goerr113 + errClientCertificateRequired = &FatalError{Err: errors.New("server required client verification, but got none")} //nolint:goerr113 + errClientNoMatchingSRTPProfile = &FatalError{Err: errors.New("server responded with SRTP Profile we do not support")} //nolint:goerr113 + errClientRequiredButNoServerEMS = &FatalError{Err: errors.New("client required Extended Master Secret extension, but server does not support it")} //nolint:goerr113 + errCookieMismatch = &FatalError{Err: errors.New("client+server cookie does not match")} //nolint:goerr113 + errIdentityNoPSK = &FatalError{Err: errors.New("PSK Identity Hint provided but PSK is nil")} //nolint:goerr113 + errInvalidCertificate = &FatalError{Err: errors.New("no certificate provided")} //nolint:goerr113 + errInvalidCipherSuite = &FatalError{Err: errors.New("invalid or unknown cipher suite")} //nolint:goerr113 + errInvalidECDSASignature = &FatalError{Err: errors.New("ECDSA signature contained zero or negative values")} //nolint:goerr113 + errInvalidPrivateKey = &FatalError{Err: errors.New("invalid private key type")} //nolint:goerr113 + errInvalidSignatureAlgorithm = &FatalError{Err: errors.New("invalid signature algorithm")} //nolint:goerr113 + errKeySignatureMismatch = &FatalError{Err: errors.New("expected and actual key signature do not match")} //nolint:goerr113 + errNilNextConn = &FatalError{Err: errors.New("Conn can not be created with a nil nextConn")} //nolint:goerr113 + errNoAvailableCipherSuites = &FatalError{Err: errors.New("connection can not be created, no CipherSuites satisfy this Config")} //nolint:goerr113 + errNoAvailablePSKCipherSuite = &FatalError{Err: errors.New("connection can not be created, pre-shared key present but no 
compatible CipherSuite")} //nolint:goerr113
+	errNoAvailableCertificateCipherSuite = &FatalError{Err: errors.New("connection can not be created, certificate present but no compatible CipherSuite")}           //nolint:goerr113
+	errNoAvailableSignatureSchemes       = &FatalError{Err: errors.New("connection can not be created, no SignatureScheme satisfy this Config")}                      //nolint:goerr113
+	errNoCertificates                    = &FatalError{Err: errors.New("no certificates configured")}                                                                 //nolint:goerr113
+	errNoConfigProvided                  = &FatalError{Err: errors.New("no config provided")}                                                                         //nolint:goerr113
+	errNoSupportedEllipticCurves         = &FatalError{Err: errors.New("client requested zero or more elliptic curves that are not supported by the server")}         //nolint:goerr113
+	errUnsupportedProtocolVersion        = &FatalError{Err: errors.New("unsupported protocol version")}                                                               //nolint:goerr113
+	errPSKAndIdentityMustBeSetForClient  = &FatalError{Err: errors.New("PSK and PSK Identity Hint must both be set for client")}                                      //nolint:goerr113
+	errRequestedButNoSRTPExtension       = &FatalError{Err: errors.New("SRTP support was requested but server did not respond with use_srtp extension")}              //nolint:goerr113
+	errServerNoMatchingSRTPProfile       = &FatalError{Err: errors.New("client requested SRTP but we have no matching profiles")}                                     //nolint:goerr113
+	errServerRequiredButNoClientEMS      = &FatalError{Err: errors.New("server requires the Extended Master Secret extension, but the client does not support it")}   //nolint:goerr113
+	errVerifyDataMismatch                = &FatalError{Err: errors.New("expected and actual verify data does not match")}                                             //nolint:goerr113
+	errNotAcceptableCertificateChain     = &FatalError{Err: errors.New("certificate chain is not signed by an acceptable CA")}                                        //nolint:goerr113
+
+	errInvalidFlight                     = &InternalError{Err: errors.New("invalid flight number")}                           //nolint:goerr113
+	errKeySignatureGenerateUnimplemented = &InternalError{Err: errors.New("unable to generate key signature, unimplemented")} //nolint:goerr113
+	errKeySignatureVerifyUnimplemented   = &InternalError{Err: errors.New("unable to verify key signature, unimplemented")}   //nolint:goerr113
+	errLengthMismatch                    = &InternalError{Err: errors.New("data length and declared length do not match")}    //nolint:goerr113
+	errSequenceNumberOverflow            = &InternalError{Err: errors.New("sequence number overflow")}                        //nolint:goerr113
+	errInvalidFSMTransition              = &InternalError{Err: errors.New("invalid state machine transition")}                //nolint:goerr113
+	errFailedToAccessPoolReadBuffer      = &InternalError{Err: errors.New("failed to access pool read buffer")}               //nolint:goerr113
+	errFragmentBufferOverflow            = &InternalError{Err: errors.New("fragment buffer overflow")}                        //nolint:goerr113
+)
+
+// FatalError indicates that the DTLS connection is no longer available.
+// It is mainly caused by a wrong configuration of the server or client.
+type FatalError = protocol.FatalError
+
+// InternalError indicates an internal error caused by the implementation, after which the DTLS connection is no longer available.
+// It is mainly caused by bugs or by attempts to use unimplemented features.
+type InternalError = protocol.InternalError
+
+// TemporaryError indicates that the DTLS connection is still available, but the request failed temporarily.
+type TemporaryError = protocol.TemporaryError
+
+// TimeoutError indicates that the request timed out.
+type TimeoutError = protocol.TimeoutError
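For callers, these aliases mean the usual net-style error probing works. A hedged sketch of distinguishing them (editorial; `conn` and `buf` are placeholders):

```go
package example

import (
	"errors"
	"net"

	"github.com/pion/dtls/v2"
)

// classifyWriteError shows how the typed errors above surface to callers.
func classifyWriteError(conn *dtls.Conn, buf []byte) string {
	_, err := conn.Write(buf)
	var netErr net.Error
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, dtls.ErrConnClosed):
		return "connection closed"
	case errors.As(err, &netErr) && netErr.Timeout():
		return "timed out" // TimeoutError implements net.Error with Timeout() == true
	default:
		return "fatal: " + err.Error()
	}
}
```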
+// HandshakeError indicates that the handshake failed.
+type HandshakeError = protocol.HandshakeError
+
+// invalidCipherSuiteError indicates an attempt at using an unsupported cipher suite.
+type invalidCipherSuiteError struct {
+	id CipherSuiteID
+}
+
+func (e *invalidCipherSuiteError) Error() string {
+	return fmt.Sprintf("CipherSuite with id(%d) is not valid", e.id)
+}
+
+func (e *invalidCipherSuiteError) Is(err error) bool {
+	var other *invalidCipherSuiteError
+	if errors.As(err, &other) {
+		return e.id == other.id
+	}
+	return false
+}
+
+// alertError wraps a DTLS alert notification as an error
+type alertError struct {
+	*alert.Alert
+}
+
+func (e *alertError) Error() string {
+	return fmt.Sprintf("alert: %s", e.Alert.String())
+}
+
+func (e *alertError) IsFatalOrCloseNotify() bool {
+	return e.Level == alert.Fatal || e.Description == alert.CloseNotify
+}
+
+func (e *alertError) Is(err error) bool {
+	var other *alertError
+	if errors.As(err, &other) {
+		return e.Level == other.Level && e.Description == other.Description
+	}
+	return false
+}
+
+// netError translates an error from the underlying Conn to the corresponding net.Error.
+func netError(err error) error {
+	switch {
+	case errors.Is(err, io.EOF), errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded):
+		// Return io.EOF and context errors as-is.
+		return err
+	}
+
+	var (
+		ne      net.Error
+		opError *net.OpError
+		se      *os.SyscallError
+	)
+
+	if errors.As(err, &opError) {
+		if errors.As(opError, &se) {
+			if se.Timeout() {
+				return &TimeoutError{Err: err}
+			}
+			if isOpErrorTemporary(se) {
+				return &TemporaryError{Err: err}
+			}
+		}
+	}
+
+	if errors.As(err, &ne) {
+		return err
+	}
+
+	return &FatalError{Err: err}
+}
diff --git a/vendor/github.com/pion/dtls/v2/errors_errno.go b/vendor/github.com/pion/dtls/v2/errors_errno.go
new file mode 100644
index 000000000..ddb0ebbc0
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/errors_errno.go
@@ -0,0 +1,19 @@
+//go:build aix || darwin || dragonfly || freebsd || linux || nacl || nacljs || netbsd || openbsd || solaris || windows
+// +build aix darwin dragonfly freebsd linux nacl nacljs netbsd openbsd solaris windows
+
+// For systems having syscall.Errno.
+// Update build targets with the following command:
+// $ grep -R ECONN $(go env GOROOT)/src/syscall/zerrors_*.go \
+//   | tr "." "_" | cut -d"_" -f"2" | sort | uniq
+
+package dtls
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+func isOpErrorTemporary(err *os.SyscallError) bool {
+	return errors.Is(err.Err, syscall.ECONNREFUSED)
+}
diff --git a/vendor/github.com/pion/dtls/v2/errors_noerrno.go b/vendor/github.com/pion/dtls/v2/errors_noerrno.go
new file mode 100644
index 000000000..ad1bf8523
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/errors_noerrno.go
@@ -0,0 +1,15 @@
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !nacl && !nacljs && !netbsd && !openbsd && !solaris && !windows
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!nacl,!nacljs,!netbsd,!openbsd,!solaris,!windows
+
+// For systems without syscall.Errno.
+// Build targets must be the inverse of errors_errno.go
+
+package dtls
+
+import (
+	"os"
+)
+
+func isOpErrorTemporary(err *os.SyscallError) bool {
+	return false
+}
diff --git a/vendor/github.com/pion/dtls/v2/flight.go b/vendor/github.com/pion/dtls/v2/flight.go
new file mode 100644
index 000000000..bed3f8c9a
--- /dev/null
+++ b/vendor/github.com/pion/dtls/v2/flight.go
@@ -0,0 +1,101 @@
+package dtls
+
+/*
+  DTLS messages are grouped into a series of message flights, according
+  to the diagrams below.
Although each flight of messages may consist + of a number of messages, they should be viewed as monolithic for the + purpose of timeout and retransmission. + https://tools.ietf.org/html/rfc4347#section-4.2.4 + + Message flights for full handshake: + + Client Server + ------ ------ + Waiting Flight 0 + + ClientHello --------> Flight 1 + + <------- HelloVerifyRequest Flight 2 + + ClientHello --------> Flight 3 + + ServerHello \ + Certificate* \ + ServerKeyExchange* Flight 4 + CertificateRequest* / + <-------- ServerHelloDone / + + Certificate* \ + ClientKeyExchange \ + CertificateVerify* Flight 5 + [ChangeCipherSpec] / + Finished --------> / + + [ChangeCipherSpec] \ Flight 6 + <-------- Finished / + + Message flights for session-resuming handshake (no cookie exchange): + + Client Server + ------ ------ + Waiting Flight 0 + + ClientHello --------> Flight 1 + + ServerHello \ + [ChangeCipherSpec] Flight 4b + <-------- Finished / + + [ChangeCipherSpec] \ Flight 5b + Finished --------> / + + [ChangeCipherSpec] \ Flight 6 + <-------- Finished / +*/ + +type flightVal uint8 + +const ( + flight0 flightVal = iota + 1 + flight1 + flight2 + flight3 + flight4 + flight4b + flight5 + flight5b + flight6 +) + +func (f flightVal) String() string { + switch f { + case flight0: + return "Flight 0" + case flight1: + return "Flight 1" + case flight2: + return "Flight 2" + case flight3: + return "Flight 3" + case flight4: + return "Flight 4" + case flight4b: + return "Flight 4b" + case flight5: + return "Flight 5" + case flight5b: + return "Flight 5b" + case flight6: + return "Flight 6" + default: + return "Invalid Flight" + } +} + +func (f flightVal) isLastSendFlight() bool { + return f == flight6 || f == flight5b +} + +func (f flightVal) isLastRecvFlight() bool { + return f == flight5 || f == flight4b +} diff --git a/vendor/github.com/pion/dtls/v2/flight0handler.go b/vendor/github.com/pion/dtls/v2/flight0handler.go new file mode 100644 index 000000000..fa4720e0e --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight0handler.go @@ -0,0 +1,135 @@ +package dtls + +import ( + "context" + "crypto/rand" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +func flight0Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + seq, msgs, ok := cache.fullPullMap(0, state.cipherSuite, + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + state.handshakeRecvSequence = seq + + var clientHello *handshake.MessageClientHello + + // Validate type + if clientHello, ok = msgs[handshake.TypeClientHello].(*handshake.MessageClientHello); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if !clientHello.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + + state.remoteRandom = clientHello.Random + + cipherSuites := []CipherSuite{} + for _, id := range clientHello.CipherSuiteIDs { + if c := cipherSuiteForID(CipherSuiteID(id), cfg.customCipherSuites); c != nil { + cipherSuites = append(cipherSuites, c) + } + } + + if state.cipherSuite, ok = findMatchingCipherSuite(cipherSuites, cfg.localCipherSuites); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errCipherSuiteNoIntersection + } + + for _, val := range clientHello.Extensions { + switch e := val.(type) { + case *extension.SupportedEllipticCurves: + if len(e.EllipticCurves) == 0 { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoSupportedEllipticCurves + } + state.namedCurve = e.EllipticCurves[0] + case *extension.UseSRTP: + profile, ok := findMatchingSRTPProfile(e.ProtectionProfiles, cfg.localSRTPProtectionProfiles) + if !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errServerNoMatchingSRTPProfile + } + state.srtpProtectionProfile = profile + case *extension.UseExtendedMasterSecret: + if cfg.extendedMasterSecret != DisableExtendedMasterSecret { + state.extendedMasterSecret = true + } + case *extension.ServerName: + state.serverName = e.ServerName // remote server name + case *extension.ALPN: + state.peerSupportedProtocols = e.ProtocolNameList + } + } + + if cfg.extendedMasterSecret == RequireExtendedMasterSecret && !state.extendedMasterSecret { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errServerRequiredButNoClientEMS + } + + if state.localKeypair == nil { + var err error + state.localKeypair, err = elliptic.GenerateKeypair(state.namedCurve) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err + } + } + + nextFlight := flight2 + + if cfg.insecureSkipHelloVerify { + nextFlight = flight4 + } + + return handleHelloResume(clientHello.SessionID, state, cfg, nextFlight) +} + +func handleHelloResume(sessionID []byte, state *State, cfg *handshakeConfig, next flightVal) (flightVal, *alert.Alert, error) { + if len(sessionID) > 0 && cfg.sessionStore != nil { + if s, err := cfg.sessionStore.Get(sessionID); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } else if s.ID != nil { + cfg.log.Tracef("[handshake] resume session: %x", sessionID) + + state.SessionID = sessionID + state.masterSecret = s.Secret + + if err := state.initCipherSuite(); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + clientRandom := state.localRandom.MarshalFixed() + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + + return flight4b, nil, nil + } + } + return next, nil, nil +} + +func flight0Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + // Initialize + if !cfg.insecureSkipHelloVerify { + state.cookie = make([]byte, 
cookieLength) + if _, err := rand.Read(state.cookie); err != nil { + return nil, nil, err + } + } + + var zeroEpoch uint16 + state.localEpoch.Store(zeroEpoch) + state.remoteEpoch.Store(zeroEpoch) + state.namedCurve = defaultNamedCurve + + if err := state.localRandom.Populate(); err != nil { + return nil, nil, err + } + + return nil, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight1handler.go b/vendor/github.com/pion/dtls/v2/flight1handler.go new file mode 100644 index 000000000..7cfcea8fb --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight1handler.go @@ -0,0 +1,138 @@ +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight1Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + // HelloVerifyRequest can be skipped by the server, + // so allow ServerHello during flight1 also + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeHelloVerifyRequest, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, true}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + if _, ok := msgs[handshake.TypeServerHello]; ok { + // Flight1 and flight2 were skipped. + // Parse as flight3. + return flight3Parse(ctx, c, state, cache, cfg) + } + + if h, ok := msgs[handshake.TypeHelloVerifyRequest].(*handshake.MessageHelloVerifyRequest); ok { + // DTLS 1.2 clients must not assume that the server will use the protocol version + // specified in HelloVerifyRequest message. RFC 6347 Section 4.2.1 + if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + state.cookie = append([]byte{}, h.Cookie...) + state.handshakeRecvSequence = seq + return flight3, nil, nil + } + + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil +} + +func flight1Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + var zeroEpoch uint16 + state.localEpoch.Store(zeroEpoch) + state.remoteEpoch.Store(zeroEpoch) + state.namedCurve = defaultNamedCurve + state.cookie = nil + + if err := state.localRandom.Populate(); err != nil { + return nil, nil, err + } + + extensions := []extension.Extension{ + &extension.SupportedSignatureAlgorithms{ + SignatureHashAlgorithms: cfg.localSignatureSchemes, + }, + &extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }, + } + + var setEllipticCurveCryptographyClientHelloExtensions bool + for _, c := range cfg.localCipherSuites { + if c.ECC() { + setEllipticCurveCryptographyClientHelloExtensions = true + break + } + } + + if setEllipticCurveCryptographyClientHelloExtensions { + extensions = append(extensions, []extension.Extension{ + &extension.SupportedEllipticCurves{ + EllipticCurves: cfg.ellipticCurves, + }, + &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }, + }...) 
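+ // (These two extensions are only meaningful when at least one offered
+ // cipher suite actually uses ECC, which is why they are gated above; see
+ // RFC 8422 for the supported elliptic curves and point formats definitions.)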
+ } + + if len(cfg.localSRTPProtectionProfiles) > 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: cfg.localSRTPProtectionProfiles, + }) + } + + if cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + + if len(cfg.serverName) > 0 { + extensions = append(extensions, &extension.ServerName{ServerName: cfg.serverName}) + } + + if len(cfg.supportedProtocols) > 0 { + extensions = append(extensions, &extension.ALPN{ProtocolNameList: cfg.supportedProtocols}) + } + + if cfg.sessionStore != nil { + cfg.log.Tracef("[handshake] try to resume session") + if s, err := cfg.sessionStore.Get(c.sessionKey()); err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } else if s.ID != nil { + cfg.log.Tracef("[handshake] get saved session: %x", s.ID) + + state.SessionID = s.ID + state.masterSecret = s.Secret + } + } + + return []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + SessionID: state.SessionID, + Cookie: state.cookie, + Random: state.localRandom, + CipherSuiteIDs: cipherSuiteIDs(cfg.localCipherSuites), + CompressionMethods: defaultCompressionMethods(), + Extensions: extensions, + }, + }, + }, + }, + }, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight2handler.go b/vendor/github.com/pion/dtls/v2/flight2handler.go new file mode 100644 index 000000000..9a1bf34f7 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight2handler.go @@ -0,0 +1,61 @@ +package dtls + +import ( + "bytes" + "context" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight2Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + ) + if !ok { + // Client may retransmit the first ClientHello when HelloVerifyRequest is dropped. + // Parse as flight 0 in this case. 
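+ // For example, assuming the HelloVerifyRequest was lost in transit:
+ //   client ---ClientHello (no cookie)---> server (sends HelloVerifyRequest, dropped)
+ //   client ---ClientHello (no cookie)---> server (lands here, re-parsed as flight 0)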
+ return flight0Parse(ctx, c, state, cache, cfg) + } + state.handshakeRecvSequence = seq + + var clientHello *handshake.MessageClientHello + + // Validate type + if clientHello, ok = msgs[handshake.TypeClientHello].(*handshake.MessageClientHello); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if !clientHello.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + + if len(clientHello.Cookie) == 0 { + return 0, nil, nil + } + if !bytes.Equal(state.cookie, clientHello.Cookie) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.AccessDenied}, errCookieMismatch + } + return flight4, nil, nil +} + +func flight2Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + state.handshakeSendSequence = 0 + return []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageHelloVerifyRequest{ + Version: protocol.Version1_2, + Cookie: state.cookie, + }, + }, + }, + }, + }, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight3handler.go b/vendor/github.com/pion/dtls/v2/flight3handler.go new file mode 100644 index 000000000..0a8c5d952 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight3handler.go @@ -0,0 +1,288 @@ +package dtls + +import ( + "bytes" + "context" + + "github.com/pion/dtls/v2/internal/ciphersuite/types" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight3Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { //nolint:gocognit + // Clients may receive multiple HelloVerifyRequest messages with different cookies. + // Clients SHOULD handle this by sending a new ClientHello with a cookie in response + // to the new HelloVerifyRequest. RFC 6347 Section 4.2.1 + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeHelloVerifyRequest, cfg.initialEpoch, false, true}, + ) + if ok { + if h, msgOk := msgs[handshake.TypeHelloVerifyRequest].(*handshake.MessageHelloVerifyRequest); msgOk { + // DTLS 1.2 clients must not assume that the server will use the protocol version + // specified in HelloVerifyRequest message. RFC 6347 Section 4.2.1 + if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + state.cookie = append([]byte{}, h.Cookie...) + state.handshakeRecvSequence = seq + return flight3, nil, nil + } + } + + _, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + ) + if !ok { + // Don't have enough messages. 
Keep reading + return 0, nil, nil + } + + if h, msgOk := msgs[handshake.TypeServerHello].(*handshake.MessageServerHello); msgOk { + if !h.Version.Equal(protocol.Version1_2) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.ProtocolVersion}, errUnsupportedProtocolVersion + } + for _, v := range h.Extensions { + switch e := v.(type) { + case *extension.UseSRTP: + profile, found := findMatchingSRTPProfile(e.ProtectionProfiles, cfg.localSRTPProtectionProfiles) + if !found { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, errClientNoMatchingSRTPProfile + } + state.srtpProtectionProfile = profile + case *extension.UseExtendedMasterSecret: + if cfg.extendedMasterSecret != DisableExtendedMasterSecret { + state.extendedMasterSecret = true + } + case *extension.ALPN: + if len(e.ProtocolNameList) > 1 { // This should be exactly 1; the zero case is handled when unmarshalling + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, extension.ErrALPNInvalidFormat // Meh, internal error? + } + state.NegotiatedProtocol = e.ProtocolNameList[0] + } + } + if cfg.extendedMasterSecret == RequireExtendedMasterSecret && !state.extendedMasterSecret { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errClientRequiredButNoServerEMS + } + if len(cfg.localSRTPProtectionProfiles) > 0 && state.srtpProtectionProfile == 0 { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errRequestedButNoSRTPExtension + } + + remoteCipherSuite := cipherSuiteForID(CipherSuiteID(*h.CipherSuiteID), cfg.customCipherSuites) + if remoteCipherSuite == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errCipherSuiteNoIntersection + } + + selectedCipherSuite, found := findMatchingCipherSuite([]CipherSuite{remoteCipherSuite}, cfg.localCipherSuites) + if !found { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite + } + + state.cipherSuite = selectedCipherSuite + state.remoteRandom = h.Random + cfg.log.Tracef("[handshake] use cipher suite: %s", selectedCipherSuite.String()) + + if len(h.SessionID) > 0 && bytes.Equal(state.SessionID, h.SessionID) { + return handleResumption(ctx, c, state, cache, cfg) + } + + if len(state.SessionID) > 0 { + cfg.log.Tracef("[handshake] clean old session : %s", state.SessionID) + if err := cfg.sessionStore.Del(state.SessionID); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + if cfg.sessionStore == nil { + state.SessionID = []byte{} + } else { + state.SessionID = h.SessionID + } + + state.masterSecret = []byte{} + } + + if cfg.localPSKCallback != nil { + seq, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + ) + } else { + seq, msgs, ok = cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, true}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + ) + } + if !ok { + // Don't have enough messages. 
Keep reading + return 0, nil, nil + } + state.handshakeRecvSequence = seq + + if h, ok := msgs[handshake.TypeCertificate].(*handshake.MessageCertificate); ok { + state.PeerCertificates = h.Certificate + } else if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errInvalidCertificate + } + + if h, ok := msgs[handshake.TypeServerKeyExchange].(*handshake.MessageServerKeyExchange); ok { + alertPtr, err := handleServerKeyExchange(c, state, cfg, h) + if err != nil { + return 0, alertPtr, err + } + } + + if _, ok := msgs[handshake.TypeCertificateRequest].(*handshake.MessageCertificateRequest); ok { + state.remoteRequestedCertificate = true + } + + return flight5, nil, nil +} + +func handleResumption(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + if err := state.initCipherSuite(); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + // Now, encrypted packets can be handled + if err := c.handleQueuedPackets(ctx); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence+1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + var finished *handshake.MessageFinished + if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + ) + + expectedVerifyData, err := prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + if !bytes.Equal(expectedVerifyData, finished.VerifyData) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch + } + + clientRandom := state.localRandom.MarshalFixed() + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + + return flight5b, nil, nil +} + +func handleServerKeyExchange(_ flightConn, state *State, cfg *handshakeConfig, h *handshake.MessageServerKeyExchange) (*alert.Alert, error) { + var err error + if state.cipherSuite == nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite + } + if cfg.localPSKCallback != nil { + var psk []byte + if psk, err = cfg.localPSKCallback(h.IdentityHint); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.IdentityHint = h.IdentityHint + switch state.cipherSuite.KeyExchangeAlgorithm() { + case types.KeyExchangeAlgorithmPsk: + state.preMasterSecret = prf.PSKPreMasterSecret(psk) + case (types.KeyExchangeAlgorithmEcdhe | types.KeyExchangeAlgorithmPsk): + if state.localKeypair, err = elliptic.GenerateKeypair(h.NamedCurve); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.preMasterSecret, err = prf.EcdhePSKPreMasterSecret(psk, h.PublicKey, state.localKeypair.PrivateKey, 
state.localKeypair.Curve) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + default: + return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errInvalidCipherSuite + } + } else { + if state.localKeypair, err = elliptic.GenerateKeypair(h.NamedCurve); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + if state.preMasterSecret, err = prf.PreMasterSecret(h.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + return nil, nil //nolint:nilnil +} + +func flight3Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + extensions := []extension.Extension{ + &extension.SupportedSignatureAlgorithms{ + SignatureHashAlgorithms: cfg.localSignatureSchemes, + }, + &extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }, + } + if state.namedCurve != 0 { + extensions = append(extensions, []extension.Extension{ + &extension.SupportedEllipticCurves{ + EllipticCurves: []elliptic.Curve{elliptic.X25519, elliptic.P256, elliptic.P384}, + }, + &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }, + }...) + } + + if len(cfg.localSRTPProtectionProfiles) > 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: cfg.localSRTPProtectionProfiles, + }) + } + + if cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + + if len(cfg.serverName) > 0 { + extensions = append(extensions, &extension.ServerName{ServerName: cfg.serverName}) + } + + if len(cfg.supportedProtocols) > 0 { + extensions = append(extensions, &extension.ALPN{ProtocolNameList: cfg.supportedProtocols}) + } + + return []*packet{ + { + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageClientHello{ + Version: protocol.Version1_2, + SessionID: state.SessionID, + Cookie: state.cookie, + Random: state.localRandom, + CipherSuiteIDs: cipherSuiteIDs(cfg.localCipherSuites), + CompressionMethods: defaultCompressionMethods(), + Extensions: extensions, + }, + }, + }, + }, + }, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight4bhandler.go b/vendor/github.com/pion/dtls/v2/flight4bhandler.go new file mode 100644 index 000000000..46515c57f --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight4bhandler.go @@ -0,0 +1,141 @@ +package dtls + +import ( + "bytes" + "context" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight4bParse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + + var finished *handshake.MessageFinished + if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + + expectedVerifyData, err := prf.VerifyDataClient(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + if !bytes.Equal(expectedVerifyData, finished.VerifyData) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch + } + + // Other party may re-transmit the last flight. Keep state to be flight4b. + return flight4b, nil, nil +} + +func flight4bGenerate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + var pkts []*packet + + extensions := []extension.Extension{&extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }} + if (cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret) && state.extendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + if state.srtpProtectionProfile != 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: []SRTPProtectionProfile{state.srtpProtectionProfile}, + }) + } + + selectedProto, err := extension.ALPNProtocolSelection(cfg.supportedProtocols, state.peerSupportedProtocols) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.NoApplicationProtocol}, err + } + if selectedProto != "" { + extensions = append(extensions, &extension.ALPN{ + ProtocolNameList: []string{selectedProto}, + }) + state.NegotiatedProtocol = selectedProto + } + + cipherSuiteID := uint16(state.cipherSuite.ID()) + serverHello := &handshake.Handshake{ + Message: &handshake.MessageServerHello{ + Version: protocol.Version1_2, + Random: state.localRandom, + SessionID: state.SessionID, + CipherSuiteID: &cipherSuiteID, + CompressionMethod: defaultCompressionMethods()[0], + Extensions: extensions, + }, + } + + serverHello.Header.MessageSequence = uint16(state.handshakeSendSequence) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + ) + raw, err := serverHello.Marshal() + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + plainText = append(plainText, raw...) 
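+ // Note that in the resumption case the server's verify_data covers only the
+ // ClientHello pulled above plus the serialized ServerHello, since an
+ // abbreviated handshake exchanges no certificate or key-exchange messages.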
+ + state.localVerifyData, err = prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: serverHello, + }, + }, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }, + ) + + return pkts, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight4handler.go b/vendor/github.com/pion/dtls/v2/flight4handler.go new file mode 100644 index 000000000..9792e0a92 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight4handler.go @@ -0,0 +1,399 @@ +package dtls + +import ( + "context" + "crypto/rand" + "crypto/x509" + + "github.com/pion/dtls/v2/internal/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/extension" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight4Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { //nolint:gocognit + seq, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, true}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, true}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + // Validate type + var clientKeyExchange *handshake.MessageClientKeyExchange + if clientKeyExchange, ok = msgs[handshake.TypeClientKeyExchange].(*handshake.MessageClientKeyExchange); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if h, hasCert := msgs[handshake.TypeCertificate].(*handshake.MessageCertificate); hasCert { + state.PeerCertificates = h.Certificate + // If the client offers its certificate, just disable session resumption. + // Otherwise, we would have to store the certificate identification and expiry time, + // and check whether the certificate has expired, been revoked, or changed. 
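+ // For illustration: if resumption stayed enabled, a later abbreviated
+ // handshake would skip certificate validation entirely, so a client
+ // certificate revoked in the meantime would still be accepted; see the
+ // CVE reference below.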
+ // + // https://curl.se/docs/CVE-2016-5419.html + state.SessionID = nil + } + + if h, hasCertVerify := msgs[handshake.TypeCertificateVerify].(*handshake.MessageCertificateVerify); hasCertVerify { + if state.PeerCertificates == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errCertificateVerifyNoCertificate + } + + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + ) + + // Verify that the pair of hash algorithm and signature is listed. + var validSignatureScheme bool + for _, ss := range cfg.localSignatureSchemes { + if ss.Hash == h.HashAlgorithm && ss.Signature == h.SignatureAlgorithm { + validSignatureScheme = true + break + } + } + if !validSignatureScheme { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoAvailableSignatureSchemes + } + + if err := verifyCertificateVerify(plainText, h.HashAlgorithm, h.Signature, state.PeerCertificates); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + var chains [][]*x509.Certificate + var err error + var verified bool + if cfg.clientAuth >= VerifyClientCertIfGiven { + if chains, err = verifyClientCert(state.PeerCertificates, cfg.clientCAs); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + verified = true + } + if cfg.verifyPeerCertificate != nil { + if err := cfg.verifyPeerCertificate(state.PeerCertificates, chains); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + state.peerCertificatesVerified = verified + } else if state.PeerCertificates != nil { + // A certificate was received, but we haven't seen a CertificateVerify; + // keep reading until we receive one + return 0, nil, nil + } + + if !state.cipherSuite.IsInitialized() { + serverRandom := state.localRandom.MarshalFixed() + clientRandom := state.remoteRandom.MarshalFixed() + + var err error + var preMasterSecret []byte + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypePreSharedKey { + var psk []byte + if psk, err = cfg.localPSKCallback(clientKeyExchange.IdentityHint); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.IdentityHint = clientKeyExchange.IdentityHint + switch state.cipherSuite.KeyExchangeAlgorithm() { + case CipherSuiteKeyExchangeAlgorithmPsk: + preMasterSecret = prf.PSKPreMasterSecret(psk) + case (CipherSuiteKeyExchangeAlgorithmPsk | CipherSuiteKeyExchangeAlgorithmEcdhe): + if preMasterSecret, err = prf.EcdhePSKPreMasterSecret(psk, clientKeyExchange.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + default: + return 0, &alert.Alert{Level: alert.Fatal, 
Description: alert.InternalError}, errInvalidCipherSuite + } + } else { + preMasterSecret, err = prf.PreMasterSecret(clientKeyExchange.PublicKey, state.localKeypair.PrivateKey, state.localKeypair.Curve) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err + } + } + + if state.extendedMasterSecret { + var sessionHash []byte + sessionHash, err = cache.sessionHash(state.cipherSuite.HashFunc(), cfg.initialEpoch) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + state.masterSecret, err = prf.ExtendedMasterSecret(preMasterSecret, sessionHash, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } else { + state.masterSecret, err = prf.MasterSecret(preMasterSecret, clientRandom[:], serverRandom[:], state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + if err := state.cipherSuite.Init(state.masterSecret, clientRandom[:], serverRandom[:], false); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + } + + if len(state.SessionID) > 0 { + s := Session{ + ID: state.SessionID, + Secret: state.masterSecret, + } + cfg.log.Tracef("[handshake] save new session: %x", s.ID) + if err := cfg.sessionStore.Set(state.SessionID, s); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + // Now, encrypted packets can be handled + if err := c.handleQueuedPackets(ctx); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + seq, msgs, ok = cache.fullPullMap(seq, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + state.handshakeRecvSequence = seq + + if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeAnonymous { + if cfg.verifyConnection != nil { + if err := cfg.verifyConnection(state.clone()); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + return flight6, nil, nil + } + + switch cfg.clientAuth { + case RequireAnyClientCert: + if state.PeerCertificates == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errClientCertificateRequired + } + case VerifyClientCertIfGiven: + if state.PeerCertificates != nil && !state.peerCertificatesVerified { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, errClientCertificateNotVerified + } + case RequireAndVerifyClientCert: + if state.PeerCertificates == nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.NoCertificate}, errClientCertificateRequired + } + if !state.peerCertificatesVerified { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, errClientCertificateNotVerified + } + case NoClientCert, RequestClientCert: + // go to flight6 + } + if cfg.verifyConnection != nil { + if err := cfg.verifyConnection(state.clone()); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + + return flight6, nil, nil +} + +func flight4Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + extensions := []extension.Extension{&extension.RenegotiationInfo{ + RenegotiatedConnection: 0, + }} + if (cfg.extendedMasterSecret == RequestExtendedMasterSecret || + cfg.extendedMasterSecret == RequireExtendedMasterSecret) && state.extendedMasterSecret { + extensions = append(extensions, &extension.UseExtendedMasterSecret{ + Supported: true, + }) + } + if state.srtpProtectionProfile != 0 { + extensions = append(extensions, &extension.UseSRTP{ + ProtectionProfiles: []SRTPProtectionProfile{state.srtpProtectionProfile}, + }) + } + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate { + extensions = append(extensions, &extension.SupportedPointFormats{ + PointFormats: []elliptic.CurvePointFormat{elliptic.CurvePointFormatUncompressed}, + }) + } + + selectedProto, err := extension.ALPNProtocolSelection(cfg.supportedProtocols, state.peerSupportedProtocols) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.NoApplicationProtocol}, err + } + if selectedProto != "" { + extensions = append(extensions, &extension.ALPN{ + ProtocolNameList: []string{selectedProto}, + }) + state.NegotiatedProtocol = selectedProto + } + + var pkts []*packet + cipherSuiteID := uint16(state.cipherSuite.ID()) + + if cfg.sessionStore != nil { + state.SessionID = make([]byte, sessionLength) + if _, err := rand.Read(state.SessionID); err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageServerHello{ + Version: protocol.Version1_2, + Random: state.localRandom, + SessionID: state.SessionID, + CipherSuiteID: 
&cipherSuiteID, + CompressionMethod: defaultCompressionMethods()[0], + Extensions: extensions, + }, + }, + }, + }) + + switch { + case state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate: + certificate, err := cfg.getCertificate(&ClientHelloInfo{ + ServerName: state.serverName, + CipherSuites: []ciphersuite.ID{state.cipherSuite.ID()}, + }) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, err + } + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificate{ + Certificate: certificate.Certificate, + }, + }, + }, + }) + + serverRandom := state.localRandom.MarshalFixed() + clientRandom := state.remoteRandom.MarshalFixed() + + // Find compatible signature scheme + signatureHashAlgo, err := signaturehash.SelectSignatureScheme(cfg.localSignatureSchemes, certificate.PrivateKey) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, err + } + + signature, err := generateKeySignature(clientRandom[:], serverRandom[:], state.localKeypair.PublicKey, state.namedCurve, certificate.PrivateKey, signatureHashAlgo.Hash) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.localKeySignature = signature + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageServerKeyExchange{ + EllipticCurveType: elliptic.CurveTypeNamedCurve, + NamedCurve: state.namedCurve, + PublicKey: state.localKeypair.PublicKey, + HashAlgorithm: signatureHashAlgo.Hash, + SignatureAlgorithm: signatureHashAlgo.Signature, + Signature: state.localKeySignature, + }, + }, + }, + }) + + if cfg.clientAuth > NoClientCert { + // An empty list of certificateAuthorities signals to + // the client that it may send any certificate in response + // to our request. When we know the CAs we trust, then + // we can send them down, so that the client can choose + // an appropriate certificate to give to us. + var certificateAuthorities [][]byte + if cfg.clientCAs != nil { + // nolint:staticcheck // the Subjects deprecation warning is ignored because the pool does not come from SystemCertPool, and an empty certificate authorities list is acceptable. + certificateAuthorities = cfg.clientCAs.Subjects() + } + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificateRequest{ + CertificateTypes: []clientcertificate.Type{clientcertificate.RSASign, clientcertificate.ECDSASign}, + SignatureHashAlgorithms: cfg.localSignatureSchemes, + CertificateAuthoritiesNames: certificateAuthorities, + }, + }, + }, + }) + } + case cfg.localPSKIdentityHint != nil || state.cipherSuite.KeyExchangeAlgorithm().Has(CipherSuiteKeyExchangeAlgorithmEcdhe): + // To help the client in selecting which identity to use, the server + // can provide a "PSK identity hint" in the ServerKeyExchange message. + // If no hint is provided and the cipher suite doesn't use an elliptic curve, + // the ServerKeyExchange message is omitted. 
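+ // For example, with a plain PSK suite and no cfg.localPSKIdentityHint the
+ // server sends only ServerHello and ServerHelloDone; with a hint, or with an
+ // ECDHE-PSK suite, a ServerKeyExchange carrying the hint and/or the server's
+ // ECDH public key is inserted between them, per the reference below.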
+ // + // https://tools.ietf.org/html/rfc4279#section-2 + srvExchange := &handshake.MessageServerKeyExchange{ + IdentityHint: cfg.localPSKIdentityHint, + } + if state.cipherSuite.KeyExchangeAlgorithm().Has(CipherSuiteKeyExchangeAlgorithmEcdhe) { + srvExchange.EllipticCurveType = elliptic.CurveTypeNamedCurve + srvExchange.NamedCurve = state.namedCurve + srvExchange.PublicKey = state.localKeypair.PublicKey + } + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: srvExchange, + }, + }, + }) + } + + pkts = append(pkts, &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageServerHelloDone{}, + }, + }, + }) + + return pkts, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight5bhandler.go b/vendor/github.com/pion/dtls/v2/flight5bhandler.go new file mode 100644 index 000000000..bd330d515 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight5bhandler.go @@ -0,0 +1,75 @@ +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight5bParse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + if !ok { + // No valid message received. Keep reading + return 0, nil, nil + } + + if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + // Other party may re-transmit the last flight. Keep state to be flight5b. 
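+ // For instance, if this client's Finished was lost, the server re-transmits
+ // its own Finished; staying on flight5b lets the retransmission logic re-send
+ // the same ChangeCipherSpec/Finished pair instead of advancing the state machine.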
+ return flight5b, nil, nil +} + +func flight5bGenerate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { //nolint:gocognit + var pkts []*packet + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + + var err error + state.localVerifyData, err = prf.VerifyDataClient(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }) + + return pkts, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flight5handler.go b/vendor/github.com/pion/dtls/v2/flight5handler.go new file mode 100644 index 000000000..5a60973d7 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight5handler.go @@ -0,0 +1,354 @@ +package dtls + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight5Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, false, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + + var finished *handshake.MessageFinished + if finished, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + + expectedVerifyData, err := prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + if !bytes.Equal(expectedVerifyData, finished.VerifyData) { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errVerifyDataMismatch + } + + if len(state.SessionID) > 0 { + s := Session{ + ID: state.SessionID, + Secret: state.masterSecret, + } + cfg.log.Tracef("[handshake] save new session: %x", s.ID) + if err := cfg.sessionStore.Set(c.sessionKey(), s); err != nil { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + return flight5, nil, nil +} + +func flight5Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { //nolint:gocognit + var privateKey crypto.PrivateKey + var pkts []*packet + if state.remoteRequestedCertificate { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-2, state.cipherSuite, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}) + if !ok { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errClientCertificateRequired + } + reqInfo := CertificateRequestInfo{} + if r, ok := msgs[handshake.TypeCertificateRequest].(*handshake.MessageCertificateRequest); ok { + reqInfo.AcceptableCAs = r.CertificateAuthoritiesNames + } else { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errClientCertificateRequired + } + certificate, err := cfg.getClientCertificate(&reqInfo) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, err + } + if certificate == nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.HandshakeFailure}, errNotAcceptableCertificateChain + } + if certificate.Certificate != nil { + privateKey = certificate.PrivateKey + } + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificate{ + Certificate: certificate.Certificate, + }, + }, + }, + }) + } + + clientKeyExchange := &handshake.MessageClientKeyExchange{} + if cfg.localPSKCallback == nil 
{ + clientKeyExchange.PublicKey = state.localKeypair.PublicKey + } else { + clientKeyExchange.IdentityHint = cfg.localPSKIdentityHint + } + if state != nil && state.localKeypair != nil && len(state.localKeypair.PublicKey) > 0 { + clientKeyExchange.PublicKey = state.localKeypair.PublicKey + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: clientKeyExchange, + }, + }, + }) + + serverKeyExchangeData := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + ) + + serverKeyExchange := &handshake.MessageServerKeyExchange{} + + // The ServerKeyExchange message is optional for PSK + if len(serverKeyExchangeData) == 0 { + alertPtr, err := handleServerKeyExchange(c, state, cfg, &handshake.MessageServerKeyExchange{}) + if err != nil { + return nil, alertPtr, err + } + } else { + rawHandshake := &handshake.Handshake{ + KeyExchangeAlgorithm: state.cipherSuite.KeyExchangeAlgorithm(), + } + err := rawHandshake.Unmarshal(serverKeyExchangeData) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, err + } + + switch h := rawHandshake.Message.(type) { + case *handshake.MessageServerKeyExchange: + serverKeyExchange = h + default: + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.UnexpectedMessage}, errInvalidContentType + } + } + + // Append not-yet-sent packets + merged := []byte{} + seqPred := uint16(state.handshakeSendSequence) + for _, p := range pkts { + h, ok := p.record.Content.(*handshake.Handshake) + if !ok { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidContentType + } + h.Header.MessageSequence = seqPred + seqPred++ + raw, err := h.Marshal() + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + merged = append(merged, raw...) + } + + if alertPtr, err := initalizeCipherSuite(state, cache, cfg, serverKeyExchange, merged); err != nil { + return nil, alertPtr, err + } + + // If the client has sent a certificate with signing ability, a digitally-signed + // CertificateVerify message is sent to explicitly verify possession of the + // private key in the certificate. + if state.remoteRequestedCertificate && privateKey != nil { + plainText := append(cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + ), merged...) 
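+ // At this point plainText spans every handshake message up to and including
+ // the pending ClientKeyExchange, which is the transcript the CertificateVerify
+ // signature must cover (RFC 5246, Section 7.4.8).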
+ + // Find compatible signature scheme + signatureHashAlgo, err := signaturehash.SelectSignatureScheme(cfg.localSignatureSchemes, privateKey) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, err + } + + certVerify, err := generateCertificateVerify(plainText, privateKey, signatureHashAlgo.Hash) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + state.localCertificatesVerify = certVerify + + p := &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageCertificateVerify{ + HashAlgorithm: signatureHashAlgo.Hash, + SignatureAlgorithm: signatureHashAlgo.Signature, + Signature: state.localCertificatesVerify, + }, + }, + }, + } + pkts = append(pkts, p) + + h, ok := p.record.Content.(*handshake.Handshake) + if !ok { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, errInvalidContentType + } + h.Header.MessageSequence = seqPred + // seqPred++ // this is the last use of seqPred + raw, err := h.Marshal() + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + merged = append(merged, raw...) + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + + var err error + state.localVerifyData, err = prf.VerifyDataClient(state.masterSecret, append(plainText, merged...), state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }) + + return pkts, nil, nil +} + +func initalizeCipherSuite(state *State, cache *handshakeCache, cfg *handshakeConfig, h *handshake.MessageServerKeyExchange, sendingPlainText []byte) (*alert.Alert, error) { //nolint:gocognit + if state.cipherSuite.IsInitialized() { + return nil, nil //nolint + } + + clientRandom := state.localRandom.MarshalFixed() + serverRandom := state.remoteRandom.MarshalFixed() + + var err error + + if state.extendedMasterSecret { + var sessionHash []byte + 
sessionHash, err = cache.sessionHash(state.cipherSuite.HashFunc(), cfg.initialEpoch, sendingPlainText) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + state.masterSecret, err = prf.ExtendedMasterSecret(state.preMasterSecret, sessionHash, state.cipherSuite.HashFunc()) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.IllegalParameter}, err + } + } else { + state.masterSecret, err = prf.MasterSecret(state.preMasterSecret, clientRandom[:], serverRandom[:], state.cipherSuite.HashFunc()) + if err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + if state.cipherSuite.AuthenticationType() == CipherSuiteAuthenticationTypeCertificate { + // Verify that the pair of hash algorithm and signiture is listed. + var validSignatureScheme bool + for _, ss := range cfg.localSignatureSchemes { + if ss.Hash == h.HashAlgorithm && ss.Signature == h.SignatureAlgorithm { + validSignatureScheme = true + break + } + } + if !validSignatureScheme { + return &alert.Alert{Level: alert.Fatal, Description: alert.InsufficientSecurity}, errNoAvailableSignatureSchemes + } + + expectedMsg := valueKeyMessage(clientRandom[:], serverRandom[:], h.PublicKey, h.NamedCurve) + if err = verifyKeySignature(expectedMsg, h.Signature, h.HashAlgorithm, state.PeerCertificates); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + var chains [][]*x509.Certificate + if !cfg.insecureSkipVerify { + if chains, err = verifyServerCert(state.PeerCertificates, cfg.rootCAs, cfg.serverName); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + if cfg.verifyPeerCertificate != nil { + if err = cfg.verifyPeerCertificate(state.PeerCertificates, chains); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + } + if cfg.verifyConnection != nil { + if err = cfg.verifyConnection(state.clone()); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.BadCertificate}, err + } + } + + if err = state.cipherSuite.Init(state.masterSecret, clientRandom[:], serverRandom[:], true); err != nil { + return &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + + cfg.writeKeyLog(keyLogLabelTLS12, clientRandom[:], state.masterSecret) + + return nil, nil //nolint +} diff --git a/vendor/github.com/pion/dtls/v2/flight6handler.go b/vendor/github.com/pion/dtls/v2/flight6handler.go new file mode 100644 index 000000000..d3d62f05b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flight6handler.go @@ -0,0 +1,82 @@ +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +func flight6Parse(ctx context.Context, c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) (flightVal, *alert.Alert, error) { + _, msgs, ok := cache.fullPullMap(state.handshakeRecvSequence-1, state.cipherSuite, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + if !ok { + // No valid message received. 
Keep reading + return 0, nil, nil + } + + if _, ok = msgs[handshake.TypeFinished].(*handshake.MessageFinished); !ok { + return 0, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, nil + } + + // Other party may re-transmit the last flight. Keep state to be flight6. + return flight6, nil, nil +} + +func flight6Generate(c flightConn, state *State, cache *handshakeCache, cfg *handshakeConfig) ([]*packet, *alert.Alert, error) { + var pkts []*packet + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + }, + Content: &protocol.ChangeCipherSpec{}, + }, + }) + + if len(state.localVerifyData) == 0 { + plainText := cache.pullAndMerge( + handshakeCachePullRule{handshake.TypeClientHello, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, cfg.initialEpoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeCertificateVerify, cfg.initialEpoch, true, false}, + handshakeCachePullRule{handshake.TypeFinished, cfg.initialEpoch + 1, true, false}, + ) + + var err error + state.localVerifyData, err = prf.VerifyDataServer(state.masterSecret, plainText, state.cipherSuite.HashFunc()) + if err != nil { + return nil, &alert.Alert{Level: alert.Fatal, Description: alert.InternalError}, err + } + } + + pkts = append(pkts, + &packet{ + record: &recordlayer.RecordLayer{ + Header: recordlayer.Header{ + Version: protocol.Version1_2, + Epoch: 1, + }, + Content: &handshake.Handshake{ + Message: &handshake.MessageFinished{ + VerifyData: state.localVerifyData, + }, + }, + }, + shouldEncrypt: true, + resetLocalSequenceNumber: true, + }, + ) + return pkts, nil, nil +} diff --git a/vendor/github.com/pion/dtls/v2/flighthandler.go b/vendor/github.com/pion/dtls/v2/flighthandler.go new file mode 100644 index 000000000..f899ffa5b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/flighthandler.go @@ -0,0 +1,65 @@ +package dtls + +import ( + "context" + + "github.com/pion/dtls/v2/pkg/protocol/alert" +) + +// Parse received handshakes and return next flightVal +type flightParser func(context.Context, flightConn, *State, *handshakeCache, *handshakeConfig) (flightVal, *alert.Alert, error) + +// Generate flights +type flightGenerator func(flightConn, *State, *handshakeCache, *handshakeConfig) ([]*packet, *alert.Alert, error) + +func (f flightVal) getFlightParser() (flightParser, error) { + switch f { + case flight0: + return flight0Parse, nil + case flight1: + return flight1Parse, nil + case flight2: + return flight2Parse, nil + case flight3: + return flight3Parse, nil + case flight4: + return flight4Parse, nil + case flight4b: + return flight4bParse, nil + case flight5: + return flight5Parse, nil + case flight5b: + return flight5bParse, nil + case flight6: + return flight6Parse, nil + default: + return nil, errInvalidFlight + } +} + +func (f flightVal) getFlightGenerator() (gen flightGenerator, retransmit bool, err error) { + switch f { + case flight0: + return flight0Generate, true, 
nil + case flight1: + return flight1Generate, true, nil + case flight2: + // https://tools.ietf.org/html/rfc6347#section-3.2.1 + // HelloVerifyRequests must not be retransmitted. + return flight2Generate, false, nil + case flight3: + return flight3Generate, true, nil + case flight4: + return flight4Generate, true, nil + case flight4b: + return flight4bGenerate, true, nil + case flight5: + return flight5Generate, true, nil + case flight5b: + return flight5bGenerate, true, nil + case flight6: + return flight6Generate, true, nil + default: + return nil, false, errInvalidFlight + } +} diff --git a/vendor/github.com/pion/dtls/v2/fragment_buffer.go b/vendor/github.com/pion/dtls/v2/fragment_buffer.go new file mode 100644 index 000000000..c86a7e8f9 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/fragment_buffer.go @@ -0,0 +1,129 @@ +package dtls + +import ( + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// 2 megabytes +const fragmentBufferMaxSize = 2000000 + +type fragment struct { + recordLayerHeader recordlayer.Header + handshakeHeader handshake.Header + data []byte +} + +type fragmentBuffer struct { + // map of MessageSequenceNumbers that hold slices of fragments + cache map[uint16][]*fragment + + currentMessageSequenceNumber uint16 +} + +func newFragmentBuffer() *fragmentBuffer { + return &fragmentBuffer{cache: map[uint16][]*fragment{}} +} + +// current total size of buffer +func (f *fragmentBuffer) size() int { + size := 0 + for i := range f.cache { + for j := range f.cache[i] { + size += len(f.cache[i][j].data) + } + } + return size +} + +// Attempts to push a DTLS packet to the fragmentBuffer +// when it returns true it means the fragmentBuffer has inserted and the buffer shouldn't be handled +// when an error returns it is fatal, and the DTLS connection should be stopped +func (f *fragmentBuffer) push(buf []byte) (bool, error) { + if f.size()+len(buf) >= fragmentBufferMaxSize { + return false, errFragmentBufferOverflow + } + + frag := new(fragment) + if err := frag.recordLayerHeader.Unmarshal(buf); err != nil { + return false, err + } + + // fragment isn't a handshake, we don't need to handle it + if frag.recordLayerHeader.ContentType != protocol.ContentTypeHandshake { + return false, nil + } + + for buf = buf[recordlayer.HeaderSize:]; len(buf) != 0; frag = new(fragment) { + if err := frag.handshakeHeader.Unmarshal(buf); err != nil { + return false, err + } + + if _, ok := f.cache[frag.handshakeHeader.MessageSequence]; !ok { + f.cache[frag.handshakeHeader.MessageSequence] = []*fragment{} + } + + // end index should be the length of handshake header but if the handshake + // was fragmented, we should keep them all + end := int(handshake.HeaderLength + frag.handshakeHeader.Length) + if size := len(buf); end > size { + end = size + } + + // Discard all headers, when rebuilding the packet we will re-build + frag.data = append([]byte{}, buf[handshake.HeaderLength:end]...) 
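+ // Index the fragment under its message sequence; pop() reassembles the
+ // fragments for the current sequence number in offset order.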
+ f.cache[frag.handshakeHeader.MessageSequence] = append(f.cache[frag.handshakeHeader.MessageSequence], frag) + buf = buf[end:] + } + + return true, nil +} + +func (f *fragmentBuffer) pop() (content []byte, epoch uint16) { + frags, ok := f.cache[f.currentMessageSequenceNumber] + if !ok { + return nil, 0 + } + + // Go doesn't support recursive lambdas + var appendMessage func(targetOffset uint32) bool + + rawMessage := []byte{} + appendMessage = func(targetOffset uint32) bool { + for _, f := range frags { + if f.handshakeHeader.FragmentOffset == targetOffset { + fragmentEnd := (f.handshakeHeader.FragmentOffset + f.handshakeHeader.FragmentLength) + if fragmentEnd != f.handshakeHeader.Length && f.handshakeHeader.FragmentLength != 0 { + if !appendMessage(fragmentEnd) { + return false + } + } + + rawMessage = append(f.data, rawMessage...) + return true + } + } + return false + } + + // Recursively collect up + if !appendMessage(0) { + return nil, 0 + } + + firstHeader := frags[0].handshakeHeader + firstHeader.FragmentOffset = 0 + firstHeader.FragmentLength = firstHeader.Length + + rawHeader, err := firstHeader.Marshal() + if err != nil { + return nil, 0 + } + + messageEpoch := frags[0].recordLayerHeader.Epoch + + delete(f.cache, f.currentMessageSequenceNumber) + f.currentMessageSequenceNumber++ + return append(rawHeader, rawMessage...), messageEpoch +} diff --git a/vendor/github.com/pion/dtls/v2/handshake_cache.go b/vendor/github.com/pion/dtls/v2/handshake_cache.go new file mode 100644 index 000000000..27d246597 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/handshake_cache.go @@ -0,0 +1,169 @@ +package dtls + +import ( + "sync" + + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +type handshakeCacheItem struct { + typ handshake.Type + isClient bool + epoch uint16 + messageSequence uint16 + data []byte +} + +type handshakeCachePullRule struct { + typ handshake.Type + epoch uint16 + isClient bool + optional bool +} + +type handshakeCache struct { + cache []*handshakeCacheItem + mu sync.Mutex +} + +func newHandshakeCache() *handshakeCache { + return &handshakeCache{} +} + +func (h *handshakeCache) push(data []byte, epoch, messageSequence uint16, typ handshake.Type, isClient bool) { + h.mu.Lock() + defer h.mu.Unlock() + + h.cache = append(h.cache, &handshakeCacheItem{ + data: append([]byte{}, data...), + epoch: epoch, + messageSequence: messageSequence, + typ: typ, + isClient: isClient, + }) +} + +// returns a list handshakes that match the requested rules +// the list will contain null entries for rules that can't be satisfied +// multiple entries may match a rule, but only the last match is returned (ie ClientHello with cookies) +func (h *handshakeCache) pull(rules ...handshakeCachePullRule) []*handshakeCacheItem { + h.mu.Lock() + defer h.mu.Unlock() + + out := make([]*handshakeCacheItem, len(rules)) + for i, r := range rules { + for _, c := range h.cache { + if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch { + switch { + case out[i] == nil: + out[i] = c + case out[i].messageSequence < c.messageSequence: + out[i] = c + } + } + } + } + + return out +} + +// fullPullMap pulls all handshakes between rules[0] to rules[len(rules)-1] as map. 
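+ // It returns ok == false when a mandatory (non-optional) message is missing
+ // or when the cached message sequences are not contiguous from startSeq.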
+func (h *handshakeCache) fullPullMap(startSeq int, cipherSuite CipherSuite, rules ...handshakeCachePullRule) (int, map[handshake.Type]handshake.Message, bool) { + h.mu.Lock() + defer h.mu.Unlock() + + ci := make(map[handshake.Type]*handshakeCacheItem) + for _, r := range rules { + var item *handshakeCacheItem + for _, c := range h.cache { + if c.typ == r.typ && c.isClient == r.isClient && c.epoch == r.epoch { + switch { + case item == nil: + item = c + case item.messageSequence < c.messageSequence: + item = c + } + } + } + if !r.optional && item == nil { + // Missing mandatory message. + return startSeq, nil, false + } + ci[r.typ] = item + } + out := make(map[handshake.Type]handshake.Message) + seq := startSeq + for _, r := range rules { + t := r.typ + i := ci[t] + if i == nil { + continue + } + var keyExchangeAlgorithm CipherSuiteKeyExchangeAlgorithm + if cipherSuite != nil { + keyExchangeAlgorithm = cipherSuite.KeyExchangeAlgorithm() + } + rawHandshake := &handshake.Handshake{ + KeyExchangeAlgorithm: keyExchangeAlgorithm, + } + if err := rawHandshake.Unmarshal(i.data); err != nil { + return startSeq, nil, false + } + if uint16(seq) != rawHandshake.Header.MessageSequence { + // There is a gap. Some messages are not arrived. + return startSeq, nil, false + } + seq++ + out[t] = rawHandshake.Message + } + return seq, out, true +} + +// pullAndMerge calls pull and then merges the results, ignoring any null entries +func (h *handshakeCache) pullAndMerge(rules ...handshakeCachePullRule) []byte { + merged := []byte{} + + for _, p := range h.pull(rules...) { + if p != nil { + merged = append(merged, p.data...) + } + } + return merged +} + +// sessionHash returns the session hash for Extended Master Secret support +// https://tools.ietf.org/html/draft-ietf-tls-session-hash-06#section-4 +func (h *handshakeCache) sessionHash(hf prf.HashFunc, epoch uint16, additional ...[]byte) ([]byte, error) { + merged := []byte{} + + // Order defined by https://tools.ietf.org/html/rfc5246#section-7.3 + handshakeBuffer := h.pull( + handshakeCachePullRule{handshake.TypeClientHello, epoch, true, false}, + handshakeCachePullRule{handshake.TypeServerHello, epoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, epoch, false, false}, + handshakeCachePullRule{handshake.TypeServerKeyExchange, epoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificateRequest, epoch, false, false}, + handshakeCachePullRule{handshake.TypeServerHelloDone, epoch, false, false}, + handshakeCachePullRule{handshake.TypeCertificate, epoch, true, false}, + handshakeCachePullRule{handshake.TypeClientKeyExchange, epoch, true, false}, + ) + + for _, p := range handshakeBuffer { + if p == nil { + continue + } + + merged = append(merged, p.data...) + } + for _, a := range additional { + merged = append(merged, a...) 
+ } + + hash := hf() + if _, err := hash.Write(merged); err != nil { + return []byte{}, err + } + + return hash.Sum(nil), nil +} diff --git a/vendor/github.com/pion/dtls/v2/handshaker.go b/vendor/github.com/pion/dtls/v2/handshaker.go new file mode 100644 index 000000000..e68802c0b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/handshaker.go @@ -0,0 +1,347 @@ +package dtls + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "sync" + "time" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/logging" +) + +// [RFC6347 Section-4.2.4] +// +-----------+ +// +---> | PREPARING | <--------------------+ +// | +-----------+ | +// | | | +// | | Buffer next flight | +// | | | +// | \|/ | +// | +-----------+ | +// | | SENDING |<------------------+ | Send +// | +-----------+ | | HelloRequest +// Receive | | | | +// next | | Send flight | | or +// flight | +--------+ | | +// | | | Set retransmit timer | | Receive +// | | \|/ | | HelloRequest +// | | +-----------+ | | Send +// +--)--| WAITING |-------------------+ | ClientHello +// | | +-----------+ Timer expires | | +// | | | | | +// | | +------------------------+ | +// Receive | | Send Read retransmit | +// last | | last | +// flight | | flight | +// | | | +// \|/\|/ | +// +-----------+ | +// | FINISHED | -------------------------------+ +// +-----------+ +// | /|\ +// | | +// +---+ +// Read retransmit +// Retransmit last flight + +type handshakeState uint8 + +const ( + handshakeErrored handshakeState = iota + handshakePreparing + handshakeSending + handshakeWaiting + handshakeFinished +) + +func (s handshakeState) String() string { + switch s { + case handshakeErrored: + return "Errored" + case handshakePreparing: + return "Preparing" + case handshakeSending: + return "Sending" + case handshakeWaiting: + return "Waiting" + case handshakeFinished: + return "Finished" + default: + return "Unknown" + } +} + +type handshakeFSM struct { + currentFlight flightVal + flights []*packet + retransmit bool + state *State + cache *handshakeCache + cfg *handshakeConfig + closed chan struct{} +} + +type handshakeConfig struct { + localPSKCallback PSKCallback + localPSKIdentityHint []byte + localCipherSuites []CipherSuite // Available CipherSuites + localSignatureSchemes []signaturehash.Algorithm // Available signature schemes + extendedMasterSecret ExtendedMasterSecretType // Policy for the Extended Master Support extension + localSRTPProtectionProfiles []SRTPProtectionProfile // Available SRTPProtectionProfiles, if empty no SRTP support + serverName string + supportedProtocols []string + clientAuth ClientAuthType // If we are a client should we request a client certificate + localCertificates []tls.Certificate + nameToCertificate map[string]*tls.Certificate + insecureSkipVerify bool + verifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error + verifyConnection func(*State) error + sessionStore SessionStore + rootCAs *x509.CertPool + clientCAs *x509.CertPool + retransmitInterval time.Duration + customCipherSuites func() []CipherSuite + ellipticCurves []elliptic.Curve + insecureSkipHelloVerify bool + + onFlightState func(flightVal, handshakeState) + log logging.LeveledLogger + keyLogWriter io.Writer + + localGetCertificate func(*ClientHelloInfo) (*tls.Certificate, error) + localGetClientCertificate func(*CertificateRequestInfo) (*tls.Certificate, 
error) + + initialEpoch uint16 + + mu sync.Mutex +} + +type flightConn interface { + notify(ctx context.Context, level alert.Level, desc alert.Description) error + writePackets(context.Context, []*packet) error + recvHandshake() <-chan chan struct{} + setLocalEpoch(epoch uint16) + handleQueuedPackets(context.Context) error + sessionKey() []byte +} + +func (c *handshakeConfig) writeKeyLog(label string, clientRandom, secret []byte) { + if c.keyLogWriter == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + _, err := c.keyLogWriter.Write([]byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret))) + if err != nil { + c.log.Debugf("failed to write key log file: %s", err) + } +} + +func srvCliStr(isClient bool) string { + if isClient { + return "client" + } + return "server" +} + +func newHandshakeFSM( + s *State, cache *handshakeCache, cfg *handshakeConfig, + initialFlight flightVal, +) *handshakeFSM { + return &handshakeFSM{ + currentFlight: initialFlight, + state: s, + cache: cache, + cfg: cfg, + closed: make(chan struct{}), + } +} + +func (s *handshakeFSM) Run(ctx context.Context, c flightConn, initialState handshakeState) error { + state := initialState + defer func() { + close(s.closed) + }() + for { + s.cfg.log.Tracef("[handshake:%s] %s: %s", srvCliStr(s.state.isClient), s.currentFlight.String(), state.String()) + if s.cfg.onFlightState != nil { + s.cfg.onFlightState(s.currentFlight, state) + } + var err error + switch state { + case handshakePreparing: + state, err = s.prepare(ctx, c) + case handshakeSending: + state, err = s.send(ctx, c) + case handshakeWaiting: + state, err = s.wait(ctx, c) + case handshakeFinished: + state, err = s.finish(ctx, c) + default: + return errInvalidFSMTransition + } + if err != nil { + return err + } + } +} + +func (s *handshakeFSM) Done() <-chan struct{} { + return s.closed +} + +func (s *handshakeFSM) prepare(ctx context.Context, c flightConn) (handshakeState, error) { + s.flights = nil + // Prepare flights + var ( + a *alert.Alert + err error + pkts []*packet + ) + gen, retransmit, errFlight := s.currentFlight.getFlightGenerator() + if errFlight != nil { + err = errFlight + a = &alert.Alert{Level: alert.Fatal, Description: alert.InternalError} + } else { + pkts, a, err = gen(c, s.state, s.cache, s.cfg) + s.retransmit = retransmit + } + if a != nil { + if alertErr := c.notify(ctx, a.Level, a.Description); alertErr != nil { + if err != nil { + err = alertErr + } + } + } + if err != nil { + return handshakeErrored, err + } + + s.flights = pkts + epoch := s.cfg.initialEpoch + nextEpoch := epoch + for _, p := range s.flights { + p.record.Header.Epoch += epoch + if p.record.Header.Epoch > nextEpoch { + nextEpoch = p.record.Header.Epoch + } + if h, ok := p.record.Content.(*handshake.Handshake); ok { + h.Header.MessageSequence = uint16(s.state.handshakeSendSequence) + s.state.handshakeSendSequence++ + } + } + if epoch != nextEpoch { + s.cfg.log.Tracef("[handshake:%s] -> changeCipherSpec (epoch: %d)", srvCliStr(s.state.isClient), nextEpoch) + c.setLocalEpoch(nextEpoch) + } + return handshakeSending, nil +} + +func (s *handshakeFSM) send(ctx context.Context, c flightConn) (handshakeState, error) { + // Send flights + if err := c.writePackets(ctx, s.flights); err != nil { + return handshakeErrored, err + } + + if s.currentFlight.isLastSendFlight() { + return handshakeFinished, nil + } + return handshakeWaiting, nil +} + +func (s *handshakeFSM) wait(ctx context.Context, c flightConn) (handshakeState, error) { //nolint:gocognit + parse, errFlight := 
s.currentFlight.getFlightParser() + if errFlight != nil { + if alertErr := c.notify(ctx, alert.Fatal, alert.InternalError); alertErr != nil { + if errFlight != nil { + return handshakeErrored, alertErr + } + } + return handshakeErrored, errFlight + } + + retransmitTimer := time.NewTimer(s.cfg.retransmitInterval) + for { + select { + case done := <-c.recvHandshake(): + nextFlight, alert, err := parse(ctx, c, s.state, s.cache, s.cfg) + close(done) + if alert != nil { + if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil { + if err != nil { + err = alertErr + } + } + } + if err != nil { + return handshakeErrored, err + } + if nextFlight == 0 { + break + } + s.cfg.log.Tracef("[handshake:%s] %s -> %s", srvCliStr(s.state.isClient), s.currentFlight.String(), nextFlight.String()) + if nextFlight.isLastRecvFlight() && s.currentFlight == nextFlight { + return handshakeFinished, nil + } + s.currentFlight = nextFlight + return handshakePreparing, nil + + case <-retransmitTimer.C: + if !s.retransmit { + return handshakeWaiting, nil + } + return handshakeSending, nil + case <-ctx.Done(): + return handshakeErrored, ctx.Err() + } + } +} + +func (s *handshakeFSM) finish(ctx context.Context, c flightConn) (handshakeState, error) { + parse, errFlight := s.currentFlight.getFlightParser() + if errFlight != nil { + if alertErr := c.notify(ctx, alert.Fatal, alert.InternalError); alertErr != nil { + if errFlight != nil { + return handshakeErrored, alertErr + } + } + return handshakeErrored, errFlight + } + + retransmitTimer := time.NewTimer(s.cfg.retransmitInterval) + select { + case done := <-c.recvHandshake(): + nextFlight, alert, err := parse(ctx, c, s.state, s.cache, s.cfg) + close(done) + if alert != nil { + if alertErr := c.notify(ctx, alert.Level, alert.Description); alertErr != nil { + if err != nil { + err = alertErr + } + } + } + if err != nil { + return handshakeErrored, err + } + if nextFlight == 0 { + break + } + if nextFlight.isLastRecvFlight() && s.currentFlight == nextFlight { + return handshakeFinished, nil + } + <-retransmitTimer.C + // Retransmit last flight + return handshakeSending, nil + + case <-ctx.Done(): + return handshakeErrored, ctx.Err() + } + return handshakeFinished, nil +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_128_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_128_ccm.go new file mode 100644 index 000000000..afff7365b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_128_ccm.go @@ -0,0 +1,30 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// Aes128Ccm is a base class used by multiple AES-CCM Ciphers +type Aes128Ccm struct { + AesCcm +} + +func newAes128Ccm(clientCertificateType clientcertificate.Type, id ID, psk bool, cryptoCCMTagLen ciphersuite.CCMTagLen, keyExchangeAlgorithm KeyExchangeAlgorithm, ecc bool) *Aes128Ccm { + return &Aes128Ccm{ + AesCcm: AesCcm{ + clientCertificateType: clientCertificateType, + id: id, + psk: psk, + cryptoCCMTagLen: cryptoCCMTagLen, + keyExchangeAlgorithm: keyExchangeAlgorithm, + ecc: ecc, + }, + } +} + +// Init initializes the internal Cipher with keying material +func (c *Aes128Ccm) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const prfKeyLen = 16 + return c.AesCcm.Init(masterSecret, clientRandom, serverRandom, isClient, prfKeyLen) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_256_ccm.go 
b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_256_ccm.go new file mode 100644 index 000000000..d56ffc5c5 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_256_ccm.go @@ -0,0 +1,30 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// Aes256Ccm is a base class used by multiple AES-CCM Ciphers +type Aes256Ccm struct { + AesCcm +} + +func newAes256Ccm(clientCertificateType clientcertificate.Type, id ID, psk bool, cryptoCCMTagLen ciphersuite.CCMTagLen, keyExchangeAlgorithm KeyExchangeAlgorithm, ecc bool) *Aes256Ccm { + return &Aes256Ccm{ + AesCcm: AesCcm{ + clientCertificateType: clientCertificateType, + id: id, + psk: psk, + cryptoCCMTagLen: cryptoCCMTagLen, + keyExchangeAlgorithm: keyExchangeAlgorithm, + ecc: ecc, + }, + } +} + +// Init initializes the internal Cipher with keying material +func (c *Aes256Ccm) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const prfKeyLen = 32 + return c.AesCcm.Init(masterSecret, clientRandom, serverRandom, isClient, prfKeyLen) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_ccm.go new file mode 100644 index 000000000..224d3906c --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/aes_ccm.go @@ -0,0 +1,110 @@ +package ciphersuite + +import ( + "crypto/sha256" + "fmt" + "hash" + "sync/atomic" + + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// AesCcm is a base class used by multiple AES-CCM Ciphers +type AesCcm struct { + ccm atomic.Value // *cryptoCCM + clientCertificateType clientcertificate.Type + id ID + psk bool + keyExchangeAlgorithm KeyExchangeAlgorithm + cryptoCCMTagLen ciphersuite.CCMTagLen + ecc bool +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *AesCcm) CertificateType() clientcertificate.Type { + return c.clientCertificateType +} + +// ID returns the ID of the CipherSuite +func (c *AesCcm) ID() ID { + return c.id +} + +func (c *AesCcm) String() string { + return c.id.String() +} + +// ECC uses Elliptic Curve Cryptography +func (c *AesCcm) ECC() bool { + return c.ecc +} + +// KeyExchangeAlgorithm controls what key exchange algorithm is using during the handshake +func (c *AesCcm) KeyExchangeAlgorithm() KeyExchangeAlgorithm { + return c.keyExchangeAlgorithm +} + +// HashFunc returns the hashing func for this CipherSuite +func (c *AesCcm) HashFunc() func() hash.Hash { + return sha256.New +} + +// AuthenticationType controls what authentication method is using during the handshake +func (c *AesCcm) AuthenticationType() AuthenticationType { + if c.psk { + return AuthenticationTypePreSharedKey + } + return AuthenticationTypeCertificate +} + +// IsInitialized returns if the CipherSuite has keying material and can +// encrypt/decrypt packets +func (c *AesCcm) IsInitialized() bool { + return c.ccm.Load() != nil +} + +// Init initializes the internal Cipher with keying material +func (c *AesCcm) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool, prfKeyLen int) error { + const ( + prfMacLen = 0 + prfIvLen = 4 + ) + + keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc()) + if err != nil { + 
return err + } + + var ccm *ciphersuite.CCM + if isClient { + ccm, err = ciphersuite.NewCCM(c.cryptoCCMTagLen, keys.ClientWriteKey, keys.ClientWriteIV, keys.ServerWriteKey, keys.ServerWriteIV) + } else { + ccm, err = ciphersuite.NewCCM(c.cryptoCCMTagLen, keys.ServerWriteKey, keys.ServerWriteIV, keys.ClientWriteKey, keys.ClientWriteIV) + } + c.ccm.Store(ccm) + + return err +} + +// Encrypt encrypts a single TLS RecordLayer +func (c *AesCcm) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + cipherSuite, ok := c.ccm.Load().(*ciphersuite.CCM) + if !ok { + return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Encrypt(pkt, raw) +} + +// Decrypt decrypts a single TLS RecordLayer +func (c *AesCcm) Decrypt(raw []byte) ([]byte, error) { + cipherSuite, ok := c.ccm.Load().(*ciphersuite.CCM) + if !ok { + return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Decrypt(raw) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/ciphersuite.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/ciphersuite.go new file mode 100644 index 000000000..fbfadc119 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/ciphersuite.go @@ -0,0 +1,95 @@ +// Package ciphersuite provides TLS Ciphers as registered with the IANA https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4 +package ciphersuite + +import ( + "errors" + "fmt" + + "github.com/pion/dtls/v2/internal/ciphersuite/types" + "github.com/pion/dtls/v2/pkg/protocol" +) + +var errCipherSuiteNotInit = &protocol.TemporaryError{Err: errors.New("CipherSuite has not been initialized")} //nolint:goerr113 + +// ID is an ID for our supported CipherSuites +type ID uint16 + +func (i ID) String() string { + switch i { + case TLS_ECDHE_ECDSA_WITH_AES_128_CCM: + return "TLS_ECDHE_ECDSA_WITH_AES_128_CCM" + case TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8: + return "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8" + case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" + case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + case TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: + return "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" + case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: + return "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" + case TLS_PSK_WITH_AES_128_CCM: + return "TLS_PSK_WITH_AES_128_CCM" + case TLS_PSK_WITH_AES_128_CCM_8: + return "TLS_PSK_WITH_AES_128_CCM_8" + case TLS_PSK_WITH_AES_256_CCM_8: + return "TLS_PSK_WITH_AES_256_CCM_8" + case TLS_PSK_WITH_AES_128_GCM_SHA256: + return "TLS_PSK_WITH_AES_128_GCM_SHA256" + case TLS_PSK_WITH_AES_128_CBC_SHA256: + return "TLS_PSK_WITH_AES_128_CBC_SHA256" + case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + case TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256: + return "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256" + default: + return fmt.Sprintf("unknown(%v)", uint16(i)) + } +} + +// Supported Cipher Suites +const ( + // AES-128-CCM + TLS_ECDHE_ECDSA_WITH_AES_128_CCM ID = 0xc0ac //nolint:revive,stylecheck + TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 ID = 0xc0ae //nolint:revive,stylecheck + + // AES-128-GCM-SHA256 + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 ID = 0xc02b //nolint:revive,stylecheck + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 ID = 0xc02f //nolint:revive,stylecheck + + 
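// AES-256-GCM-SHA384
+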
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 ID = 0xc02c //nolint:revive,stylecheck + TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 ID = 0xc030 //nolint:revive,stylecheck + // AES-256-CBC-SHA + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA ID = 0xc00a //nolint:revive,stylecheck + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA ID = 0xc014 //nolint:revive,stylecheck + + TLS_PSK_WITH_AES_128_CCM ID = 0xc0a4 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_128_CCM_8 ID = 0xc0a8 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_256_CCM_8 ID = 0xc0a9 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_128_GCM_SHA256 ID = 0x00a8 //nolint:revive,stylecheck + TLS_PSK_WITH_AES_128_CBC_SHA256 ID = 0x00ae //nolint:revive,stylecheck + + TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 ID = 0xC037 //nolint:revive,stylecheck +) + +// AuthenticationType controls what authentication method is using during the handshake +type AuthenticationType = types.AuthenticationType + +// AuthenticationType Enums +const ( + AuthenticationTypeCertificate AuthenticationType = types.AuthenticationTypeCertificate + AuthenticationTypePreSharedKey AuthenticationType = types.AuthenticationTypePreSharedKey + AuthenticationTypeAnonymous AuthenticationType = types.AuthenticationTypeAnonymous +) + +// KeyExchangeAlgorithm controls what exchange algorithm was chosen. +type KeyExchangeAlgorithm = types.KeyExchangeAlgorithm + +// KeyExchangeAlgorithm Bitmask +const ( + KeyExchangeAlgorithmNone KeyExchangeAlgorithm = types.KeyExchangeAlgorithmNone + KeyExchangeAlgorithmPsk KeyExchangeAlgorithm = types.KeyExchangeAlgorithmPsk + KeyExchangeAlgorithmEcdhe KeyExchangeAlgorithm = types.KeyExchangeAlgorithmEcdhe +) diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm.go new file mode 100644 index 000000000..91189e139 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm.go @@ -0,0 +1,11 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// NewTLSEcdheEcdsaWithAes128Ccm constructs a TLS_ECDHE_ECDSA_WITH_AES_128_CCM Cipher +func NewTLSEcdheEcdsaWithAes128Ccm() *Aes128Ccm { + return newAes128Ccm(clientcertificate.ECDSASign, TLS_ECDHE_ECDSA_WITH_AES_128_CCM, false, ciphersuite.CCMTagLength, KeyExchangeAlgorithmEcdhe, true) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm8.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm8.go new file mode 100644 index 000000000..81368f4ba --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_ccm8.go @@ -0,0 +1,11 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// NewTLSEcdheEcdsaWithAes128Ccm8 creates a new TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 CipherSuite +func NewTLSEcdheEcdsaWithAes128Ccm8() *Aes128Ccm { + return newAes128Ccm(clientcertificate.ECDSASign, TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8, false, ciphersuite.CCMTagLength8, KeyExchangeAlgorithmEcdhe, true) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_gcm_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_gcm_sha256.go new file mode 100644 index 000000000..3d1d5e21f --- /dev/null +++ 
b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_128_gcm_sha256.go @@ -0,0 +1,105 @@ +package ciphersuite + +import ( + "crypto/sha256" + "fmt" + "hash" + "sync/atomic" + + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// TLSEcdheEcdsaWithAes128GcmSha256 represents a TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 CipherSuite +type TLSEcdheEcdsaWithAes128GcmSha256 struct { + gcm atomic.Value // *cryptoGCM +} + +// CertificateType returns what type of certficate this CipherSuite exchanges +func (c *TLSEcdheEcdsaWithAes128GcmSha256) CertificateType() clientcertificate.Type { + return clientcertificate.ECDSASign +} + +// KeyExchangeAlgorithm controls what key exchange algorithm is using during the handshake +func (c *TLSEcdheEcdsaWithAes128GcmSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm { + return KeyExchangeAlgorithmEcdhe +} + +// ECC uses Elliptic Curve Cryptography +func (c *TLSEcdheEcdsaWithAes128GcmSha256) ECC() bool { + return true +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdheEcdsaWithAes128GcmSha256) ID() ID { + return TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +} + +func (c *TLSEcdheEcdsaWithAes128GcmSha256) String() string { + return "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256" +} + +// HashFunc returns the hashing func for this CipherSuite +func (c *TLSEcdheEcdsaWithAes128GcmSha256) HashFunc() func() hash.Hash { + return sha256.New +} + +// AuthenticationType controls what authentication method is using during the handshake +func (c *TLSEcdheEcdsaWithAes128GcmSha256) AuthenticationType() AuthenticationType { + return AuthenticationTypeCertificate +} + +// IsInitialized returns if the CipherSuite has keying material and can +// encrypt/decrypt packets +func (c *TLSEcdheEcdsaWithAes128GcmSha256) IsInitialized() bool { + return c.gcm.Load() != nil +} + +func (c *TLSEcdheEcdsaWithAes128GcmSha256) init(masterSecret, clientRandom, serverRandom []byte, isClient bool, prfMacLen, prfKeyLen, prfIvLen int, hashFunc func() hash.Hash) error { + keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, hashFunc) + if err != nil { + return err + } + + var gcm *ciphersuite.GCM + if isClient { + gcm, err = ciphersuite.NewGCM(keys.ClientWriteKey, keys.ClientWriteIV, keys.ServerWriteKey, keys.ServerWriteIV) + } else { + gcm, err = ciphersuite.NewGCM(keys.ServerWriteKey, keys.ServerWriteIV, keys.ClientWriteKey, keys.ClientWriteIV) + } + c.gcm.Store(gcm) + return err +} + +// Init initializes the internal Cipher with keying material +func (c *TLSEcdheEcdsaWithAes128GcmSha256) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const ( + prfMacLen = 0 + prfKeyLen = 16 + prfIvLen = 4 + ) + + return c.init(masterSecret, clientRandom, serverRandom, isClient, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc()) +} + +// Encrypt encrypts a single TLS RecordLayer +func (c *TLSEcdheEcdsaWithAes128GcmSha256) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + cipherSuite, ok := c.gcm.Load().(*ciphersuite.GCM) + if !ok { + return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Encrypt(pkt, raw) +} + +// Decrypt decrypts a single TLS RecordLayer +func (c *TLSEcdheEcdsaWithAes128GcmSha256) Decrypt(raw []byte) ([]byte, error) { + cipherSuite, ok := c.gcm.Load().(*ciphersuite.GCM) + 
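// Load returns nil until Init has stored the GCM, so the type
+ // assertion fails while the suite is uninitialized.
+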
if !ok { + return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Decrypt(raw) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_cbc_sha.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_cbc_sha.go new file mode 100644 index 000000000..a1e21fe9f --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_cbc_sha.go @@ -0,0 +1,111 @@ +package ciphersuite + +import ( + "crypto/sha1" //nolint: gosec,gci + "crypto/sha256" + "fmt" + "hash" + "sync/atomic" + + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// TLSEcdheEcdsaWithAes256CbcSha represents a TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA CipherSuite +type TLSEcdheEcdsaWithAes256CbcSha struct { + cbc atomic.Value // *cryptoCBC +} + +// CertificateType returns what type of certficate this CipherSuite exchanges +func (c *TLSEcdheEcdsaWithAes256CbcSha) CertificateType() clientcertificate.Type { + return clientcertificate.ECDSASign +} + +// KeyExchangeAlgorithm controls what key exchange algorithm is using during the handshake +func (c *TLSEcdheEcdsaWithAes256CbcSha) KeyExchangeAlgorithm() KeyExchangeAlgorithm { + return KeyExchangeAlgorithmEcdhe +} + +// ECC uses Elliptic Curve Cryptography +func (c *TLSEcdheEcdsaWithAes256CbcSha) ECC() bool { + return true +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdheEcdsaWithAes256CbcSha) ID() ID { + return TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +} + +func (c *TLSEcdheEcdsaWithAes256CbcSha) String() string { + return "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA" +} + +// HashFunc returns the hashing func for this CipherSuite +func (c *TLSEcdheEcdsaWithAes256CbcSha) HashFunc() func() hash.Hash { + return sha256.New +} + +// AuthenticationType controls what authentication method is using during the handshake +func (c *TLSEcdheEcdsaWithAes256CbcSha) AuthenticationType() AuthenticationType { + return AuthenticationTypeCertificate +} + +// IsInitialized returns if the CipherSuite has keying material and can +// encrypt/decrypt packets +func (c *TLSEcdheEcdsaWithAes256CbcSha) IsInitialized() bool { + return c.cbc.Load() != nil +} + +// Init initializes the internal Cipher with keying material +func (c *TLSEcdheEcdsaWithAes256CbcSha) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const ( + prfMacLen = 20 + prfKeyLen = 32 + prfIvLen = 16 + ) + + keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc()) + if err != nil { + return err + } + + var cbc *ciphersuite.CBC + if isClient { + cbc, err = ciphersuite.NewCBC( + keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey, + keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey, + sha1.New, + ) + } else { + cbc, err = ciphersuite.NewCBC( + keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey, + keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey, + sha1.New, + ) + } + c.cbc.Store(cbc) + + return err +} + +// Encrypt encrypts a single TLS RecordLayer +func (c *TLSEcdheEcdsaWithAes256CbcSha) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC) + if !ok { + return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit) + } + + return 
cipherSuite.Encrypt(pkt, raw) +} + +// Decrypt decrypts a single TLS RecordLayer +func (c *TLSEcdheEcdsaWithAes256CbcSha) Decrypt(raw []byte) ([]byte, error) { + cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC) + if !ok { + return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Decrypt(raw) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_gcm_sha384.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_gcm_sha384.go new file mode 100644 index 000000000..a2fe30244 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_ecdsa_with_aes_256_gcm_sha384.go @@ -0,0 +1,36 @@ +package ciphersuite + +import ( + "crypto/sha512" + "hash" +) + +// TLSEcdheEcdsaWithAes256GcmSha384 represents a TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 CipherSuite +type TLSEcdheEcdsaWithAes256GcmSha384 struct { + TLSEcdheEcdsaWithAes128GcmSha256 +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdheEcdsaWithAes256GcmSha384) ID() ID { + return TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +} + +func (c *TLSEcdheEcdsaWithAes256GcmSha384) String() string { + return "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" +} + +// HashFunc returns the hashing func for this CipherSuite +func (c *TLSEcdheEcdsaWithAes256GcmSha384) HashFunc() func() hash.Hash { + return sha512.New384 +} + +// Init initializes the internal Cipher with keying material +func (c *TLSEcdheEcdsaWithAes256GcmSha384) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const ( + prfMacLen = 0 + prfKeyLen = 32 + prfIvLen = 4 + ) + + return c.init(masterSecret, clientRandom, serverRandom, isClient, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc()) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_psk_with_aes_128_cbc_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_psk_with_aes_128_cbc_sha256.go new file mode 100644 index 000000000..28c049d89 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_psk_with_aes_128_cbc_sha256.go @@ -0,0 +1,115 @@ +package ciphersuite + +import ( + "crypto/sha256" + "fmt" + "hash" + "sync/atomic" + + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// TLSEcdhePskWithAes128CbcSha256 implements the TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 CipherSuite +type TLSEcdhePskWithAes128CbcSha256 struct { + cbc atomic.Value // *cryptoCBC +} + +// NewTLSEcdhePskWithAes128CbcSha256 creates TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 cipher. 
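+ // The returned suite authenticates with a pre-shared key but still performs
+ // an ECDHE exchange (KeyExchangeAlgorithmPsk | KeyExchangeAlgorithmEcdhe).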
+func NewTLSEcdhePskWithAes128CbcSha256() *TLSEcdhePskWithAes128CbcSha256 { + return &TLSEcdhePskWithAes128CbcSha256{} +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *TLSEcdhePskWithAes128CbcSha256) CertificateType() clientcertificate.Type { + return clientcertificate.Type(0) +} + +// KeyExchangeAlgorithm controls what key exchange algorithm is using during the handshake +func (c *TLSEcdhePskWithAes128CbcSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm { + return (KeyExchangeAlgorithmPsk | KeyExchangeAlgorithmEcdhe) +} + +// ECC uses Elliptic Curve Cryptography +func (c *TLSEcdhePskWithAes128CbcSha256) ECC() bool { + return true +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdhePskWithAes128CbcSha256) ID() ID { + return TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 +} + +func (c *TLSEcdhePskWithAes128CbcSha256) String() string { + return "TLS-ECDHE-PSK-WITH-AES-128-CBC-SHA256" +} + +// HashFunc returns the hashing func for this CipherSuite +func (c *TLSEcdhePskWithAes128CbcSha256) HashFunc() func() hash.Hash { + return sha256.New +} + +// AuthenticationType controls what authentication method is using during the handshake +func (c *TLSEcdhePskWithAes128CbcSha256) AuthenticationType() AuthenticationType { + return AuthenticationTypePreSharedKey +} + +// IsInitialized returns if the CipherSuite has keying material and can +// encrypt/decrypt packets +func (c *TLSEcdhePskWithAes128CbcSha256) IsInitialized() bool { + return c.cbc.Load() != nil +} + +// Init initializes the internal Cipher with keying material +func (c *TLSEcdhePskWithAes128CbcSha256) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const ( + prfMacLen = 32 + prfKeyLen = 16 + prfIvLen = 16 + ) + + keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc()) + if err != nil { + return err + } + + var cbc *ciphersuite.CBC + if isClient { + cbc, err = ciphersuite.NewCBC( + keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey, + keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey, + c.HashFunc(), + ) + } else { + cbc, err = ciphersuite.NewCBC( + keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey, + keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey, + c.HashFunc(), + ) + } + c.cbc.Store(cbc) + + return err +} + +// Encrypt encrypts a single TLS RecordLayer +func (c *TLSEcdhePskWithAes128CbcSha256) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC) + if !ok { // !c.isInitialized() + return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Encrypt(pkt, raw) +} + +// Decrypt decrypts a single TLS RecordLayer +func (c *TLSEcdhePskWithAes128CbcSha256) Decrypt(raw []byte) ([]byte, error) { + cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC) + if !ok { // !c.isInitialized() + return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Decrypt(raw) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_128_gcm_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_128_gcm_sha256.go new file mode 100644 index 000000000..70400c37d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_128_gcm_sha256.go @@ -0,0 +1,22 @@ +package ciphersuite + +import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + +// 
TLSEcdheRsaWithAes128GcmSha256 implements the TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 CipherSuite +type TLSEcdheRsaWithAes128GcmSha256 struct { + TLSEcdheEcdsaWithAes128GcmSha256 +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *TLSEcdheRsaWithAes128GcmSha256) CertificateType() clientcertificate.Type { + return clientcertificate.RSASign +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdheRsaWithAes128GcmSha256) ID() ID { + return TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +} + +func (c *TLSEcdheRsaWithAes128GcmSha256) String() string { + return "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_cbc_sha.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_cbc_sha.go new file mode 100644 index 000000000..0d82dc3ad --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_cbc_sha.go @@ -0,0 +1,22 @@ +package ciphersuite + +import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + +// TLSEcdheRsaWithAes256CbcSha implements the TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA CipherSuite +type TLSEcdheRsaWithAes256CbcSha struct { + TLSEcdheEcdsaWithAes256CbcSha +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *TLSEcdheRsaWithAes256CbcSha) CertificateType() clientcertificate.Type { + return clientcertificate.RSASign +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdheRsaWithAes256CbcSha) ID() ID { + return TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +} + +func (c *TLSEcdheRsaWithAes256CbcSha) String() string { + return "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA" +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_gcm_sha384.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_gcm_sha384.go new file mode 100644 index 000000000..3473527e7 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_ecdhe_rsa_with_aes_256_gcm_sha384.go @@ -0,0 +1,22 @@ +package ciphersuite + +import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + +// TLSEcdheRsaWithAes256GcmSha384 implements the TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 CipherSuite +type TLSEcdheRsaWithAes256GcmSha384 struct { + TLSEcdheEcdsaWithAes256GcmSha384 +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *TLSEcdheRsaWithAes256GcmSha384) CertificateType() clientcertificate.Type { + return clientcertificate.RSASign +} + +// ID returns the ID of the CipherSuite +func (c *TLSEcdheRsaWithAes256GcmSha384) ID() ID { + return TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +} + +func (c *TLSEcdheRsaWithAes256GcmSha384) String() string { + return "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_cbc_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_cbc_sha256.go new file mode 100644 index 000000000..5c63ad513 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_cbc_sha256.go @@ -0,0 +1,110 @@ +package ciphersuite + +import ( + "crypto/sha256" + "fmt" + "hash" + "sync/atomic" + + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// TLSPskWithAes128CbcSha256 implements the 
TLS_PSK_WITH_AES_128_CBC_SHA256 CipherSuite +type TLSPskWithAes128CbcSha256 struct { + cbc atomic.Value // *cryptoCBC +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *TLSPskWithAes128CbcSha256) CertificateType() clientcertificate.Type { + return clientcertificate.Type(0) +} + +// KeyExchangeAlgorithm controls what key exchange algorithm is using during the handshake +func (c *TLSPskWithAes128CbcSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm { + return KeyExchangeAlgorithmPsk +} + +// ECC uses Elliptic Curve Cryptography +func (c *TLSPskWithAes128CbcSha256) ECC() bool { + return false +} + +// ID returns the ID of the CipherSuite +func (c *TLSPskWithAes128CbcSha256) ID() ID { + return TLS_PSK_WITH_AES_128_CBC_SHA256 +} + +func (c *TLSPskWithAes128CbcSha256) String() string { + return "TLS_PSK_WITH_AES_128_CBC_SHA256" +} + +// HashFunc returns the hashing func for this CipherSuite +func (c *TLSPskWithAes128CbcSha256) HashFunc() func() hash.Hash { + return sha256.New +} + +// AuthenticationType controls what authentication method is using during the handshake +func (c *TLSPskWithAes128CbcSha256) AuthenticationType() AuthenticationType { + return AuthenticationTypePreSharedKey +} + +// IsInitialized returns if the CipherSuite has keying material and can +// encrypt/decrypt packets +func (c *TLSPskWithAes128CbcSha256) IsInitialized() bool { + return c.cbc.Load() != nil +} + +// Init initializes the internal Cipher with keying material +func (c *TLSPskWithAes128CbcSha256) Init(masterSecret, clientRandom, serverRandom []byte, isClient bool) error { + const ( + prfMacLen = 32 + prfKeyLen = 16 + prfIvLen = 16 + ) + + keys, err := prf.GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom, prfMacLen, prfKeyLen, prfIvLen, c.HashFunc()) + if err != nil { + return err + } + + var cbc *ciphersuite.CBC + if isClient { + cbc, err = ciphersuite.NewCBC( + keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey, + keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey, + c.HashFunc(), + ) + } else { + cbc, err = ciphersuite.NewCBC( + keys.ServerWriteKey, keys.ServerWriteIV, keys.ServerMACKey, + keys.ClientWriteKey, keys.ClientWriteIV, keys.ClientMACKey, + c.HashFunc(), + ) + } + c.cbc.Store(cbc) + + return err +} + +// Encrypt encrypts a single TLS RecordLayer +func (c *TLSPskWithAes128CbcSha256) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC) + if !ok { + return nil, fmt.Errorf("%w, unable to encrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Encrypt(pkt, raw) +} + +// Decrypt decrypts a single TLS RecordLayer +func (c *TLSPskWithAes128CbcSha256) Decrypt(raw []byte) ([]byte, error) { + cipherSuite, ok := c.cbc.Load().(*ciphersuite.CBC) + if !ok { + return nil, fmt.Errorf("%w, unable to decrypt", errCipherSuiteNotInit) + } + + return cipherSuite.Decrypt(raw) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm.go new file mode 100644 index 000000000..02b96404d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm.go @@ -0,0 +1,11 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// NewTLSPskWithAes128Ccm returns the TLS_PSK_WITH_AES_128_CCM CipherSuite +func NewTLSPskWithAes128Ccm() *Aes128Ccm { + 
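// PSK-only key exchange: no client certificate type, full-length CCM tag, no ECC.
+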
return newAes128Ccm(clientcertificate.Type(0), TLS_PSK_WITH_AES_128_CCM, true, ciphersuite.CCMTagLength, KeyExchangeAlgorithmPsk, false) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm8.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm8.go new file mode 100644 index 000000000..faf9cb41a --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_ccm8.go @@ -0,0 +1,11 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// NewTLSPskWithAes128Ccm8 returns the TLS_PSK_WITH_AES_128_CCM_8 CipherSuite +func NewTLSPskWithAes128Ccm8() *Aes128Ccm { + return newAes128Ccm(clientcertificate.Type(0), TLS_PSK_WITH_AES_128_CCM_8, true, ciphersuite.CCMTagLength8, KeyExchangeAlgorithmPsk, false) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_gcm_sha256.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_gcm_sha256.go new file mode 100644 index 000000000..98f7a4afe --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_128_gcm_sha256.go @@ -0,0 +1,32 @@ +package ciphersuite + +import "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + +// TLSPskWithAes128GcmSha256 implements the TLS_PSK_WITH_AES_128_GCM_SHA256 CipherSuite +type TLSPskWithAes128GcmSha256 struct { + TLSEcdheEcdsaWithAes128GcmSha256 +} + +// CertificateType returns what type of certificate this CipherSuite exchanges +func (c *TLSPskWithAes128GcmSha256) CertificateType() clientcertificate.Type { + return clientcertificate.Type(0) +} + +// KeyExchangeAlgorithm controls what key exchange algorithm is used during the handshake +func (c *TLSPskWithAes128GcmSha256) KeyExchangeAlgorithm() KeyExchangeAlgorithm { + return KeyExchangeAlgorithmPsk +} + +// ID returns the ID of the CipherSuite +func (c *TLSPskWithAes128GcmSha256) ID() ID { + return TLS_PSK_WITH_AES_128_GCM_SHA256 +} + +func (c *TLSPskWithAes128GcmSha256) String() string { + return "TLS_PSK_WITH_AES_128_GCM_SHA256" +} + +// AuthenticationType controls what authentication method is used during the handshake +func (c *TLSPskWithAes128GcmSha256) AuthenticationType() AuthenticationType { + return AuthenticationTypePreSharedKey +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_256_ccm8.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_256_ccm8.go new file mode 100644 index 000000000..9058ff25d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/tls_psk_with_aes_256_ccm8.go @@ -0,0 +1,11 @@ +package ciphersuite + +import ( + "github.com/pion/dtls/v2/pkg/crypto/ciphersuite" + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" +) + +// NewTLSPskWithAes256Ccm8 returns the TLS_PSK_WITH_AES_256_CCM_8 CipherSuite +func NewTLSPskWithAes256Ccm8() *Aes256Ccm { + return newAes256Ccm(clientcertificate.Type(0), TLS_PSK_WITH_AES_256_CCM_8, true, ciphersuite.CCMTagLength8, KeyExchangeAlgorithmPsk, false) +} diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/authentication_type.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/authentication_type.go new file mode 100644 index 000000000..75d599fe3 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/authentication_type.go @@ -0,0 +1,11 @@ +package types + +// AuthenticationType controls what authentication method
is used during the handshake +type AuthenticationType int + +// AuthenticationType Enums +const ( + AuthenticationTypeCertificate AuthenticationType = iota + 1 + AuthenticationTypePreSharedKey + AuthenticationTypeAnonymous +) diff --git a/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/key_exchange_algorithm.go b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/key_exchange_algorithm.go new file mode 100644 index 000000000..fbf83471f --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/ciphersuite/types/key_exchange_algorithm.go @@ -0,0 +1,17 @@ +// Package types provides types for TLS Ciphers +package types + +// KeyExchangeAlgorithm controls what key exchange algorithm was chosen. +type KeyExchangeAlgorithm int + +// KeyExchangeAlgorithm Bitmask +const ( + KeyExchangeAlgorithmNone KeyExchangeAlgorithm = 0 + KeyExchangeAlgorithmPsk KeyExchangeAlgorithm = iota << 1 + KeyExchangeAlgorithmEcdhe +) + +// Has checks whether the given KeyExchangeAlgorithm is supported. +func (a KeyExchangeAlgorithm) Has(v KeyExchangeAlgorithm) bool { + return (a & v) == v +} diff --git a/vendor/github.com/pion/dtls/v2/internal/closer/closer.go b/vendor/github.com/pion/dtls/v2/internal/closer/closer.go new file mode 100644 index 000000000..b99e13e44 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/closer/closer.go @@ -0,0 +1,45 @@ +// Package closer provides a signaling channel for shutdown +package closer + +import ( + "context" +) + +// Closer allows signaling a channel for shutdown +type Closer struct { + ctx context.Context + closeFunc func() +} + +// NewCloser creates a new instance of Closer +func NewCloser() *Closer { + ctx, closeFunc := context.WithCancel(context.Background()) + return &Closer{ + ctx: ctx, + closeFunc: closeFunc, + } +} + +// NewCloserWithParent creates a new instance of Closer with a parent context +func NewCloserWithParent(ctx context.Context) *Closer { + ctx, closeFunc := context.WithCancel(ctx) + return &Closer{ + ctx: ctx, + closeFunc: closeFunc, + } +} + +// Done returns a channel signaling when it is done +func (c *Closer) Done() <-chan struct{} { + return c.ctx.Done() +} + +// Err returns an error of the context +func (c *Closer) Err() error { + return c.ctx.Err() +} + +// Close sends a signal to trigger the ctx done channel +func (c *Closer) Close() { + c.closeFunc() +} diff --git a/vendor/github.com/pion/dtls/v2/internal/util/util.go b/vendor/github.com/pion/dtls/v2/internal/util/util.go new file mode 100644 index 000000000..746a670f4 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/internal/util/util.go @@ -0,0 +1,39 @@ +// Package util contains small helpers used across the repo +package util + +import ( + "encoding/binary" +) + +// BigEndianUint24 returns the value of a big endian uint24 +func BigEndianUint24(raw []byte) uint32 { + if len(raw) < 3 { + return 0 + } + + rawCopy := make([]byte, 4) + copy(rawCopy[1:], raw) + return binary.BigEndian.Uint32(rawCopy) +} + +// PutBigEndianUint24 encodes a uint24 and places it into out +func PutBigEndianUint24(out []byte, in uint32) { + tmp := make([]byte, 4) + binary.BigEndian.PutUint32(tmp, in) + copy(out, tmp[1:]) +} + +// PutBigEndianUint48 encodes a uint64 as a uint48 and places it into out +func PutBigEndianUint48(out []byte, in uint64) { + tmp := make([]byte, 8) + binary.BigEndian.PutUint64(tmp, in) + copy(out, tmp[2:]) +} + +// Max returns the larger value +func Max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/pion/dtls/v2/listener.go
b/vendor/github.com/pion/dtls/v2/listener.go new file mode 100644 index 000000000..bf80345b1 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/listener.go @@ -0,0 +1,80 @@ +package dtls + +import ( + "net" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + "github.com/pion/udp" +) + +// Listen creates a DTLS listener +func Listen(network string, laddr *net.UDPAddr, config *Config) (net.Listener, error) { + if err := validateConfig(config); err != nil { + return nil, err + } + + lc := udp.ListenConfig{ + AcceptFilter: func(packet []byte) bool { + pkts, err := recordlayer.UnpackDatagram(packet) + if err != nil || len(pkts) < 1 { + return false + } + h := &recordlayer.Header{} + if err := h.Unmarshal(pkts[0]); err != nil { + return false + } + return h.ContentType == protocol.ContentTypeHandshake + }, + } + parent, err := lc.Listen(network, laddr) + if err != nil { + return nil, err + } + return &listener{ + config: config, + parent: parent, + }, nil +} + +// NewListener creates a DTLS listener which accepts connections from an inner Listener. +func NewListener(inner net.Listener, config *Config) (net.Listener, error) { + if err := validateConfig(config); err != nil { + return nil, err + } + + return &listener{ + config: config, + parent: inner, + }, nil +} + +// listener represents a DTLS listener +type listener struct { + config *Config + parent net.Listener +} + +// Accept waits for and returns the next connection to the listener. +// You have to either close or read on all connection that are created. +// Connection handshake will timeout using ConnectContextMaker in the Config. +// If you want to specify the timeout duration, set ConnectContextMaker. +func (l *listener) Accept() (net.Conn, error) { + c, err := l.parent.Accept() + if err != nil { + return nil, err + } + return Server(c, l.config) +} + +// Close closes the listener. +// Any blocked Accept operations will be unblocked and return errors. +// Already Accepted connections are not closed. +func (l *listener) Close() error { + return l.parent.Close() +} + +// Addr returns the listener's network address. +func (l *listener) Addr() net.Addr { + return l.parent.Addr() +} diff --git a/vendor/github.com/pion/dtls/v2/packet.go b/vendor/github.com/pion/dtls/v2/packet.go new file mode 100644 index 000000000..8366a3c3d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/packet.go @@ -0,0 +1,9 @@ +package dtls + +import "github.com/pion/dtls/v2/pkg/protocol/recordlayer" + +type packet struct { + record *recordlayer.RecordLayer + shouldEncrypt bool + resetLocalSequenceNumber bool +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ccm/ccm.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ccm/ccm.go new file mode 100644 index 000000000..20e3436e2 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ccm/ccm.go @@ -0,0 +1,251 @@ +// Package ccm implements a CCM, Counter with CBC-MAC +// as per RFC 3610. +// +// See https://tools.ietf.org/html/rfc3610 +// +// This code was lifted from https://github.com/bocajim/dtls/blob/a3300364a283fcb490d28a93d7fcfa7ba437fbbe/ccm/ccm.go +// and as such was not written by the Pions authors. Like Pions this +// code is licensed under MIT. +// +// A request for including CCM into the Go standard library +// can be found as issue #27484 on the https://github.com/golang/go/ +// repository. +package ccm + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" + "math" +) + +// ccm represents a Counter with CBC-MAC with a specific key. 
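+// The field M is the MAC (tag) length in octets and L is the number of octets +// in the length field, so the nonce is 15-L octets long (RFC 3610, Section 2).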
+type ccm struct { + b cipher.Block + M uint8 + L uint8 +} + +const ccmBlockSize = 16 + +// CCM is a block cipher in Counter with CBC-MAC mode, +// providing authenticated encryption with associated data via the cipher.AEAD interface. +type CCM interface { + cipher.AEAD + // MaxLength returns the maximum length of plaintext in calls to Seal. + // The maximum length of ciphertext in calls to Open is MaxLength()+Overhead(). + // The maximum length is related to CCM's `L` parameter (15-noncesize) and + // is 1<<(8*L) - 1 (but also limited by the maximum size of an int). + MaxLength() int +} + +var ( + errInvalidBlockSize = errors.New("ccm: NewCCM requires 128-bit block cipher") + errInvalidTagSize = errors.New("ccm: tagsize must be 4, 6, 8, 10, 12, 14, or 16") + errInvalidNonceSize = errors.New("ccm: invalid nonce size") +) + +// NewCCM returns the given 128-bit block cipher wrapped in CCM. +// The tagsize must be an even integer between 4 and 16 inclusive +// and is used as CCM's `M` parameter. +// The noncesize must be an integer between 7 and 13 inclusive, +// 15-noncesize is used as CCM's `L` parameter. +func NewCCM(b cipher.Block, tagsize, noncesize int) (CCM, error) { + if b.BlockSize() != ccmBlockSize { + return nil, errInvalidBlockSize + } + if tagsize < 4 || tagsize > 16 || tagsize&1 != 0 { + return nil, errInvalidTagSize + } + lensize := 15 - noncesize + if lensize < 2 || lensize > 8 { + return nil, errInvalidNonceSize + } + c := &ccm{b: b, M: uint8(tagsize), L: uint8(lensize)} + return c, nil +} + +func (c *ccm) NonceSize() int { return 15 - int(c.L) } +func (c *ccm) Overhead() int { return int(c.M) } +func (c *ccm) MaxLength() int { return maxlen(c.L, c.Overhead()) } + +func maxlen(l uint8, tagsize int) int { + max := (uint64(1) << (8 * l)) - 1 + if m64 := uint64(math.MaxInt64) - uint64(tagsize); l > 8 || max > m64 { + max = m64 // The maximum length on a 64-bit arch + } + if max != uint64(int(max)) { + return math.MaxInt32 - tagsize // We only have 32-bit ints + } + return int(max) +} + +// MaxNonceLength returns the maximum nonce length for a given plaintext length. +// A return value <= 0 indicates that plaintext length is too large for +// any nonce length.
+func MaxNonceLength(pdatalen int) int { + const tagsize = 16 + for L := 2; L <= 8; L++ { + if maxlen(uint8(L), tagsize) >= pdatalen { + return 15 - L + } + } + return 0 +} + +func (c *ccm) cbcRound(mac, data []byte) { + for i := 0; i < ccmBlockSize; i++ { + mac[i] ^= data[i] + } + c.b.Encrypt(mac, mac) +} + +func (c *ccm) cbcData(mac, data []byte) { + for len(data) >= ccmBlockSize { + c.cbcRound(mac, data[:ccmBlockSize]) + data = data[ccmBlockSize:] + } + if len(data) > 0 { + var block [ccmBlockSize]byte + copy(block[:], data) + c.cbcRound(mac, block[:]) + } +} + +var errPlaintextTooLong = errors.New("ccm: plaintext too large") + +func (c *ccm) tag(nonce, plaintext, adata []byte) ([]byte, error) { + var mac [ccmBlockSize]byte + + if len(adata) > 0 { + mac[0] |= 1 << 6 + } + mac[0] |= (c.M - 2) << 2 + mac[0] |= c.L - 1 + if len(nonce) != c.NonceSize() { + return nil, errInvalidNonceSize + } + if len(plaintext) > c.MaxLength() { + return nil, errPlaintextTooLong + } + binary.BigEndian.PutUint64(mac[ccmBlockSize-8:], uint64(len(plaintext))) + copy(mac[1:ccmBlockSize-c.L], nonce) + c.b.Encrypt(mac[:], mac[:]) + + var block [ccmBlockSize]byte + if n := uint64(len(adata)); n > 0 { + // First adata block includes adata length + i := 2 + if n <= 0xfeff { + binary.BigEndian.PutUint16(block[:i], uint16(n)) + } else { + block[0] = 0xfe + block[1] = 0xff + if n < uint64(1<<32) { + i = 2 + 4 + binary.BigEndian.PutUint32(block[2:i], uint32(n)) + } else { + i = 2 + 8 + binary.BigEndian.PutUint64(block[2:i], n) + } + } + i = copy(block[i:], adata) + c.cbcRound(mac[:], block[:]) + c.cbcData(mac[:], adata[i:]) + } + + if len(plaintext) > 0 { + c.cbcData(mac[:], plaintext) + } + + return mac[:c.M], nil +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +// From crypto/cipher/gcm.go +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal encrypts and authenticates plaintext, authenticates the +// additional data and appends the result to dst, returning the updated +// slice. The nonce must be NonceSize() bytes long and unique for all +// time, for a given key. +// The plaintext must be no longer than MaxLength() bytes long. +// +// The plaintext and dst may alias exactly or not at all. +func (c *ccm) Seal(dst, nonce, plaintext, adata []byte) []byte { + tag, err := c.tag(nonce, plaintext, adata) + if err != nil { + // The cipher.AEAD interface doesn't allow for an error return. 
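+ // tag() only fails on a misused nonce size or an oversized plaintext, which + // are programmer errors, so panicking here mirrors crypto/cipher's GCM Seal.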
+ panic(err) // nolint + } + + var iv, s0 [ccmBlockSize]byte + iv[0] = c.L - 1 + copy(iv[1:ccmBlockSize-c.L], nonce) + c.b.Encrypt(s0[:], iv[:]) + for i := 0; i < int(c.M); i++ { + tag[i] ^= s0[i] + } + iv[len(iv)-1] |= 1 + stream := cipher.NewCTR(c.b, iv[:]) + ret, out := sliceForAppend(dst, len(plaintext)+int(c.M)) + stream.XORKeyStream(out, plaintext) + copy(out[len(plaintext):], tag) + return ret +} + +var ( + errOpen = errors.New("ccm: message authentication failed") + errCiphertextTooShort = errors.New("ccm: ciphertext too short") + errCiphertextTooLong = errors.New("ccm: ciphertext too long") +) + +func (c *ccm) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(ciphertext) < int(c.M) { + return nil, errCiphertextTooShort + } + if len(ciphertext) > c.MaxLength()+c.Overhead() { + return nil, errCiphertextTooLong + } + + tag := make([]byte, int(c.M)) + copy(tag, ciphertext[len(ciphertext)-int(c.M):]) + ciphertextWithoutTag := ciphertext[:len(ciphertext)-int(c.M)] + + var iv, s0 [ccmBlockSize]byte + iv[0] = c.L - 1 + copy(iv[1:ccmBlockSize-c.L], nonce) + c.b.Encrypt(s0[:], iv[:]) + for i := 0; i < int(c.M); i++ { + tag[i] ^= s0[i] + } + iv[len(iv)-1] |= 1 + stream := cipher.NewCTR(c.b, iv[:]) + + // Cannot decrypt directly to dst since we're not supposed to + // reveal the plaintext to the caller if authentication fails. + plaintext := make([]byte, len(ciphertextWithoutTag)) + stream.XORKeyStream(plaintext, ciphertextWithoutTag) + expectedTag, err := c.tag(nonce, plaintext, adata) + if err != nil { + return nil, err + } + + if subtle.ConstantTimeCompare(tag, expectedTag) != 1 { + return nil, errOpen + } + return append(dst, plaintext...), nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/cbc.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/cbc.go new file mode 100644 index 000000000..9cb123b98 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/cbc.go @@ -0,0 +1,174 @@ +package ciphersuite + +import ( //nolint:gci + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/rand" + "encoding/binary" + "hash" + + "github.com/pion/dtls/v2/internal/util" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// block ciphers using cipher block chaining. 
+type cbcMode interface { + cipher.BlockMode + SetIV([]byte) +} + +// CBC provides an API to Encrypt/Decrypt DTLS 1.2 Packets +type CBC struct { + writeCBC, readCBC cbcMode + writeMac, readMac []byte + h prf.HashFunc +} + +// NewCBC creates a DTLS CBC Cipher +func NewCBC(localKey, localWriteIV, localMac, remoteKey, remoteWriteIV, remoteMac []byte, h prf.HashFunc) (*CBC, error) { + writeBlock, err := aes.NewCipher(localKey) + if err != nil { + return nil, err + } + + readBlock, err := aes.NewCipher(remoteKey) + if err != nil { + return nil, err + } + + writeCBC, ok := cipher.NewCBCEncrypter(writeBlock, localWriteIV).(cbcMode) + if !ok { + return nil, errFailedToCast + } + + readCBC, ok := cipher.NewCBCDecrypter(readBlock, remoteWriteIV).(cbcMode) + if !ok { + return nil, errFailedToCast + } + + return &CBC{ + writeCBC: writeCBC, + writeMac: localMac, + + readCBC: readCBC, + readMac: remoteMac, + h: h, + }, nil +} + +// Encrypt encrypts a DTLS RecordLayer message +func (c *CBC) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + payload := raw[recordlayer.HeaderSize:] + raw = raw[:recordlayer.HeaderSize] + blockSize := c.writeCBC.BlockSize() + + // Generate + Append MAC + h := pkt.Header + + MAC, err := c.hmac(h.Epoch, h.SequenceNumber, h.ContentType, h.Version, payload, c.writeMac, c.h) + if err != nil { + return nil, err + } + payload = append(payload, MAC...) + + // Generate + Append padding + padding := make([]byte, blockSize-len(payload)%blockSize) + paddingLen := len(padding) + for i := 0; i < paddingLen; i++ { + padding[i] = byte(paddingLen - 1) + } + payload = append(payload, padding...) + + // Generate IV + iv := make([]byte, blockSize) + if _, err := rand.Read(iv); err != nil { + return nil, err + } + + // Set IV + Encrypt + Prepend IV + c.writeCBC.SetIV(iv) + c.writeCBC.CryptBlocks(payload, payload) + payload = append(iv, payload...) + + // Prepend the unencrypted header to the encrypted payload + raw = append(raw, payload...)
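+ // raw is now header || IV || CBC(plaintext || MAC || padding)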
+ + // Update recordLayer size to include IV+MAC+Padding + binary.BigEndian.PutUint16(raw[recordlayer.HeaderSize-2:], uint16(len(raw)-recordlayer.HeaderSize)) + + return raw, nil +} + +// Decrypt decrypts a DTLS RecordLayer message +func (c *CBC) Decrypt(in []byte) ([]byte, error) { + body := in[recordlayer.HeaderSize:] + blockSize := c.readCBC.BlockSize() + mac := c.h() + + var h recordlayer.Header + err := h.Unmarshal(in) + switch { + case err != nil: + return nil, err + case h.ContentType == protocol.ContentTypeChangeCipherSpec: + // Nothing to decrypt with ChangeCipherSpec + return in, nil + case len(body)%blockSize != 0 || len(body) < blockSize+util.Max(mac.Size()+1, blockSize): + return nil, errNotEnoughRoomForNonce + } + + // Set + remove per record IV + c.readCBC.SetIV(body[:blockSize]) + body = body[blockSize:] + + // Decrypt + c.readCBC.CryptBlocks(body, body) + + // Padding+MAC needs to be checked in constant time + // Otherwise we reveal information about the level of correctness + paddingLen, paddingGood := examinePadding(body) + if paddingGood != 255 { + return nil, errInvalidMAC + } + + macSize := mac.Size() + if len(body) < macSize { + return nil, errInvalidMAC + } + + dataEnd := len(body) - macSize - paddingLen + + expectedMAC := body[dataEnd : dataEnd+macSize] + actualMAC, err := c.hmac(h.Epoch, h.SequenceNumber, h.ContentType, h.Version, body[:dataEnd], c.readMac, c.h) + + // Compute Local MAC and compare + if err != nil || !hmac.Equal(actualMAC, expectedMAC) { + return nil, errInvalidMAC + } + + return append(in[:recordlayer.HeaderSize], body[:dataEnd]...), nil +} + +func (c *CBC) hmac(epoch uint16, sequenceNumber uint64, contentType protocol.ContentType, protocolVersion protocol.Version, payload []byte, key []byte, hf func() hash.Hash) ([]byte, error) { + h := hmac.New(hf, key) + + msg := make([]byte, 13) + + binary.BigEndian.PutUint16(msg, epoch) + util.PutBigEndianUint48(msg[2:], sequenceNumber) + msg[8] = byte(contentType) + msg[9] = protocolVersion.Major + msg[10] = protocolVersion.Minor + binary.BigEndian.PutUint16(msg[11:], uint16(len(payload))) + + if _, err := h.Write(msg); err != nil { + return nil, err + } else if _, err := h.Write(payload); err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ccm.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ccm.go new file mode 100644 index 000000000..354b1cc50 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ccm.go @@ -0,0 +1,104 @@ +package ciphersuite + +import ( + "crypto/aes" + "crypto/rand" + "encoding/binary" + "fmt" + + "github.com/pion/dtls/v2/pkg/crypto/ccm" + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +// CCMTagLen is the length of the Authentication Tag +type CCMTagLen int + +// CCM Enums +const ( + CCMTagLength8 CCMTagLen = 8 + CCMTagLength CCMTagLen = 16 + ccmNonceLength = 12 +) + +// CCM provides an API to Encrypt/Decrypt DTLS 1.2 Packets +type CCM struct { + localCCM, remoteCCM ccm.CCM + localWriteIV, remoteWriteIV []byte + tagLen CCMTagLen +} + +// NewCCM creates a DTLS CCM Cipher +func NewCCM(tagLen CCMTagLen, localKey, localWriteIV, remoteKey, remoteWriteIV []byte) (*CCM, error) { + localBlock, err := aes.NewCipher(localKey) + if err != nil { + return nil, err + } + localCCM, err := ccm.NewCCM(localBlock, int(tagLen), ccmNonceLength) + if err != nil { + return nil, err + } + + remoteBlock, err := aes.NewCipher(remoteKey) + if err != nil { +
return nil, err + } + remoteCCM, err := ccm.NewCCM(remoteBlock, int(tagLen), ccmNonceLength) + if err != nil { + return nil, err + } + + return &CCM{ + localCCM: localCCM, + localWriteIV: localWriteIV, + remoteCCM: remoteCCM, + remoteWriteIV: remoteWriteIV, + tagLen: tagLen, + }, nil +} + +// Encrypt encrypt a DTLS RecordLayer message +func (c *CCM) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + payload := raw[recordlayer.HeaderSize:] + raw = raw[:recordlayer.HeaderSize] + + nonce := append(append([]byte{}, c.localWriteIV[:4]...), make([]byte, 8)...) + if _, err := rand.Read(nonce[4:]); err != nil { + return nil, err + } + + additionalData := generateAEADAdditionalData(&pkt.Header, len(payload)) + encryptedPayload := c.localCCM.Seal(nil, nonce, payload, additionalData) + + encryptedPayload = append(nonce[4:], encryptedPayload...) + raw = append(raw, encryptedPayload...) + + // Update recordLayer size to include explicit nonce + binary.BigEndian.PutUint16(raw[recordlayer.HeaderSize-2:], uint16(len(raw)-recordlayer.HeaderSize)) + return raw, nil +} + +// Decrypt decrypts a DTLS RecordLayer message +func (c *CCM) Decrypt(in []byte) ([]byte, error) { + var h recordlayer.Header + err := h.Unmarshal(in) + switch { + case err != nil: + return nil, err + case h.ContentType == protocol.ContentTypeChangeCipherSpec: + // Nothing to encrypt with ChangeCipherSpec + return in, nil + case len(in) <= (8 + recordlayer.HeaderSize): + return nil, errNotEnoughRoomForNonce + } + + nonce := append(append([]byte{}, c.remoteWriteIV[:4]...), in[recordlayer.HeaderSize:recordlayer.HeaderSize+8]...) + out := in[recordlayer.HeaderSize+8:] + + additionalData := generateAEADAdditionalData(&h, len(out)-int(c.tagLen)) + out, err = c.remoteCCM.Open(out[:0], nonce, out, additionalData) + if err != nil { + return nil, fmt.Errorf("%w: %v", errDecryptPacket, err) + } + return append(in[:recordlayer.HeaderSize], out...), nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ciphersuite.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ciphersuite.go new file mode 100644 index 000000000..afe63d8ae --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/ciphersuite.go @@ -0,0 +1,73 @@ +// Package ciphersuite provides the crypto operations needed for a DTLS CipherSuite +package ciphersuite + +import ( + "encoding/binary" + "errors" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +var ( + errNotEnoughRoomForNonce = &protocol.InternalError{Err: errors.New("buffer not long enough to contain nonce")} //nolint:goerr113 + errDecryptPacket = &protocol.TemporaryError{Err: errors.New("failed to decrypt packet")} //nolint:goerr113 + errInvalidMAC = &protocol.TemporaryError{Err: errors.New("invalid mac")} //nolint:goerr113 + errFailedToCast = &protocol.FatalError{Err: errors.New("failed to cast")} //nolint:goerr113 +) + +func generateAEADAdditionalData(h *recordlayer.Header, payloadLen int) []byte { + var additionalData [13]byte + // SequenceNumber MUST be set first + // we only want uint48, clobbering an extra 2 (using uint64, Golang doesn't have uint48) + binary.BigEndian.PutUint64(additionalData[:], h.SequenceNumber) + binary.BigEndian.PutUint16(additionalData[:], h.Epoch) + additionalData[8] = byte(h.ContentType) + additionalData[9] = h.Version.Major + additionalData[10] = h.Version.Minor + binary.BigEndian.PutUint16(additionalData[len(additionalData)-2:], uint16(payloadLen)) + + return additionalData[:] +} + +// 
examinePadding returns, in constant time, the length of the padding to remove +// from the end of payload. It also returns a byte which is equal to 255 if the +// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2. +// +// https://github.com/golang/go/blob/039c2081d1178f90a8fa2f4e6958693129f8de33/src/crypto/tls/conn.go#L245 +func examinePadding(payload []byte) (toRemove int, good byte) { + if len(payload) < 1 { + return 0, 0 + } + + paddingLen := payload[len(payload)-1] + t := uint(len(payload)-1) - uint(paddingLen) + // if len(payload) >= (paddingLen - 1) then the MSB of t is zero + good = byte(int32(^t) >> 31) + + // The maximum possible padding length plus the actual length field + toCheck := 256 + // The length of the padded data is public, so we can use an if here + if toCheck > len(payload) { + toCheck = len(payload) + } + + for i := 0; i < toCheck; i++ { + t := uint(paddingLen) - uint(i) + // if i <= paddingLen then the MSB of t is zero + mask := byte(int32(^t) >> 31) + b := payload[len(payload)-1-i] + good &^= mask&paddingLen ^ mask&b + } + + // We AND together the bits of good and replicate the result across + // all the bits. + good &= good << 4 + good &= good << 2 + good &= good << 1 + good = uint8(int8(good) >> 7) + + toRemove = int(paddingLen) + 1 + + return toRemove, good +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/gcm.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/gcm.go new file mode 100644 index 000000000..af986d46e --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/ciphersuite/gcm.go @@ -0,0 +1,100 @@ +package ciphersuite + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/binary" + "fmt" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/recordlayer" +) + +const ( + gcmTagLength = 16 + gcmNonceLength = 12 +) + +// GCM Provides an API to Encrypt/Decrypt DTLS 1.2 Packets +type GCM struct { + localGCM, remoteGCM cipher.AEAD + localWriteIV, remoteWriteIV []byte +} + +// NewGCM creates a DTLS GCM Cipher +func NewGCM(localKey, localWriteIV, remoteKey, remoteWriteIV []byte) (*GCM, error) { + localBlock, err := aes.NewCipher(localKey) + if err != nil { + return nil, err + } + localGCM, err := cipher.NewGCM(localBlock) + if err != nil { + return nil, err + } + + remoteBlock, err := aes.NewCipher(remoteKey) + if err != nil { + return nil, err + } + remoteGCM, err := cipher.NewGCM(remoteBlock) + if err != nil { + return nil, err + } + + return &GCM{ + localGCM: localGCM, + localWriteIV: localWriteIV, + remoteGCM: remoteGCM, + remoteWriteIV: remoteWriteIV, + }, nil +} + +// Encrypt encrypt a DTLS RecordLayer message +func (g *GCM) Encrypt(pkt *recordlayer.RecordLayer, raw []byte) ([]byte, error) { + payload := raw[recordlayer.HeaderSize:] + raw = raw[:recordlayer.HeaderSize] + + nonce := make([]byte, gcmNonceLength) + copy(nonce, g.localWriteIV[:4]) + if _, err := rand.Read(nonce[4:]); err != nil { + return nil, err + } + + additionalData := generateAEADAdditionalData(&pkt.Header, len(payload)) + encryptedPayload := g.localGCM.Seal(nil, nonce, payload, additionalData) + r := make([]byte, len(raw)+len(nonce[4:])+len(encryptedPayload)) + copy(r, raw) + copy(r[len(raw):], nonce[4:]) + copy(r[len(raw)+len(nonce[4:]):], encryptedPayload) + + // Update recordLayer size to include explicit nonce + binary.BigEndian.PutUint16(r[recordlayer.HeaderSize-2:], uint16(len(r)-recordlayer.HeaderSize)) + return r, nil +} + +// Decrypt decrypts a DTLS RecordLayer message +func (g *GCM) 
Decrypt(in []byte) ([]byte, error) { + var h recordlayer.Header + err := h.Unmarshal(in) + switch { + case err != nil: + return nil, err + case h.ContentType == protocol.ContentTypeChangeCipherSpec: + // Nothing to decrypt with ChangeCipherSpec + return in, nil + case len(in) <= (8 + recordlayer.HeaderSize): + return nil, errNotEnoughRoomForNonce + } + + nonce := make([]byte, 0, gcmNonceLength) + nonce = append(append(nonce, g.remoteWriteIV[:4]...), in[recordlayer.HeaderSize:recordlayer.HeaderSize+8]...) + out := in[recordlayer.HeaderSize+8:] + + additionalData := generateAEADAdditionalData(&h, len(out)-gcmTagLength) + out, err = g.remoteGCM.Open(out[:0], nonce, out, additionalData) + if err != nil { + return nil, fmt.Errorf("%w: %v", errDecryptPacket, err) + } + return append(in[:recordlayer.HeaderSize], out...), nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/clientcertificate/client_certificate.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/clientcertificate/client_certificate.go new file mode 100644 index 000000000..c222c01c7 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/clientcertificate/client_certificate.go @@ -0,0 +1,22 @@ +// Package clientcertificate provides all the supported Client Certificate types +package clientcertificate + +// Type is used to communicate what +// type of certificate is being transported +// +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-2 +type Type byte + +// ClientCertificateType enums +const ( + RSASign Type = 1 + ECDSASign Type = 64 +) + +// Types returns all valid ClientCertificate Types +func Types() map[Type]bool { + return map[Type]bool{ + RSASign: true, + ECDSASign: true, + } +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/elliptic/elliptic.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/elliptic/elliptic.go new file mode 100644 index 000000000..023bf9035 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/elliptic/elliptic.go @@ -0,0 +1,112 @@ +// Package elliptic provides elliptic curve cryptography for DTLS +package elliptic + +import ( + "crypto/elliptic" + "crypto/rand" + "errors" + "fmt" + + "golang.org/x/crypto/curve25519" +) + +var errInvalidNamedCurve = errors.New("invalid named curve") + +// CurvePointFormat is used to represent the IANA registered curve points +// +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 +type CurvePointFormat byte + +// CurvePointFormat enums +const ( + CurvePointFormatUncompressed CurvePointFormat = 0 +) + +// Keypair is a Curve with a Private/Public Keypair +type Keypair struct { + Curve Curve + PublicKey []byte + PrivateKey []byte +} + +// CurveType is used to represent the IANA registered curve types for TLS +// +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-10 +type CurveType byte + +// CurveType enums +const ( + CurveTypeNamedCurve CurveType = 0x03 +) + +// CurveTypes returns all known curves +func CurveTypes() map[CurveType]struct{} { + return map[CurveType]struct{}{ + CurveTypeNamedCurve: {}, + } +} + +// Curve is used to represent the IANA registered curves for TLS +// +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 +type Curve uint16 + +// Curve enums +const ( + P256 Curve = 0x0017 + P384 Curve = 0x0018 + X25519 Curve = 0x001d +) + +func (c Curve) String() string { + switch c { + case P256: + return "P-256" + case P384: + return "P-384" + case X25519: + return "X25519" + } + return
fmt.Sprintf("%#x", uint16(c)) +} + +// Curves returns all curves we implement +func Curves() map[Curve]bool { + return map[Curve]bool{ + X25519: true, + P256: true, + P384: true, + } +} + +// GenerateKeypair generates a keypair for the given Curve +func GenerateKeypair(c Curve) (*Keypair, error) { + switch c { //nolint:revive + case X25519: + tmp := make([]byte, 32) + if _, err := rand.Read(tmp); err != nil { + return nil, err + } + + var public, private [32]byte + copy(private[:], tmp) + + curve25519.ScalarBaseMult(&public, &private) + return &Keypair{X25519, public[:], private[:]}, nil + case P256: + return ellipticCurveKeypair(P256, elliptic.P256(), elliptic.P256()) + case P384: + return ellipticCurveKeypair(P384, elliptic.P384(), elliptic.P384()) + default: + return nil, errInvalidNamedCurve + } +} + +func ellipticCurveKeypair(nc Curve, c1, c2 elliptic.Curve) (*Keypair, error) { + privateKey, x, y, err := elliptic.GenerateKey(c1, rand.Reader) + if err != nil { + return nil, err + } + + return &Keypair{nc, elliptic.Marshal(c2, x, y), privateKey}, nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/hash/hash.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/hash/hash.go new file mode 100644 index 000000000..660326f78 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/hash/hash.go @@ -0,0 +1,126 @@ +// Package hash provides TLS HashAlgorithm as defined in TLS 1.2 +package hash + +import ( //nolint:gci + "crypto" + "crypto/md5" //nolint:gosec + "crypto/sha1" //nolint:gosec + "crypto/sha256" + "crypto/sha512" +) + +// Algorithm is used to indicate the hash algorithm used +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-18 +type Algorithm uint16 + +// Supported hash algorithms +const ( + None Algorithm = 0 // Blacklisted + MD5 Algorithm = 1 // Blacklisted + SHA1 Algorithm = 2 // Blacklisted + SHA224 Algorithm = 3 + SHA256 Algorithm = 4 + SHA384 Algorithm = 5 + SHA512 Algorithm = 6 + Ed25519 Algorithm = 8 +) + +// String makes hashAlgorithm printable +func (a Algorithm) String() string { + switch a { + case None: + return "none" + case MD5: + return "md5" // [RFC3279] + case SHA1: + return "sha-1" // [RFC3279] + case SHA224: + return "sha-224" // [RFC4055] + case SHA256: + return "sha-256" // [RFC4055] + case SHA384: + return "sha-384" // [RFC4055] + case SHA512: + return "sha-512" // [RFC4055] + case Ed25519: + return "null" + default: + return "unknown or unsupported hash algorithm" + } +} + +// Digest performs a digest on the passed value +func (a Algorithm) Digest(b []byte) []byte { + switch a { + case None: + return nil + case MD5: + hash := md5.Sum(b) // #nosec + return hash[:] + case SHA1: + hash := sha1.Sum(b) // #nosec + return hash[:] + case SHA224: + hash := sha256.Sum224(b) + return hash[:] + case SHA256: + hash := sha256.Sum256(b) + return hash[:] + case SHA384: + hash := sha512.Sum384(b) + return hash[:] + case SHA512: + hash := sha512.Sum512(b) + return hash[:] + default: + return nil + } +} + +// Insecure returns if the given HashAlgorithm is considered secure in DTLS 1.2 +func (a Algorithm) Insecure() bool { + switch a { + case None, MD5, SHA1: + return true + default: + return false + } +} + +// CryptoHash returns the crypto.Hash implementation for the given HashAlgorithm +func (a Algorithm) CryptoHash() crypto.Hash { + switch a { + case None: + return crypto.Hash(0) + case MD5: + return crypto.MD5 + case SHA1: + return crypto.SHA1 + case SHA224: + return crypto.SHA224 + case SHA256: + return crypto.SHA256 + case 
SHA384: + return crypto.SHA384 + case SHA512: + return crypto.SHA512 + case Ed25519: + return crypto.Hash(0) + default: + return crypto.Hash(0) + } +} + +// Algorithms returns all the supported Hash Algorithms +func Algorithms() map[Algorithm]struct{} { + return map[Algorithm]struct{}{ + None: {}, + MD5: {}, + SHA1: {}, + SHA224: {}, + SHA256: {}, + SHA384: {}, + SHA512: {}, + Ed25519: {}, + } +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/prf/prf.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/prf/prf.go new file mode 100644 index 000000000..11f53a190 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/prf/prf.go @@ -0,0 +1,252 @@ +// Package prf implements TLS 1.2 Pseudorandom functions +package prf + +import ( //nolint:gci + ellipticStdlib "crypto/elliptic" + "crypto/hmac" + "encoding/binary" + "errors" + "fmt" + "hash" + "math" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/protocol" + "golang.org/x/crypto/curve25519" +) + +const ( + masterSecretLabel = "master secret" + extendedMasterSecretLabel = "extended master secret" + keyExpansionLabel = "key expansion" + verifyDataClientLabel = "client finished" + verifyDataServerLabel = "server finished" +) + +// HashFunc allows callers to decide what hash is used in PRF +type HashFunc func() hash.Hash + +// EncryptionKeys is all the state needed for a TLS CipherSuite +type EncryptionKeys struct { + MasterSecret []byte + ClientMACKey []byte + ServerMACKey []byte + ClientWriteKey []byte + ServerWriteKey []byte + ClientWriteIV []byte + ServerWriteIV []byte +} + +var errInvalidNamedCurve = &protocol.FatalError{Err: errors.New("invalid named curve")} //nolint:goerr113 + +func (e *EncryptionKeys) String() string { + return fmt.Sprintf(`encryptionKeys: +- masterSecret: %#v +- clientMACKey: %#v +- serverMACKey: %#v +- clientWriteKey: %#v +- serverWriteKey: %#v +- clientWriteIV: %#v +- serverWriteIV: %#v +`, + e.MasterSecret, + e.ClientMACKey, + e.ServerMACKey, + e.ClientWriteKey, + e.ServerWriteKey, + e.ClientWriteIV, + e.ServerWriteIV) +} + +// PSKPreMasterSecret generates the PSK Premaster Secret +// The premaster secret is formed as follows: if the PSK is N octets +// long, concatenate a uint16 with the value N, N zero octets, a second +// uint16 with the value N, and the PSK itself. +// +// https://tools.ietf.org/html/rfc4279#section-2 +func PSKPreMasterSecret(psk []byte) []byte { + pskLen := uint16(len(psk)) + + out := append(make([]byte, 2+pskLen+2), psk...) 
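+ // The resulting layout for a 3-octet PSK is: 00 03 | 00 00 00 | 00 03 | psk + // (uint16 length N, N zero octets, uint16 length N again, then the PSK itself).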
+ binary.BigEndian.PutUint16(out, pskLen) + binary.BigEndian.PutUint16(out[2+pskLen:], pskLen) + + return out +} + +// EcdhePSKPreMasterSecret implements TLS 1.2 Premaster Secret generation given a psk, a keypair and a curve +// +// https://datatracker.ietf.org/doc/html/rfc5489#section-2 +func EcdhePSKPreMasterSecret(psk, publicKey, privateKey []byte, curve elliptic.Curve) ([]byte, error) { + preMasterSecret, err := PreMasterSecret(publicKey, privateKey, curve) + if err != nil { + return nil, err + } + out := make([]byte, 2+len(preMasterSecret)+2+len(psk)) + + // write preMasterSecret length + offset := 0 + binary.BigEndian.PutUint16(out[offset:], uint16(len(preMasterSecret))) + offset += 2 + + // write preMasterSecret + copy(out[offset:], preMasterSecret) + offset += len(preMasterSecret) + + // write psk length + binary.BigEndian.PutUint16(out[offset:], uint16(len(psk))) + offset += 2 + + // write psk + copy(out[offset:], psk) + return out, nil +} + +// PreMasterSecret implements TLS 1.2 Premaster Secret generation given a keypair and a curve +func PreMasterSecret(publicKey, privateKey []byte, curve elliptic.Curve) ([]byte, error) { + switch curve { + case elliptic.X25519: + return curve25519.X25519(privateKey, publicKey) + case elliptic.P256: + return ellipticCurvePreMasterSecret(publicKey, privateKey, ellipticStdlib.P256(), ellipticStdlib.P256()) + case elliptic.P384: + return ellipticCurvePreMasterSecret(publicKey, privateKey, ellipticStdlib.P384(), ellipticStdlib.P384()) + default: + return nil, errInvalidNamedCurve + } +} + +func ellipticCurvePreMasterSecret(publicKey, privateKey []byte, c1, c2 ellipticStdlib.Curve) ([]byte, error) { + x, y := ellipticStdlib.Unmarshal(c1, publicKey) + if x == nil || y == nil { + return nil, errInvalidNamedCurve + } + + result, _ := c2.ScalarMult(x, y, privateKey) + preMasterSecret := make([]byte, (c2.Params().BitSize+7)>>3) + resultBytes := result.Bytes() + copy(preMasterSecret[len(preMasterSecret)-len(resultBytes):], resultBytes) + return preMasterSecret, nil +} + +// PHash implements the TLS PRF P_hash function. The SHA-256 hash function is used for all cipher suites +// defined in the TLS 1.2 document and in TLS documents published prior to that +// document when TLS 1.2 is negotiated. New cipher suites MUST explicitly +// specify a PRF and, in general, SHOULD use the TLS PRF with SHA-256 or a +// stronger standard hash function. +// +// P_hash(secret, seed) = HMAC_hash(secret, A(1) + seed) + +// HMAC_hash(secret, A(2) + seed) + +// HMAC_hash(secret, A(3) + seed) + ... +// +// A() is defined as: +// +// A(0) = seed +// A(i) = HMAC_hash(secret, A(i-1)) +// +// P_hash can be iterated as many times as necessary to produce the +// required quantity of data. For example, if P_SHA256 is being used to +// create 80 bytes of data, it will have to be iterated three times +// (through A(3)), creating 96 bytes of output data; the last 16 bytes +// of the final iteration will then be discarded, leaving 80 bytes of +// output data.
+// +// https://tools.ietf.org/html/rfc4346 +func PHash(secret, seed []byte, requestedLength int, h HashFunc) ([]byte, error) { + hmacSHA256 := func(key, data []byte) ([]byte, error) { + mac := hmac.New(h, key) + if _, err := mac.Write(data); err != nil { + return nil, err + } + return mac.Sum(nil), nil + } + + var err error + lastRound := seed + out := []byte{} + + iterations := int(math.Ceil(float64(requestedLength) / float64(h().Size()))) + for i := 0; i < iterations; i++ { + lastRound, err = hmacSHA256(secret, lastRound) + if err != nil { + return nil, err + } + withSecret, err := hmacSHA256(secret, append(lastRound, seed...)) + if err != nil { + return nil, err + } + out = append(out, withSecret...) + } + + return out[:requestedLength], nil +} + +// ExtendedMasterSecret generates an Extended MasterSecret as defined in +// https://tools.ietf.org/html/rfc7627 +func ExtendedMasterSecret(preMasterSecret, sessionHash []byte, h HashFunc) ([]byte, error) { + seed := append([]byte(extendedMasterSecretLabel), sessionHash...) + return PHash(preMasterSecret, seed, 48, h) +} + +// MasterSecret generates a TLS 1.2 MasterSecret +func MasterSecret(preMasterSecret, clientRandom, serverRandom []byte, h HashFunc) ([]byte, error) { + seed := append(append([]byte(masterSecretLabel), clientRandom...), serverRandom...) + return PHash(preMasterSecret, seed, 48, h) +} + +// GenerateEncryptionKeys is the final step of the TLS 1.2 PRF. Given all the state generated so far, it generates +// the final keys needed for encryption +func GenerateEncryptionKeys(masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int, h HashFunc) (*EncryptionKeys, error) { + seed := append(append([]byte(keyExpansionLabel), serverRandom...), clientRandom...) + keyMaterial, err := PHash(masterSecret, seed, (2*macLen)+(2*keyLen)+(2*ivLen), h) + if err != nil { + return nil, err + } + + clientMACKey := keyMaterial[:macLen] + keyMaterial = keyMaterial[macLen:] + + serverMACKey := keyMaterial[:macLen] + keyMaterial = keyMaterial[macLen:] + + clientWriteKey := keyMaterial[:keyLen] + keyMaterial = keyMaterial[keyLen:] + + serverWriteKey := keyMaterial[:keyLen] + keyMaterial = keyMaterial[keyLen:] + + clientWriteIV := keyMaterial[:ivLen] + keyMaterial = keyMaterial[ivLen:] + + serverWriteIV := keyMaterial[:ivLen] + + return &EncryptionKeys{ + MasterSecret: masterSecret, + ClientMACKey: clientMACKey, + ServerMACKey: serverMACKey, + ClientWriteKey: clientWriteKey, + ServerWriteKey: serverWriteKey, + ClientWriteIV: clientWriteIV, + ServerWriteIV: serverWriteIV, + }, nil +} + +func prfVerifyData(masterSecret, handshakeBodies []byte, label string, hashFunc HashFunc) ([]byte, error) { + h := hashFunc() + if _, err := h.Write(handshakeBodies); err != nil { + return nil, err + } + + seed := append([]byte(label), h.Sum(nil)...)
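+ // verify_data = PRF(master_secret, finished_label, Hash(handshake_messages))[0..11] + // per RFC 5246, Section 7.4.9, hence the fixed 12-byte length below.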
+ return PHash(masterSecret, seed, 12, hashFunc) +} + +// VerifyDataClient is called on the Client Side to either verify or generate the VerifyData message +func VerifyDataClient(masterSecret, handshakeBodies []byte, h HashFunc) ([]byte, error) { + return prfVerifyData(masterSecret, handshakeBodies, verifyDataClientLabel, h) +} + +// VerifyDataServer is called on the Server Side to either verify or generate the VerifyData message +func VerifyDataServer(masterSecret, handshakeBodies []byte, h HashFunc) ([]byte, error) { + return prfVerifyData(masterSecret, handshakeBodies, verifyDataServerLabel, h) +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/signature/signature.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/signature/signature.go new file mode 100644 index 000000000..d9150eb8c --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/signature/signature.go @@ -0,0 +1,24 @@ +// Package signature provides our implemented Signature Algorithms +package signature + +// Algorithm as defined in TLS 1.2 +// https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-16 +type Algorithm uint16 + +// SignatureAlgorithm enums +const ( + Anonymous Algorithm = 0 + RSA Algorithm = 1 + ECDSA Algorithm = 3 + Ed25519 Algorithm = 7 +) + +// Algorithms returns all implemented Signature Algorithms +func Algorithms() map[Algorithm]struct{} { + return map[Algorithm]struct{}{ + Anonymous: {}, + RSA: {}, + ECDSA: {}, + Ed25519: {}, + } +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/errors.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/errors.go new file mode 100644 index 000000000..9d9d3b309 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/errors.go @@ -0,0 +1,9 @@ +package signaturehash + +import "errors" + +var ( + errNoAvailableSignatureSchemes = errors.New("connection can not be created, no SignatureScheme satisfy this Config") + errInvalidSignatureAlgorithm = errors.New("invalid signature algorithm") + errInvalidHashAlgorithm = errors.New("invalid hash algorithm") +) diff --git a/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/signaturehash.go b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/signaturehash.go new file mode 100644 index 000000000..7959e1f04 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/crypto/signaturehash/signaturehash.go @@ -0,0 +1,93 @@ +// Package signaturehash provides the SignatureHashAlgorithm as defined in TLS 1.2 +package signaturehash + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "fmt" + + "github.com/pion/dtls/v2/pkg/crypto/hash" + "github.com/pion/dtls/v2/pkg/crypto/signature" +) + +// Algorithm is a signature/hash algorithm pair which may be used in +// digital signatures. +// +// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 +type Algorithm struct { + Hash hash.Algorithm + Signature signature.Algorithm +} + +// Algorithms are all the known SignatureHash Algorithms +func Algorithms() []Algorithm { + return []Algorithm{ + {hash.SHA256, signature.ECDSA}, + {hash.SHA384, signature.ECDSA}, + {hash.SHA512, signature.ECDSA}, + {hash.SHA256, signature.RSA}, + {hash.SHA384, signature.RSA}, + {hash.SHA512, signature.RSA}, + {hash.Ed25519, signature.Ed25519}, + } +} + +// SelectSignatureScheme returns the most preferred and compatible scheme.
+func SelectSignatureScheme(sigs []Algorithm, privateKey crypto.PrivateKey) (Algorithm, error) { + for _, ss := range sigs { + if ss.isCompatible(privateKey) { + return ss, nil + } + } + return Algorithm{}, errNoAvailableSignatureSchemes +} + +// isCompatible checks that given private key is compatible with the signature scheme. +func (a *Algorithm) isCompatible(privateKey crypto.PrivateKey) bool { + switch privateKey.(type) { + case ed25519.PrivateKey: + return a.Signature == signature.Ed25519 + case *ecdsa.PrivateKey: + return a.Signature == signature.ECDSA + case *rsa.PrivateKey: + return a.Signature == signature.RSA + default: + return false + } +} + +// ParseSignatureSchemes translates []tls.SignatureScheme to []signatureHashAlgorithm. +// It returns default signature scheme list if no SignatureScheme is passed. +func ParseSignatureSchemes(sigs []tls.SignatureScheme, insecureHashes bool) ([]Algorithm, error) { + if len(sigs) == 0 { + return Algorithms(), nil + } + out := []Algorithm{} + for _, ss := range sigs { + sig := signature.Algorithm(ss & 0xFF) + if _, ok := signature.Algorithms()[sig]; !ok { + return nil, + fmt.Errorf("SignatureScheme %04x: %w", ss, errInvalidSignatureAlgorithm) + } + h := hash.Algorithm(ss >> 8) + if _, ok := hash.Algorithms()[h]; !ok || (ok && h == hash.None) { + return nil, fmt.Errorf("SignatureScheme %04x: %w", ss, errInvalidHashAlgorithm) + } + if h.Insecure() && !insecureHashes { + continue + } + out = append(out, Algorithm{ + Hash: h, + Signature: sig, + }) + } + + if len(out) == 0 { + return nil, errNoAvailableSignatureSchemes + } + + return out, nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/alert/alert.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/alert/alert.go new file mode 100644 index 000000000..663c6b379 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/alert/alert.go @@ -0,0 +1,163 @@ +// Package alert implements TLS alert protocol https://tools.ietf.org/html/rfc5246#section-7.2 +package alert + +import ( + "errors" + "fmt" + + "github.com/pion/dtls/v2/pkg/protocol" +) + +var errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + +// Level is the level of the TLS Alert +type Level byte + +// Level enums +const ( + Warning Level = 1 + Fatal Level = 2 +) + +func (l Level) String() string { + switch l { + case Warning: + return "Warning" + case Fatal: + return "Fatal" + default: + return "Invalid alert level" + } +} + +// Description is the extended info of the TLS Alert +type Description byte + +// Description enums +const ( + CloseNotify Description = 0 + UnexpectedMessage Description = 10 + BadRecordMac Description = 20 + DecryptionFailed Description = 21 + RecordOverflow Description = 22 + DecompressionFailure Description = 30 + HandshakeFailure Description = 40 + NoCertificate Description = 41 + BadCertificate Description = 42 + UnsupportedCertificate Description = 43 + CertificateRevoked Description = 44 + CertificateExpired Description = 45 + CertificateUnknown Description = 46 + IllegalParameter Description = 47 + UnknownCA Description = 48 + AccessDenied Description = 49 + DecodeError Description = 50 + DecryptError Description = 51 + ExportRestriction Description = 60 + ProtocolVersion Description = 70 + InsufficientSecurity Description = 71 + InternalError Description = 80 + UserCanceled Description = 90 + NoRenegotiation Description = 100 + UnsupportedExtension Description = 110 + NoApplicationProtocol Description = 120 +) + +func (d Description) 
String() string { + switch d { + case CloseNotify: + return "CloseNotify" + case UnexpectedMessage: + return "UnexpectedMessage" + case BadRecordMac: + return "BadRecordMac" + case DecryptionFailed: + return "DecryptionFailed" + case RecordOverflow: + return "RecordOverflow" + case DecompressionFailure: + return "DecompressionFailure" + case HandshakeFailure: + return "HandshakeFailure" + case NoCertificate: + return "NoCertificate" + case BadCertificate: + return "BadCertificate" + case UnsupportedCertificate: + return "UnsupportedCertificate" + case CertificateRevoked: + return "CertificateRevoked" + case CertificateExpired: + return "CertificateExpired" + case CertificateUnknown: + return "CertificateUnknown" + case IllegalParameter: + return "IllegalParameter" + case UnknownCA: + return "UnknownCA" + case AccessDenied: + return "AccessDenied" + case DecodeError: + return "DecodeError" + case DecryptError: + return "DecryptError" + case ExportRestriction: + return "ExportRestriction" + case ProtocolVersion: + return "ProtocolVersion" + case InsufficientSecurity: + return "InsufficientSecurity" + case InternalError: + return "InternalError" + case UserCanceled: + return "UserCanceled" + case NoRenegotiation: + return "NoRenegotiation" + case UnsupportedExtension: + return "UnsupportedExtension" + case NoApplicationProtocol: + return "NoApplicationProtocol" + default: + return "Invalid alert description" + } +} + +// Alert is one of the content types supported by the TLS record layer. +// Alert messages convey the severity of the message +// (warning or fatal) and a description of the alert. Alert messages +// with a level of fatal result in the immediate termination of the +// connection. In this case, other connections corresponding to the +// session may continue, but the session identifier MUST be invalidated, +// preventing the failed session from being used to establish new +// connections. Like other messages, alert messages are encrypted and +// compressed, as specified by the current connection state. +// https://tools.ietf.org/html/rfc5246#section-7.2 +type Alert struct { + Level Level + Description Description +} + +// ContentType returns the ContentType of this Content +func (a Alert) ContentType() protocol.ContentType { + return protocol.ContentTypeAlert +} + +// Marshal returns the encoded alert +func (a *Alert) Marshal() ([]byte, error) { + return []byte{byte(a.Level), byte(a.Description)}, nil +} + +// Unmarshal populates the alert from binary data +func (a *Alert) Unmarshal(data []byte) error { + if len(data) != 2 { + return errBufferTooSmall + } + + a.Level = Level(data[0]) + a.Description = Description(data[1]) + return nil +} + +func (a *Alert) String() string { + return fmt.Sprintf("Alert %s: %s", a.Level, a.Description) +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/application_data.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/application_data.go new file mode 100644 index 000000000..e5fd6f549 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/application_data.go @@ -0,0 +1,26 @@ +package protocol + +// ApplicationData messages are carried by the record layer and are +// fragmented, compressed, and encrypted based on the current connection +// state. The messages are treated as transparent data to the record +// layer. 
+// https://tools.ietf.org/html/rfc5246#section-10 +type ApplicationData struct { + Data []byte +} + +// ContentType returns the ContentType of this content +func (a ApplicationData) ContentType() ContentType { + return ContentTypeApplicationData +} + +// Marshal encodes the ApplicationData to binary +func (a *ApplicationData) Marshal() ([]byte, error) { + return append([]byte{}, a.Data...), nil +} + +// Unmarshal populates the ApplicationData from binary +func (a *ApplicationData) Unmarshal(data []byte) error { + a.Data = append([]byte{}, data...) + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/change_cipher_spec.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/change_cipher_spec.go new file mode 100644 index 000000000..b42647a05 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/change_cipher_spec.go @@ -0,0 +1,27 @@ +package protocol + +// ChangeCipherSpec protocol exists to signal transitions in +// ciphering strategies. The protocol consists of a single message, +// which is encrypted and compressed under the current (not the pending) +// connection state. The message consists of a single byte of value 1. +// https://tools.ietf.org/html/rfc5246#section-7.1 +type ChangeCipherSpec struct{} + +// ContentType returns the ContentType of this content +func (c ChangeCipherSpec) ContentType() ContentType { + return ContentTypeChangeCipherSpec +} + +// Marshal encodes the ChangeCipherSpec to binary +func (c *ChangeCipherSpec) Marshal() ([]byte, error) { + return []byte{0x01}, nil +} + +// Unmarshal populates the ChangeCipherSpec from binary +func (c *ChangeCipherSpec) Unmarshal(data []byte) error { + if len(data) == 1 && data[0] == 0x01 { + return nil + } + + return errInvalidCipherSpec +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/compression_method.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/compression_method.go new file mode 100644 index 000000000..678e816cb --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/compression_method.go @@ -0,0 +1,48 @@ +package protocol + +// CompressionMethodID is the ID for a CompressionMethod +type CompressionMethodID byte + +const ( + compressionMethodNull CompressionMethodID = 0 +) + +// CompressionMethod represents a TLS Compression Method +type CompressionMethod struct { + ID CompressionMethodID +} + +// CompressionMethods returns all supported CompressionMethods +func CompressionMethods() map[CompressionMethodID]*CompressionMethod { + return map[CompressionMethodID]*CompressionMethod{ + compressionMethodNull: {ID: compressionMethodNull}, + } +} + +// DecodeCompressionMethods decodes the given compression methods +func DecodeCompressionMethods(buf []byte) ([]*CompressionMethod, error) { + if len(buf) < 1 { + return nil, errBufferTooSmall + } + compressionMethodsCount := int(buf[0]) + c := []*CompressionMethod{} + for i := 0; i < compressionMethodsCount; i++ { + if len(buf) <= i+1 { + return nil, errBufferTooSmall + } + id := CompressionMethodID(buf[i+1]) + if compressionMethod, ok := CompressionMethods()[id]; ok { + c = append(c, compressionMethod) + } + } + return c, nil +} + +// EncodeCompressionMethods encodes the given compression methods +func EncodeCompressionMethods(c []*CompressionMethod) []byte { + out := []byte{byte(len(c))} + for i := len(c); i > 0; i-- { + out = append(out, byte(c[i-1].ID)) + } + return out +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/content.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/content.go new file mode 100644 index 000000000..47e5c96bb ---
/dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/content.go @@ -0,0 +1,21 @@ +package protocol + +// ContentType represents the IANA Registered ContentTypes +// +// https://tools.ietf.org/html/rfc4346#section-6.2.1 +type ContentType uint8 + +// ContentType enums +const ( + ContentTypeChangeCipherSpec ContentType = 20 + ContentTypeAlert ContentType = 21 + ContentTypeHandshake ContentType = 22 + ContentTypeApplicationData ContentType = 23 +) + +// Content is the top level distinguisher for a DTLS Datagram +type Content interface { + ContentType() ContentType + Marshal() ([]byte, error) + Unmarshal(data []byte) error +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/errors.go new file mode 100644 index 000000000..fbeb0774c --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/errors.go @@ -0,0 +1,106 @@ +package protocol + +import ( + "errors" + "fmt" + "net" +) + +var ( + errBufferTooSmall = &TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + errInvalidCipherSpec = &FatalError{Err: errors.New("cipher spec invalid")} //nolint:goerr113 +) + +// FatalError indicates that the DTLS connection is no longer available. +// It is mainly caused by wrong configuration of server or client. +type FatalError struct { + Err error +} + +// InternalError indicates an internal error caused by the implementation, after which the DTLS connection is no longer available. +// It is mainly caused by bugs or attempts to use unimplemented features. +type InternalError struct { + Err error +} + +// TemporaryError indicates that the DTLS connection is still available, but the request failed temporarily. +type TemporaryError struct { + Err error +} + +// TimeoutError indicates that the request timed out. +type TimeoutError struct { + Err error +} + +// HandshakeError indicates that the handshake failed. +type HandshakeError struct { + Err error +} + +// Timeout implements net.Error.Timeout() +func (*FatalError) Timeout() bool { return false } + +// Temporary implements net.Error.Temporary() +func (*FatalError) Temporary() bool { return false } + +// Unwrap implements Go1.13 error unwrapper. +func (e *FatalError) Unwrap() error { return e.Err } + +func (e *FatalError) Error() string { return fmt.Sprintf("dtls fatal: %v", e.Err) } + +// Timeout implements net.Error.Timeout() +func (*InternalError) Timeout() bool { return false } + +// Temporary implements net.Error.Temporary() +func (*InternalError) Temporary() bool { return false } + +// Unwrap implements Go1.13 error unwrapper. +func (e *InternalError) Unwrap() error { return e.Err } + +func (e *InternalError) Error() string { return fmt.Sprintf("dtls internal: %v", e.Err) } + +// Timeout implements net.Error.Timeout() +func (*TemporaryError) Timeout() bool { return false } + +// Temporary implements net.Error.Temporary() +func (*TemporaryError) Temporary() bool { return true } + +// Unwrap implements Go1.13 error unwrapper. +func (e *TemporaryError) Unwrap() error { return e.Err } + +func (e *TemporaryError) Error() string { return fmt.Sprintf("dtls temporary: %v", e.Err) } + +// Timeout implements net.Error.Timeout() +func (*TimeoutError) Timeout() bool { return true } + +// Temporary implements net.Error.Temporary() +func (*TimeoutError) Temporary() bool { return true } + +// Unwrap implements Go1.13 error unwrapper.
+func (e *TimeoutError) Unwrap() error { return e.Err } + +func (e *TimeoutError) Error() string { return fmt.Sprintf("dtls timeout: %v", e.Err) } + +// Timeout implements net.Error.Timeout() +func (e *HandshakeError) Timeout() bool { + var netErr net.Error + if errors.As(e.Err, &netErr) { + return netErr.Timeout() + } + return false +} + +// Temporary implements net.Error.Temporary() +func (e *HandshakeError) Temporary() bool { + var netErr net.Error + if errors.As(e.Err, &netErr) { + return netErr.Temporary() //nolint + } + return false +} + +// Unwrap implements Go1.13 error unwrapper. +func (e *HandshakeError) Unwrap() error { return e.Err } + +func (e *HandshakeError) Error() string { return fmt.Sprintf("handshake error: %v", e.Err) } diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/alpn.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/alpn.go new file mode 100644 index 000000000..8d7e1123e --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/alpn.go @@ -0,0 +1,77 @@ +package extension + +import ( + "golang.org/x/crypto/cryptobyte" +) + +// ALPN is a TLS extension for application-layer protocol negotiation within +// the TLS handshake. +// +// https://tools.ietf.org/html/rfc7301 +type ALPN struct { + ProtocolNameList []string +} + +// TypeValue returns the extension TypeValue +func (a ALPN) TypeValue() TypeValue { + return ALPNTypeValue +} + +// Marshal encodes the extension +func (a *ALPN) Marshal() ([]byte, error) { + var b cryptobyte.Builder + b.AddUint16(uint16(a.TypeValue())) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + for _, proto := range a.ProtocolNameList { + p := proto // Satisfy range scope lint + b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(p)) + }) + } + }) + }) + return b.Bytes() +} + +// Unmarshal populates the extension from encoded data +func (a *ALPN) Unmarshal(data []byte) error { + val := cryptobyte.String(data) + + var extension uint16 + val.ReadUint16(&extension) + if TypeValue(extension) != a.TypeValue() { + return errInvalidExtensionType + } + + var extData cryptobyte.String + val.ReadUint16LengthPrefixed(&extData) + + var protoList cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() { + return ErrALPNInvalidFormat + } + for !protoList.Empty() { + var proto cryptobyte.String + if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() { + return ErrALPNInvalidFormat + } + a.ProtocolNameList = append(a.ProtocolNameList, string(proto)) + } + return nil +} + +// ALPNProtocolSelection negotiates a shared protocol according to #3.2 of rfc7301 +func ALPNProtocolSelection(supportedProtocols, peerSupportedProtocols []string) (string, error) { + if len(supportedProtocols) == 0 || len(peerSupportedProtocols) == 0 { + return "", nil + } + for _, s := range supportedProtocols { + for _, c := range peerSupportedProtocols { + if s == c { + return s, nil + } + } + } + return "", errALPNNoAppProto +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/errors.go new file mode 100644 index 000000000..82d8b3408 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/errors.go @@ -0,0 +1,17 @@ +package extension + +import ( + "errors" + + "github.com/pion/dtls/v2/pkg/protocol" +) + +var ( + // ErrALPNInvalidFormat is raised when the ALPN format is invalid + ErrALPNInvalidFormat = 
&protocol.FatalError{Err: errors.New("invalid alpn format")} //nolint:goerr113 + errALPNNoAppProto = &protocol.FatalError{Err: errors.New("no application protocol")} //nolint:goerr113 + errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + errInvalidExtensionType = &protocol.FatalError{Err: errors.New("invalid extension type")} //nolint:goerr113 + errInvalidSNIFormat = &protocol.FatalError{Err: errors.New("invalid server name format")} //nolint:goerr113 + errLengthMismatch = &protocol.InternalError{Err: errors.New("data length and declared length do not match")} //nolint:goerr113 +) diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/extension.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/extension.go new file mode 100644 index 000000000..ec4c1ff5c --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/extension.go @@ -0,0 +1,99 @@ +// Package extension implements the extension values in the ClientHello/ServerHello +package extension + +import "encoding/binary" + +// TypeValue is the 2 byte value for a TLS Extension as registered in the IANA +// +// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml +type TypeValue uint16 + +// TypeValue constants +const ( + ServerNameTypeValue TypeValue = 0 + SupportedEllipticCurvesTypeValue TypeValue = 10 + SupportedPointFormatsTypeValue TypeValue = 11 + SupportedSignatureAlgorithmsTypeValue TypeValue = 13 + UseSRTPTypeValue TypeValue = 14 + ALPNTypeValue TypeValue = 16 + UseExtendedMasterSecretTypeValue TypeValue = 23 + RenegotiationInfoTypeValue TypeValue = 65281 +) + +// Extension represents a single TLS extension +type Extension interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + TypeValue() TypeValue +} + +// Unmarshal many extensions at once +func Unmarshal(buf []byte) ([]Extension, error) { + switch { + case len(buf) == 0: + return []Extension{}, nil + case len(buf) < 2: + return nil, errBufferTooSmall + } + + declaredLen := binary.BigEndian.Uint16(buf) + if len(buf)-2 != int(declaredLen) { + return nil, errLengthMismatch + } + + extensions := []Extension{} + unmarshalAndAppend := func(data []byte, e Extension) error { + err := e.Unmarshal(data) + if err != nil { + return err + } + extensions = append(extensions, e) + return nil + } + + for offset := 2; offset < len(buf); { + if len(buf) < (offset + 2) { + return nil, errBufferTooSmall + } + var err error + switch TypeValue(binary.BigEndian.Uint16(buf[offset:])) { + case ServerNameTypeValue: + err = unmarshalAndAppend(buf[offset:], &ServerName{}) + case SupportedEllipticCurvesTypeValue: + err = unmarshalAndAppend(buf[offset:], &SupportedEllipticCurves{}) + case UseSRTPTypeValue: + err = unmarshalAndAppend(buf[offset:], &UseSRTP{}) + case ALPNTypeValue: + err = unmarshalAndAppend(buf[offset:], &ALPN{}) + case UseExtendedMasterSecretTypeValue: + err = unmarshalAndAppend(buf[offset:], &UseExtendedMasterSecret{}) + case RenegotiationInfoTypeValue: + err = unmarshalAndAppend(buf[offset:], &RenegotiationInfo{}) + default: + } + if err != nil { + return nil, err + } + if len(buf) < (offset + 4) { + return nil, errBufferTooSmall + } + extensionLength := binary.BigEndian.Uint16(buf[offset+2:]) + offset += (4 + int(extensionLength)) + } + return extensions, nil +} + +// Marshal many extensions at once +func Marshal(e []Extension) ([]byte, error) { + extensions := []byte{} + for _, e := range e { + raw, err := e.Marshal() + if err != nil { + return nil, 
err + } + extensions = append(extensions, raw...) + } + out := []byte{0x00, 0x00} + binary.BigEndian.PutUint16(out, uint16(len(extensions))) + return append(out, extensions...), nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/renegotiation_info.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/renegotiation_info.go new file mode 100644 index 000000000..8378c3d94 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/renegotiation_info.go @@ -0,0 +1,43 @@ +package extension + +import "encoding/binary" + +const ( + renegotiationInfoHeaderSize = 5 +) + +// RenegotiationInfo allows a Client/Server to +// communicate their renegotiation support +// +// https://tools.ietf.org/html/rfc5746 +type RenegotiationInfo struct { + RenegotiatedConnection uint8 +} + +// TypeValue returns the extension TypeValue +func (r RenegotiationInfo) TypeValue() TypeValue { + return RenegotiationInfoTypeValue +} + +// Marshal encodes the extension +func (r *RenegotiationInfo) Marshal() ([]byte, error) { + out := make([]byte, renegotiationInfoHeaderSize) + + binary.BigEndian.PutUint16(out, uint16(r.TypeValue())) + binary.BigEndian.PutUint16(out[2:], uint16(1)) // length + out[4] = r.RenegotiatedConnection + return out, nil +} + +// Unmarshal populates the extension from encoded data +func (r *RenegotiationInfo) Unmarshal(data []byte) error { + if len(data) < renegotiationInfoHeaderSize { + return errBufferTooSmall + } else if TypeValue(binary.BigEndian.Uint16(data)) != r.TypeValue() { + return errInvalidExtensionType + } + + r.RenegotiatedConnection = data[4] + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/server_name.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/server_name.go new file mode 100644 index 000000000..9a1cc2926 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/server_name.go @@ -0,0 +1,78 @@ +package extension + +import ( + "strings" + + "golang.org/x/crypto/cryptobyte" +) + +const serverNameTypeDNSHostName = 0 + +// ServerName allows the client to inform the server of the specific +// name it wishes to contact.
Useful if multiple DNS names resolve +// to one IP +// +// https://tools.ietf.org/html/rfc6066#section-3 +type ServerName struct { + ServerName string +} + +// TypeValue returns the extension TypeValue +func (s ServerName) TypeValue() TypeValue { + return ServerNameTypeValue +} + +// Marshal encodes the extension +func (s *ServerName) Marshal() ([]byte, error) { + var b cryptobyte.Builder + b.AddUint16(uint16(s.TypeValue())) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddUint8(serverNameTypeDNSHostName) + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes([]byte(s.ServerName)) + }) + }) + }) + return b.Bytes() +} + +// Unmarshal populates the extension from encoded data +func (s *ServerName) Unmarshal(data []byte) error { + val := cryptobyte.String(data) + var extension uint16 + val.ReadUint16(&extension) + if TypeValue(extension) != s.TypeValue() { + return errInvalidExtensionType + } + + var extData cryptobyte.String + val.ReadUint16LengthPrefixed(&extData) + + var nameList cryptobyte.String + if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() { + return errInvalidSNIFormat + } + for !nameList.Empty() { + var nameType uint8 + var serverName cryptobyte.String + if !nameList.ReadUint8(&nameType) || + !nameList.ReadUint16LengthPrefixed(&serverName) || + serverName.Empty() { + return errInvalidSNIFormat + } + if nameType != serverNameTypeDNSHostName { + continue + } + if len(s.ServerName) != 0 { + // Multiple names of the same name_type are prohibited. + return errInvalidSNIFormat + } + s.ServerName = string(serverName) + // An SNI value may not include a trailing dot. + if strings.HasSuffix(s.ServerName, ".") { + return errInvalidSNIFormat + } + } + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go new file mode 100644 index 000000000..2c4d1d4a6 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/srtp_protection_profile.go @@ -0,0 +1,21 @@ +package extension + +// SRTPProtectionProfile defines the parameters and options that are in effect for the SRTP processing +// https://tools.ietf.org/html/rfc5764#section-4.1.2 +type SRTPProtectionProfile uint16 + +const ( + SRTP_AES128_CM_HMAC_SHA1_80 SRTPProtectionProfile = 0x0001 // nolint + SRTP_AES128_CM_HMAC_SHA1_32 SRTPProtectionProfile = 0x0002 // nolint + SRTP_AEAD_AES_128_GCM SRTPProtectionProfile = 0x0007 // nolint + SRTP_AEAD_AES_256_GCM SRTPProtectionProfile = 0x0008 // nolint +) + +func srtpProtectionProfiles() map[SRTPProtectionProfile]bool { + return map[SRTPProtectionProfile]bool{ + SRTP_AES128_CM_HMAC_SHA1_80: true, + SRTP_AES128_CM_HMAC_SHA1_32: true, + SRTP_AEAD_AES_128_GCM: true, + SRTP_AEAD_AES_256_GCM: true, + } +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_elliptic_curves.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_elliptic_curves.go new file mode 100644 index 000000000..8f077fcc7 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_elliptic_curves.go @@ -0,0 +1,62 @@ +package extension + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" +) + +const ( + supportedGroupsHeaderSize = 6 +) + +// SupportedEllipticCurves allows a Client/Server to communicate +// what curves they both support +// +// https://tools.ietf.org/html/rfc8422#section-5.1.1 
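+// +// Illustrative wire layout (a sketch; curve ID 0x001d is X25519 in the IANA registry): a one-curve list marshals to 00 0a | 00 04 | 00 02 | 00 1d, that is, the extension type, the extension length, the curve-list length, then the curve ID.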
+type SupportedEllipticCurves struct { + EllipticCurves []elliptic.Curve +} + +// TypeValue returns the extension TypeValue +func (s SupportedEllipticCurves) TypeValue() TypeValue { + return SupportedEllipticCurvesTypeValue +} + +// Marshal encodes the extension +func (s *SupportedEllipticCurves) Marshal() ([]byte, error) { + out := make([]byte, supportedGroupsHeaderSize) + + binary.BigEndian.PutUint16(out, uint16(s.TypeValue())) + binary.BigEndian.PutUint16(out[2:], uint16(2+(len(s.EllipticCurves)*2))) + binary.BigEndian.PutUint16(out[4:], uint16(len(s.EllipticCurves)*2)) + + for _, v := range s.EllipticCurves { + out = append(out, []byte{0x00, 0x00}...) + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(v)) + } + + return out, nil +} + +// Unmarshal populates the extension from encoded data +func (s *SupportedEllipticCurves) Unmarshal(data []byte) error { + if len(data) <= supportedGroupsHeaderSize { + return errBufferTooSmall + } else if TypeValue(binary.BigEndian.Uint16(data)) != s.TypeValue() { + return errInvalidExtensionType + } + + groupCount := int(binary.BigEndian.Uint16(data[4:]) / 2) + if supportedGroupsHeaderSize+(groupCount*2) > len(data) { + return errLengthMismatch + } + + for i := 0; i < groupCount; i++ { + supportedGroupID := elliptic.Curve(binary.BigEndian.Uint16(data[(supportedGroupsHeaderSize + (i * 2)):])) + if _, ok := elliptic.Curves()[supportedGroupID]; ok { + s.EllipticCurves = append(s.EllipticCurves, supportedGroupID) + } + } + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_point_formats.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_point_formats.go new file mode 100644 index 000000000..873d07827 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_point_formats.go @@ -0,0 +1,62 @@ +package extension + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" +) + +const ( + supportedPointFormatsSize = 5 +) + +// SupportedPointFormats allows a Client/Server to negotiate +// the EllipticCurvePointFormats +// +// https://tools.ietf.org/html/rfc4492#section-5.1.2 +type SupportedPointFormats struct { + PointFormats []elliptic.CurvePointFormat +} + +// TypeValue returns the extension TypeValue +func (s SupportedPointFormats) TypeValue() TypeValue { + return SupportedPointFormatsTypeValue +} + +// Marshal encodes the extension +func (s *SupportedPointFormats) Marshal() ([]byte, error) { + out := make([]byte, supportedPointFormatsSize) + + binary.BigEndian.PutUint16(out, uint16(s.TypeValue())) + binary.BigEndian.PutUint16(out[2:], uint16(1+(len(s.PointFormats)))) + out[4] = byte(len(s.PointFormats)) + + for _, v := range s.PointFormats { + out = append(out, byte(v)) + } + return out, nil +} + +// Unmarshal populates the extension from encoded data +func (s *SupportedPointFormats) Unmarshal(data []byte) error { + if len(data) <= supportedPointFormatsSize { + return errBufferTooSmall + } else if TypeValue(binary.BigEndian.Uint16(data)) != s.TypeValue() { + return errInvalidExtensionType + } + + pointFormatCount := int(binary.BigEndian.Uint16(data[4:])) + if supportedGroupsHeaderSize+(pointFormatCount) > len(data) { + return errLengthMismatch + } + + for i := 0; i < pointFormatCount; i++ { + p := elliptic.CurvePointFormat(data[supportedPointFormatsSize+i]) + switch p { + case elliptic.CurvePointFormatUncompressed: + s.PointFormats = append(s.PointFormats, p) + default: + } + } + return nil +} diff --git 
a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_signature_algorithms.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_signature_algorithms.go new file mode 100644 index 000000000..ee284f6e1 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/supported_signature_algorithms.go @@ -0,0 +1,70 @@ +package extension + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/crypto/hash" + "github.com/pion/dtls/v2/pkg/crypto/signature" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" +) + +const ( + supportedSignatureAlgorithmsHeaderSize = 6 +) + +// SupportedSignatureAlgorithms allows a Client/Server to +// negotiate what SignatureHash Algorithms they both support +// +// https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 +type SupportedSignatureAlgorithms struct { + SignatureHashAlgorithms []signaturehash.Algorithm +} + +// TypeValue returns the extension TypeValue +func (s SupportedSignatureAlgorithms) TypeValue() TypeValue { + return SupportedSignatureAlgorithmsTypeValue +} + +// Marshal encodes the extension +func (s *SupportedSignatureAlgorithms) Marshal() ([]byte, error) { + out := make([]byte, supportedSignatureAlgorithmsHeaderSize) + + binary.BigEndian.PutUint16(out, uint16(s.TypeValue())) + binary.BigEndian.PutUint16(out[2:], uint16(2+(len(s.SignatureHashAlgorithms)*2))) + binary.BigEndian.PutUint16(out[4:], uint16(len(s.SignatureHashAlgorithms)*2)) + for _, v := range s.SignatureHashAlgorithms { + out = append(out, []byte{0x00, 0x00}...) + out[len(out)-2] = byte(v.Hash) + out[len(out)-1] = byte(v.Signature) + } + + return out, nil +} + +// Unmarshal populates the extension from encoded data +func (s *SupportedSignatureAlgorithms) Unmarshal(data []byte) error { + if len(data) <= supportedSignatureAlgorithmsHeaderSize { + return errBufferTooSmall + } else if TypeValue(binary.BigEndian.Uint16(data)) != s.TypeValue() { + return errInvalidExtensionType + } + + algorithmCount := int(binary.BigEndian.Uint16(data[4:]) / 2) + if supportedSignatureAlgorithmsHeaderSize+(algorithmCount*2) > len(data) { + return errLengthMismatch + } + for i := 0; i < algorithmCount; i++ { + supportedHashAlgorithm := hash.Algorithm(data[supportedSignatureAlgorithmsHeaderSize+(i*2)]) + supportedSignatureAlgorithm := signature.Algorithm(data[supportedSignatureAlgorithmsHeaderSize+(i*2)+1]) + if _, ok := hash.Algorithms()[supportedHashAlgorithm]; ok { + if _, ok := signature.Algorithms()[supportedSignatureAlgorithm]; ok { + s.SignatureHashAlgorithms = append(s.SignatureHashAlgorithms, signaturehash.Algorithm{ + Hash: supportedHashAlgorithm, + Signature: supportedSignatureAlgorithm, + }) + } + } + } + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_master_secret.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_master_secret.go new file mode 100644 index 000000000..04ddc956a --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_master_secret.go @@ -0,0 +1,45 @@ +package extension + +import "encoding/binary" + +const ( + useExtendedMasterSecretHeaderSize = 4 +) + +// UseExtendedMasterSecret defines a TLS extension that contextually binds the +// master secret to a log of the full handshake that computes it, thus +// preventing MITM attacks. 
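+// +// Wire sketch (illustrative): when Supported is true, Marshal below emits only the four-byte header 00 17 00 00 (type 23 with a zero-length body); when it is false, Marshal emits nothing and the extension is omitted entirely.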
+type UseExtendedMasterSecret struct { + Supported bool +} + +// TypeValue returns the extension TypeValue +func (u UseExtendedMasterSecret) TypeValue() TypeValue { + return UseExtendedMasterSecretTypeValue +} + +// Marshal encodes the extension +func (u *UseExtendedMasterSecret) Marshal() ([]byte, error) { + if !u.Supported { + return []byte{}, nil + } + + out := make([]byte, useExtendedMasterSecretHeaderSize) + + binary.BigEndian.PutUint16(out, uint16(u.TypeValue())) + binary.BigEndian.PutUint16(out[2:], uint16(0)) // length + return out, nil +} + +// Unmarshal populates the extension from encoded data +func (u *UseExtendedMasterSecret) Unmarshal(data []byte) error { + if len(data) < useExtendedMasterSecretHeaderSize { + return errBufferTooSmall + } else if TypeValue(binary.BigEndian.Uint16(data)) != u.TypeValue() { + return errInvalidExtensionType + } + + u.Supported = true + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_srtp.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_srtp.go new file mode 100644 index 000000000..729fa3a98 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/extension/use_srtp.go @@ -0,0 +1,59 @@ +package extension + +import "encoding/binary" + +const ( + useSRTPHeaderSize = 6 +) + +// UseSRTP allows a Client/Server to negotiate what SRTPProtectionProfiles +// they both support +// +// https://tools.ietf.org/html/rfc5764#section-4.1.1 +type UseSRTP struct { + ProtectionProfiles []SRTPProtectionProfile +} + +// TypeValue returns the extension TypeValue +func (u UseSRTP) TypeValue() TypeValue { + return UseSRTPTypeValue +} + +// Marshal encodes the extension +func (u *UseSRTP) Marshal() ([]byte, error) { + out := make([]byte, useSRTPHeaderSize) + + binary.BigEndian.PutUint16(out, uint16(u.TypeValue())) + binary.BigEndian.PutUint16(out[2:], uint16(2+(len(u.ProtectionProfiles)*2)+ /* MKI Length */ 1)) + binary.BigEndian.PutUint16(out[4:], uint16(len(u.ProtectionProfiles)*2)) + + for _, v := range u.ProtectionProfiles { + out = append(out, []byte{0x00, 0x00}...)
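+ // grow the slice by two zero bytes so the PutUint16 below can write the profile ID in place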
+ binary.BigEndian.PutUint16(out[len(out)-2:], uint16(v)) + } + + out = append(out, 0x00) /* MKI Length */ + return out, nil +} + +// Unmarshal populates the extension from encoded data +func (u *UseSRTP) Unmarshal(data []byte) error { + if len(data) <= useSRTPHeaderSize { + return errBufferTooSmall + } else if TypeValue(binary.BigEndian.Uint16(data)) != u.TypeValue() { + return errInvalidExtensionType + } + + profileCount := int(binary.BigEndian.Uint16(data[4:]) / 2) + if supportedGroupsHeaderSize+(profileCount*2) > len(data) { + return errLengthMismatch + } + + for i := 0; i < profileCount; i++ { + supportedProfile := SRTPProtectionProfile(binary.BigEndian.Uint16(data[(useSRTPHeaderSize + (i * 2)):])) + if _, ok := srtpProtectionProfiles()[supportedProfile]; ok { + u.ProtectionProfiles = append(u.ProtectionProfiles, supportedProfile) + } + } + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/cipher_suite.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/cipher_suite.go new file mode 100644 index 000000000..e8fbdeae7 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/cipher_suite.go @@ -0,0 +1,29 @@ +package handshake + +import "encoding/binary" + +func decodeCipherSuiteIDs(buf []byte) ([]uint16, error) { + if len(buf) < 2 { + return nil, errBufferTooSmall + } + cipherSuitesCount := int(binary.BigEndian.Uint16(buf[0:])) / 2 + rtrn := make([]uint16, cipherSuitesCount) + for i := 0; i < cipherSuitesCount; i++ { + if len(buf) < (i*2 + 4) { + return nil, errBufferTooSmall + } + + rtrn[i] = binary.BigEndian.Uint16(buf[(i*2)+2:]) + } + return rtrn, nil +} + +func encodeCipherSuiteIDs(cipherSuiteIDs []uint16) []byte { + out := []byte{0x00, 0x00} + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(cipherSuiteIDs)*2)) + for _, id := range cipherSuiteIDs { + out = append(out, []byte{0x00, 0x00}...) 
+ binary.BigEndian.PutUint16(out[len(out)-2:], id) + } + return out +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/errors.go new file mode 100644 index 000000000..ac77c0434 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/errors.go @@ -0,0 +1,25 @@ +package handshake + +import ( + "errors" + + "github.com/pion/dtls/v2/pkg/protocol" +) + +// Typed errors +var ( + errUnableToMarshalFragmented = &protocol.InternalError{Err: errors.New("unable to marshal fragmented handshakes")} //nolint:goerr113 + errHandshakeMessageUnset = &protocol.InternalError{Err: errors.New("handshake message unset, unable to marshal")} //nolint:goerr113 + errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + errLengthMismatch = &protocol.InternalError{Err: errors.New("data length and declared length do not match")} //nolint:goerr113 + errInvalidClientKeyExchange = &protocol.FatalError{Err: errors.New("unable to determine if ClientKeyExchange is a public key or PSK Identity")} //nolint:goerr113 + errInvalidHashAlgorithm = &protocol.FatalError{Err: errors.New("invalid hash algorithm")} //nolint:goerr113 + errInvalidSignatureAlgorithm = &protocol.FatalError{Err: errors.New("invalid signature algorithm")} //nolint:goerr113 + errCookieTooLong = &protocol.FatalError{Err: errors.New("cookie must not be longer than 255 bytes")} //nolint:goerr113 + errInvalidEllipticCurveType = &protocol.FatalError{Err: errors.New("invalid or unknown elliptic curve type")} //nolint:goerr113 + errInvalidNamedCurve = &protocol.FatalError{Err: errors.New("invalid named curve")} //nolint:goerr113 + errCipherSuiteUnset = &protocol.FatalError{Err: errors.New("server hello can not be created without a cipher suite")} //nolint:goerr113 + errCompressionMethodUnset = &protocol.FatalError{Err: errors.New("server hello can not be created without a compression method")} //nolint:goerr113 + errInvalidCompressionMethod = &protocol.FatalError{Err: errors.New("invalid or unknown compression method")} //nolint:goerr113 + errNotImplemented = &protocol.InternalError{Err: errors.New("feature has not been implemented yet")} //nolint:goerr113 +) diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/handshake.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/handshake.go new file mode 100644 index 000000000..55f1a7c1c --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/handshake.go @@ -0,0 +1,147 @@ +// Package handshake provides the DTLS wire protocol for handshakes +package handshake + +import ( + "github.com/pion/dtls/v2/internal/ciphersuite/types" + "github.com/pion/dtls/v2/internal/util" + "github.com/pion/dtls/v2/pkg/protocol" +) + +// Type is the unique identifier for each handshake message +// https://tools.ietf.org/html/rfc5246#section-7.4 +type Type uint8 + +// Types of DTLS Handshake messages we know about +const ( + TypeHelloRequest Type = 0 + TypeClientHello Type = 1 + TypeServerHello Type = 2 + TypeHelloVerifyRequest Type = 3 + TypeCertificate Type = 11 + TypeServerKeyExchange Type = 12 + TypeCertificateRequest Type = 13 + TypeServerHelloDone Type = 14 + TypeCertificateVerify Type = 15 + TypeClientKeyExchange Type = 16 + TypeFinished Type = 20 +) + +// String returns the string representation of this type +func (t Type) String() string { + switch t { + case TypeHelloRequest: + return "HelloRequest" + case TypeClientHello: + return "ClientHello" +
case TypeServerHello: + return "ServerHello" + case TypeHelloVerifyRequest: + return "HelloVerifyRequest" + case TypeCertificate: + return "Certificate" + case TypeServerKeyExchange: + return "ServerKeyExchange" + case TypeCertificateRequest: + return "CertificateRequest" + case TypeServerHelloDone: + return "ServerHelloDone" + case TypeCertificateVerify: + return "CertificateVerify" + case TypeClientKeyExchange: + return "ClientKeyExchange" + case TypeFinished: + return "Finished" + } + return "" +} + +// Message is the body of a Handshake datagram +type Message interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Type() Type +} + +// Handshake protocol is responsible for selecting a cipher spec and +// generating a master secret, which together comprise the primary +// cryptographic parameters associated with a secure session. The +// handshake protocol can also optionally authenticate parties who have +// certificates signed by a trusted certificate authority. +// https://tools.ietf.org/html/rfc5246#section-7.3 +type Handshake struct { + Header Header + Message Message + + KeyExchangeAlgorithm types.KeyExchangeAlgorithm +} + +// ContentType returns what kind of content this message is carrying +func (h Handshake) ContentType() protocol.ContentType { + return protocol.ContentTypeHandshake +} + +// Marshal encodes a handshake into a binary message +func (h *Handshake) Marshal() ([]byte, error) { + if h.Message == nil { + return nil, errHandshakeMessageUnset + } else if h.Header.FragmentOffset != 0 { + return nil, errUnableToMarshalFragmented + } + + msg, err := h.Message.Marshal() + if err != nil { + return nil, err + } + + h.Header.Length = uint32(len(msg)) + h.Header.FragmentLength = h.Header.Length + h.Header.Type = h.Message.Type() + header, err := h.Header.Marshal() + if err != nil { + return nil, err + } + + return append(header, msg...), nil +} + +// Unmarshal decodes a handshake from a binary message +func (h *Handshake) Unmarshal(data []byte) error { + if err := h.Header.Unmarshal(data); err != nil { + return err + } + + reportedLen := util.BigEndianUint24(data[1:]) + if uint32(len(data)-HeaderLength) != reportedLen { + return errLengthMismatch + } else if reportedLen != h.Header.FragmentLength { + return errLengthMismatch + } + + switch Type(data[0]) { + case TypeHelloRequest: + return errNotImplemented + case TypeClientHello: + h.Message = &MessageClientHello{} + case TypeHelloVerifyRequest: + h.Message = &MessageHelloVerifyRequest{} + case TypeServerHello: + h.Message = &MessageServerHello{} + case TypeCertificate: + h.Message = &MessageCertificate{} + case TypeServerKeyExchange: + h.Message = &MessageServerKeyExchange{KeyExchangeAlgorithm: h.KeyExchangeAlgorithm} + case TypeCertificateRequest: + h.Message = &MessageCertificateRequest{} + case TypeServerHelloDone: + h.Message = &MessageServerHelloDone{} + case TypeClientKeyExchange: + h.Message = &MessageClientKeyExchange{KeyExchangeAlgorithm: h.KeyExchangeAlgorithm} + case TypeFinished: + h.Message = &MessageFinished{} + case TypeCertificateVerify: + h.Message = &MessageCertificateVerify{} + default: + return errNotImplemented + } + return h.Message.Unmarshal(data[HeaderLength:]) +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/header.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/header.go new file mode 100644 index 000000000..cb6a22489 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/header.go @@ -0,0 +1,50 @@ +package handshake + +import ( +
"encoding/binary" + + "github.com/pion/dtls/v2/internal/util" +) + +// HeaderLength msg_len for Handshake messages assumes an extra +// 12 bytes for sequence, fragment and version information vs TLS +const HeaderLength = 12 + +// Header is the static first 12 bytes of each RecordLayer +// of type Handshake. These fields allow us to support message loss, reordering, and +// message fragmentation, +// +// https://tools.ietf.org/html/rfc6347#section-4.2.2 +type Header struct { + Type Type + Length uint32 // uint24 in spec + MessageSequence uint16 + FragmentOffset uint32 // uint24 in spec + FragmentLength uint32 // uint24 in spec +} + +// Marshal encodes the Header +func (h *Header) Marshal() ([]byte, error) { + out := make([]byte, HeaderLength) + + out[0] = byte(h.Type) + util.PutBigEndianUint24(out[1:], h.Length) + binary.BigEndian.PutUint16(out[4:], h.MessageSequence) + util.PutBigEndianUint24(out[6:], h.FragmentOffset) + util.PutBigEndianUint24(out[9:], h.FragmentLength) + return out, nil +} + +// Unmarshal populates the header from encoded data +func (h *Header) Unmarshal(data []byte) error { + if len(data) < HeaderLength { + return errBufferTooSmall + } + + h.Type = Type(data[0]) + h.Length = util.BigEndianUint24(data[1:]) + h.MessageSequence = binary.BigEndian.Uint16(data[4:]) + h.FragmentOffset = util.BigEndianUint24(data[6:]) + h.FragmentLength = util.BigEndianUint24(data[9:]) + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate.go new file mode 100644 index 000000000..05fb74656 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate.go @@ -0,0 +1,66 @@ +package handshake + +import ( + "github.com/pion/dtls/v2/internal/util" +) + +// MessageCertificate is a DTLS Handshake Message +// it can contain either a Client or Server Certificate +// +// https://tools.ietf.org/html/rfc5246#section-7.4.2 +type MessageCertificate struct { + Certificate [][]byte +} + +// Type returns the Handshake Type +func (m MessageCertificate) Type() Type { + return TypeCertificate +} + +const ( + handshakeMessageCertificateLengthFieldSize = 3 +) + +// Marshal encodes the Handshake +func (m *MessageCertificate) Marshal() ([]byte, error) { + out := make([]byte, handshakeMessageCertificateLengthFieldSize) + + for _, r := range m.Certificate { + // Certificate Length + out = append(out, make([]byte, handshakeMessageCertificateLengthFieldSize)...) + util.PutBigEndianUint24(out[len(out)-handshakeMessageCertificateLengthFieldSize:], uint32(len(r))) + + // Certificate body + out = append(out, append([]byte{}, r...)...) 
+ } + + // Total Payload Size + util.PutBigEndianUint24(out[0:], uint32(len(out[handshakeMessageCertificateLengthFieldSize:]))) + return out, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageCertificate) Unmarshal(data []byte) error { + if len(data) < handshakeMessageCertificateLengthFieldSize { + return errBufferTooSmall + } + + if certificateBodyLen := int(util.BigEndianUint24(data)); certificateBodyLen+handshakeMessageCertificateLengthFieldSize != len(data) { + return errLengthMismatch + } + + offset := handshakeMessageCertificateLengthFieldSize + for offset < len(data) { + certificateLen := int(util.BigEndianUint24(data[offset:])) + offset += handshakeMessageCertificateLengthFieldSize + + if offset+certificateLen > len(data) { + return errLengthMismatch + } + + m.Certificate = append(m.Certificate, append([]byte{}, data[offset:offset+certificateLen]...)) + offset += certificateLen + } + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_request.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_request.go new file mode 100644 index 000000000..86d687f09 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_request.go @@ -0,0 +1,141 @@ +package handshake + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/crypto/clientcertificate" + "github.com/pion/dtls/v2/pkg/crypto/hash" + "github.com/pion/dtls/v2/pkg/crypto/signature" + "github.com/pion/dtls/v2/pkg/crypto/signaturehash" +) + +/* +MessageCertificateRequest is sent when a non-anonymous server optionally +requests a certificate from the client, if appropriate for the selected cipher +suite. This message, if sent, will immediately follow the ServerKeyExchange +message (if it is sent; otherwise, this message follows the +server's Certificate message). + +https://tools.ietf.org/html/rfc5246#section-7.4.4 +*/ +type MessageCertificateRequest struct { + CertificateTypes []clientcertificate.Type + SignatureHashAlgorithms []signaturehash.Algorithm + CertificateAuthoritiesNames [][]byte +} + +const ( + messageCertificateRequestMinLength = 5 +) + +// Type returns the Handshake Type +func (m MessageCertificateRequest) Type() Type { + return TypeCertificateRequest +} + +// Marshal encodes the Handshake +func (m *MessageCertificateRequest) Marshal() ([]byte, error) { + out := []byte{byte(len(m.CertificateTypes))} + for _, v := range m.CertificateTypes { + out = append(out, byte(v)) + } + + out = append(out, []byte{0x00, 0x00}...) + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(m.SignatureHashAlgorithms)*2)) + for _, v := range m.SignatureHashAlgorithms { + out = append(out, byte(v.Hash)) + out = append(out, byte(v.Signature)) + } + + // Distinguished Names + casLength := 0 + for _, ca := range m.CertificateAuthoritiesNames { + casLength += len(ca) + 2 + } + out = append(out, []byte{0x00, 0x00}...) + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(casLength)) + if casLength > 0 { + for _, ca := range m.CertificateAuthoritiesNames { + out = append(out, []byte{0x00, 0x00}...) + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(ca))) + out = append(out, ca...)
+ } + } + return out, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageCertificateRequest) Unmarshal(data []byte) error { + if len(data) < messageCertificateRequestMinLength { + return errBufferTooSmall + } + + offset := 0 + certificateTypesLength := int(data[0]) + offset++ + + if (offset + certificateTypesLength) > len(data) { + return errBufferTooSmall + } + + for i := 0; i < certificateTypesLength; i++ { + certType := clientcertificate.Type(data[offset+i]) + if _, ok := clientcertificate.Types()[certType]; ok { + m.CertificateTypes = append(m.CertificateTypes, certType) + } + } + offset += certificateTypesLength + if len(data) < offset+2 { + return errBufferTooSmall + } + signatureHashAlgorithmsLength := int(binary.BigEndian.Uint16(data[offset:])) + offset += 2 + + if (offset + signatureHashAlgorithmsLength) > len(data) { + return errBufferTooSmall + } + + for i := 0; i < signatureHashAlgorithmsLength; i += 2 { + if len(data) < (offset + i + 2) { + return errBufferTooSmall + } + h := hash.Algorithm(data[offset+i]) + s := signature.Algorithm(data[offset+i+1]) + + if _, ok := hash.Algorithms()[h]; !ok { + continue + } else if _, ok := signature.Algorithms()[s]; !ok { + continue + } + m.SignatureHashAlgorithms = append(m.SignatureHashAlgorithms, signaturehash.Algorithm{Signature: s, Hash: h}) + } + + offset += signatureHashAlgorithmsLength + if len(data) < offset+2 { + return errBufferTooSmall + } + casLength := int(binary.BigEndian.Uint16(data[offset:])) + offset += 2 + if (offset + casLength) > len(data) { + return errBufferTooSmall + } + cas := make([]byte, casLength) + copy(cas, data[offset:offset+casLength]) + m.CertificateAuthoritiesNames = nil + for len(cas) > 0 { + if len(cas) < 2 { + return errBufferTooSmall + } + caLen := binary.BigEndian.Uint16(cas) + cas = cas[2:] + + if len(cas) < int(caLen) { + return errBufferTooSmall + } + + m.CertificateAuthoritiesNames = append(m.CertificateAuthoritiesNames, cas[:caLen]) + cas = cas[caLen:] + } + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_verify.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_verify.go new file mode 100644 index 000000000..fb5e4639d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_certificate_verify.go @@ -0,0 +1,61 @@ +package handshake + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/crypto/hash" + "github.com/pion/dtls/v2/pkg/crypto/signature" +) + +// MessageCertificateVerify provides explicit verification of a +// client certificate.
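+// +// Illustrative layout (a sketch): the body is the hash algorithm (1 byte), the signature algorithm (1 byte), a two-byte signature length, then the signature itself, exactly as Marshal below encodes it.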
+// +// https://tools.ietf.org/html/rfc5246#section-7.4.8 +type MessageCertificateVerify struct { + HashAlgorithm hash.Algorithm + SignatureAlgorithm signature.Algorithm + Signature []byte +} + +const handshakeMessageCertificateVerifyMinLength = 4 + +// Type returns the Handshake Type +func (m MessageCertificateVerify) Type() Type { + return TypeCertificateVerify +} + +// Marshal encodes the Handshake +func (m *MessageCertificateVerify) Marshal() ([]byte, error) { + out := make([]byte, 1+1+2+len(m.Signature)) + + out[0] = byte(m.HashAlgorithm) + out[1] = byte(m.SignatureAlgorithm) + binary.BigEndian.PutUint16(out[2:], uint16(len(m.Signature))) + copy(out[4:], m.Signature) + return out, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageCertificateVerify) Unmarshal(data []byte) error { + if len(data) < handshakeMessageCertificateVerifyMinLength { + return errBufferTooSmall + } + + m.HashAlgorithm = hash.Algorithm(data[0]) + if _, ok := hash.Algorithms()[m.HashAlgorithm]; !ok { + return errInvalidHashAlgorithm + } + + m.SignatureAlgorithm = signature.Algorithm(data[1]) + if _, ok := signature.Algorithms()[m.SignatureAlgorithm]; !ok { + return errInvalidSignatureAlgorithm + } + + signatureLength := int(binary.BigEndian.Uint16(data[2:])) + if (signatureLength + 4) != len(data) { + return errBufferTooSmall + } + + m.Signature = append([]byte{}, data[4:]...) + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_hello.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_hello.go new file mode 100644 index 000000000..1deca38aa --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_hello.go @@ -0,0 +1,138 @@ +package handshake + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/extension" +) + +/* +MessageClientHello is sent when a client first connects to a server; it is +required to send the ClientHello as its first message. The client can also send a +client hello in response to a hello request or on its own +initiative in order to renegotiate the security parameters in an +existing connection. +*/ +type MessageClientHello struct { + Version protocol.Version + Random Random + Cookie []byte + + SessionID []byte + + CipherSuiteIDs []uint16 + CompressionMethods []*protocol.CompressionMethod + Extensions []extension.Extension +} + +const handshakeMessageClientHelloVariableWidthStart = 34 + +// Type returns the Handshake Type +func (m MessageClientHello) Type() Type { + return TypeClientHello +} + +// Marshal encodes the Handshake +func (m *MessageClientHello) Marshal() ([]byte, error) { + if len(m.Cookie) > 255 { + return nil, errCookieTooLong + } + + out := make([]byte, handshakeMessageClientHelloVariableWidthStart) + out[0] = m.Version.Major + out[1] = m.Version.Minor + + rand := m.Random.MarshalFixed() + copy(out[2:], rand[:]) + + out = append(out, byte(len(m.SessionID))) + out = append(out, m.SessionID...) + + out = append(out, byte(len(m.Cookie))) + out = append(out, m.Cookie...) + out = append(out, encodeCipherSuiteIDs(m.CipherSuiteIDs)...) + out = append(out, protocol.EncodeCompressionMethods(m.CompressionMethods)...)
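+ // at this point out holds the version, random, session ID, cookie, cipher suite IDs, and compression methods; the extensions, prefixed with their two-byte total length by extension.Marshal, are appended last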
+ + extensions, err := extension.Marshal(m.Extensions) + if err != nil { + return nil, err + } + + return append(out, extensions...), nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageClientHello) Unmarshal(data []byte) error { + if len(data) < 2+RandomLength { + return errBufferTooSmall + } + + m.Version.Major = data[0] + m.Version.Minor = data[1] + + var random [RandomLength]byte + copy(random[:], data[2:]) + m.Random.UnmarshalFixed(random) + + // rest of packet has variable width sections + currOffset := handshakeMessageClientHelloVariableWidthStart + + currOffset++ + if len(data) <= currOffset { + return errBufferTooSmall + } + n := int(data[currOffset-1]) + if len(data) <= currOffset+n { + return errBufferTooSmall + } + m.SessionID = append([]byte{}, data[currOffset:currOffset+n]...) + currOffset += len(m.SessionID) + + currOffset++ + if len(data) <= currOffset { + return errBufferTooSmall + } + n = int(data[currOffset-1]) + if len(data) <= currOffset+n { + return errBufferTooSmall + } + m.Cookie = append([]byte{}, data[currOffset:currOffset+n]...) + currOffset += len(m.Cookie) + + // Cipher Suites + if len(data) < currOffset { + return errBufferTooSmall + } + cipherSuiteIDs, err := decodeCipherSuiteIDs(data[currOffset:]) + if err != nil { + return err + } + m.CipherSuiteIDs = cipherSuiteIDs + if len(data) < currOffset+2 { + return errBufferTooSmall + } + currOffset += int(binary.BigEndian.Uint16(data[currOffset:])) + 2 + + // Compression Methods + if len(data) < currOffset { + return errBufferTooSmall + } + compressionMethods, err := protocol.DecodeCompressionMethods(data[currOffset:]) + if err != nil { + return err + } + m.CompressionMethods = compressionMethods + if len(data) < currOffset { + return errBufferTooSmall + } + currOffset += int(data[currOffset]) + 1 + + // Extensions + extensions, err := extension.Unmarshal(data[currOffset:]) + if err != nil { + return err + } + m.Extensions = extensions + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_key_exchange.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_key_exchange.go new file mode 100644 index 000000000..34c5c48ef --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_client_key_exchange.go @@ -0,0 +1,78 @@ +package handshake + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/internal/ciphersuite/types" +) + +// MessageClientKeyExchange is a DTLS Handshake Message +// With this message, the premaster secret is set, either by direct +// transmission of the RSA-encrypted secret or by the transmission of +// Diffie-Hellman parameters that will allow each side to agree upon +// the same premaster secret. +// +// https://tools.ietf.org/html/rfc5246#section-7.4.7 +type MessageClientKeyExchange struct { + IdentityHint []byte + PublicKey []byte + + // for unmarshaling + KeyExchangeAlgorithm types.KeyExchangeAlgorithm +} + +// Type returns the Handshake Type +func (m MessageClientKeyExchange) Type() Type { + return TypeClientKeyExchange +} + +// Marshal encodes the Handshake +func (m *MessageClientKeyExchange) Marshal() (out []byte, err error) { + if m.IdentityHint == nil && m.PublicKey == nil { + return nil, errInvalidClientKeyExchange + } + + if m.IdentityHint != nil { + out = append([]byte{0x00, 0x00}, m.IdentityHint...) + binary.BigEndian.PutUint16(out, uint16(len(out)-2)) + } + + if m.PublicKey != nil { + out = append(out, byte(len(m.PublicKey))) + out = append(out, m.PublicKey...) 
+ } + + return out, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageClientKeyExchange) Unmarshal(data []byte) error { + switch { + case len(data) < 2: + return errBufferTooSmall + case m.KeyExchangeAlgorithm == types.KeyExchangeAlgorithmNone: + return errCipherSuiteUnset + } + + offset := 0 + if m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmPsk) { + pskLength := int(binary.BigEndian.Uint16(data)) + if pskLength > len(data)-2 { + return errBufferTooSmall + } + + m.IdentityHint = append([]byte{}, data[2:pskLength+2]...) + offset += pskLength + 2 + } + + if m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmEcdhe) { + publicKeyLength := int(data[offset]) + if publicKeyLength > len(data)-1-offset { + return errBufferTooSmall + } + + m.PublicKey = append([]byte{}, data[offset+1:]...) + } + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_finished.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_finished.go new file mode 100644 index 000000000..c65d42abb --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_finished.go @@ -0,0 +1,27 @@ +package handshake + +// MessageFinished is a DTLS Handshake Message. +// This message is the first one protected with the just +// negotiated algorithms, keys, and secrets. Recipients of Finished +// messages MUST verify that the contents are correct. +// +// https://tools.ietf.org/html/rfc5246#section-7.4.9 +type MessageFinished struct { + VerifyData []byte +} + +// Type returns the Handshake Type +func (m MessageFinished) Type() Type { + return TypeFinished +} + +// Marshal encodes the Handshake +func (m *MessageFinished) Marshal() ([]byte, error) { + return append([]byte{}, m.VerifyData...), nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageFinished) Unmarshal(data []byte) error { + m.VerifyData = append([]byte{}, data...) + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_hello_verify_request.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_hello_verify_request.go new file mode 100644 index 000000000..20c63773d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_hello_verify_request.go @@ -0,0 +1,62 @@ +package handshake + +import ( + "github.com/pion/dtls/v2/pkg/protocol" +) + +// MessageHelloVerifyRequest is as follows: +// +// struct { +// ProtocolVersion server_version; +// opaque cookie<0..2^8-1>; +// } HelloVerifyRequest; +// +// The HelloVerifyRequest message type is hello_verify_request(3). +// +// When the client sends its ClientHello message to the server, the server +// MAY respond with a HelloVerifyRequest message. This message contains +// a stateless cookie generated using the technique of [PHOTURIS]. The +// client MUST retransmit the ClientHello with the cookie added.
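+// +// Illustrative encoding (a sketch): for DTLS 1.2 (version bytes fe fd) and the cookie "abc", Marshal below produces fe fd 03 61 62 63: the version, a one-byte cookie length, then the cookie bytes.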
+// +// https://tools.ietf.org/html/rfc6347#section-4.2.1 +type MessageHelloVerifyRequest struct { + Version protocol.Version + Cookie []byte +} + +// Type returns the Handshake Type +func (m MessageHelloVerifyRequest) Type() Type { + return TypeHelloVerifyRequest +} + +// Marshal encodes the Handshake +func (m *MessageHelloVerifyRequest) Marshal() ([]byte, error) { + if len(m.Cookie) > 255 { + return nil, errCookieTooLong + } + + out := make([]byte, 3+len(m.Cookie)) + out[0] = m.Version.Major + out[1] = m.Version.Minor + out[2] = byte(len(m.Cookie)) + copy(out[3:], m.Cookie) + + return out, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageHelloVerifyRequest) Unmarshal(data []byte) error { + if len(data) < 3 { + return errBufferTooSmall + } + m.Version.Major = data[0] + m.Version.Minor = data[1] + cookieLength := int(data[2]) + if len(data) < cookieLength+3 { + return errBufferTooSmall + } + m.Cookie = make([]byte, cookieLength) + + copy(m.Cookie, data[3:3+cookieLength]) + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello.go new file mode 100644 index 000000000..b4157e25d --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello.go @@ -0,0 +1,119 @@ +package handshake + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/extension" +) + +// MessageServerHello is sent in response to a ClientHello +// message when it was able to find an acceptable set of algorithms. +// If it cannot find such a match, it will respond with a handshake +// failure alert. +// +// https://tools.ietf.org/html/rfc5246#section-7.4.1.3 +type MessageServerHello struct { + Version protocol.Version + Random Random + + SessionID []byte + + CipherSuiteID *uint16 + CompressionMethod *protocol.CompressionMethod + Extensions []extension.Extension +} + +const messageServerHelloVariableWidthStart = 2 + RandomLength + +// Type returns the Handshake Type +func (m MessageServerHello) Type() Type { + return TypeServerHello +} + +// Marshal encodes the Handshake +func (m *MessageServerHello) Marshal() ([]byte, error) { + if m.CipherSuiteID == nil { + return nil, errCipherSuiteUnset + } else if m.CompressionMethod == nil { + return nil, errCompressionMethodUnset + } + + out := make([]byte, messageServerHelloVariableWidthStart) + out[0] = m.Version.Major + out[1] = m.Version.Minor + + rand := m.Random.MarshalFixed() + copy(out[2:], rand[:]) + + out = append(out, byte(len(m.SessionID))) + out = append(out, m.SessionID...) + + out = append(out, []byte{0x00, 0x00}...) 
+ binary.BigEndian.PutUint16(out[len(out)-2:], *m.CipherSuiteID) + + out = append(out, byte(m.CompressionMethod.ID)) + + extensions, err := extension.Marshal(m.Extensions) + if err != nil { + return nil, err + } + + return append(out, extensions...), nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageServerHello) Unmarshal(data []byte) error { + if len(data) < 2+RandomLength { + return errBufferTooSmall + } + + m.Version.Major = data[0] + m.Version.Minor = data[1] + + var random [RandomLength]byte + copy(random[:], data[2:]) + m.Random.UnmarshalFixed(random) + + currOffset := messageServerHelloVariableWidthStart + currOffset++ + if len(data) <= currOffset { + return errBufferTooSmall + } + + n := int(data[currOffset-1]) + if len(data) <= currOffset+n { + return errBufferTooSmall + } + m.SessionID = append([]byte{}, data[currOffset:currOffset+n]...) + currOffset += len(m.SessionID) + + if len(data) < currOffset+2 { + return errBufferTooSmall + } + m.CipherSuiteID = new(uint16) + *m.CipherSuiteID = binary.BigEndian.Uint16(data[currOffset:]) + currOffset += 2 + + if len(data) <= currOffset { + return errBufferTooSmall + } + if compressionMethod, ok := protocol.CompressionMethods()[protocol.CompressionMethodID(data[currOffset])]; ok { + m.CompressionMethod = compressionMethod + currOffset++ + } else { + return errInvalidCompressionMethod + } + + if len(data) <= currOffset { + m.Extensions = []extension.Extension{} + return nil + } + + extensions, err := extension.Unmarshal(data[currOffset:]) + if err != nil { + return err + } + m.Extensions = extensions + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello_done.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello_done.go new file mode 100644 index 000000000..0f65b198e --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_hello_done.go @@ -0,0 +1,21 @@ +package handshake + +// MessageServerHelloDone is the final non-encrypted message from the server. +// It signals that the server has sent all of its handshake messages; the next +// message should be MessageFinished +type MessageServerHelloDone struct{} + +// Type returns the Handshake Type +func (m MessageServerHelloDone) Type() Type { + return TypeServerHelloDone +} + +// Marshal encodes the Handshake +func (m *MessageServerHelloDone) Marshal() ([]byte, error) { + return []byte{}, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageServerHelloDone) Unmarshal(data []byte) error { + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_key_exchange.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_key_exchange.go new file mode 100644 index 000000000..e84734a73 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/message_server_key_exchange.go @@ -0,0 +1,145 @@ +package handshake + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/internal/ciphersuite/types" + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/hash" + "github.com/pion/dtls/v2/pkg/crypto/signature" +) + +// MessageServerKeyExchange supports ECDH and PSK +type MessageServerKeyExchange struct { + IdentityHint []byte + + EllipticCurveType elliptic.CurveType + NamedCurve elliptic.Curve + PublicKey []byte + HashAlgorithm hash.Algorithm + SignatureAlgorithm signature.Algorithm + Signature []byte + + // for unmarshaling + KeyExchangeAlgorithm
types.KeyExchangeAlgorithm +} + +// Type returns the Handshake Type +func (m MessageServerKeyExchange) Type() Type { + return TypeServerKeyExchange +} + +// Marshal encodes the Handshake +func (m *MessageServerKeyExchange) Marshal() ([]byte, error) { + var out []byte + if m.IdentityHint != nil { + out = append([]byte{0x00, 0x00}, m.IdentityHint...) + binary.BigEndian.PutUint16(out, uint16(len(out)-2)) + } + + if m.EllipticCurveType == 0 || len(m.PublicKey) == 0 { + return out, nil + } + out = append(out, byte(m.EllipticCurveType), 0x00, 0x00) + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(m.NamedCurve)) + + out = append(out, byte(len(m.PublicKey))) + out = append(out, m.PublicKey...) + switch { + case m.HashAlgorithm != hash.None && len(m.Signature) == 0: + return nil, errInvalidHashAlgorithm + case m.HashAlgorithm == hash.None && len(m.Signature) > 0: + return nil, errInvalidHashAlgorithm + case m.SignatureAlgorithm == signature.Anonymous && (m.HashAlgorithm != hash.None || len(m.Signature) > 0): + return nil, errInvalidSignatureAlgorithm + case m.SignatureAlgorithm == signature.Anonymous: + return out, nil + } + + out = append(out, []byte{byte(m.HashAlgorithm), byte(m.SignatureAlgorithm), 0x00, 0x00}...) + binary.BigEndian.PutUint16(out[len(out)-2:], uint16(len(m.Signature))) + out = append(out, m.Signature...) + + return out, nil +} + +// Unmarshal populates the message from encoded data +func (m *MessageServerKeyExchange) Unmarshal(data []byte) error { + switch { + case len(data) < 2: + return errBufferTooSmall + case m.KeyExchangeAlgorithm == types.KeyExchangeAlgorithmNone: + return errCipherSuiteUnset + } + + hintLength := binary.BigEndian.Uint16(data) + if int(hintLength) <= len(data)-2 && m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmPsk) { + m.IdentityHint = append([]byte{}, data[2:2+hintLength]...) + data = data[2+hintLength:] + } + if m.KeyExchangeAlgorithm == types.KeyExchangeAlgorithmPsk { + if len(data) == 0 { + return nil + } + return errLengthMismatch + } + + if !m.KeyExchangeAlgorithm.Has(types.KeyExchangeAlgorithmEcdhe) { + return errLengthMismatch + } + + if _, ok := elliptic.CurveTypes()[elliptic.CurveType(data[0])]; ok { + m.EllipticCurveType = elliptic.CurveType(data[0]) + } else { + return errInvalidEllipticCurveType + } + + if len(data[1:]) < 2 { + return errBufferTooSmall + } + m.NamedCurve = elliptic.Curve(binary.BigEndian.Uint16(data[1:3])) + if _, ok := elliptic.Curves()[m.NamedCurve]; !ok { + return errInvalidNamedCurve + } + if len(data) < 4 { + return errBufferTooSmall + } + + publicKeyLength := int(data[3]) + offset := 4 + publicKeyLength + if len(data) < offset { + return errBufferTooSmall + } + m.PublicKey = append([]byte{}, data[4:offset]...) 
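+	// The ECDHE parameters parsed so far follow the wire layout:
+	// curve_type(1) | named_curve(2) | public_key_len(1) | public_key(n).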
+ + // Anon connection doesn't contains hashAlgorithm, signatureAlgorithm, signature + if len(data) == offset { + return nil + } else if len(data) <= offset { + return errBufferTooSmall + } + + m.HashAlgorithm = hash.Algorithm(data[offset]) + if _, ok := hash.Algorithms()[m.HashAlgorithm]; !ok { + return errInvalidHashAlgorithm + } + offset++ + if len(data) <= offset { + return errBufferTooSmall + } + m.SignatureAlgorithm = signature.Algorithm(data[offset]) + if _, ok := signature.Algorithms()[m.SignatureAlgorithm]; !ok { + return errInvalidSignatureAlgorithm + } + offset++ + if len(data) < offset+2 { + return errBufferTooSmall + } + signatureLength := int(binary.BigEndian.Uint16(data[offset:])) + offset += 2 + if len(data) < offset+signatureLength { + return errBufferTooSmall + } + m.Signature = append([]byte{}, data[offset:offset+signatureLength]...) + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/random.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/random.go new file mode 100644 index 000000000..0ade936eb --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/handshake/random.go @@ -0,0 +1,49 @@ +package handshake + +import ( + "crypto/rand" + "encoding/binary" + "time" +) + +// Consts for Random in Handshake +const ( + RandomBytesLength = 28 + RandomLength = RandomBytesLength + 4 +) + +// Random value that is used in ClientHello and ServerHello +// +// https://tools.ietf.org/html/rfc4346#section-7.4.1.2 +type Random struct { + GMTUnixTime time.Time + RandomBytes [RandomBytesLength]byte +} + +// MarshalFixed encodes the Handshake +func (r *Random) MarshalFixed() [RandomLength]byte { + var out [RandomLength]byte + + binary.BigEndian.PutUint32(out[0:], uint32(r.GMTUnixTime.Unix())) + copy(out[4:], r.RandomBytes[:]) + + return out +} + +// UnmarshalFixed populates the message from encoded data +func (r *Random) UnmarshalFixed(data [RandomLength]byte) { + r.GMTUnixTime = time.Unix(int64(binary.BigEndian.Uint32(data[0:])), 0) + copy(r.RandomBytes[:], data[4:]) +} + +// Populate fills the handshakeRandom with random values +// may be called multiple times +func (r *Random) Populate() error { + r.GMTUnixTime = time.Now() + + tmp := make([]byte, RandomBytesLength) + _, err := rand.Read(tmp) + copy(r.RandomBytes[:], tmp) + + return err +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/errors.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/errors.go new file mode 100644 index 000000000..7033d4058 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/errors.go @@ -0,0 +1,16 @@ +// Package recordlayer implements the TLS Record Layer https://tools.ietf.org/html/rfc5246#section-6 +package recordlayer + +import ( + "errors" + + "github.com/pion/dtls/v2/pkg/protocol" +) + +var ( + errBufferTooSmall = &protocol.TemporaryError{Err: errors.New("buffer is too small")} //nolint:goerr113 + errInvalidPacketLength = &protocol.TemporaryError{Err: errors.New("packet length and declared length do not match")} //nolint:goerr113 + errSequenceNumberOverflow = &protocol.InternalError{Err: errors.New("sequence number overflow")} //nolint:goerr113 + errUnsupportedProtocolVersion = &protocol.FatalError{Err: errors.New("unsupported protocol version")} //nolint:goerr113 + errInvalidContentType = &protocol.TemporaryError{Err: errors.New("invalid content type")} //nolint:goerr113 +) diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/header.go 
b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/header.go new file mode 100644 index 000000000..65047d767 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/header.go @@ -0,0 +1,61 @@ +package recordlayer + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/internal/util" + "github.com/pion/dtls/v2/pkg/protocol" +) + +// Header implements a TLS RecordLayer header +type Header struct { + ContentType protocol.ContentType + ContentLen uint16 + Version protocol.Version + Epoch uint16 + SequenceNumber uint64 // uint48 in spec +} + +// RecordLayer enums +const ( + HeaderSize = 13 + MaxSequenceNumber = 0x0000FFFFFFFFFFFF +) + +// Marshal encodes a TLS RecordLayer Header to binary +func (h *Header) Marshal() ([]byte, error) { + if h.SequenceNumber > MaxSequenceNumber { + return nil, errSequenceNumberOverflow + } + + out := make([]byte, HeaderSize) + out[0] = byte(h.ContentType) + out[1] = h.Version.Major + out[2] = h.Version.Minor + binary.BigEndian.PutUint16(out[3:], h.Epoch) + util.PutBigEndianUint48(out[5:], h.SequenceNumber) + binary.BigEndian.PutUint16(out[HeaderSize-2:], h.ContentLen) + return out, nil +} + +// Unmarshal populates a TLS RecordLayer Header from binary +func (h *Header) Unmarshal(data []byte) error { + if len(data) < HeaderSize { + return errBufferTooSmall + } + h.ContentType = protocol.ContentType(data[0]) + h.Version.Major = data[1] + h.Version.Minor = data[2] + h.Epoch = binary.BigEndian.Uint16(data[3:]) + + // SequenceNumber is stored as uint48, make into uint64 + seqCopy := make([]byte, 8) + copy(seqCopy[2:], data[5:11]) + h.SequenceNumber = binary.BigEndian.Uint64(seqCopy) + + if !h.Version.Equal(protocol.Version1_0) && !h.Version.Equal(protocol.Version1_2) { + return errUnsupportedProtocolVersion + } + + return nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/recordlayer.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/recordlayer.go new file mode 100644 index 000000000..67e5a727b --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/recordlayer/recordlayer.go @@ -0,0 +1,99 @@ +package recordlayer + +import ( + "encoding/binary" + + "github.com/pion/dtls/v2/pkg/protocol" + "github.com/pion/dtls/v2/pkg/protocol/alert" + "github.com/pion/dtls/v2/pkg/protocol/handshake" +) + +// RecordLayer which handles all data transport. +// The record layer is assumed to sit directly on top of some +// reliable transport such as TCP. The record layer can carry four types of content: +// +// 1. Handshake messages—used for algorithm negotiation and key establishment. +// 2. ChangeCipherSpec messages—really part of the handshake but technically a separate kind of message. +// 3. Alert messages—used to signal that errors have occurred +// 4. Application layer data +// +// The DTLS record layer is extremely similar to that of TLS 1.1. The +// only change is the inclusion of an explicit sequence number in the +// record. This sequence number allows the recipient to correctly +// verify the TLS MAC. 
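+//
+// On the wire, each record is a 13-byte header followed by the payload:
+// content_type(1) | version(2) | epoch(2) | sequence_number(6) | length(2).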
+// +// https://tools.ietf.org/html/rfc4347#section-4.1 +type RecordLayer struct { + Header Header + Content protocol.Content +} + +// Marshal encodes the RecordLayer to binary +func (r *RecordLayer) Marshal() ([]byte, error) { + contentRaw, err := r.Content.Marshal() + if err != nil { + return nil, err + } + + r.Header.ContentLen = uint16(len(contentRaw)) + r.Header.ContentType = r.Content.ContentType() + + headerRaw, err := r.Header.Marshal() + if err != nil { + return nil, err + } + + return append(headerRaw, contentRaw...), nil +} + +// Unmarshal populates the RecordLayer from binary +func (r *RecordLayer) Unmarshal(data []byte) error { + if len(data) < HeaderSize { + return errBufferTooSmall + } + if err := r.Header.Unmarshal(data); err != nil { + return err + } + + switch protocol.ContentType(data[0]) { + case protocol.ContentTypeChangeCipherSpec: + r.Content = &protocol.ChangeCipherSpec{} + case protocol.ContentTypeAlert: + r.Content = &alert.Alert{} + case protocol.ContentTypeHandshake: + r.Content = &handshake.Handshake{} + case protocol.ContentTypeApplicationData: + r.Content = &protocol.ApplicationData{} + default: + return errInvalidContentType + } + + return r.Content.Unmarshal(data[HeaderSize:]) +} + +// UnpackDatagram extracts all RecordLayer messages from a single datagram. +// Note that as with TLS, multiple handshake messages may be placed in +// the same DTLS record, provided that there is room and that they are +// part of the same flight. Thus, there are two acceptable ways to pack +// two DTLS messages into the same datagram: in the same record or in +// separate records. +// https://tools.ietf.org/html/rfc6347#section-4.2.3 +func UnpackDatagram(buf []byte) ([][]byte, error) { + out := [][]byte{} + + for offset := 0; len(buf) != offset; { + if len(buf)-offset <= HeaderSize { + return nil, errInvalidPacketLength + } + + pktLen := (HeaderSize + int(binary.BigEndian.Uint16(buf[offset+11:]))) + if offset+pktLen > len(buf) { + return nil, errInvalidPacketLength + } + + out = append(out, buf[offset:offset+pktLen]) + offset += pktLen + } + + return out, nil +} diff --git a/vendor/github.com/pion/dtls/v2/pkg/protocol/version.go b/vendor/github.com/pion/dtls/v2/pkg/protocol/version.go new file mode 100644 index 000000000..d5ddb1d00 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/pkg/protocol/version.go @@ -0,0 +1,21 @@ +// Package protocol provides the DTLS wire format +package protocol + +// Version enums +var ( + Version1_0 = Version{Major: 0xfe, Minor: 0xff} //nolint:gochecknoglobals + Version1_2 = Version{Major: 0xfe, Minor: 0xfd} //nolint:gochecknoglobals +) + +// Version is the minor/major value in the RecordLayer +// and ClientHello/ServerHello +// +// https://tools.ietf.org/html/rfc4346#section-6.2.1 +type Version struct { + Major, Minor uint8 +} + +// Equal determines if two protocol versions are equal +func (v Version) Equal(x Version) bool { + return v.Major == x.Major && v.Minor == x.Minor +} diff --git a/vendor/github.com/pion/dtls/v2/renovate.json b/vendor/github.com/pion/dtls/v2/renovate.json new file mode 100644 index 000000000..f1bb98c6a --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/renovate.json @@ -0,0 +1,6 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "github>pion/renovate-config" + ] +} diff --git a/vendor/github.com/pion/dtls/v2/resume.go b/vendor/github.com/pion/dtls/v2/resume.go new file mode 100644 index 000000000..40e55e449 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/resume.go @@ -0,0 +1,19 @@ 
+package dtls + +import ( + "context" + "net" +) + +// Resume imports an already established dtls connection using a specific dtls state +func Resume(state *State, conn net.Conn, config *Config) (*Conn, error) { + if err := state.initCipherSuite(); err != nil { + return nil, err + } + c, err := createConn(context.Background(), conn, config, state.isClient, state) + if err != nil { + return nil, err + } + + return c, nil +} diff --git a/vendor/github.com/pion/dtls/v2/session.go b/vendor/github.com/pion/dtls/v2/session.go new file mode 100644 index 000000000..f52120cd8 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/session.go @@ -0,0 +1,21 @@ +package dtls + +// Session store data needed in resumption +type Session struct { + // ID store session id + ID []byte + // Secret store session master secret + Secret []byte +} + +// SessionStore defines methods needed for session resumption. +type SessionStore interface { + // Set save a session. + // For client, use server name as key. + // For server, use session id. + Set(key []byte, s Session) error + // Get fetch a session. + Get(key []byte) (Session, error) + // Del clean saved session. + Del(key []byte) error +} diff --git a/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go b/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go new file mode 100644 index 000000000..92cc7c191 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/srtp_protection_profile.go @@ -0,0 +1,14 @@ +package dtls + +import "github.com/pion/dtls/v2/pkg/protocol/extension" + +// SRTPProtectionProfile defines the parameters and options that are in effect for the SRTP processing +// https://tools.ietf.org/html/rfc5764#section-4.1.2 +type SRTPProtectionProfile = extension.SRTPProtectionProfile + +const ( + SRTP_AES128_CM_HMAC_SHA1_80 SRTPProtectionProfile = extension.SRTP_AES128_CM_HMAC_SHA1_80 // nolint:revive,stylecheck + SRTP_AES128_CM_HMAC_SHA1_32 SRTPProtectionProfile = extension.SRTP_AES128_CM_HMAC_SHA1_32 // nolint:revive,stylecheck + SRTP_AEAD_AES_128_GCM SRTPProtectionProfile = extension.SRTP_AEAD_AES_128_GCM // nolint:revive,stylecheck + SRTP_AEAD_AES_256_GCM SRTPProtectionProfile = extension.SRTP_AEAD_AES_256_GCM // nolint:revive,stylecheck +) diff --git a/vendor/github.com/pion/dtls/v2/state.go b/vendor/github.com/pion/dtls/v2/state.go new file mode 100644 index 000000000..22c65c744 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/state.go @@ -0,0 +1,215 @@ +package dtls + +import ( + "bytes" + "encoding/gob" + "sync/atomic" + + "github.com/pion/dtls/v2/pkg/crypto/elliptic" + "github.com/pion/dtls/v2/pkg/crypto/prf" + "github.com/pion/dtls/v2/pkg/protocol/handshake" + "github.com/pion/transport/v2/replaydetector" +) + +// State holds the dtls connection state and implements both encoding.BinaryMarshaler and encoding.BinaryUnmarshaler +type State struct { + localEpoch, remoteEpoch atomic.Value + localSequenceNumber []uint64 // uint48 + localRandom, remoteRandom handshake.Random + masterSecret []byte + cipherSuite CipherSuite // nil if a cipherSuite hasn't been chosen + + srtpProtectionProfile SRTPProtectionProfile // Negotiated SRTPProtectionProfile + PeerCertificates [][]byte + IdentityHint []byte + SessionID []byte + + isClient bool + + preMasterSecret []byte + extendedMasterSecret bool + + namedCurve elliptic.Curve + localKeypair *elliptic.Keypair + cookie []byte + handshakeSendSequence int + handshakeRecvSequence int + serverName string + remoteRequestedCertificate bool // Did we get a CertificateRequest + localCertificatesVerify []byte // cache 
CertificateVerify + localVerifyData []byte // cached VerifyData + localKeySignature []byte // cached keySignature + peerCertificatesVerified bool + + replayDetector []replaydetector.ReplayDetector + + peerSupportedProtocols []string + NegotiatedProtocol string +} + +type serializedState struct { + LocalEpoch uint16 + RemoteEpoch uint16 + LocalRandom [handshake.RandomLength]byte + RemoteRandom [handshake.RandomLength]byte + CipherSuiteID uint16 + MasterSecret []byte + SequenceNumber uint64 + SRTPProtectionProfile uint16 + PeerCertificates [][]byte + IdentityHint []byte + SessionID []byte + IsClient bool +} + +func (s *State) clone() *State { + serialized := s.serialize() + state := &State{} + state.deserialize(*serialized) + + return state +} + +func (s *State) serialize() *serializedState { + // Marshal random values + localRnd := s.localRandom.MarshalFixed() + remoteRnd := s.remoteRandom.MarshalFixed() + + epoch := s.getLocalEpoch() + return &serializedState{ + LocalEpoch: s.getLocalEpoch(), + RemoteEpoch: s.getRemoteEpoch(), + CipherSuiteID: uint16(s.cipherSuite.ID()), + MasterSecret: s.masterSecret, + SequenceNumber: atomic.LoadUint64(&s.localSequenceNumber[epoch]), + LocalRandom: localRnd, + RemoteRandom: remoteRnd, + SRTPProtectionProfile: uint16(s.srtpProtectionProfile), + PeerCertificates: s.PeerCertificates, + IdentityHint: s.IdentityHint, + SessionID: s.SessionID, + IsClient: s.isClient, + } +} + +func (s *State) deserialize(serialized serializedState) { + // Set epoch values + epoch := serialized.LocalEpoch + s.localEpoch.Store(serialized.LocalEpoch) + s.remoteEpoch.Store(serialized.RemoteEpoch) + + for len(s.localSequenceNumber) <= int(epoch) { + s.localSequenceNumber = append(s.localSequenceNumber, uint64(0)) + } + + // Set random values + localRandom := &handshake.Random{} + localRandom.UnmarshalFixed(serialized.LocalRandom) + s.localRandom = *localRandom + + remoteRandom := &handshake.Random{} + remoteRandom.UnmarshalFixed(serialized.RemoteRandom) + s.remoteRandom = *remoteRandom + + s.isClient = serialized.IsClient + + // Set master secret + s.masterSecret = serialized.MasterSecret + + // Set cipher suite + s.cipherSuite = cipherSuiteForID(CipherSuiteID(serialized.CipherSuiteID), nil) + + atomic.StoreUint64(&s.localSequenceNumber[epoch], serialized.SequenceNumber) + s.srtpProtectionProfile = SRTPProtectionProfile(serialized.SRTPProtectionProfile) + + // Set remote certificate + s.PeerCertificates = serialized.PeerCertificates + s.IdentityHint = serialized.IdentityHint + s.SessionID = serialized.SessionID +} + +func (s *State) initCipherSuite() error { + if s.cipherSuite.IsInitialized() { + return nil + } + + localRandom := s.localRandom.MarshalFixed() + remoteRandom := s.remoteRandom.MarshalFixed() + + var err error + if s.isClient { + err = s.cipherSuite.Init(s.masterSecret, localRandom[:], remoteRandom[:], true) + } else { + err = s.cipherSuite.Init(s.masterSecret, remoteRandom[:], localRandom[:], false) + } + if err != nil { + return err + } + return nil +} + +// MarshalBinary is a binary.BinaryMarshaler.MarshalBinary implementation +func (s *State) MarshalBinary() ([]byte, error) { + serialized := s.serialize() + + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(*serialized); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalBinary is a binary.BinaryUnmarshaler.UnmarshalBinary implementation +func (s *State) UnmarshalBinary(data []byte) error { + enc := gob.NewDecoder(bytes.NewBuffer(data)) + var serialized 
serializedState + if err := enc.Decode(&serialized); err != nil { + return err + } + + s.deserialize(serialized) + if err := s.initCipherSuite(); err != nil { + return err + } + return nil +} + +// ExportKeyingMaterial returns length bytes of exported key material in a new +// slice as defined in RFC 5705. +// This allows protocols to use DTLS for key establishment, but +// then use some of the keying material for their own purposes +func (s *State) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) { + if s.getLocalEpoch() == 0 { + return nil, errHandshakeInProgress + } else if len(context) != 0 { + return nil, errContextUnsupported + } else if _, ok := invalidKeyingLabels()[label]; ok { + return nil, errReservedExportKeyingMaterial + } + + localRandom := s.localRandom.MarshalFixed() + remoteRandom := s.remoteRandom.MarshalFixed() + + seed := []byte(label) + if s.isClient { + seed = append(append(seed, localRandom[:]...), remoteRandom[:]...) + } else { + seed = append(append(seed, remoteRandom[:]...), localRandom[:]...) + } + return prf.PHash(s.masterSecret, seed, length, s.cipherSuite.HashFunc()) +} + +func (s *State) getRemoteEpoch() uint16 { + if remoteEpoch, ok := s.remoteEpoch.Load().(uint16); ok { + return remoteEpoch + } + return 0 +} + +func (s *State) getLocalEpoch() uint16 { + if localEpoch, ok := s.localEpoch.Load().(uint16); ok { + return localEpoch + } + return 0 +} diff --git a/vendor/github.com/pion/dtls/v2/util.go b/vendor/github.com/pion/dtls/v2/util.go new file mode 100644 index 000000000..dda055ab3 --- /dev/null +++ b/vendor/github.com/pion/dtls/v2/util.go @@ -0,0 +1,38 @@ +package dtls + +func findMatchingSRTPProfile(a, b []SRTPProtectionProfile) (SRTPProtectionProfile, bool) { + for _, aProfile := range a { + for _, bProfile := range b { + if aProfile == bProfile { + return aProfile, true + } + } + } + return 0, false +} + +func findMatchingCipherSuite(a, b []CipherSuite) (CipherSuite, bool) { + for _, aSuite := range a { + for _, bSuite := range b { + if aSuite.ID() == bSuite.ID() { + return aSuite, true + } + } + } + return nil, false +} + +func splitBytes(bytes []byte, splitLen int) [][]byte { + splitBytes := make([][]byte, 0) + numBytes := len(bytes) + for i := 0; i < numBytes; i += splitLen { + j := i + splitLen + if j > numBytes { + j = numBytes + } + + splitBytes = append(splitBytes, bytes[i:j]) + } + + return splitBytes +} diff --git a/vendor/github.com/pion/logging/.golangci.yml b/vendor/github.com/pion/logging/.golangci.yml new file mode 100644 index 000000000..ffb0058e6 --- /dev/null +++ b/vendor/github.com/pion/logging/.golangci.yml @@ -0,0 +1,13 @@ +linters-settings: + govet: + check-shadowing: true + misspell: + locale: US + +linters: + enable-all: true + +issues: + exclude-use-default: false + max-per-linter: 0 + max-same-issues: 50 diff --git a/vendor/github.com/pion/logging/.travis.yml b/vendor/github.com/pion/logging/.travis.yml new file mode 100644 index 000000000..b96a1edb9 --- /dev/null +++ b/vendor/github.com/pion/logging/.travis.yml @@ -0,0 +1,19 @@ +language: go + +go: + - "1.x" # use the latest Go release + +env: + - GO111MODULE=on + +before_script: + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.15.0 + +script: + - golangci-lint run ./... +# - rm -rf examples # Remove examples, no test coverage for them + - go test -coverpkg=$(go list ./... | tr '\n' ',') -coverprofile=cover.out -v -race -covermode=atomic ./... 
+ - bash <(curl -s https://codecov.io/bash) + - bash .github/assert-contributors.sh + - bash .github/lint-disallowed-functions-in-library.sh + - bash .github/lint-commit-message.sh diff --git a/vendor/github.com/pion/logging/LICENSE b/vendor/github.com/pion/logging/LICENSE new file mode 100644 index 000000000..ab602974d --- /dev/null +++ b/vendor/github.com/pion/logging/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pion/logging/README.md b/vendor/github.com/pion/logging/README.md new file mode 100644 index 000000000..c15471d61 --- /dev/null +++ b/vendor/github.com/pion/logging/README.md @@ -0,0 +1,41 @@ +

+<h1 align="center">Pion Logging</h1>
+<p align="center">The Pion logging library</p>
+<p align="center">(banner and badges: Slack Widget, Build Status, GoDoc, Coverage Status, Go Report Card, License: MIT)</p>
+ +### Roadmap +The library is used as a part of our WebRTC implementation. Please refer to that [roadmap](https://github.com/pion/webrtc/issues/9) to track our major milestones. + +### Community +Pion has an active community on the [Golang Slack](https://invite.slack.golangbridge.org/). Sign up and join the **#pion** channel for discussions and support. You can also use [Pion mailing list](https://groups.google.com/forum/#!forum/pion). + +We are always looking to support **your projects**. Please reach out if you have something to build! + +If you need commercial support or don't want to use public methods you can contact us at [team@pion.ly](mailto:team@pion.ly) + +### Contributing +Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible: + +* [John Bradley](https://github.com/kc5nra) - *Original Author* +* [Sean DuBois](https://github.com/Sean-Der) - *Original Author* +* [Michael MacDonald](https://github.com/mjmac) - *Original Author* +* [Woodrow Douglass](https://github.com/wdouglass) - *Test coverage* +* [Michiel De Backker](https://github.com/backkem) - *Docs* +* [Hugo Arregui](https://github.com/hugoArregui) - *Custom Logs* +* [Justin Okamoto](https://github.com/justinokamoto) - *Disabled Logs Update* + +### License +MIT License - see [LICENSE](LICENSE) for full text diff --git a/vendor/github.com/pion/logging/logger.go b/vendor/github.com/pion/logging/logger.go new file mode 100644 index 000000000..35f650581 --- /dev/null +++ b/vendor/github.com/pion/logging/logger.go @@ -0,0 +1,228 @@ +package logging + +import ( + "fmt" + "io" + "log" + "os" + "strings" + "sync" +) + +// Use this abstraction to ensure thread-safe access to the logger's io.Writer +// (which could change at runtime) +type loggerWriter struct { + sync.RWMutex + output io.Writer +} + +func (lw *loggerWriter) SetOutput(output io.Writer) { + lw.Lock() + defer lw.Unlock() + lw.output = output +} + +func (lw *loggerWriter) Write(data []byte) (int, error) { + lw.RLock() + defer lw.RUnlock() + return lw.output.Write(data) +} + +// DefaultLeveledLogger encapsulates functionality for providing logging at +// user-defined levels +type DefaultLeveledLogger struct { + level LogLevel + writer *loggerWriter + trace *log.Logger + debug *log.Logger + info *log.Logger + warn *log.Logger + err *log.Logger +} + +// WithTraceLogger is a chainable configuration function which sets the +// Trace-level logger +func (ll *DefaultLeveledLogger) WithTraceLogger(log *log.Logger) *DefaultLeveledLogger { + ll.trace = log + return ll +} + +// WithDebugLogger is a chainable configuration function which sets the +// Debug-level logger +func (ll *DefaultLeveledLogger) WithDebugLogger(log *log.Logger) *DefaultLeveledLogger { + ll.debug = log + return ll +} + +// WithInfoLogger is a chainable configuration function which sets the +// Info-level logger +func (ll *DefaultLeveledLogger) WithInfoLogger(log *log.Logger) *DefaultLeveledLogger { + ll.info = log + return ll +} + +// WithWarnLogger is a chainable configuration function which sets the +// Warn-level logger +func (ll *DefaultLeveledLogger) WithWarnLogger(log *log.Logger) *DefaultLeveledLogger { + ll.warn = log + return ll +} + +// WithErrorLogger is a chainable configuration function which sets the +// Error-level logger +func (ll *DefaultLeveledLogger) WithErrorLogger(log *log.Logger) *DefaultLeveledLogger { + ll.err = log + return ll +} + +// WithOutput is a chainable configuration function which 
sets the logger's +// logging output to the supplied io.Writer +func (ll *DefaultLeveledLogger) WithOutput(output io.Writer) *DefaultLeveledLogger { + ll.writer.SetOutput(output) + return ll +} + +func (ll *DefaultLeveledLogger) logf(logger *log.Logger, level LogLevel, format string, args ...interface{}) { + if ll.level.Get() < level { + return + } + + callDepth := 3 // this frame + wrapper func + caller + msg := fmt.Sprintf(format, args...) + if err := logger.Output(callDepth, msg); err != nil { + fmt.Fprintf(os.Stderr, "Unable to log: %s", err) + } +} + +// SetLevel sets the logger's logging level +func (ll *DefaultLeveledLogger) SetLevel(newLevel LogLevel) { + ll.level.Set(newLevel) +} + +// Trace emits the preformatted message if the logger is at or below LogLevelTrace +func (ll *DefaultLeveledLogger) Trace(msg string) { + ll.logf(ll.trace, LogLevelTrace, msg) +} + +// Tracef formats and emits a message if the logger is at or below LogLevelTrace +func (ll *DefaultLeveledLogger) Tracef(format string, args ...interface{}) { + ll.logf(ll.trace, LogLevelTrace, format, args...) +} + +// Debug emits the preformatted message if the logger is at or below LogLevelDebug +func (ll *DefaultLeveledLogger) Debug(msg string) { + ll.logf(ll.debug, LogLevelDebug, msg) +} + +// Debugf formats and emits a message if the logger is at or below LogLevelDebug +func (ll *DefaultLeveledLogger) Debugf(format string, args ...interface{}) { + ll.logf(ll.debug, LogLevelDebug, format, args...) +} + +// Info emits the preformatted message if the logger is at or below LogLevelInfo +func (ll *DefaultLeveledLogger) Info(msg string) { + ll.logf(ll.info, LogLevelInfo, msg) +} + +// Infof formats and emits a message if the logger is at or below LogLevelInfo +func (ll *DefaultLeveledLogger) Infof(format string, args ...interface{}) { + ll.logf(ll.info, LogLevelInfo, format, args...) +} + +// Warn emits the preformatted message if the logger is at or below LogLevelWarn +func (ll *DefaultLeveledLogger) Warn(msg string) { + ll.logf(ll.warn, LogLevelWarn, msg) +} + +// Warnf formats and emits a message if the logger is at or below LogLevelWarn +func (ll *DefaultLeveledLogger) Warnf(format string, args ...interface{}) { + ll.logf(ll.warn, LogLevelWarn, format, args...) +} + +// Error emits the preformatted message if the logger is at or below LogLevelError +func (ll *DefaultLeveledLogger) Error(msg string) { + ll.logf(ll.err, LogLevelError, msg) +} + +// Errorf formats and emits a message if the logger is at or below LogLevelError +func (ll *DefaultLeveledLogger) Errorf(format string, args ...interface{}) { + ll.logf(ll.err, LogLevelError, format, args...) +} + +// NewDefaultLeveledLoggerForScope returns a configured LeveledLogger +func NewDefaultLeveledLoggerForScope(scope string, level LogLevel, writer io.Writer) *DefaultLeveledLogger { + if writer == nil { + writer = os.Stdout + } + logger := &DefaultLeveledLogger{ + writer: &loggerWriter{output: writer}, + level: level, + } + return logger. + WithTraceLogger(log.New(logger.writer, fmt.Sprintf("%s TRACE: ", scope), log.Lmicroseconds|log.Lshortfile)). + WithDebugLogger(log.New(logger.writer, fmt.Sprintf("%s DEBUG: ", scope), log.Lmicroseconds|log.Lshortfile)). + WithInfoLogger(log.New(logger.writer, fmt.Sprintf("%s INFO: ", scope), log.LstdFlags)). + WithWarnLogger(log.New(logger.writer, fmt.Sprintf("%s WARNING: ", scope), log.LstdFlags)). 
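+		// Trace and Debug include microsecond timestamps and caller
+		// file:line (log.Lshortfile); Info and above use log.LstdFlags.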
+ WithErrorLogger(log.New(logger.writer, fmt.Sprintf("%s ERROR: ", scope), log.LstdFlags)) +} + +// DefaultLoggerFactory define levels by scopes and creates new DefaultLeveledLogger +type DefaultLoggerFactory struct { + Writer io.Writer + DefaultLogLevel LogLevel + ScopeLevels map[string]LogLevel +} + +// NewDefaultLoggerFactory creates a new DefaultLoggerFactory +func NewDefaultLoggerFactory() *DefaultLoggerFactory { + factory := DefaultLoggerFactory{} + factory.DefaultLogLevel = LogLevelError + factory.ScopeLevels = make(map[string]LogLevel) + factory.Writer = os.Stdout + + logLevels := map[string]LogLevel{ + "DISABLE": LogLevelDisabled, + "ERROR": LogLevelError, + "WARN": LogLevelWarn, + "INFO": LogLevelInfo, + "DEBUG": LogLevelDebug, + "TRACE": LogLevelTrace, + } + + for name, level := range logLevels { + env := os.Getenv(fmt.Sprintf("PION_LOG_%s", name)) + + if env == "" { + env = os.Getenv(fmt.Sprintf("PIONS_LOG_%s", name)) + } + + if env == "" { + continue + } + + if strings.ToLower(env) == "all" { + factory.DefaultLogLevel = level + continue + } + + scopes := strings.Split(strings.ToLower(env), ",") + for _, scope := range scopes { + factory.ScopeLevels[scope] = level + } + } + + return &factory +} + +// NewLogger returns a configured LeveledLogger for the given , argsscope +func (f *DefaultLoggerFactory) NewLogger(scope string) LeveledLogger { + logLevel := f.DefaultLogLevel + if f.ScopeLevels != nil { + scopeLevel, found := f.ScopeLevels[scope] + + if found { + logLevel = scopeLevel + } + } + return NewDefaultLeveledLoggerForScope(scope, logLevel, f.Writer) +} diff --git a/vendor/github.com/pion/logging/scoped.go b/vendor/github.com/pion/logging/scoped.go new file mode 100644 index 000000000..678bab426 --- /dev/null +++ b/vendor/github.com/pion/logging/scoped.go @@ -0,0 +1,72 @@ +package logging + +import ( + "sync/atomic" +) + +// LogLevel represents the level at which the logger will emit log messages +type LogLevel int32 + +// Set updates the LogLevel to the supplied value +func (ll *LogLevel) Set(newLevel LogLevel) { + atomic.StoreInt32((*int32)(ll), int32(newLevel)) +} + +// Get retrieves the current LogLevel value +func (ll *LogLevel) Get() LogLevel { + return LogLevel(atomic.LoadInt32((*int32)(ll))) +} + +func (ll LogLevel) String() string { + switch ll { + case LogLevelDisabled: + return "Disabled" + case LogLevelError: + return "Error" + case LogLevelWarn: + return "Warn" + case LogLevelInfo: + return "Info" + case LogLevelDebug: + return "Debug" + case LogLevelTrace: + return "Trace" + default: + return "UNKNOWN" + } +} + +const ( + // LogLevelDisabled completely disables logging of any events + LogLevelDisabled LogLevel = iota + // LogLevelError is for fatal errors which should be handled by user code, + // but are logged to ensure that they are seen + LogLevelError + // LogLevelWarn is for logging abnormal, but non-fatal library operation + LogLevelWarn + // LogLevelInfo is for logging normal library operation (e.g. state transitions, etc.) + LogLevelInfo + // LogLevelDebug is for logging low-level library information (e.g. internal operations) + LogLevelDebug + // LogLevelTrace is for logging very low-level library information (e.g. 
network traces) + LogLevelTrace +) + +// LeveledLogger is the basic pion Logger interface +type LeveledLogger interface { + Trace(msg string) + Tracef(format string, args ...interface{}) + Debug(msg string) + Debugf(format string, args ...interface{}) + Info(msg string) + Infof(format string, args ...interface{}) + Warn(msg string) + Warnf(format string, args ...interface{}) + Error(msg string) + Errorf(format string, args ...interface{}) +} + +// LoggerFactory is the basic pion LoggerFactory interface +type LoggerFactory interface { + NewLogger(scope string) LeveledLogger +} diff --git a/vendor/github.com/pion/transport/v2/AUTHORS.txt b/vendor/github.com/pion/transport/v2/AUTHORS.txt new file mode 100644 index 000000000..e8bfdeda0 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/AUTHORS.txt @@ -0,0 +1,23 @@ +# Thank you to everyone that made Pion possible. If you are interested in contributing +# we would love to have you https://github.com/pion/webrtc/wiki/Contributing +# +# This file is auto generated, using git to list all individuals contributors. +# see `.github/generate-authors.sh` for the scripting +Adrian Cable +Atsushi Watanabe +backkem +Hugo Arregui +Jeremiah Millay +Jozef Kralik +Juliusz Chroboczek +Luke Curley +Mathis Engelbart +OrlandoCo +Sean DuBois +Sean DuBois +Sean DuBois +Steffen Vogel +Winlin +Woodrow Douglass +Yutaka Takeda +ZHENK diff --git a/vendor/github.com/pion/transport/v2/LICENSE b/vendor/github.com/pion/transport/v2/LICENSE new file mode 100644 index 000000000..ab602974d --- /dev/null +++ b/vendor/github.com/pion/transport/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pion/transport/v2/connctx/connctx.go b/vendor/github.com/pion/transport/v2/connctx/connctx.go new file mode 100644 index 000000000..fc25ee9f0 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/connctx/connctx.go @@ -0,0 +1,172 @@ +// Package connctx wraps net.Conn using context.Context. +package connctx + +import ( + "context" + "errors" + "io" + "net" + "sync" + "sync/atomic" + "time" +) + +// ErrClosing is returned on Write to closed connection. +var ErrClosing = errors.New("use of closed network connection") + +// Reader is an interface for context controlled reader. +type Reader interface { + ReadContext(context.Context, []byte) (int, error) +} + +// Writer is an interface for context controlled writer. 
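+// A context with a deadline or cancellation aborts a blocked call; a
+// plain context.Background() behaves like an ordinary blocking Write.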
+type Writer interface { + WriteContext(context.Context, []byte) (int, error) +} + +// ReadWriter is a composite of ReadWriter. +type ReadWriter interface { + Reader + Writer +} + +// ConnCtx is a wrapper of net.Conn using context.Context. +type ConnCtx interface { + Reader + Writer + io.Closer + LocalAddr() net.Addr + RemoteAddr() net.Addr + Conn() net.Conn +} + +type connCtx struct { + nextConn net.Conn + closed chan struct{} + closeOnce sync.Once + readMu sync.Mutex + writeMu sync.Mutex +} + +var veryOld = time.Unix(0, 1) //nolint:gochecknoglobals + +// New creates a new ConnCtx wrapping given net.Conn. +func New(conn net.Conn) ConnCtx { + c := &connCtx{ + nextConn: conn, + closed: make(chan struct{}), + } + return c +} + +func (c *connCtx) ReadContext(ctx context.Context, b []byte) (int, error) { + c.readMu.Lock() + defer c.readMu.Unlock() + + select { + case <-c.closed: + return 0, io.EOF + default: + } + + done := make(chan struct{}) + var wg sync.WaitGroup + var errSetDeadline atomic.Value + wg.Add(1) + go func() { + defer wg.Done() + select { + case <-ctx.Done(): + // context canceled + if err := c.nextConn.SetReadDeadline(veryOld); err != nil { + errSetDeadline.Store(err) + return + } + <-done + if err := c.nextConn.SetReadDeadline(time.Time{}); err != nil { + errSetDeadline.Store(err) + } + case <-done: + } + }() + + n, err := c.nextConn.Read(b) + + close(done) + wg.Wait() + if e := ctx.Err(); e != nil && n == 0 { + err = e + } + if err2, ok := errSetDeadline.Load().(error); ok && err == nil && err2 != nil { + err = err2 + } + return n, err +} + +func (c *connCtx) WriteContext(ctx context.Context, b []byte) (int, error) { + c.writeMu.Lock() + defer c.writeMu.Unlock() + + select { + case <-c.closed: + return 0, ErrClosing + default: + } + + done := make(chan struct{}) + var wg sync.WaitGroup + var errSetDeadline atomic.Value + wg.Add(1) + go func() { + defer wg.Done() + select { + case <-ctx.Done(): + // context canceled + if err := c.nextConn.SetWriteDeadline(veryOld); err != nil { + errSetDeadline.Store(err) + return + } + <-done + if err := c.nextConn.SetWriteDeadline(time.Time{}); err != nil { + errSetDeadline.Store(err) + } + case <-done: + } + }() + + n, err := c.nextConn.Write(b) + + close(done) + wg.Wait() + if e := ctx.Err(); e != nil && n == 0 { + err = e + } + if err2, ok := errSetDeadline.Load().(error); ok && err == nil && err2 != nil { + err = err2 + } + return n, err +} + +func (c *connCtx) Close() error { + err := c.nextConn.Close() + c.closeOnce.Do(func() { + c.writeMu.Lock() + c.readMu.Lock() + close(c.closed) + c.readMu.Unlock() + c.writeMu.Unlock() + }) + return err +} + +func (c *connCtx) LocalAddr() net.Addr { + return c.nextConn.LocalAddr() +} + +func (c *connCtx) RemoteAddr() net.Addr { + return c.nextConn.RemoteAddr() +} + +func (c *connCtx) Conn() net.Conn { + return c.nextConn +} diff --git a/vendor/github.com/pion/transport/v2/connctx/pipe.go b/vendor/github.com/pion/transport/v2/connctx/pipe.go new file mode 100644 index 000000000..e2f040928 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/connctx/pipe.go @@ -0,0 +1,11 @@ +package connctx + +import ( + "net" +) + +// Pipe creates piped pair of ConnCtx. 
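+// It is convenient in tests; a minimal sketch (net.Pipe is synchronous,
+// so the write must happen on another goroutine):
+//
+//	ca, cb := connctx.Pipe()
+//	go func() { _, _ = ca.WriteContext(context.Background(), []byte("ping")) }()
+//	buf := make([]byte, 4)
+//	n, err := cb.ReadContext(context.Background(), buf)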
+func Pipe() (ConnCtx, ConnCtx) { + ca, cb := net.Pipe() + return New(ca), New(cb) +} diff --git a/vendor/github.com/pion/transport/v2/deadline/deadline.go b/vendor/github.com/pion/transport/v2/deadline/deadline.go new file mode 100644 index 000000000..918b18b68 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/deadline/deadline.go @@ -0,0 +1,114 @@ +// Package deadline provides deadline timer used to implement +// net.Conn compatible connection +package deadline + +import ( + "context" + "sync" + "time" +) + +// Deadline signals updatable deadline timer. +// Also, it implements context.Context. +type Deadline struct { + exceeded chan struct{} + stop chan struct{} + stopped chan bool + deadline time.Time + mu sync.RWMutex +} + +// New creates new deadline timer. +func New() *Deadline { + d := &Deadline{ + exceeded: make(chan struct{}), + stop: make(chan struct{}), + stopped: make(chan bool, 1), + } + d.stopped <- true + return d +} + +// Set new deadline. Zero value means no deadline. +func (d *Deadline) Set(t time.Time) { + d.mu.Lock() + defer d.mu.Unlock() + + d.deadline = t + + close(d.stop) + + select { + case <-d.exceeded: + d.exceeded = make(chan struct{}) + default: + stopped := <-d.stopped + if !stopped { + d.exceeded = make(chan struct{}) + } + } + d.stop = make(chan struct{}) + d.stopped = make(chan bool, 1) + + if t.IsZero() { + d.stopped <- true + return + } + + if dur := time.Until(t); dur > 0 { + exceeded := d.exceeded + stopped := d.stopped + go func() { + timer := time.NewTimer(dur) + select { + case <-timer.C: + close(exceeded) + stopped <- false + case <-d.stop: + if !timer.Stop() { + <-timer.C + } + stopped <- true + } + }() + return + } + + close(d.exceeded) + d.stopped <- false +} + +// Done receives deadline signal. +func (d *Deadline) Done() <-chan struct{} { + d.mu.RLock() + defer d.mu.RUnlock() + return d.exceeded +} + +// Err returns context.DeadlineExceeded if the deadline is exceeded. +// Otherwise, it returns nil. +func (d *Deadline) Err() error { + d.mu.RLock() + defer d.mu.RUnlock() + select { + case <-d.exceeded: + return context.DeadlineExceeded + default: + return nil + } +} + +// Deadline returns current deadline. +func (d *Deadline) Deadline() (time.Time, bool) { + d.mu.RLock() + defer d.mu.RUnlock() + if d.deadline.IsZero() { + return d.deadline, false + } + return d.deadline, true +} + +// Value returns nil. +func (d *Deadline) Value(interface{}) interface{} { + return nil +} diff --git a/vendor/github.com/pion/transport/v2/packetio/buffer.go b/vendor/github.com/pion/transport/v2/packetio/buffer.go new file mode 100644 index 000000000..5da0901c6 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/packetio/buffer.go @@ -0,0 +1,347 @@ +// Package packetio provides packet buffer +package packetio + +import ( + "errors" + "io" + "sync" + "time" + + "github.com/pion/transport/v2/deadline" +) + +var errPacketTooBig = errors.New("packet too big") + +// BufferPacketType allow the Buffer to know which packet protocol is writing. +type BufferPacketType int + +const ( + // RTPBufferPacket indicates the Buffer that is handling RTP packets + RTPBufferPacket BufferPacketType = 1 + // RTCPBufferPacket indicates the Buffer that is handling RTCP packets + RTCPBufferPacket BufferPacketType = 2 +) + +// Buffer allows writing packets to an intermediate buffer, which can then be read form. +// This is verify similar to bytes.Buffer but avoids combining multiple writes into a single read. +type Buffer struct { + mutex sync.Mutex + + // this is a circular buffer. 
If head <= tail, then the useful + // data is in the interval [head, tail[. If tail < head, then + // the useful data is the union of [head, len[ and [0, tail[. + // In order to avoid ambiguity when head = tail, we always leave + // an unused byte in the buffer. + data []byte + head, tail int + + notify chan struct{} + subs bool + closed bool + + count int + limitCount, limitSize int + + readDeadline *deadline.Deadline +} + +const ( + minSize = 2048 + cutoffSize = 128 * 1024 + maxSize = 4 * 1024 * 1024 +) + +// NewBuffer creates a new Buffer. +func NewBuffer() *Buffer { + return &Buffer{ + notify: make(chan struct{}), + readDeadline: deadline.New(), + } +} + +// available returns true if the buffer is large enough to fit a packet +// of the given size, taking overhead into account. +func (b *Buffer) available(size int) bool { + available := b.head - b.tail + if available <= 0 { + available += len(b.data) + } + // we interpret head=tail as empty, so always keep a byte free + if size+2+1 > available { + return false + } + + return true +} + +// grow increases the size of the buffer. If it returns nil, then the +// buffer has been grown. It returns ErrFull if hits a limit. +func (b *Buffer) grow() error { + var newSize int + if len(b.data) < cutoffSize { + newSize = 2 * len(b.data) + } else { + newSize = 5 * len(b.data) / 4 + } + if newSize < minSize { + newSize = minSize + } + if (b.limitSize <= 0 || sizeHardLimit) && newSize > maxSize { + newSize = maxSize + } + + // one byte slack + if b.limitSize > 0 && newSize > b.limitSize+1 { + newSize = b.limitSize + 1 + } + + if newSize <= len(b.data) { + return ErrFull + } + + newData := make([]byte, newSize) + + var n int + if b.head <= b.tail { + // data was contiguous + n = copy(newData, b.data[b.head:b.tail]) + } else { + // data was discontinuous + n = copy(newData, b.data[b.head:]) + n += copy(newData[n:], b.data[:b.tail]) + } + b.head = 0 + b.tail = n + b.data = newData + + return nil +} + +// Write appends a copy of the packet data to the buffer. +// Returns ErrFull if the packet doesn't fit. +// +// Note that the packet size is limited to 65536 bytes since v0.11.0 due to the internal data structure. +func (b *Buffer) Write(packet []byte) (int, error) { + if len(packet) >= 0x10000 { + return 0, errPacketTooBig + } + + b.mutex.Lock() + + if b.closed { + b.mutex.Unlock() + return 0, io.ErrClosedPipe + } + + if (b.limitCount > 0 && b.count >= b.limitCount) || + (b.limitSize > 0 && b.size()+2+len(packet) > b.limitSize) { + b.mutex.Unlock() + return 0, ErrFull + } + + // grow the buffer until the packet fits + for !b.available(len(packet)) { + err := b.grow() + if err != nil { + b.mutex.Unlock() + return 0, err + } + } + + var notify chan struct{} + + if b.subs { + // readers are waiting. Prepare to notify, but only + // actually do it after we release the lock. + notify = b.notify + b.notify = make(chan struct{}) + b.subs = false + } + + // store the length of the packet + b.data[b.tail] = uint8(len(packet) >> 8) + b.tail++ + if b.tail >= len(b.data) { + b.tail = 0 + } + b.data[b.tail] = uint8(len(packet)) + b.tail++ + if b.tail >= len(b.data) { + b.tail = 0 + } + + // store the packet + n := copy(b.data[b.tail:], packet) + b.tail += n + if b.tail >= len(b.data) { + // we reached the end, wrap around + m := copy(b.data, packet[n:]) + b.tail = m + } + b.count++ + b.mutex.Unlock() + + if notify != nil { + close(notify) + } + + return len(packet), nil +} + +// Read populates the given byte slice, returning the number of bytes read. 
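+// Each call returns at most one packet, as written by a single Write.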
+// Blocks until data is available or the buffer is closed.
+// Returns io.ErrShortBuffer if the packet is too small to copy the Write.
+// Returns io.EOF if the buffer is closed.
+func (b *Buffer) Read(packet []byte) (n int, err error) {
+	// Return immediately if the deadline is already exceeded.
+	select {
+	case <-b.readDeadline.Done():
+		return 0, &netError{ErrTimeout, true, true}
+	default:
+	}
+
+	for {
+		b.mutex.Lock()
+
+		if b.head != b.tail {
+			// decode the packet size
+			n1 := b.data[b.head]
+			b.head++
+			if b.head >= len(b.data) {
+				b.head = 0
+			}
+			n2 := b.data[b.head]
+			b.head++
+			if b.head >= len(b.data) {
+				b.head = 0
+			}
+			count := int((uint16(n1) << 8) | uint16(n2))
+
+			// determine the number of bytes we'll actually copy
+			copied := count
+			if copied > len(packet) {
+				copied = len(packet)
+			}
+
+			// copy the data
+			if b.head+copied < len(b.data) {
+				copy(packet, b.data[b.head:b.head+copied])
+			} else {
+				k := copy(packet, b.data[b.head:])
+				copy(packet[k:], b.data[:copied-k])
+			}
+
+			// advance head, discarding any data that wasn't copied
+			b.head += count
+			if b.head >= len(b.data) {
+				b.head -= len(b.data)
+			}
+
+			if b.head == b.tail {
+				// the buffer is empty, reset to beginning
+				// in order to improve cache locality.
+				b.head = 0
+				b.tail = 0
+			}
+
+			b.count--
+
+			b.mutex.Unlock()
+
+			if copied < count {
+				return copied, io.ErrShortBuffer
+			}
+			return copied, nil
+		}
+
+		if b.closed {
+			b.mutex.Unlock()
+			return 0, io.EOF
+		}
+
+		notify := b.notify
+		b.subs = true
+		b.mutex.Unlock()
+
+		select {
+		case <-b.readDeadline.Done():
+			return 0, &netError{ErrTimeout, true, true}
+		case <-notify:
+		}
+	}
+}
+
+// Close the buffer, unblocking any pending reads.
+// Data in the buffer can still be read; Read returns io.EOF only once the buffer is empty.
+func (b *Buffer) Close() (err error) {
+	b.mutex.Lock()
+
+	if b.closed {
+		b.mutex.Unlock()
+		return nil
+	}
+
+	notify := b.notify
+	b.closed = true
+
+	b.mutex.Unlock()
+
+	close(notify)
+
+	return nil
+}
+
+// Count returns the number of packets in the buffer.
+func (b *Buffer) Count() int {
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
+	return b.count
+}
+
+// SetLimitCount controls the maximum number of packets that can be buffered.
+// Causes Write to return ErrFull when this limit is reached.
+// A zero value will disable this limit.
+func (b *Buffer) SetLimitCount(limit int) {
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
+
+	b.limitCount = limit
+}
+
+// Size returns the total byte size of packets in the buffer, including
+// a small amount of administrative overhead.
+func (b *Buffer) Size() int {
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
+
+	return b.size()
+}
+
+func (b *Buffer) size() int {
+	size := b.tail - b.head
+	if size < 0 {
+		size += len(b.data)
+	}
+	return size
+}
+
+// SetLimitSize controls the maximum number of bytes that can be buffered.
+// Causes Write to return ErrFull when this limit is reached.
+// A zero value means 4MB since v0.11.0.
+//
+// Users can set the packetioSizeHardLimit build tag to enable the 4MB hard limit.
+// When the packetioSizeHardLimit build tag is set, a SetLimitSize exceeding
+// the hard limit is silently discarded.
+func (b *Buffer) SetLimitSize(limit int) {
+	b.mutex.Lock()
+	defer b.mutex.Unlock()
+
+	b.limitSize = limit
+}
+
+// SetReadDeadline sets the deadline for the Read operation.
+// Setting to zero means no deadline.
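+//
+// A minimal usage sketch (names are illustrative):
+//
+//	b := packetio.NewBuffer()
+//	_ = b.SetReadDeadline(time.Now().Add(time.Second))
+//	pkt := make([]byte, 1500)
+//	n, err := b.Read(pkt) // err is a net.Error with Timeout()==true if nothing arrives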
+func (b *Buffer) SetReadDeadline(t time.Time) error { + b.readDeadline.Set(t) + return nil +} diff --git a/vendor/github.com/pion/transport/v2/packetio/errors.go b/vendor/github.com/pion/transport/v2/packetio/errors.go new file mode 100644 index 000000000..06f1b9d98 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/packetio/errors.go @@ -0,0 +1,27 @@ +package packetio + +import ( + "errors" +) + +// netError implements net.Error +type netError struct { + error + timeout, temporary bool +} + +func (e *netError) Timeout() bool { + return e.timeout +} + +func (e *netError) Temporary() bool { + return e.temporary +} + +var ( + // ErrFull is returned when the buffer has hit the configured limits. + ErrFull = errors.New("packetio.Buffer is full, discarding write") + + // ErrTimeout is returned when a deadline has expired + ErrTimeout = errors.New("i/o timeout") +) diff --git a/vendor/github.com/pion/transport/v2/packetio/hardlimit.go b/vendor/github.com/pion/transport/v2/packetio/hardlimit.go new file mode 100644 index 000000000..9f4fc5e74 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/packetio/hardlimit.go @@ -0,0 +1,6 @@ +//go:build packetioSizeHardlimit +// +build packetioSizeHardlimit + +package packetio + +const sizeHardLimit = true diff --git a/vendor/github.com/pion/transport/v2/packetio/no_hardlimit.go b/vendor/github.com/pion/transport/v2/packetio/no_hardlimit.go new file mode 100644 index 000000000..869e0b667 --- /dev/null +++ b/vendor/github.com/pion/transport/v2/packetio/no_hardlimit.go @@ -0,0 +1,6 @@ +//go:build !packetioSizeHardlimit +// +build !packetioSizeHardlimit + +package packetio + +const sizeHardLimit = false diff --git a/vendor/github.com/pion/transport/v2/replaydetector/fixedbig.go b/vendor/github.com/pion/transport/v2/replaydetector/fixedbig.go new file mode 100644 index 000000000..a571a1aad --- /dev/null +++ b/vendor/github.com/pion/transport/v2/replaydetector/fixedbig.go @@ -0,0 +1,78 @@ +package replaydetector + +import ( + "fmt" +) + +// fixedBigInt is the fix-sized multi-word integer. +type fixedBigInt struct { + bits []uint64 + n uint + msbMask uint64 +} + +// newFixedBigInt creates a new fix-sized multi-word int. +func newFixedBigInt(n uint) *fixedBigInt { + chunkSize := (n + 63) / 64 + if chunkSize == 0 { + chunkSize = 1 + } + return &fixedBigInt{ + bits: make([]uint64, chunkSize), + n: n, + msbMask: (1 << (64 - n%64)) - 1, + } +} + +// Lsh is the left shift operation. +func (s *fixedBigInt) Lsh(n uint) { + if n == 0 { + return + } + nChunk := int(n / 64) + nN := n % 64 + + for i := len(s.bits) - 1; i >= 0; i-- { + var carry uint64 + if i-nChunk >= 0 { + carry = s.bits[i-nChunk] << nN + if i-nChunk-1 >= 0 { + carry |= s.bits[i-nChunk-1] >> (64 - nN) + } + } + s.bits[i] = (s.bits[i] << n) | carry + } + s.bits[len(s.bits)-1] &= s.msbMask +} + +// Bit returns i-th bit of the fixedBigInt. +func (s *fixedBigInt) Bit(i uint) uint { + if i >= s.n { + return 0 + } + chunk := i / 64 + pos := i % 64 + if s.bits[chunk]&(1<= s.n { + return + } + chunk := i / 64 + pos := i % 64 + s.bits[chunk] |= 1 << pos +} + +// String returns string representation of fixedBigInt. 
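+// Words are printed most-significant first as zero-padded hex.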
+func (s *fixedBigInt) String() string { + var out string + for i := len(s.bits) - 1; i >= 0; i-- { + out += fmt.Sprintf("%016X", s.bits[i]) + } + return out +} diff --git a/vendor/github.com/pion/transport/v2/replaydetector/replaydetector.go b/vendor/github.com/pion/transport/v2/replaydetector/replaydetector.go new file mode 100644 index 000000000..297f9f3ae --- /dev/null +++ b/vendor/github.com/pion/transport/v2/replaydetector/replaydetector.go @@ -0,0 +1,116 @@ +// Package replaydetector provides packet replay detection algorithm. +package replaydetector + +// ReplayDetector is the interface of sequence replay detector. +type ReplayDetector interface { + // Check returns true if given sequence number is not replayed. + // Call accept() to mark the packet is received properly. + Check(seq uint64) (accept func(), ok bool) +} + +type slidingWindowDetector struct { + latestSeq uint64 + maxSeq uint64 + windowSize uint + mask *fixedBigInt +} + +// New creates ReplayDetector. +// Created ReplayDetector doesn't allow wrapping. +// It can handle monotonically increasing sequence number up to +// full 64bit number. It is suitable for DTLS replay protection. +func New(windowSize uint, maxSeq uint64) ReplayDetector { + return &slidingWindowDetector{ + maxSeq: maxSeq, + windowSize: windowSize, + mask: newFixedBigInt(windowSize), + } +} + +func (d *slidingWindowDetector) Check(seq uint64) (accept func(), ok bool) { + if seq > d.maxSeq { + // Exceeded upper limit. + return func() {}, false + } + + if seq <= d.latestSeq { + if d.latestSeq >= uint64(d.windowSize)+seq { + return func() {}, false + } + if d.mask.Bit(uint(d.latestSeq-seq)) != 0 { + // The sequence number is duplicated. + return func() {}, false + } + } + + return func() { + if seq > d.latestSeq { + // Update the head of the window. + d.mask.Lsh(uint(seq - d.latestSeq)) + d.latestSeq = seq + } + diff := (d.latestSeq - seq) % d.maxSeq + d.mask.SetBit(uint(diff)) + }, true +} + +// WithWrap creates ReplayDetector allowing sequence wrapping. +// This is suitable for short bit width counter like SRTP and SRTCP. +func WithWrap(windowSize uint, maxSeq uint64) ReplayDetector { + return &wrappedSlidingWindowDetector{ + maxSeq: maxSeq, + windowSize: windowSize, + mask: newFixedBigInt(windowSize), + } +} + +type wrappedSlidingWindowDetector struct { + latestSeq uint64 + maxSeq uint64 + windowSize uint + mask *fixedBigInt + init bool +} + +func (d *wrappedSlidingWindowDetector) Check(seq uint64) (accept func(), ok bool) { + if seq > d.maxSeq { + // Exceeded upper limit. + return func() {}, false + } + if !d.init { + if seq != 0 { + d.latestSeq = seq - 1 + } else { + d.latestSeq = d.maxSeq + } + d.init = true + } + + diff := int64(d.latestSeq) - int64(seq) + // Wrap the number. + if diff > int64(d.maxSeq)/2 { + diff -= int64(d.maxSeq + 1) + } else if diff <= -int64(d.maxSeq)/2 { + diff += int64(d.maxSeq + 1) + } + + if diff >= int64(d.windowSize) { + // Too old. + return func() {}, false + } + if diff >= 0 { + if d.mask.Bit(uint(diff)) != 0 { + // The sequence number is duplicated. + return func() {}, false + } + } + + return func() { + if diff < 0 { + // Update the head of the window. 
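+			// diff < 0 means seq is ahead of latestSeq (modulo maxSeq+1),
+			// so advance the window before recording the bit.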
+ d.mask.Lsh(uint(-diff)) + d.latestSeq = seq + } + d.mask.SetBit(uint(d.latestSeq - seq)) + }, true +} diff --git a/vendor/github.com/pion/udp/.gitignore b/vendor/github.com/pion/udp/.gitignore new file mode 100644 index 000000000..f977e7485 --- /dev/null +++ b/vendor/github.com/pion/udp/.gitignore @@ -0,0 +1,25 @@ +### JetBrains IDE ### +##################### +.idea/ + +### Emacs Temporary Files ### +############################# +*~ + +### Folders ### +############### +bin/ +vendor/ +node_modules/ + +### Files ### +############# +*.ivf +*.ogg +tags +cover.out +*.sw[poe] +*.wasm +examples/sfu-ws/cert.pem +examples/sfu-ws/key.pem +wasm_exec.js diff --git a/vendor/github.com/pion/udp/.golangci.yml b/vendor/github.com/pion/udp/.golangci.yml new file mode 100644 index 000000000..48696f16b --- /dev/null +++ b/vendor/github.com/pion/udp/.golangci.yml @@ -0,0 +1,116 @@ +linters-settings: + govet: + check-shadowing: true + misspell: + locale: US + exhaustive: + default-signifies-exhaustive: true + gomodguard: + blocked: + modules: + - github.com/pkg/errors: + recommendations: + - errors + +linters: + enable: + - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + - bidichk # Checks for dangerous unicode character sequences + - bodyclose # checks whether HTTP response body is closed successfully + - contextcheck # check the function whether use a non-inherited context + - decorder # check declaration order and count of types, constants, variables and functions + - depguard # Go linter that checks if package imports are in a list of acceptable packages + - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + - dupl # Tool for code clone detection + - durationcheck # check for two durations multiplied together + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted. + - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - exhaustive # check exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - forcetypeassert # finds forced type assertions + - gci # Gci control golang package import order and make it always deterministic. + - gochecknoglobals # Checks that no globals are present in Go code + - gochecknoinits # Checks that no init functions are present in Go code + - gocognit # Computes and checks the cognitive complexity of functions + - goconst # Finds repeated strings that could be replaced by a constant + - gocritic # The most opinionated Go source code linter + - godox # Tool for detection of FIXME, TODO and other comment keywords + - goerr113 # Golang linter to check the errors handling expressions + - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + - gofumpt # Gofumpt checks whether code was gofumpt-ed. + - goheader # Checks is file header matches to pattern + - goimports # Goimports does everything that gofmt does. 
Additionally it checks unused imports + - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. + - goprintffuncname # Checks that printf-like functions are named with `f` at the end + - gosec # Inspects source code for security problems + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - grouper # An analyzer to analyze expression groups. + - importas # Enforces consistent import aliases + - ineffassign # Detects when assignments to existing variables are not used + - misspell # Finds commonly misspelled English words in comments + - nakedret # Finds naked returns in functions greater than a specified function length + - nilerr # Finds the code that returns nil even if it checks that the error is not nil. + - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. + - noctx # noctx finds sending http request without context.Context + - predeclared # find code that shadows one of Go's predeclared identifiers + - revive # golint replacement, finds style mistakes + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - stylecheck # Stylecheck is a replacement for golint + - tagliatelle # Checks the struct tags. + - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unconvert # Remove unnecessary type conversions + - unparam # Reports unused function parameters + - unused # Checks Go code for unused constants, variables, functions and types + - wastedassign # wastedassign finds wasted assignment statements + - whitespace # Tool for detection of leading and trailing whitespace + disable: + - containedctx # containedctx is a linter that detects struct contained context.Context field + - cyclop # checks function and package cyclomatic complexity + - exhaustivestruct # Checks if all struct's fields are initialized + - forbidigo # Forbids identifiers + - funlen # Tool for detection of long functions + - gocyclo # Computes and checks the cyclomatic complexity of functions + - godot # Check if comments end in a period + - gomnd # An analyzer to detect magic numbers. + - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - ireturn # Accept Interfaces, Return Concrete Types + - lll # Reports long lines + - maintidx # maintidx measures the maintainability index of each function. 
+ - makezero # Finds slice declarations with non-zero initial length + - maligned # Tool to detect Go structs that would take less memory if their fields were sorted + - nestif # Reports deeply nested if statements + - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity + - nolintlint # Reports ill-formed or insufficient nolint directives + - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - prealloc # Finds slice declarations that could potentially be preallocated + - promlinter # Check Prometheus metrics naming via promlint + - rowserrcheck # checks whether Err of rows is checked successfully + - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. + - testpackage # linter that makes you use a separate _test package + - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - varnamelen # checks that the length of a variable's name matches its scope + - wrapcheck # Checks that errors returned from external packages are wrapped + - wsl # Whitespace Linter - Forces you to use empty lines! + +issues: + exclude-use-default: false + exclude-rules: + # Allow complex tests, better to be self contained + - path: _test\.go + linters: + - gocognit + + # Allow complex main function in examples + - path: examples + text: "of func `main` is high" + linters: + - gocognit + +run: + skip-dirs-use-default: false diff --git a/vendor/github.com/pion/udp/.goreleaser.yml b/vendor/github.com/pion/udp/.goreleaser.yml new file mode 100644 index 000000000..2caa5fbd3 --- /dev/null +++ b/vendor/github.com/pion/udp/.goreleaser.yml @@ -0,0 +1,2 @@ +builds: +- skip: true diff --git a/vendor/github.com/pion/udp/AUTHORS.txt b/vendor/github.com/pion/udp/AUTHORS.txt new file mode 100644 index 000000000..25969ecc5 --- /dev/null +++ b/vendor/github.com/pion/udp/AUTHORS.txt @@ -0,0 +1,15 @@ +# Thank you to everyone that made Pion possible. If you are interested in contributing +# we would love to have you https://github.com/pion/webrtc/wiki/Contributing +# +# This file is auto generated, using git to list all individuals contributors. +# see https://github.com/pion/.goassets/blob/master/scripts/generate-authors.sh for the scripting +Atsushi Watanabe +Daniel Beseda +Jozef Kralik +Mathias Fredriksson +Michiel De Backker +sterling.deng +ZHENK + +# List of contributors not appearing in Git history + diff --git a/vendor/github.com/pion/udp/LICENSE b/vendor/github.com/pion/udp/LICENSE new file mode 100644 index 000000000..81f990d60 --- /dev/null +++ b/vendor/github.com/pion/udp/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pion/udp/README.md b/vendor/github.com/pion/udp/README.md new file mode 100644 index 000000000..af0b9d5fb --- /dev/null +++ b/vendor/github.com/pion/udp/README.md @@ -0,0 +1,35 @@ +

+# Pion UDP
+
+A connection-oriented listener over a UDP PacketConn
+
+[badges: Pion UDP · Slack Widget · Build Status · GoDoc · Coverage Status · Go Report Card · License: MIT]
+
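+A minimal usage sketch (the address is illustrative and error handling is kept short):
+
+```go
+laddr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 4444}
+listener, err := udp.Listen("udp", laddr)
+if err != nil {
+	panic(err)
+}
+defer listener.Close()
+
+// Accept blocks until a packet arrives from a new remote address; the
+// returned net.Conn then only ever reads that remote's packets.
+conn, err := listener.Accept()
+if err != nil {
+	panic(err)
+}
+defer conn.Close()
+```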
+ +### Roadmap +This package is used in the [DTLS](https://github.com/pion/dtls) and [SCTP](https://github.com/pion/sctp) transport to provide a connection-oriented listener over a UDP. + +### Community +Pion has an active community on the [Golang Slack](https://pion.ly/slack/). You can also use [Pion mailing list](https://groups.google.com/forum/#!forum/pion). + +We are always looking to support **your projects**. Please reach out if you have something to build! + +If you need commercial support or don't want to use public methods you can contact us at [team@pion.ly](mailto:team@pion.ly) + +### Contributing +Check out the **[contributing wiki](https://github.com/pion/webrtc/wiki/Contributing)** to join the group of amazing people making this project possible: + +### License +MIT License - see [LICENSE](LICENSE) for full text diff --git a/vendor/github.com/pion/udp/codecov.yml b/vendor/github.com/pion/udp/codecov.yml new file mode 100644 index 000000000..085200a48 --- /dev/null +++ b/vendor/github.com/pion/udp/codecov.yml @@ -0,0 +1,20 @@ +# +# DO NOT EDIT THIS FILE +# +# It is automatically copied from https://github.com/pion/.goassets repository. +# + +coverage: + status: + project: + default: + # Allow decreasing 2% of total coverage to avoid noise. + threshold: 2% + patch: + default: + target: 70% + only_pulls: true + +ignore: + - "examples/*" + - "examples/**/*" diff --git a/vendor/github.com/pion/udp/conn.go b/vendor/github.com/pion/udp/conn.go new file mode 100644 index 000000000..926a81268 --- /dev/null +++ b/vendor/github.com/pion/udp/conn.go @@ -0,0 +1,312 @@ +// Package udp provides a connection-oriented listener over a UDP PacketConn +package udp + +import ( + "context" + "errors" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/pion/transport/v2/deadline" + "github.com/pion/transport/v2/packetio" + pkgSync "github.com/pion/udp/pkg/sync" +) + +const ( + receiveMTU = 8192 + defaultListenBacklog = 128 // same as Linux default +) + +// Typed errors +var ( + ErrClosedListener = errors.New("udp: listener closed") + ErrListenQueueExceeded = errors.New("udp: listen queue exceeded") +) + +// listener augments a connection-oriented Listener over a UDP PacketConn +type listener struct { + pConn *net.UDPConn + + accepting atomic.Value // bool + acceptCh chan *Conn + doneCh chan struct{} + doneOnce sync.Once + acceptFilter func([]byte) bool + readBufferPool *sync.Pool + + connLock sync.Mutex + conns map[string]*Conn + connWG *pkgSync.WaitGroup + + readWG sync.WaitGroup + errClose atomic.Value // error +} + +// Accept waits for and returns the next connection to the listener. +func (l *listener) Accept() (net.Conn, error) { + select { + case c := <-l.acceptCh: + l.connWG.Add(1) + return c, nil + + case <-l.doneCh: + return nil, ErrClosedListener + } +} + +// Close closes the listener. +// Any blocked Accept operations will be unblocked and return errors. 
+func (l *listener) Close() error {
+	var err error
+	l.doneOnce.Do(func() {
+		l.accepting.Store(false)
+		close(l.doneCh)
+
+		l.connLock.Lock()
+		// Close unaccepted connections
+	L_CLOSE:
+		for {
+			select {
+			case c := <-l.acceptCh:
+				close(c.doneCh)
+				delete(l.conns, c.rAddr.String())
+
+			default:
+				break L_CLOSE
+			}
+		}
+		nConns := len(l.conns)
+		l.connLock.Unlock()
+
+		l.connWG.Done()
+
+		if nConns == 0 {
+			// Wait if this is the final connection
+			l.readWG.Wait()
+			if errClose, ok := l.errClose.Load().(error); ok {
+				err = errClose
+			}
+		} else {
+			err = nil
+		}
+	})
+
+	return err
+}
+
+// Addr returns the listener's network address.
+func (l *listener) Addr() net.Addr {
+	return l.pConn.LocalAddr()
+}
+
+// ListenConfig stores options for listening to an address.
+type ListenConfig struct {
+	// Backlog defines the maximum length of the queue of pending
+	// connections. It is the equivalent of the backlog argument of
+	// the POSIX listen function.
+	// If a connection request arrives when the queue is full,
+	// the request will be silently discarded, unlike TCP.
+	// Set zero to use the default value 128, which is the same as
+	// the Linux default.
+	Backlog int
+
+	// AcceptFilter determines whether the new conn should be made for
+	// the incoming packet. If not set, any packet creates new conn.
+	AcceptFilter func([]byte) bool
+}
+
+// Listen creates a new listener based on the ListenConfig.
+func (lc *ListenConfig) Listen(network string, laddr *net.UDPAddr) (net.Listener, error) {
+	if lc.Backlog == 0 {
+		lc.Backlog = defaultListenBacklog
+	}
+
+	conn, err := net.ListenUDP(network, laddr)
+	if err != nil {
+		return nil, err
+	}
+
+	l := &listener{
+		pConn:        conn,
+		acceptCh:     make(chan *Conn, lc.Backlog),
+		conns:        make(map[string]*Conn),
+		doneCh:       make(chan struct{}),
+		acceptFilter: lc.AcceptFilter,
+		readBufferPool: &sync.Pool{
+			New: func() interface{} {
+				buf := make([]byte, receiveMTU)
+				return &buf
+			},
+		},
+		connWG: pkgSync.NewWaitGroup(),
+	}
+
+	l.accepting.Store(true)
+	l.connWG.Add(1)
+	l.readWG.Add(2) // wait readLoop and Close execution routine
+
+	go l.readLoop()
+	go func() {
+		l.connWG.Wait()
+		if err := l.pConn.Close(); err != nil {
+			l.errClose.Store(err)
+		}
+		l.readWG.Done()
+	}()
+
+	return l, nil
+}
+
+// Listen creates a new listener using default ListenConfig.
+func Listen(network string, laddr *net.UDPAddr) (net.Listener, error) {
+	return (&ListenConfig{}).Listen(network, laddr)
+}
+
+// readLoop has two tasks:
+//  1. Dispatching incoming packets to the correct Conn.
+//     It can therefore not be ended until all Conns are closed.
+//  2. Creating a new Conn when receiving from a new remote.
+func (l *listener) readLoop() { + defer l.readWG.Done() + + for { + buf, ok := l.readBufferPool.Get().(*[]byte) + if !ok { + return + } + + n, raddr, err := l.pConn.ReadFrom(*buf) + if err != nil { + return + } + conn, ok, err := l.getConn(raddr, (*buf)[:n]) + if err != nil { + continue + } + if ok { + _, _ = conn.buffer.Write((*buf)[:n]) + } + } +} + +func (l *listener) getConn(raddr net.Addr, buf []byte) (*Conn, bool, error) { + l.connLock.Lock() + defer l.connLock.Unlock() + conn, ok := l.conns[raddr.String()] + if !ok { + if isAccepting, ok := l.accepting.Load().(bool); !isAccepting || !ok { + return nil, false, ErrClosedListener + } + if l.acceptFilter != nil { + if !l.acceptFilter(buf) { + return nil, false, nil + } + } + conn = l.newConn(raddr) + select { + case l.acceptCh <- conn: + l.conns[raddr.String()] = conn + default: + return nil, false, ErrListenQueueExceeded + } + } + return conn, true, nil +} + +// Conn augments a connection-oriented connection over a UDP PacketConn +type Conn struct { + listener *listener + + rAddr net.Addr + + buffer *packetio.Buffer + + doneCh chan struct{} + doneOnce sync.Once + + writeDeadline *deadline.Deadline +} + +func (l *listener) newConn(rAddr net.Addr) *Conn { + return &Conn{ + listener: l, + rAddr: rAddr, + buffer: packetio.NewBuffer(), + doneCh: make(chan struct{}), + writeDeadline: deadline.New(), + } +} + +// Read reads from c into p +func (c *Conn) Read(p []byte) (int, error) { + return c.buffer.Read(p) +} + +// Write writes len(p) bytes from p to the DTLS connection +func (c *Conn) Write(p []byte) (n int, err error) { + select { + case <-c.writeDeadline.Done(): + return 0, context.DeadlineExceeded + default: + } + return c.listener.pConn.WriteTo(p, c.rAddr) +} + +// Close closes the conn and releases any Read calls +func (c *Conn) Close() error { + var err error + c.doneOnce.Do(func() { + c.listener.connWG.Done() + close(c.doneCh) + c.listener.connLock.Lock() + delete(c.listener.conns, c.rAddr.String()) + nConns := len(c.listener.conns) + c.listener.connLock.Unlock() + + if isAccepting, ok := c.listener.accepting.Load().(bool); nConns == 0 && !isAccepting && ok { + // Wait if this is the final connection + c.listener.readWG.Wait() + if errClose, ok := c.listener.errClose.Load().(error); ok { + err = errClose + } + } else { + err = nil + } + + if errBuf := c.buffer.Close(); errBuf != nil && err == nil { + err = errBuf + } + }) + + return err +} + +// LocalAddr implements net.Conn.LocalAddr +func (c *Conn) LocalAddr() net.Addr { + return c.listener.pConn.LocalAddr() +} + +// RemoteAddr implements net.Conn.RemoteAddr +func (c *Conn) RemoteAddr() net.Addr { + return c.rAddr +} + +// SetDeadline implements net.Conn.SetDeadline +func (c *Conn) SetDeadline(t time.Time) error { + c.writeDeadline.Set(t) + return c.SetReadDeadline(t) +} + +// SetReadDeadline implements net.Conn.SetDeadline +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.buffer.SetReadDeadline(t) +} + +// SetWriteDeadline implements net.Conn.SetDeadline +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline.Set(t) + // Write deadline of underlying connection should not be changed + // since the connection can be shared. + return nil +} diff --git a/vendor/github.com/pion/udp/pkg/sync/waitgroup.go b/vendor/github.com/pion/udp/pkg/sync/waitgroup.go new file mode 100644 index 000000000..dcd1c89bb --- /dev/null +++ b/vendor/github.com/pion/udp/pkg/sync/waitgroup.go @@ -0,0 +1,68 @@ +// Package sync extends basic synchronization primitives. 
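+//
+// Unlike the standard library's sync.WaitGroup, the WaitGroup defined here
+// allows Add to be called while another goroutine is blocked in Wait; the
+// udp listener relies on this to register newly accepted conns while a
+// background goroutine waits to close the underlying PacketConn.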
+package sync
+
+import (
+	"sync"
+)
+
+// A WaitGroup waits for a collection of goroutines to finish.
+// The main goroutine calls Add to set the number of
+// goroutines to wait for. Then each of the goroutines
+// runs and calls Done when finished. At the same time,
+// Wait can be used to block until all goroutines have finished.
+//
+// WaitGroups in the sync package do not allow adding or
+// subtracting from the counter while another goroutine is
+// waiting, while this one does.
+//
+// A WaitGroup must not be copied after first use.
+//
+// In the terminology of the Go memory model, a call to Done
+// "synchronizes before" the return of any Wait call that it unblocks.
+type WaitGroup struct {
+	c     int64
+	mutex sync.Mutex
+	cond  *sync.Cond
+}
+
+// NewWaitGroup creates a new WaitGroup.
+func NewWaitGroup() *WaitGroup {
+	wg := &WaitGroup{}
+	wg.cond = sync.NewCond(&wg.mutex)
+	return wg
+}
+
+// Add adds delta, which may be negative, to the WaitGroup counter.
+// If the counter becomes zero, all goroutines blocked on Wait are released.
+// If the counter goes negative, Add panics.
+func (wg *WaitGroup) Add(delta int) {
+	wg.mutex.Lock()
+	defer wg.mutex.Unlock()
+	wg.c += int64(delta)
+	if wg.c < 0 {
+		panic("udp: negative WaitGroup counter") // nolint
+	}
+	wg.cond.Signal()
+}
+
+// Done decrements the WaitGroup counter by one.
+func (wg *WaitGroup) Done() {
+	wg.Add(-1)
+}
+
+// Wait blocks until the WaitGroup counter is zero.
+func (wg *WaitGroup) Wait() {
+	wg.mutex.Lock()
+	defer wg.mutex.Unlock()
+	for {
+		c := wg.c
+		switch {
+		case c == 0:
+			// wake another goroutine if there is one
+			wg.cond.Signal()
+			return
+		case c < 0:
+			panic("udp: negative WaitGroup counter") // nolint
+		}
+		wg.cond.Wait()
+	}
+}
diff --git a/vendor/github.com/pion/udp/renovate.json b/vendor/github.com/pion/udp/renovate.json
new file mode 100644
index 000000000..f1bb98c6a
--- /dev/null
+++ b/vendor/github.com/pion/udp/renovate.json
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "github>pion/renovate-config"
+  ]
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8a4f49a4c..519db348a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -844,9 +844,7 @@ func (h *histogram) Write(out *dto.Metric) error {
 		}}
 	}
 
-	// If exemplars are not configured, the cap will be 0.
-	// So append is not needed in this case.
-	if cap(h.nativeExemplars.exemplars) > 0 {
+	if h.nativeExemplars.isEnabled() {
 		h.nativeExemplars.Lock()
 		his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
h.nativeExemplars.Unlock() @@ -1665,6 +1663,10 @@ type nativeExemplars struct { exemplars []*dto.Exemplar } +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { if ttl == 0 { ttl = 5 * time.Minute @@ -1686,7 +1688,7 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { } func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { - if n.ttl == -1 { + if !n.isEnabled() { return } diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go new file mode 100644 index 000000000..197d95e5c --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info.go @@ -0,0 +1,125 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "bytes" + "fmt" + "runtime" + "runtime/debug" + "strings" + "text/template" +) + +// Build information. Populated at build-time. +var ( + Version string + Revision string + Branch string + BuildUser string + BuildDate string + GoVersion = runtime.Version() + GoOS = runtime.GOOS + GoArch = runtime.GOARCH + + computedRevision string + computedTags string +) + +// versionInfoTmpl contains the template used by Info. +var versionInfoTmpl = ` +{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}}) + build user: {{.buildUser}} + build date: {{.buildDate}} + go version: {{.goVersion}} + platform: {{.platform}} + tags: {{.tags}} +` + +// Print returns version information. +func Print(program string) string { + m := map[string]string{ + "program": program, + "version": Version, + "revision": GetRevision(), + "branch": Branch, + "buildUser": BuildUser, + "buildDate": BuildDate, + "goVersion": GoVersion, + "platform": GoOS + "/" + GoArch, + "tags": GetTags(), + } + t := template.Must(template.New("version").Parse(versionInfoTmpl)) + + var buf bytes.Buffer + if err := t.ExecuteTemplate(&buf, "version", m); err != nil { + panic(err) + } + return strings.TrimSpace(buf.String()) +} + +// Info returns version, branch and revision information. +func Info() string { + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, GetRevision()) +} + +// BuildContext returns goVersion, platform, buildUser and buildDate information. 
+func BuildContext() string { + return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, GetTags()) +} + +func GetRevision() string { + if Revision != "" { + return Revision + } + return computedRevision +} + +func GetTags() string { + return computedTags +} + +func init() { + computedRevision, computedTags = computeRevision() +} + +func computeRevision() (string, string) { + var ( + rev = "unknown" + tags = "unknown" + modified bool + ) + + buildInfo, ok := debug.ReadBuildInfo() + if !ok { + return rev, tags + } + for _, v := range buildInfo.Settings { + if v.Key == "vcs.revision" { + rev = v.Value + } + if v.Key == "vcs.modified" { + if v.Value == "true" { + modified = true + } + } + if v.Key == "-tags" { + tags = v.Value + } + } + if modified { + return rev + "-modified", tags + } + return rev, tags +} diff --git a/vendor/github.com/prometheus/prometheus/LICENSE b/vendor/github.com/prometheus/prometheus/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/prometheus/NOTICE b/vendor/github.com/prometheus/prometheus/NOTICE new file mode 100644 index 000000000..5e4f50989 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/NOTICE @@ -0,0 +1,98 @@ +The Prometheus systems and service monitoring server +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (https://soundcloud.com/). + + +The following components are included in this product: + +Bootstrap +https://getbootstrap.com +Copyright 2011-2014 Twitter, Inc. 
+Licensed under the MIT License + +bootstrap3-typeahead.js +https://github.com/bassjobsen/Bootstrap-3-Typeahead +Original written by @mdo and @fat +Copyright 2014 Bass Jobsen @bassjobsen +Licensed under the Apache License, Version 2.0 + +fuzzy +https://github.com/mattyork/fuzzy +Original written by @mattyork +Copyright 2012 Matt York +Licensed under the MIT License + +bootstrap-datetimepicker.js +https://github.com/Eonasdan/bootstrap-datetimepicker +Copyright 2015 Jonathan Peterson (@Eonasdan) +Licensed under the MIT License + +moment.js +https://github.com/moment/moment/ +Copyright JS Foundation and other contributors +Licensed under the MIT License + +Rickshaw +https://github.com/shutterstock/rickshaw +Copyright 2011-2014 by Shutterstock Images, LLC +See https://github.com/shutterstock/rickshaw/blob/master/LICENSE for license details + +mustache.js +https://github.com/janl/mustache.js +Copyright 2009 Chris Wanstrath (Ruby) +Copyright 2010-2014 Jan Lehnardt (JavaScript) +Copyright 2010-2015 The mustache.js community +Licensed under the MIT License + +jQuery +https://jquery.org +Copyright jQuery Foundation and other contributors +Licensed under the MIT License + +Protocol Buffers for Go with Gadgets +https://github.com/gogo/protobuf/ +Copyright (c) 2013, The GoGo Authors. +See source code for license details. + +Go support for leveled logs, analogous to +https://code.google.com/p/google-glog/ +Copyright 2013 Google Inc. +Licensed under the Apache License, Version 2.0 + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 + +DNS library in Go +https://miek.nl/2014/august/16/go-dns-package/ +Copyright 2009 The Go Authors, 2011 Miek Gieben +See https://github.com/miekg/dns/blob/master/LICENSE for license details. + +LevelDB key/value database in Go +https://github.com/syndtr/goleveldb +Copyright 2012 Suryandaru Triandana +See https://github.com/syndtr/goleveldb/blob/master/LICENSE for license details. + +gosnappy - a fork of code.google.com/p/snappy-go +https://github.com/syndtr/gosnappy +Copyright 2011 The Snappy-Go Authors +See https://github.com/syndtr/gosnappy/blob/master/LICENSE for license details. + +go-zookeeper - Native ZooKeeper client for Go +https://github.com/samuel/go-zookeeper +Copyright (c) 2013, Samuel Stauffer +See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details. + +Time series compression algorithm from Facebook's Gorilla paper +https://github.com/dgryski/go-tsz +Copyright (c) 2015,2016 Damian Gryski +See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details. + +We also use code from a large number of npm packages. For details, see: +- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package.json +- https://github.com/prometheus/prometheus/blob/master/web/ui/react-app/package-lock.json +- The individual package licenses as copied from the node_modules directory can be found in + the npm_licenses.tar.bz2 archive in release tarballs and Docker images. 
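For reference, a minimal sketch of how the vendored version package above is
typically consumed (the program name "netobserv" is illustrative; release
builds usually inject the variables with
`go build -ldflags "-X github.com/prometheus/common/version.Version=1.2.3"`):

	package main

	import (
		"fmt"

		"github.com/prometheus/common/version"
	)

	func main() {
		// Print renders versionInfoTmpl; Revision and -tags fall back to
		// values recovered from debug.ReadBuildInfo by computeRevision.
		fmt.Println(version.Print("netobserv"))
		fmt.Println(version.Info())
		fmt.Println(version.BuildContext())
	}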
diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go new file mode 100644 index 000000000..5c11cc2ee --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/labels/labels.go @@ -0,0 +1,474 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "encoding/json" + "sort" + "strconv" + + "github.com/cespare/xxhash/v2" +) + +// Well-known label names used by Prometheus components. +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" + + labelSep = '\xfe' +) + +var seps = []byte{'\xff'} + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +// Labels is a sorted set of labels. Order has to be guaranteed upon +// instantiation. +type Labels []Label + +func (ls Labels) Len() int { return len(ls) } +func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } +func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } + +func (ls Labels) String() string { + var b bytes.Buffer + + b.WriteByte('{') + for i, l := range ls { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.WriteString(strconv.Quote(l.Value)) + } + b.WriteByte('}') + return b.String() +} + +// Bytes returns ls as a byte slice. +// It uses an byte invalid character as a separator and so should not be used for printing. +func (ls Labels) Bytes(buf []byte) []byte { + b := bytes.NewBuffer(buf[:0]) + b.WriteByte(labelSep) + for i, l := range ls { + if i > 0 { + b.WriteByte(seps[0]) + } + b.WriteString(l.Name) + b.WriteByte(seps[0]) + b.WriteString(l.Value) + } + return b.Bytes() +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. 
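+// For example, given `{__name__="up", instance="host1", job="api"}`,
+// MatchLabels(true, "job") returns `{job="api"}`, while
+// MatchLabels(false, "job") returns `{instance="host1"}`: the metric name
+// is always excluded when 'on' is false.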
+func (ls Labels) MatchLabels(on bool, names ...string) Labels { + matchedLabels := Labels{} + + nameSet := map[string]struct{}{} + for _, n := range names { + nameSet[n] = struct{}{} + } + + for _, v := range ls { + if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) { + matchedLabels = append(matchedLabels, v) + } + } + + return matchedLabels +} + +// Hash returns a hash value for the label set. +func (ls Labels) Hash() uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, seps[0]) + b = append(b, v.Value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + i, j := 0, 0 + for i < len(ls) && j < len(names) { + if names[j] < ls[i].Name { + j++ + } else if ls[i].Name < names[j] { + i++ + } else { + b = append(b, ls[i].Name...) + b = append(b, seps[0]) + b = append(b, ls[i].Value...) + b = append(b, seps[0]) + i++ + j++ + } + } + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ + } + if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { + continue + } + b = append(b, ls[i].Name...) + b = append(b, seps[0]) + b = append(b, ls[i].Value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// WithLabels returns a new labels.Labels from ls that only contains labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) WithLabels(names ...string) Labels { + ret := make([]Label, 0, len(ls)) + + i, j := 0, 0 + for i < len(ls) && j < len(names) { + if names[j] < ls[i].Name { + j++ + } else if ls[i].Name < names[j] { + i++ + } else { + ret = append(ret, ls[i]) + i++ + j++ + } + } + return ret +} + +// WithoutLabels returns a new labels.Labels from ls that contains labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) WithoutLabels(names ...string) Labels { + ret := make([]Label, 0, len(ls)) + + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ + } + if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { + continue + } + ret = append(ret, ls[i]) + } + return ret +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + res := make(Labels, len(ls)) + copy(res, ls) + return res +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + for _, l := range ls { + if l.Name == name { + return l.Value + } + } + return "" +} + +// Has returns true if the label with the given name is present. 
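+// For example, FromStrings("job", "api").Has("job") is true, while
+// Has("instance") on the same set is false.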
+func (ls Labels) Has(name string) bool { + for _, l := range ls { + if l.Name == name { + return true + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + for i, l := range ls { + if i == 0 { + continue + } + if l.Name == ls[i-1].Name { + return l.Name, true + } + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for _, v := range ls { + if v.Value != "" { + continue + } + // Do not copy the slice until it's necessary. + els := make(Labels, 0, len(ls)-1) + for _, v := range ls { + if v.Value != "" { + els = append(els, v) + } + } + return els + } + return ls +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + if len(ls) != len(o) { + return false + } + for i, l := range ls { + if l.Name != o[i].Name || l.Value != o[i].Value { + return false + } + } + return true +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string, len(ls)) + for _, l := range ls { + m[l.Name] = l.Value + } + return m +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + set := make(Labels, 0, len(ls)) + for _, l := range ls { + set = append(set, l) + } + sort.Sort(set) + + return set +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + var res Labels + for i := 0; i < len(ss); i += 2 { + res = append(res, Label{Name: ss[i], Value: ss[i+1]}) + } + + sort.Sort(res) + return res +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + l := len(a) + if len(b) < l { + l = len(b) + } + + for i := 0; i < l; i++ { + if a[i].Name != b[i].Name { + if a[i].Name < b[i].Name { + return -1 + } + return 1 + } + if a[i].Value != b[i].Value { + if a[i].Value < b[i].Value { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return len(a) - len(b) +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + for _, l := range b.base { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + } +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Set the name/value pair as a label. 
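+// An empty value is equivalent to deleting the label, so, for example,
+// NewBuilder(FromStrings("job", "api")).Set("env", "prod").Set("job", "").Labels()
+// yields `{env="prod"}`.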
+func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +// Labels returns the labels from the builder. If no modifications +// were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + // In the general case, labels are removed, modified or moved + // rather than added. + res := make(Labels, 0, len(b.base)) +Outer: + for _, l := range b.base { + for _, n := range b.del { + if l.Name == n { + continue Outer + } + } + for _, la := range b.add { + if l.Name == la.Name { + continue Outer + } + } + res = append(res, l) + } + res = append(res, b.add...) + sort.Sort(res) + + return res +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go b/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go new file mode 100644 index 000000000..88d463233 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/labels/matcher.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "fmt" +) + +// MatchType is an enum for label matching types. +type MatchType int + +// Possible MatchTypes. +const ( + MatchEqual MatchType = iota + MatchNotEqual + MatchRegexp + MatchNotRegexp +) + +func (m MatchType) String() string { + typeToStr := map[MatchType]string{ + MatchEqual: "=", + MatchNotEqual: "!=", + MatchRegexp: "=~", + MatchNotRegexp: "!~", + } + if str, ok := typeToStr[m]; ok { + return str + } + panic("unknown match type") +} + +// Matcher models the matching of a label. +type Matcher struct { + Type MatchType + Name string + Value string + + re *FastRegexMatcher +} + +// NewMatcher returns a matcher object. +func NewMatcher(t MatchType, n, v string) (*Matcher, error) { + m := &Matcher{ + Type: t, + Name: n, + Value: v, + } + if t == MatchRegexp || t == MatchNotRegexp { + re, err := NewFastRegexMatcher(v) + if err != nil { + return nil, err + } + m.re = re + } + return m, nil +} + +// MustNewMatcher panics on error - only for use in tests! +func MustNewMatcher(mt MatchType, name, val string) *Matcher { + m, err := NewMatcher(mt, name, val) + if err != nil { + panic(err) + } + return m +} + +func (m *Matcher) String() string { + return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value) +} + +// Matches returns whether the matcher matches the given string value. +func (m *Matcher) Matches(s string) bool { + switch m.Type { + case MatchEqual: + return s == m.Value + case MatchNotEqual: + return s != m.Value + case MatchRegexp: + return m.re.MatchString(s) + case MatchNotRegexp: + return !m.re.MatchString(s) + } + panic("labels.Matcher.Matches: invalid match type") +} + +// Inverse returns a matcher that matches the opposite. 
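+// For example, the inverse of `job="api"` is `job!="api"`, and the inverse
+// of `path=~"/v1/.*"` is `path!~"/v1/.*"`.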
+func (m *Matcher) Inverse() (*Matcher, error) { + switch m.Type { + case MatchEqual: + return NewMatcher(MatchNotEqual, m.Name, m.Value) + case MatchNotEqual: + return NewMatcher(MatchEqual, m.Name, m.Value) + case MatchRegexp: + return NewMatcher(MatchNotRegexp, m.Name, m.Value) + case MatchNotRegexp: + return NewMatcher(MatchRegexp, m.Name, m.Value) + } + panic("labels.Matcher.Matches: invalid match type") +} + +// GetRegexString returns the regex string. +func (m *Matcher) GetRegexString() string { + if m.re == nil { + return "" + } + return m.re.GetRegexString() +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go new file mode 100644 index 000000000..eb2b07995 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/labels/regexp.go @@ -0,0 +1,107 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "regexp" + "regexp/syntax" + "strings" +) + +type FastRegexMatcher struct { + re *regexp.Regexp + prefix string + suffix string + contains string +} + +func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + re, err := regexp.Compile("^(?:" + v + ")$") + if err != nil { + return nil, err + } + + parsed, err := syntax.Parse(v, syntax.Perl) + if err != nil { + return nil, err + } + + m := &FastRegexMatcher{ + re: re, + } + + if parsed.Op == syntax.OpConcat { + m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) + } + + return m, nil +} + +func (m *FastRegexMatcher) MatchString(s string) bool { + if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { + return false + } + if m.suffix != "" && !strings.HasSuffix(s, m.suffix) { + return false + } + if m.contains != "" && !strings.Contains(s, m.contains) { + return false + } + return m.re.MatchString(s) +} + +func (m *FastRegexMatcher) GetRegexString() string { + return m.re.String() +} + +// optimizeConcatRegex returns literal prefix/suffix text that can be safely +// checked against the label value before running the regexp matcher. +func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) { + sub := r.Sub + + // We can safely remove begin and end text matchers respectively + // at the beginning and end of the regexp. + if len(sub) > 0 && sub[0].Op == syntax.OpBeginText { + sub = sub[1:] + } + if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText { + sub = sub[:len(sub)-1] + } + + if len(sub) == 0 { + return + } + + // Given Prometheus regex matchers are always anchored to the begin/end + // of the text, if the first/last operations are literals, we can safely + // treat them as prefix/suffix. + if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 { + prefix = string(sub[0].Rune) + } + if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 { + suffix = string(sub[last].Rune) + } + + // If contains any literal which is not a prefix/suffix, we keep the + // 1st one. 
We do not keep the whole list of literals to simplify the + // fast path. + for i := 1; i < len(sub)-1; i++ { + if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 { + contains = string(sub[i].Rune) + break + } + } + + return +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go new file mode 100644 index 000000000..319ee6184 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/labels/test_utils.go @@ -0,0 +1,87 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bufio" + "os" + "sort" + "strings" + + "github.com/pkg/errors" +) + +// Slice is a sortable slice of label sets. +type Slice []Labels + +func (s Slice) Len() int { return len(s) } +func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } + +// Selector holds constraints for matching against a label set. +type Selector []*Matcher + +// Matches returns whether the labels satisfy all matchers. +func (s Selector) Matches(labels Labels) bool { + for _, m := range s { + if v := labels.Get(m.Name); !m.Matches(v) { + return false + } + } + return true +} + +// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful +// to load testing data. +func ReadLabels(fn string, n int) ([]Labels, error) { + f, err := os.Open(fn) + if err != nil { + return nil, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + + var mets []Labels + hashes := map[uint64]struct{}{} + i := 0 + + for scanner.Scan() && i < n { + m := make(Labels, 0, 10) + + r := strings.NewReplacer("\"", "", "{", "", "}", "") + s := r.Replace(scanner.Text()) + + labelChunks := strings.Split(s, ",") + for _, labelChunk := range labelChunks { + split := strings.Split(labelChunk, ":") + m = append(m, Label{Name: split[0], Value: split[1]}) + } + // Order of the k/v labels matters, don't assume we'll always receive them already sorted. + sort.Sort(m) + + h := m.Hash() + if _, ok := hashes[h]; ok { + continue + } + mets = append(mets, m) + hashes[h] = struct{}{} + i++ + } + + if i != n { + return mets, errors.Errorf("requested %d metrics but found %d", n, i) + } + return mets, nil +} diff --git a/vendor/github.com/prometheus/prometheus/pkg/value/value.go b/vendor/github.com/prometheus/prometheus/pkg/value/value.go new file mode 100644 index 000000000..655ce852d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/pkg/value/value.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package value + +import ( + "math" +) + +const ( + // NormalNaN is a quiet NaN. This is also math.NaN(). + NormalNaN uint64 = 0x7ff8000000000001 + + // StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0. + // This value is chosen with many leading 0s, so we have scope to store more + // complicated values in the future. It is 2 rather than 1 to make + // it easier to distinguish from the NormalNaN by a human when debugging. + StaleNaN uint64 = 0x7ff0000000000002 +) + +// IsStaleNaN returns true when the provided NaN value is a stale marker. +func IsStaleNaN(v float64) bool { + return math.Float64bits(v) == StaleNaN +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go new file mode 100644 index 000000000..de82d6725 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -0,0 +1,434 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "context" + "time" + + "github.com/pkg/errors" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" +) + +// Node is a generic interface for all nodes in an AST. +// +// Whenever numerous nodes are listed such as in a switch-case statement +// or a chain of function definitions (e.g. String(), PromQLExpr(), etc.) convention is +// to list them as follows: +// +// - Statements +// - statement types (alphabetical) +// - ... +// - Expressions +// - expression types (alphabetical) +// - ... +// +type Node interface { + // String representation of the node that returns the given node when parsed + // as part of a valid query. + String() string + + // PositionRange returns the position of the AST Node in the query string. + PositionRange() PositionRange +} + +// Statement is a generic interface for all statements. +type Statement interface { + Node + + // PromQLStmt ensures that no other type accidentally implements the interface + // nolint:unused + PromQLStmt() +} + +// EvalStmt holds an expression and information on the range it should +// be evaluated on. +type EvalStmt struct { + Expr Expr // Expression to be evaluated. + + // The time boundaries for the evaluation. If Start equals End an instant + // is evaluated. + Start, End time.Time + // Time between two evaluated instants for the range [Start:End]. + Interval time.Duration +} + +func (*EvalStmt) PromQLStmt() {} + +// Expr is a generic interface for all expression types. 
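+// Concrete expressions evaluate to one of the PromQL value types (scalar,
+// vector, matrix, or string), which their Type method reports.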
+type Expr interface { + Node + + // Type returns the type the expression evaluates to. It does not perform + // in-depth checks as this is done at parsing-time. + Type() ValueType + // PromQLExpr ensures that no other types accidentally implement the interface. + PromQLExpr() +} + +// Expressions is a list of expression nodes that implements Node. +type Expressions []Expr + +// AggregateExpr represents an aggregation operation on a Vector. +type AggregateExpr struct { + Op ItemType // The used aggregation operation. + Expr Expr // The Vector expression over which is aggregated. + Param Expr // Parameter used by some aggregators. + Grouping []string // The labels by which to group the Vector. + Without bool // Whether to drop the given labels rather than keep them. + PosRange PositionRange +} + +// BinaryExpr represents a binary expression between two child expressions. +type BinaryExpr struct { + Op ItemType // The operation of the expression. + LHS, RHS Expr // The operands on the respective sides of the operator. + + // The matching behavior for the operation if both operands are Vectors. + // If they are not this field is nil. + VectorMatching *VectorMatching + + // If a comparison operator, return 0/1 rather than filtering. + ReturnBool bool +} + +// Call represents a function call. +type Call struct { + Func *Function // The function that was called. + Args Expressions // Arguments used in the call. + + PosRange PositionRange +} + +// MatrixSelector represents a Matrix selection. +type MatrixSelector struct { + // It is safe to assume that this is an VectorSelector + // if the parser hasn't returned an error. + VectorSelector Expr + Range time.Duration + + EndPos Pos +} + +// SubqueryExpr represents a subquery. +type SubqueryExpr struct { + Expr Expr + Range time.Duration + Offset time.Duration + Step time.Duration + + EndPos Pos +} + +// NumberLiteral represents a number. +type NumberLiteral struct { + Val float64 + + PosRange PositionRange +} + +// ParenExpr wraps an expression so it cannot be disassembled as a consequence +// of operator precedence. +type ParenExpr struct { + Expr Expr + PosRange PositionRange +} + +// StringLiteral represents a string. +type StringLiteral struct { + Val string + PosRange PositionRange +} + +// UnaryExpr represents a unary operation on another expression. +// Currently unary operations are only supported for Scalars. +type UnaryExpr struct { + Op ItemType + Expr Expr + + StartPos Pos +} + +// VectorSelector represents a Vector selection. +type VectorSelector struct { + Name string + Offset time.Duration + LabelMatchers []*labels.Matcher + + // The unexpanded seriesSet populated at query preparation time. + UnexpandedSeriesSet storage.SeriesSet + Series []storage.Series + + PosRange PositionRange +} + +// TestStmt is an internal helper statement that allows execution +// of an arbitrary function during handling. It is used to test the Engine. 
+type TestStmt func(context.Context) error + +func (TestStmt) String() string { return "test statement" } +func (TestStmt) PromQLStmt() {} + +func (TestStmt) PositionRange() PositionRange { + return PositionRange{ + Start: -1, + End: -1, + } +} +func (e *AggregateExpr) Type() ValueType { return ValueTypeVector } +func (e *Call) Type() ValueType { return e.Func.ReturnType } +func (e *MatrixSelector) Type() ValueType { return ValueTypeMatrix } +func (e *SubqueryExpr) Type() ValueType { return ValueTypeMatrix } +func (e *NumberLiteral) Type() ValueType { return ValueTypeScalar } +func (e *ParenExpr) Type() ValueType { return e.Expr.Type() } +func (e *StringLiteral) Type() ValueType { return ValueTypeString } +func (e *UnaryExpr) Type() ValueType { return e.Expr.Type() } +func (e *VectorSelector) Type() ValueType { return ValueTypeVector } +func (e *BinaryExpr) Type() ValueType { + if e.LHS.Type() == ValueTypeScalar && e.RHS.Type() == ValueTypeScalar { + return ValueTypeScalar + } + return ValueTypeVector +} + +func (*AggregateExpr) PromQLExpr() {} +func (*BinaryExpr) PromQLExpr() {} +func (*Call) PromQLExpr() {} +func (*MatrixSelector) PromQLExpr() {} +func (*SubqueryExpr) PromQLExpr() {} +func (*NumberLiteral) PromQLExpr() {} +func (*ParenExpr) PromQLExpr() {} +func (*StringLiteral) PromQLExpr() {} +func (*UnaryExpr) PromQLExpr() {} +func (*VectorSelector) PromQLExpr() {} + +// VectorMatchCardinality describes the cardinality relationship +// of two Vectors in a binary operation. +type VectorMatchCardinality int + +const ( + CardOneToOne VectorMatchCardinality = iota + CardManyToOne + CardOneToMany + CardManyToMany +) + +func (vmc VectorMatchCardinality) String() string { + switch vmc { + case CardOneToOne: + return "one-to-one" + case CardManyToOne: + return "many-to-one" + case CardOneToMany: + return "one-to-many" + case CardManyToMany: + return "many-to-many" + } + panic("promql.VectorMatchCardinality.String: unknown match cardinality") +} + +// VectorMatching describes how elements from two Vectors in a binary +// operation are supposed to be matched. +type VectorMatching struct { + // The cardinality of the two Vectors. + Card VectorMatchCardinality + // MatchingLabels contains the labels which define equality of a pair of + // elements from the Vectors. + MatchingLabels []string + // On includes the given label names from matching, + // rather than excluding them. + On bool + // Include contains additional labels that should be included in + // the result from the side with the lower cardinality. + Include []string +} + +// Visitor allows visiting a Node and its child nodes. The Visit method is +// invoked for each node with the path leading to the node provided additionally. +// If the result visitor w is not nil and no error, Walk visits each of the children +// of node with the visitor w, followed by a call of w.Visit(nil, nil). +type Visitor interface { + Visit(node Node, path []Node) (w Visitor, err error) +} + +// Walk traverses an AST in depth-first order: It starts by calling +// v.Visit(node, path); node must not be nil. If the visitor w returned by +// v.Visit(node, path) is not nil and the visitor returns no error, Walk is +// invoked recursively with visitor w for each of the non-nil children of node, +// followed by a call of w.Visit(nil), returning an error +// As the tree is descended the path of previous nodes is provided. 
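+//
+// A minimal usage sketch, using only names from this file: count the vector
+// selectors in a previously parsed expression by walking it with Inspect
+// (defined below):
+//
+//	// expr is any parsed Node, e.g. an *AggregateExpr.
+//	n := 0
+//	Inspect(expr, func(node Node, _ []Node) error {
+//		if _, ok := node.(*VectorSelector); ok {
+//			n++
+//		}
+//		return nil
+//	})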
+func Walk(v Visitor, node Node, path []Node) error { + var err error + if v, err = v.Visit(node, path); v == nil || err != nil { + return err + } + path = append(path, node) + + for _, e := range Children(node) { + if err := Walk(v, e, path); err != nil { + return err + } + } + + _, err = v.Visit(nil, nil) + return err +} + +type inspector func(Node, []Node) error + +func (f inspector) Visit(node Node, path []Node) (Visitor, error) { + if err := f(node, path); err != nil { + return nil, err + } + + return f, nil +} + +// Inspect traverses an AST in depth-first order: It starts by calling +// f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f +// for all the non-nil children of node, recursively. +func Inspect(node Node, f inspector) { + //nolint: errcheck + Walk(inspector(f), node, nil) +} + +// Children returns a list of all child nodes of a syntax tree node. +func Children(node Node) []Node { + // For some reasons these switches have significantly better performance than interfaces + switch n := node.(type) { + case *EvalStmt: + return []Node{n.Expr} + case Expressions: + // golang cannot convert slices of interfaces + ret := make([]Node, len(n)) + for i, e := range n { + ret[i] = e + } + return ret + case *AggregateExpr: + // While this does not look nice, it should avoid unnecessary allocations + // caused by slice resizing + if n.Expr == nil && n.Param == nil { + return nil + } else if n.Expr == nil { + return []Node{n.Param} + } else if n.Param == nil { + return []Node{n.Expr} + } else { + return []Node{n.Expr, n.Param} + } + case *BinaryExpr: + return []Node{n.LHS, n.RHS} + case *Call: + // golang cannot convert slices of interfaces + ret := make([]Node, len(n.Args)) + for i, e := range n.Args { + ret[i] = e + } + return ret + case *SubqueryExpr: + return []Node{n.Expr} + case *ParenExpr: + return []Node{n.Expr} + case *UnaryExpr: + return []Node{n.Expr} + case *MatrixSelector: + return []Node{n.VectorSelector} + case *NumberLiteral, *StringLiteral, *VectorSelector: + // nothing to do + return []Node{} + default: + panic(errors.Errorf("promql.Children: unhandled node type %T", node)) + } +} + +// PositionRange describes a position in the input string of the parser. +type PositionRange struct { + Start Pos + End Pos +} + +// mergeRanges is a helper function to merge the PositionRanges of two Nodes. +// Note that the arguments must be in the same order as they +// occur in the input string. +func mergeRanges(first Node, last Node) PositionRange { + return PositionRange{ + Start: first.PositionRange().Start, + End: last.PositionRange().End, + } +} + +// Item implements the Node interface. +// This makes it possible to call mergeRanges on them. +func (i *Item) PositionRange() PositionRange { + return PositionRange{ + Start: i.Pos, + End: i.Pos + Pos(len(i.Val)), + } +} + +func (e *AggregateExpr) PositionRange() PositionRange { + return e.PosRange +} +func (e *BinaryExpr) PositionRange() PositionRange { + return mergeRanges(e.LHS, e.RHS) +} +func (e *Call) PositionRange() PositionRange { + return e.PosRange +} +func (e *EvalStmt) PositionRange() PositionRange { + return e.Expr.PositionRange() +} +func (e Expressions) PositionRange() PositionRange { + if len(e) == 0 { + // Position undefined. 
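+		// An empty expression list has no span in the query string, hence
+		// the -1 sentinel positions below.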
+ return PositionRange{ + Start: -1, + End: -1, + } + } + return mergeRanges(e[0], e[len(e)-1]) +} +func (e *MatrixSelector) PositionRange() PositionRange { + return PositionRange{ + Start: e.VectorSelector.PositionRange().Start, + End: e.EndPos, + } +} +func (e *SubqueryExpr) PositionRange() PositionRange { + return PositionRange{ + Start: e.Expr.PositionRange().Start, + End: e.EndPos, + } +} +func (e *NumberLiteral) PositionRange() PositionRange { + return e.PosRange +} +func (e *ParenExpr) PositionRange() PositionRange { + return e.PosRange +} +func (e *StringLiteral) PositionRange() PositionRange { + return e.PosRange +} +func (e *UnaryExpr) PositionRange() PositionRange { + return PositionRange{ + Start: e.StartPos, + End: e.Expr.PositionRange().End, + } +} +func (e *VectorSelector) PositionRange() PositionRange { + return e.PosRange +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go new file mode 100644 index 000000000..4516829e5 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -0,0 +1,277 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +// Function represents a function of the expression language and is +// used by function nodes. +type Function struct { + Name string + ArgTypes []ValueType + Variadic int + ReturnType ValueType +} + +// Functions is a list of all functions supported by PromQL, including their types. 
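+// A Variadic value of 1 marks one optional trailing argument (for example,
+// `round` accepts an optional scalar), while -1 allows any number of extra
+// arguments (`label_join`).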
+var Functions = map[string]*Function{ + "abs": { + Name: "abs", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "absent": { + Name: "absent", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "absent_over_time": { + Name: "absent_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "avg_over_time": { + Name: "avg_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "ceil": { + Name: "ceil", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "changes": { + Name: "changes", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "clamp_max": { + Name: "clamp_max", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, + ReturnType: ValueTypeVector, + }, + "clamp_min": { + Name: "clamp_min", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, + ReturnType: ValueTypeVector, + }, + "count_over_time": { + Name: "count_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "days_in_month": { + Name: "days_in_month", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "day_of_month": { + Name: "day_of_month", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "day_of_week": { + Name: "day_of_week", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "delta": { + Name: "delta", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "deriv": { + Name: "deriv", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "exp": { + Name: "exp", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "floor": { + Name: "floor", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "histogram_quantile": { + Name: "histogram_quantile", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "holt_winters": { + Name: "holt_winters", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + }, + "hour": { + Name: "hour", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "idelta": { + Name: "idelta", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "increase": { + Name: "increase", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "irate": { + Name: "irate", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "label_replace": { + Name: "label_replace", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString, ValueTypeString}, + ReturnType: ValueTypeVector, + }, + "label_join": { + Name: "label_join", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeString, ValueTypeString, ValueTypeString}, + Variadic: -1, + ReturnType: ValueTypeVector, + }, + "ln": { + Name: "ln", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "log10": { + Name: "log10", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "log2": { + Name: "log2", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "max_over_time": { + Name: "max_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "min_over_time": 
{ + Name: "min_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "minute": { + Name: "minute", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "month": { + Name: "month", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "predict_linear": { + Name: "predict_linear", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar}, + ReturnType: ValueTypeVector, + }, + "quantile_over_time": { + Name: "quantile_over_time", + ArgTypes: []ValueType{ValueTypeScalar, ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "rate": { + Name: "rate", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "resets": { + Name: "resets", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "round": { + Name: "round", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeScalar}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, + "scalar": { + Name: "scalar", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeScalar, + }, + "sort": { + Name: "sort", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "sort_desc": { + Name: "sort_desc", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "sqrt": { + Name: "sqrt", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "stddev_over_time": { + Name: "stddev_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "stdvar_over_time": { + Name: "stdvar_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "sum_over_time": { + Name: "sum_over_time", + ArgTypes: []ValueType{ValueTypeMatrix}, + ReturnType: ValueTypeVector, + }, + "time": { + Name: "time", + ArgTypes: []ValueType{}, + ReturnType: ValueTypeScalar, + }, + "timestamp": { + Name: "timestamp", + ArgTypes: []ValueType{ValueTypeVector}, + ReturnType: ValueTypeVector, + }, + "vector": { + Name: "vector", + ArgTypes: []ValueType{ValueTypeScalar}, + ReturnType: ValueTypeVector, + }, + "year": { + Name: "year", + ArgTypes: []ValueType{ValueTypeVector}, + Variadic: 1, + ReturnType: ValueTypeVector, + }, +} + +// getFunction returns a predefined Function object for the given name. +func getFunction(name string) (*Function, bool) { + function, ok := Functions[name] + return function, ok +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y new file mode 100644 index 000000000..f0bdc320f --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -0,0 +1,710 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +%{ +package parser + +import ( + "math" + "sort" + "strconv" + "time" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" +) +%} + +%union { + node Node + item Item + matchers []*labels.Matcher + matcher *labels.Matcher + label labels.Label + labels labels.Labels + strings []string + series []SequenceValue + uint uint64 + float float64 + duration time.Duration +} + + +%token +EQL +BLANK +COLON +COMMA +COMMENT +DURATION +EOF +ERROR +IDENTIFIER +LEFT_BRACE +LEFT_BRACKET +LEFT_PAREN +METRIC_IDENTIFIER +NUMBER +RIGHT_BRACE +RIGHT_BRACKET +RIGHT_PAREN +SEMICOLON +SPACE +STRING +TIMES + +// Operators. +%token operatorsStart +%token +ADD +DIV +EQLC +EQL_REGEX +GTE +GTR +LAND +LOR +LSS +LTE +LUNLESS +MOD +MUL +NEQ +NEQ_REGEX +POW +SUB +%token operatorsEnd + +// Aggregators. +%token aggregatorsStart +%token +AVG +BOTTOMK +COUNT +COUNT_VALUES +GROUP +MAX +MIN +QUANTILE +STDDEV +STDVAR +SUM +TOPK +%token aggregatorsEnd + +// Keywords. +%token keywordsStart +%token +BOOL +BY +GROUP_LEFT +GROUP_RIGHT +IGNORING +OFFSET +ON +WITHOUT +%token keywordsEnd + + +// Start symbols for the generated parser. +%token startSymbolsStart +%token +START_METRIC +START_SERIES_DESCRIPTION +START_EXPRESSION +START_METRIC_SELECTOR +%token startSymbolsEnd + + +// Type definitions for grammar rules. +%type label_match_list +%type label_matcher + +%type aggregate_op grouping_label match_op maybe_label metric_identifier unary_op + +%type label_set label_set_list metric +%type
Flow filter rule fields:

| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| `action` | enum | `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`. Enum: `Accept`, `Reject` | false |
| `cidr` | string | `cidr` defines the IP CIDR to filter flows by. Examples: `10.10.10.0/24` or `100:100:100:100::/64` | false |
| `destPorts` | int or string | `destPorts` optionally defines the destination ports to filter flows by. To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. | false |
| `direction` | enum | `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`. Enum: `Ingress`, `Egress` | false |
| `icmpCode` | integer | `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by. | false |
| `icmpType` | integer | `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by. | false |
| `peerIP` | string | `peerIP` optionally defines the remote IP address to filter flows by. Example: `10.10.10.10`. | false |
| `pktDrops` | boolean | `pktDrops` optionally filters only flows containing packet drops. | false |
| `ports` | int or string | `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. To filter a single port, set a single port as an integer value. For example, `ports: 80`. To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. | false |
| `protocol` | enum | `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`. Enum: `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP` | false |
| `sampling` | integer | `sampling` defines the sampling rate for the matched flows. Format: int32 | false |
| `sourcePorts` | int or string | | |
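For reference, the rule fields above combine in a `FlowCollector` resource as sketched below. This is an illustrative example, not part of the diff: the `apiVersion` group and the placement of the rule list as `rules` under `spec.agent.ebpf.flowFilter` are assumptions inferred from the field names in this document.

```yaml
apiVersion: flows.netobserv.io/v1beta1   # assumed API group/version
kind: FlowCollector
metadata:
  name: cluster
spec:
  agent:
    ebpf:
      flowFilter:
        enable: true
        rules:                            # assumed name of the rule list
          - action: Accept                # default action for matched flows
            cidr: 10.10.10.0/24
            protocol: TCP
            ports: "80,443"               # matches source or destination port
            sampling: 10                  # sampling rate for matched flows
          - action: Reject
            cidr: "100:100:100:100::/64"
            pktDrops: true                # only flows containing packet drops
```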
") + tableCloseTag = []byte("
") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
\n\n") + footnotesCloseDivBytes = []byte("\n
\n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
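+				// (The safe-mode check above is commented out upstream, so the
+				// <img> tag below is emitted unconditionally.)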
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := 
tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("\n\n
  • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 000000000..d45bd9417 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. + // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
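+				// Both cursors jump past the bytes the handler consumed, so the
+				// copy of inactive chars resumes right after the emitted node.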
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
+// <br>
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. 
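+// The single-byte delta below relies on ASCII: 'a'-'A' == 0x20, so adding
+// delta to an upper-case byte yields its lower-case equivalent.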
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 000000000..58d2e4538 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
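+// For example, to enable a custom subset rather than CommonExtensions:
+//
+//	exts := NoIntraEmphasis | Tables | FencedCode | Autolink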
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+// 	<p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
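+// For example (illustrative input), given:
+//
+//	[^note]: first line of the footnote
+//	    an indented continuation line
+//
+// the continuation line is unindented and gathered into contents along with
+// the first line.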
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 000000000..04e6050ce --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
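+// For example:
+//
+//	para := NewNode(Paragraph)
+//	para.AppendChild(text([]byte("hello")))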
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
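+// A minimal usage sketch (input is assumed to hold the markdown source),
+// using only the API defined in this package:
+//
+//	ast := New(WithExtensions(CommonExtensions)).Parse(input)
+//	ast.Walk(func(node *Node, entering bool) WalkStatus {
+//		if entering && node.Type == Link {
+//			fmt.Printf("link -> %s\n", node.Destination)
+//		}
+//		return GoToNext
+//	})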
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 000000000..3a220e942 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. + if addNBSP && !*isOpen { + out.WriteString(" ") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString(" ") + } + + return true +} + +func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("’") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("’") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { + return 0 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("©") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("®") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("™") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("—") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("–") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("—") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("–") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte(""")) 
{ + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("�")) { + return 3 + } + + out.WriteByte('&') + return 0 +} + +func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} + +func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '.' && text[2] == '.' { + out.WriteString("…") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { + out.WriteString("…") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("") + out.Write(text[:numEnd]) + out.WriteString("") + out.Write(text[denStart:denEnd]) + out.WriteString("") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("½") + return 2 + } + } + + if text[0] == '1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("¼") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("¾") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 
1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString(""") + } + + return 0 +} + +func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} + +func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} + +func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} + +type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int + +// NewSmartypantsRenderer constructs a Smartypants renderer object. +func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. 
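+// For example (illustrative):
+//
+//	var out bytes.Buffer
+//	sp := NewSmartypantsRenderer(CommonHTMLFlags)
+//	sp.Process(&out, []byte(`he said "hello" -- then left...`))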
+func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/github.com/safchain/ethtool/.gitignore b/vendor/github.com/safchain/ethtool/.gitignore new file mode 100644 index 000000000..db6cadffd --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.gitignore @@ -0,0 +1,27 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Skip compiled example binary file +/example/example diff --git a/vendor/github.com/safchain/ethtool/.golangci.yml b/vendor/github.com/safchain/ethtool/.golangci.yml new file mode 100644 index 000000000..77ccf927e --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.golangci.yml @@ -0,0 +1,14 @@ +linters: + disable: + - gosimple + - unused + enable: + - gci + - gofmt + - misspell +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/safchain/ethtool) diff --git a/vendor/github.com/safchain/ethtool/.yamllint b/vendor/github.com/safchain/ethtool/.yamllint new file mode 100644 index 000000000..9862c5f78 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/.yamllint @@ -0,0 +1,7 @@ +--- +extends: default + +rules: + document-start: disable + truthy: + check-keys: false diff --git a/vendor/github.com/safchain/ethtool/LICENSE b/vendor/github.com/safchain/ethtool/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/vendor/github.com/safchain/ethtool/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/safchain/ethtool/Makefile b/vendor/github.com/safchain/ethtool/Makefile new file mode 100644 index 000000000..67d2da395 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/Makefile @@ -0,0 +1,4 @@ +all: build + +build: + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build diff --git a/vendor/github.com/safchain/ethtool/README.md b/vendor/github.com/safchain/ethtool/README.md new file mode 100644 index 000000000..e44367582 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/README.md @@ -0,0 +1,55 @@ +# ethtool go package # + +![Build Status](https://github.com/safchain/ethtool/actions/workflows/unittests.yml/badge.svg) +[![GoDoc](https://godoc.org/github.com/safchain/ethtool?status.svg)](https://godoc.org/github.com/safchain/ethtool) + + +The ethtool package aims to provide a library that provides easy access to the Linux SIOCETHTOOL ioctl operations. 
It can be used to retrieve information from a network device such as statistics, driver related information or even the peer of a VETH interface. + +# Installation + +```shell +go get github.com/safchain/ethtool +``` + +# How to use + +```go +package main + +import ( + "fmt" + + "github.com/safchain/ethtool" +) + +func main() { + ethHandle, err := ethtool.NewEthtool() + if err != nil { + panic(err.Error()) + } + defer ethHandle.Close() + + // Retrieve tx from eth0 + stats, err := ethHandle.Stats("eth0") + if err != nil { + panic(err.Error()) + } + fmt.Printf("TX: %d\n", stats["tx_bytes"]) + + // Retrieve peer index of a veth interface + stats, err = ethHandle.Stats("veth0") + if err != nil { + panic(err.Error()) + } + fmt.Printf("Peer Index: %d\n", stats["peer_ifindex"]) +} +``` + +## LICENSE ## + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/safchain/ethtool/ethtool.go b/vendor/github.com/safchain/ethtool/ethtool.go new file mode 100644 index 000000000..42fc34520 --- /dev/null +++ b/vendor/github.com/safchain/ethtool/ethtool.go @@ -0,0 +1,1012 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + +// The ethtool package aims to provide a library that provides easy access +// to the Linux SIOCETHTOOL ioctl operations. It can be used to retrieve information +// from a network device such as statistics, driver related information or even +// the peer of a VETH interface. +package ethtool + +import ( + "bytes" + "encoding/hex" + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Maximum size of an interface name +const ( + IFNAMSIZ = 16 +) + +// ioctl ethtool request +const ( + SIOCETHTOOL = 0x8946 +) + +// ethtool stats related constants. +const ( + ETH_GSTRING_LEN = 32 + ETH_SS_STATS = 1 + ETH_SS_PRIV_FLAGS = 2 + ETH_SS_FEATURES = 4 + + // CMD supported + ETHTOOL_GSET = 0x00000001 /* Get settings. */ + ETHTOOL_SSET = 0x00000002 /* Set settings. */ + ETHTOOL_GDRVINFO = 0x00000003 /* Get driver info. */ + ETHTOOL_GMSGLVL = 0x00000007 /* Get driver message level */ + ETHTOOL_SMSGLVL = 0x00000008 /* Set driver msg level. */ + + /* Get link status for host, i.e. whether the interface *and* the + * physical port (if there is one) are up (ethtool_value). 
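+	 * This corresponds to the "Link detected" field reported by the
+	 * ethtool command-line tool.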
*/ + ETHTOOL_GLINK = 0x0000000a + ETHTOOL_GCOALESCE = 0x0000000e /* Get coalesce config */ + ETHTOOL_GRINGPARAM = 0x00000010 /* Get ring parameters */ + ETHTOOL_SRINGPARAM = 0x00000011 /* Set ring parameters. */ + ETHTOOL_GPAUSEPARAM = 0x00000012 /* Get pause parameters */ + ETHTOOL_SPAUSEPARAM = 0x00000013 /* Set pause parameters. */ + ETHTOOL_GSTRINGS = 0x0000001b /* Get specified string set */ + ETHTOOL_GSTATS = 0x0000001d /* Get NIC-specific statistics */ + ETHTOOL_GPERMADDR = 0x00000020 /* Get permanent hardware address */ + ETHTOOL_GFLAGS = 0x00000025 /* Get flags bitmap(ethtool_value) */ + ETHTOOL_GPFLAGS = 0x00000027 /* Get driver-private flags bitmap */ + ETHTOOL_SPFLAGS = 0x00000028 /* Set driver-private flags bitmap */ + ETHTOOL_GSSET_INFO = 0x00000037 /* Get string set info */ + ETHTOOL_GFEATURES = 0x0000003a /* Get device offload settings */ + ETHTOOL_SFEATURES = 0x0000003b /* Change device offload settings */ + ETHTOOL_GCHANNELS = 0x0000003c /* Get no of channels */ + ETHTOOL_SCHANNELS = 0x0000003d /* Set no of channels */ + ETHTOOL_GET_TS_INFO = 0x00000041 /* Get time stamping and PHC info */ + ETHTOOL_GMODULEINFO = 0x00000042 /* Get plug-in module information */ + ETHTOOL_GMODULEEEPROM = 0x00000043 /* Get plug-in module eeprom */ +) + +// MAX_GSTRINGS maximum number of stats entries that ethtool can +// retrieve currently. +const ( + MAX_GSTRINGS = 32768 + MAX_FEATURE_BLOCKS = (MAX_GSTRINGS + 32 - 1) / 32 + EEPROM_LEN = 640 + PERMADDR_LEN = 32 +) + +// ethtool sset_info related constants +const ( + MAX_SSET_INFO = 64 +) + +var supportedCapabilities = []struct { + name string + mask uint64 + speed uint64 +}{ + {"10baseT_Half", unix.ETHTOOL_LINK_MODE_10baseT_Half_BIT, 10_000_000}, + {"10baseT_Full", unix.ETHTOOL_LINK_MODE_10baseT_Full_BIT, 10_000_000}, + {"100baseT_Half", unix.ETHTOOL_LINK_MODE_100baseT_Half_BIT, 100_000_000}, + {"100baseT_Full", unix.ETHTOOL_LINK_MODE_100baseT_Full_BIT, 100_000_000}, + {"1000baseT_Half", unix.ETHTOOL_LINK_MODE_1000baseT_Half_BIT, 1_000_000_000}, + {"1000baseT_Full", unix.ETHTOOL_LINK_MODE_1000baseT_Full_BIT, 1_000_000_000}, + {"10000baseT_Full", unix.ETHTOOL_LINK_MODE_10000baseT_Full_BIT, 10_000_000_000}, + {"2500baseT_Full", unix.ETHTOOL_LINK_MODE_2500baseT_Full_BIT, 2_500_000_000}, + {"1000baseKX_Full", unix.ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, 1_000_000_000}, + {"10000baseKX_Full", unix.ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, 10_000_000_000}, + {"10000baseKR_Full", unix.ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, 10_000_000_000}, + {"10000baseR_FEC", unix.ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, 10_000_000_000}, + {"20000baseMLD2_Full", unix.ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, 20_000_000_000}, + {"20000baseKR2_Full", unix.ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, 20_000_000_000}, + {"40000baseKR4_Full", unix.ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, 40_000_000_000}, + {"40000baseCR4_Full", unix.ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, 40_000_000_000}, + {"40000baseSR4_Full", unix.ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, 40_000_000_000}, + {"40000baseLR4_Full", unix.ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, 40_000_000_000}, + {"56000baseKR4_Full", unix.ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, 56_000_000_000}, + {"56000baseCR4_Full", unix.ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, 56_000_000_000}, + {"56000baseSR4_Full", unix.ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, 56_000_000_000}, + {"56000baseLR4_Full", unix.ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, 56_000_000_000}, + {"25000baseCR_Full", unix.ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, 
25_000_000_000}, +} + +type ifreq struct { + ifr_name [IFNAMSIZ]byte + ifr_data uintptr +} + +// following structures comes from uapi/linux/ethtool.h +type ethtoolSsetInfo struct { + cmd uint32 + reserved uint32 + sset_mask uint64 + data [MAX_SSET_INFO]uint32 +} + +type ethtoolGetFeaturesBlock struct { + available uint32 + requested uint32 + active uint32 + never_changed uint32 +} + +type ethtoolGfeatures struct { + cmd uint32 + size uint32 + blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock +} + +type ethtoolSetFeaturesBlock struct { + valid uint32 + requested uint32 +} + +type ethtoolSfeatures struct { + cmd uint32 + size uint32 + blocks [MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock +} + +type ethtoolDrvInfo struct { + cmd uint32 + driver [32]byte + version [32]byte + fw_version [32]byte + bus_info [32]byte + erom_version [32]byte + reserved2 [12]byte + n_priv_flags uint32 + n_stats uint32 + testinfo_len uint32 + eedump_len uint32 + regdump_len uint32 +} + +// DrvInfo contains driver information +// ethtool.h v3.5: struct ethtool_drvinfo +type DrvInfo struct { + Cmd uint32 + Driver string + Version string + FwVersion string + BusInfo string + EromVersion string + Reserved2 string + NPrivFlags uint32 + NStats uint32 + TestInfoLen uint32 + EedumpLen uint32 + RegdumpLen uint32 +} + +// Channels contains the number of channels for a given interface. +type Channels struct { + Cmd uint32 + MaxRx uint32 + MaxTx uint32 + MaxOther uint32 + MaxCombined uint32 + RxCount uint32 + TxCount uint32 + OtherCount uint32 + CombinedCount uint32 +} + +// Coalesce is a coalesce config for an interface +type Coalesce struct { + Cmd uint32 + RxCoalesceUsecs uint32 + RxMaxCoalescedFrames uint32 + RxCoalesceUsecsIrq uint32 + RxMaxCoalescedFramesIrq uint32 + TxCoalesceUsecs uint32 + TxMaxCoalescedFrames uint32 + TxCoalesceUsecsIrq uint32 + TxMaxCoalescedFramesIrq uint32 + StatsBlockCoalesceUsecs uint32 + UseAdaptiveRxCoalesce uint32 + UseAdaptiveTxCoalesce uint32 + PktRateLow uint32 + RxCoalesceUsecsLow uint32 + RxMaxCoalescedFramesLow uint32 + TxCoalesceUsecsLow uint32 + TxMaxCoalescedFramesLow uint32 + PktRateHigh uint32 + RxCoalesceUsecsHigh uint32 + RxMaxCoalescedFramesHigh uint32 + TxCoalesceUsecsHigh uint32 + TxMaxCoalescedFramesHigh uint32 + RateSampleInterval uint32 +} + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = (1 << 0) + SOF_TIMESTAMPING_TX_SOFTWARE = (1 << 1) + SOF_TIMESTAMPING_RX_HARDWARE = (1 << 2) + SOF_TIMESTAMPING_RX_SOFTWARE = (1 << 3) + SOF_TIMESTAMPING_SOFTWARE = (1 << 4) + SOF_TIMESTAMPING_SYS_HARDWARE = (1 << 5) + SOF_TIMESTAMPING_RAW_HARDWARE = (1 << 6) + SOF_TIMESTAMPING_OPT_ID = (1 << 7) + SOF_TIMESTAMPING_TX_SCHED = (1 << 8) + SOF_TIMESTAMPING_TX_ACK = (1 << 9) + SOF_TIMESTAMPING_OPT_CMSG = (1 << 10) + SOF_TIMESTAMPING_OPT_TSONLY = (1 << 11) + SOF_TIMESTAMPING_OPT_STATS = (1 << 12) + SOF_TIMESTAMPING_OPT_PKTINFO = (1 << 13) + SOF_TIMESTAMPING_OPT_TX_SWHW = (1 << 14) + SOF_TIMESTAMPING_BIND_PHC = (1 << 15) +) + +const ( + /* + * No outgoing packet will need hardware time stamping; + * should a packet arrive which asks for it, no hardware + * time stamping will be done. + */ + HWTSTAMP_TX_OFF = iota + + /* + * Enables hardware time stamping for outgoing packets; + * the sender of the packet decides which are to be + * time stamped by setting %SOF_TIMESTAMPING_TX_SOFTWARE + * before sending the packet. + */ + HWTSTAMP_TX_ON + + /* + * Enables time stamping for outgoing packets just as + * HWTSTAMP_TX_ON does, but also enables time stamp insertion + * directly into Sync packets. 
In this case, transmitted Sync
+	 * packets will not receive a time stamp via the socket error
+	 * queue.
+	 */
+	HWTSTAMP_TX_ONESTEP_SYNC
+
+	/*
+	 * Same as HWTSTAMP_TX_ONESTEP_SYNC, but also enables time
+	 * stamp insertion directly into PDelay_Resp packets. In this
+	 * case, neither transmitted Sync nor PDelay_Resp packets will
+	 * receive a time stamp via the socket error queue.
+	 */
+	HWTSTAMP_TX_ONESTEP_P2P
+)
+
+const (
+	HWTSTAMP_FILTER_NONE                = iota /* time stamp no incoming packet at all */
+	HWTSTAMP_FILTER_ALL                        /* time stamp any incoming packet */
+	HWTSTAMP_FILTER_SOME                       /* return value: time stamp all packets requested plus some others */
+	HWTSTAMP_FILTER_PTP_V1_L4_EVENT            /* PTP v1, UDP, any kind of event packet */
+	HWTSTAMP_FILTER_PTP_V1_L4_SYNC             /* PTP v1, UDP, Sync packet */
+	HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ        /* PTP v1, UDP, Delay_req packet */
+	HWTSTAMP_FILTER_PTP_V2_L4_EVENT            /* PTP v2, UDP, any kind of event packet */
+	HWTSTAMP_FILTER_PTP_V2_L4_SYNC             /* PTP v2, UDP, Sync packet */
+	HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ        /* PTP v2, UDP, Delay_req packet */
+	HWTSTAMP_FILTER_PTP_V2_L2_EVENT            /* 802.AS1, Ethernet, any kind of event packet */
+	HWTSTAMP_FILTER_PTP_V2_L2_SYNC             /* 802.AS1, Ethernet, Sync packet */
+	HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ        /* 802.AS1, Ethernet, Delay_req packet */
+	HWTSTAMP_FILTER_PTP_V2_EVENT               /* PTP v2/802.AS1, any layer, any kind of event packet */
+	HWTSTAMP_FILTER_PTP_V2_SYNC                /* PTP v2/802.AS1, any layer, Sync packet */
+	HWTSTAMP_FILTER_PTP_V2_DELAY_REQ           /* PTP v2/802.AS1, any layer, Delay_req packet */
+	HWTSTAMP_FILTER_NTP_ALL                    /* NTP, UDP, all versions and packet modes */
+)
+
+type TimestampingInformation struct {
+	Cmd            uint32
+	SoTimestamping uint32 /* SOF_TIMESTAMPING_* bitmask */
+	PhcIndex       int32
+	TxTypes        uint32 /* HWTSTAMP_TX_* */
+	txReserved     [3]uint32
+	RxFilters      uint32 /* HWTSTAMP_FILTER_* */
+	rxReserved     [3]uint32
+}
+
+type ethtoolGStrings struct {
+	cmd        uint32
+	string_set uint32
+	len        uint32
+	data       [MAX_GSTRINGS * ETH_GSTRING_LEN]byte
+}
+
+type ethtoolStats struct {
+	cmd     uint32
+	n_stats uint32
+	data    [MAX_GSTRINGS]uint64
+}
+
+type ethtoolEeprom struct {
+	cmd    uint32
+	magic  uint32
+	offset uint32
+	len    uint32
+	data   [EEPROM_LEN]byte
+}
+
+type ethtoolModInfo struct {
+	cmd        uint32
+	tpe        uint32
+	eeprom_len uint32
+	reserved   [8]uint32
+}
+
+type ethtoolLink struct {
+	cmd  uint32
+	data uint32
+}
+
+type ethtoolPermAddr struct {
+	cmd  uint32
+	size uint32
+	data [PERMADDR_LEN]byte
+}
+
+// Ring is a ring config for an interface
+type Ring struct {
+	Cmd               uint32
+	RxMaxPending      uint32
+	RxMiniMaxPending  uint32
+	RxJumboMaxPending uint32
+	TxMaxPending      uint32
+	RxPending         uint32
+	RxMiniPending     uint32
+	RxJumboPending    uint32
+	TxPending         uint32
+}
+
+// Pause is a pause config for an interface
+type Pause struct {
+	Cmd     uint32
+	Autoneg uint32
+	RxPause uint32
+	TxPause uint32
+}
+
+type Ethtool struct {
+	fd int
+}
+
+// Convert zero-terminated array of chars (string in C) to a Go string.
+func goString(s []byte) string {
+	strEnd := bytes.IndexByte(s, 0)
+	if strEnd == -1 {
+		return string(s[:])
+	}
+	return string(s[:strEnd])
+}
+
+// DriverName returns the driver name of the given interface name.
+func (e *Ethtool) DriverName(intf string) (string, error) {
+	info, err := e.getDriverInfo(intf)
+	if err != nil {
+		return "", err
+	}
+	return goString(info.driver[:]), nil
+}
+
+// BusInfo returns the bus information of the given interface name.
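+// For PCI devices this is typically the device's PCI address, as reported by
+// `ethtool -i`. A minimal usage sketch (assuming an interface named "eth0"
+// exists; error handling elided):
+//
+//	e, _ := NewEthtool()
+//	defer e.Close()
+//	bus, _ := e.BusInfo("eth0")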
+func (e *Ethtool) BusInfo(intf string) (string, error) { + info, err := e.getDriverInfo(intf) + if err != nil { + return "", err + } + return goString(info.bus_info[:]), nil +} + +// ModuleEeprom returns Eeprom information of the given interface name. +func (e *Ethtool) ModuleEeprom(intf string) ([]byte, error) { + eeprom, _, err := e.getModuleEeprom(intf) + if err != nil { + return nil, err + } + + return eeprom.data[:eeprom.len], nil +} + +// ModuleEeprom returns Eeprom information of the given interface name. +func (e *Ethtool) ModuleEepromHex(intf string) (string, error) { + eeprom, _, err := e.getModuleEeprom(intf) + if err != nil { + return "", err + } + + return hex.EncodeToString(eeprom.data[:eeprom.len]), nil +} + +// DriverInfo returns driver information of the given interface name. +func (e *Ethtool) DriverInfo(intf string) (DrvInfo, error) { + i, err := e.getDriverInfo(intf) + if err != nil { + return DrvInfo{}, err + } + + drvInfo := DrvInfo{ + Cmd: i.cmd, + Driver: goString(i.driver[:]), + Version: goString(i.version[:]), + FwVersion: goString(i.fw_version[:]), + BusInfo: goString(i.bus_info[:]), + EromVersion: goString(i.erom_version[:]), + Reserved2: goString(i.reserved2[:]), + NPrivFlags: i.n_priv_flags, + NStats: i.n_stats, + TestInfoLen: i.testinfo_len, + EedumpLen: i.eedump_len, + RegdumpLen: i.regdump_len, + } + + return drvInfo, nil +} + +// GetChannels returns the number of channels for the given interface name. +func (e *Ethtool) GetChannels(intf string) (Channels, error) { + channels, err := e.getChannels(intf) + if err != nil { + return Channels{}, err + } + + return channels, nil +} + +// SetChannels sets the number of channels for the given interface name and +// returns the new number of channels. +func (e *Ethtool) SetChannels(intf string, channels Channels) (Channels, error) { + channels, err := e.setChannels(intf, channels) + if err != nil { + return Channels{}, err + } + + return channels, nil +} + +// GetCoalesce returns the coalesce config for the given interface name. +func (e *Ethtool) GetCoalesce(intf string) (Coalesce, error) { + coalesce, err := e.getCoalesce(intf) + if err != nil { + return Coalesce{}, err + } + return coalesce, nil +} + +// GetTimestampingInformation returns the PTP timestamping information for the given interface name. +func (e *Ethtool) GetTimestampingInformation(intf string) (TimestampingInformation, error) { + ts, err := e.getTimestampingInformation(intf) + if err != nil { + return TimestampingInformation{}, err + } + return ts, nil +} + +// PermAddr returns permanent address of the given interface name. 
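+// The address is formatted as colon-separated hex bytes; an all-zero
+// permanent address is reported as an empty string.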
+func (e *Ethtool) PermAddr(intf string) (string, error) { + permAddr, err := e.getPermAddr(intf) + if err != nil { + return "", err + } + + if permAddr.data[0] == 0 && permAddr.data[1] == 0 && + permAddr.data[2] == 0 && permAddr.data[3] == 0 && + permAddr.data[4] == 0 && permAddr.data[5] == 0 { + return "", nil + } + + return fmt.Sprintf("%x:%x:%x:%x:%x:%x", + permAddr.data[0:1], + permAddr.data[1:2], + permAddr.data[2:3], + permAddr.data[3:4], + permAddr.data[4:5], + permAddr.data[5:6], + ), nil +} + +func (e *Ethtool) ioctl(intf string, data uintptr) error { + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: data, + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return ep + } + + return nil +} + +func (e *Ethtool) getDriverInfo(intf string) (ethtoolDrvInfo, error) { + drvinfo := ethtoolDrvInfo{ + cmd: ETHTOOL_GDRVINFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&drvinfo))); err != nil { + return ethtoolDrvInfo{}, err + } + + return drvinfo, nil +} + +func (e *Ethtool) getChannels(intf string) (Channels, error) { + channels := Channels{ + Cmd: ETHTOOL_GCHANNELS, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&channels))); err != nil { + return Channels{}, err + } + + return channels, nil +} + +func (e *Ethtool) setChannels(intf string, channels Channels) (Channels, error) { + channels.Cmd = ETHTOOL_SCHANNELS + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&channels))); err != nil { + return Channels{}, err + } + + return channels, nil +} + +func (e *Ethtool) getCoalesce(intf string) (Coalesce, error) { + coalesce := Coalesce{ + Cmd: ETHTOOL_GCOALESCE, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&coalesce))); err != nil { + return Coalesce{}, err + } + + return coalesce, nil +} + +func (e *Ethtool) getTimestampingInformation(intf string) (TimestampingInformation, error) { + ts := TimestampingInformation{ + Cmd: ETHTOOL_GET_TS_INFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ts))); err != nil { + return TimestampingInformation{}, err + } + + return ts, nil +} + +func (e *Ethtool) getPermAddr(intf string) (ethtoolPermAddr, error) { + permAddr := ethtoolPermAddr{ + cmd: ETHTOOL_GPERMADDR, + size: PERMADDR_LEN, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&permAddr))); err != nil { + return ethtoolPermAddr{}, err + } + + return permAddr, nil +} + +func (e *Ethtool) getModuleEeprom(intf string) (ethtoolEeprom, ethtoolModInfo, error) { + modInfo := ethtoolModInfo{ + cmd: ETHTOOL_GMODULEINFO, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&modInfo))); err != nil { + return ethtoolEeprom{}, ethtoolModInfo{}, err + } + + eeprom := ethtoolEeprom{ + cmd: ETHTOOL_GMODULEEEPROM, + len: modInfo.eeprom_len, + offset: 0, + } + + if modInfo.eeprom_len > EEPROM_LEN { + return ethtoolEeprom{}, ethtoolModInfo{}, fmt.Errorf("eeprom size: %d is larger than buffer size: %d", modInfo.eeprom_len, EEPROM_LEN) + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&eeprom))); err != nil { + return ethtoolEeprom{}, ethtoolModInfo{}, err + } + + return eeprom, modInfo, nil +} + +// GetRing retrieves ring parameters of the given interface name. 
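+// It issues the ETHTOOL_GRINGPARAM ioctl, i.e. the same data surfaced by
+// `ethtool -g`.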
+func (e *Ethtool) GetRing(intf string) (Ring, error) { + ring := Ring{ + Cmd: ETHTOOL_GRINGPARAM, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ring))); err != nil { + return Ring{}, err + } + + return ring, nil +} + +// SetRing sets ring parameters of the given interface name. +func (e *Ethtool) SetRing(intf string, ring Ring) (Ring, error) { + ring.Cmd = ETHTOOL_SRINGPARAM + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ring))); err != nil { + return Ring{}, err + } + + return ring, nil +} + +// GetPause retrieves pause parameters of the given interface name. +func (e *Ethtool) GetPause(intf string) (Pause, error) { + pause := Pause{ + Cmd: ETHTOOL_GPAUSEPARAM, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&pause))); err != nil { + return Pause{}, err + } + + return pause, nil +} + +// SetPause sets pause parameters of the given interface name. +func (e *Ethtool) SetPause(intf string, pause Pause) (Pause, error) { + pause.Cmd = ETHTOOL_SPAUSEPARAM + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&pause))); err != nil { + return Pause{}, err + } + + return pause, nil +} + +func isFeatureBitSet(blocks [MAX_FEATURE_BLOCKS]ethtoolGetFeaturesBlock, index uint) bool { + return (blocks)[index/32].active&(1<<(index%32)) != 0 +} + +func setFeatureBit(blocks *[MAX_FEATURE_BLOCKS]ethtoolSetFeaturesBlock, index uint, value bool) { + blockIndex, bitIndex := index/32, index%32 + + blocks[blockIndex].valid |= 1 << bitIndex + + if value { + blocks[blockIndex].requested |= 1 << bitIndex + } else { + blocks[blockIndex].requested &= ^(1 << bitIndex) + } +} + +func (e *Ethtool) getNames(intf string, mask int) (map[string]uint, error) { + ssetInfo := ethtoolSsetInfo{ + cmd: ETHTOOL_GSSET_INFO, + sset_mask: 1 << mask, + data: [MAX_SSET_INFO]uint32{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&ssetInfo))); err != nil { + return nil, err + } + + /* we only read data on first index because single bit was set in sset_mask(0x10) */ + length := ssetInfo.data[0] + if length == 0 { + return map[string]uint{}, nil + } else if length > MAX_GSTRINGS { + return nil, fmt.Errorf("ethtool currently doesn't support more than %d entries, received %d", MAX_GSTRINGS, length) + } + + gstrings := ethtoolGStrings{ + cmd: ETHTOOL_GSTRINGS, + string_set: uint32(mask), + len: length, + data: [MAX_GSTRINGS * ETH_GSTRING_LEN]byte{}, + } + + if err := e.ioctl(intf, uintptr(unsafe.Pointer(&gstrings))); err != nil { + return nil, err + } + + result := make(map[string]uint) + for i := 0; i != int(length); i++ { + b := gstrings.data[i*ETH_GSTRING_LEN : i*ETH_GSTRING_LEN+ETH_GSTRING_LEN] + key := goString(b) + if key != "" { + result[key] = uint(i) + } + } + + return result, nil +} + +// FeatureNames shows supported features by their name. +func (e *Ethtool) FeatureNames(intf string) (map[string]uint, error) { + return e.getNames(intf, ETH_SS_FEATURES) +} + +// Features retrieves features of the given interface name. 
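+// The result maps each kernel feature name from the ETH_SS_FEATURES string
+// set to whether that feature is currently active on the device.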
+func (e *Ethtool) Features(intf string) (map[string]bool, error) {
+	names, err := e.FeatureNames(intf)
+	if err != nil {
+		return nil, err
+	}
+
+	length := uint32(len(names))
+	if length == 0 {
+		return map[string]bool{}, nil
+	}
+
+	features := ethtoolGfeatures{
+		cmd:  ETHTOOL_GFEATURES,
+		size: (length + 32 - 1) / 32,
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&features))); err != nil {
+		return nil, err
+	}
+
+	result := make(map[string]bool, length)
+	for key, index := range names {
+		result[key] = isFeatureBitSet(features.blocks, index)
+	}
+
+	return result, nil
+}
+
+// Change requests a change in the given device's features.
+func (e *Ethtool) Change(intf string, config map[string]bool) error {
+	names, err := e.FeatureNames(intf)
+	if err != nil {
+		return err
+	}
+
+	length := uint32(len(names))
+
+	features := ethtoolSfeatures{
+		cmd:  ETHTOOL_SFEATURES,
+		size: (length + 32 - 1) / 32,
+	}
+
+	for key, value := range config {
+		if index, ok := names[key]; ok {
+			setFeatureBit(&features.blocks, index, value)
+		} else {
+			return fmt.Errorf("unsupported feature %q", key)
+		}
+	}
+
+	return e.ioctl(intf, uintptr(unsafe.Pointer(&features)))
+}
+
+// PrivFlagsNames shows supported private flags by their name.
+func (e *Ethtool) PrivFlagsNames(intf string) (map[string]uint, error) {
+	return e.getNames(intf, ETH_SS_PRIV_FLAGS)
+}
+
+// PrivFlags retrieves private flags of the given interface name.
+func (e *Ethtool) PrivFlags(intf string) (map[string]bool, error) {
+	names, err := e.PrivFlagsNames(intf)
+	if err != nil {
+		return nil, err
+	}
+
+	length := uint32(len(names))
+	if length == 0 {
+		return map[string]bool{}, nil
+	}
+
+	var val ethtoolLink
+	val.cmd = ETHTOOL_GPFLAGS
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&val))); err != nil {
+		return nil, err
+	}
+
+	result := make(map[string]bool, length)
+	for name, mask := range names {
+		result[name] = val.data&(1<<uint(mask)) != 0
+	}
+
+	return result, nil
+}
+
+// Stats retrieves stats of the given interface name.
+func (e *Ethtool) Stats(intf string) (map[string]uint64, error) {
+	drvinfo := ethtoolDrvInfo{
+		cmd: ETHTOOL_GDRVINFO,
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&drvinfo))); err != nil {
+		return nil, err
+	}
+
+	if drvinfo.n_stats*ETH_GSTRING_LEN > MAX_GSTRINGS*ETH_GSTRING_LEN {
+		return nil, fmt.Errorf("ethtool currently doesn't support more than %d entries, received %d", MAX_GSTRINGS, drvinfo.n_stats)
+	}
+
+	gstrings := ethtoolGStrings{
+		cmd:        ETHTOOL_GSTRINGS,
+		string_set: ETH_SS_STATS,
+		len:        drvinfo.n_stats,
+		data:       [MAX_GSTRINGS * ETH_GSTRING_LEN]byte{},
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&gstrings))); err != nil {
+		return nil, err
+	}
+
+	stats := ethtoolStats{
+		cmd:     ETHTOOL_GSTATS,
+		n_stats: drvinfo.n_stats,
+		data:    [MAX_GSTRINGS]uint64{},
+	}
+
+	if err := e.ioctl(intf, uintptr(unsafe.Pointer(&stats))); err != nil {
+		return nil, err
+	}
+
+	result := make(map[string]uint64)
+	for i := 0; i != int(drvinfo.n_stats); i++ {
+		b := gstrings.data[i*ETH_GSTRING_LEN : i*ETH_GSTRING_LEN+ETH_GSTRING_LEN]
+		strEnd := strings.Index(string(b), "\x00")
+		if strEnd == -1 {
+			strEnd = ETH_GSTRING_LEN
+		}
+		key := string(b[:strEnd])
+		if len(key) != 0 {
+			result[key] = stats.data[i]
+		}
+	}
+
+	return result, nil
+}
+
+// Close closes the ethtool handler
+func (e *Ethtool) Close() {
+	unix.Close(e.fd)
+}
+
+// NewEthtool returns a new ethtool handler
+func NewEthtool() (*Ethtool, error) {
+	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Ethtool{
+		fd: int(fd),
+	}, nil
+}
+
+// BusInfo returns bus information of the given interface name.
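+// It is a one-shot convenience wrapper: it opens a fresh Ethtool handle,
+// performs the query, and closes the handle before returning.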
+func BusInfo(intf string) (string, error) {
+	e, err := NewEthtool()
+	if err != nil {
+		return "", err
+	}
+	defer e.Close()
+	return e.BusInfo(intf)
+}
+
+// DriverName returns the driver name of the given interface name.
+func DriverName(intf string) (string, error) {
+	e, err := NewEthtool()
+	if err != nil {
+		return "", err
+	}
+	defer e.Close()
+	return e.DriverName(intf)
+}
+
+// Stats retrieves stats of the given interface name.
+func Stats(intf string) (map[string]uint64, error) {
+	e, err := NewEthtool()
+	if err != nil {
+		return nil, err
+	}
+	defer e.Close()
+	return e.Stats(intf)
+}
+
+// PermAddr returns permanent address of the given interface name.
+func PermAddr(intf string) (string, error) {
+	e, err := NewEthtool()
+	if err != nil {
+		return "", err
+	}
+	defer e.Close()
+	return e.PermAddr(intf)
+}
+
+func supportedSpeeds(mask uint64) (ret []struct {
+	name  string
+	mask  uint64
+	speed uint64
+}) {
+	for _, mode := range supportedCapabilities {
+		if ((1 << mode.mask) & mask) != 0 {
+			ret = append(ret, mode)
+		}
+	}
+	return ret
+}
+
+// SupportedLinkModes returns the names of the link modes supported by the interface.
+func SupportedLinkModes(mask uint64) []string {
+	var ret []string
+	for _, mode := range supportedSpeeds(mask) {
+		ret = append(ret, mode.name)
+	}
+	return ret
+}
+
+// SupportedSpeed returns the maximum capacity of this interface.
+func SupportedSpeed(mask uint64) uint64 {
+	var ret uint64
+	for _, mode := range supportedSpeeds(mask) {
+		if mode.speed > ret {
+			ret = mode.speed
+		}
+	}
+	return ret
+}
diff --git a/vendor/github.com/safchain/ethtool/ethtool_cmd.go b/vendor/github.com/safchain/ethtool/ethtool_cmd.go
new file mode 100644
index 000000000..e94d6dd89
--- /dev/null
+++ b/vendor/github.com/safchain/ethtool/ethtool_cmd.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+// Package ethtool aims to provide a library giving simple access to the
+// Linux SIOCETHTOOL ioctl operations. It can be used to retrieve information
+// from a network device like statistics, driver related information or
+// even the peer of a VETH interface.
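+//
+// A minimal usage sketch of the command API defined in this file (a sketch
+// only, assuming an interface named "eth0" exists; error handling elided):
+//
+//	var cmd ethtool.EthtoolCmd
+//	speed, _ := cmd.CmdGet("eth0")
+//	fmt.Println("link speed:", speed)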
+package ethtool + +import ( + "math" + "reflect" + "unsafe" + + "golang.org/x/sys/unix" +) + +type EthtoolCmd struct { /* ethtool.c: struct ethtool_cmd */ + Cmd uint32 + Supported uint32 + Advertising uint32 + Speed uint16 + Duplex uint8 + Port uint8 + Phy_address uint8 + Transceiver uint8 + Autoneg uint8 + Mdio_support uint8 + Maxtxpkt uint32 + Maxrxpkt uint32 + Speed_hi uint16 + Eth_tp_mdix uint8 + Reserved2 uint8 + Lp_advertising uint32 + Reserved [2]uint32 +} + +// CmdGet returns the interface settings in the receiver struct +// and returns speed +func (ecmd *EthtoolCmd) CmdGet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.CmdGet(ecmd, intf) +} + +// CmdSet sets and returns the settings in the receiver struct +// and returns speed +func (ecmd *EthtoolCmd) CmdSet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.CmdSet(ecmd, intf) +} + +func (f *EthtoolCmd) reflect(retv *map[string]uint64) { + val := reflect.ValueOf(f).Elem() + + for i := 0; i < val.NumField(); i++ { + valueField := val.Field(i) + typeField := val.Type().Field(i) + + t := valueField.Interface() + // tt := reflect.TypeOf(t) + // fmt.Printf(" t %T %v tt %T %v\n", t, t, tt, tt) + switch tt := t.(type) { + case uint32: + // fmt.Printf(" t is uint32\n") + (*retv)[typeField.Name] = uint64(tt) + case uint16: + (*retv)[typeField.Name] = uint64(tt) + case uint8: + (*retv)[typeField.Name] = uint64(tt) + case int32: + (*retv)[typeField.Name] = uint64(tt) + case int16: + (*retv)[typeField.Name] = uint64(tt) + case int8: + (*retv)[typeField.Name] = uint64(tt) + default: + (*retv)[typeField.Name+"_unknown_type"] = 0 + } + + // tag := typeField.Tag + // fmt.Printf("Field Name: %s,\t Field Value: %v,\t Tag Value: %s\n", + // typeField.Name, valueField.Interface(), tag.Get("tag_name")) + } +} + +// CmdGet returns the interface settings in the receiver struct +// and returns speed +func (e *Ethtool) CmdGet(ecmd *EthtoolCmd, intf string) (uint32, error) { + ecmd.Cmd = ETHTOOL_GSET + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, ep + } + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + if speedval == math.MaxUint16 { + speedval = math.MaxUint32 + } + + return speedval, nil +} + +// CmdSet sets and returns the settings in the receiver struct +// and returns speed +func (e *Ethtool) CmdSet(ecmd *EthtoolCmd, intf string) (uint32, error) { + ecmd.Cmd = ETHTOOL_SSET + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: uintptr(unsafe.Pointer(ecmd)), + } + + _, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd), + SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))) + if ep != 0 { + return 0, unix.Errno(ep) + } + + var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) | + (uint32(ecmd.Speed) & 0xffff) + if speedval == math.MaxUint16 { + speedval = math.MaxUint32 + } + + return speedval, nil +} + +// CmdGetMapped returns the interface settings in a map +func (e *Ethtool) CmdGetMapped(intf string) (map[string]uint64, error) { + ecmd := EthtoolCmd{ + Cmd: ETHTOOL_GSET, + } + + var name [IFNAMSIZ]byte + copy(name[:], []byte(intf)) + + ifr := ifreq{ + ifr_name: name, + ifr_data: 
uintptr(unsafe.Pointer(&ecmd)),
+	}
+
+	_, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd),
+		SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr)))
+	if ep != 0 {
+		return nil, ep
+	}
+
+	result := make(map[string]uint64)
+
+	// ref https://gist.github.com/drewolson/4771479
+	// Golang Reflection Example
+	ecmd.reflect(&result)
+
+	var speedval uint32 = (uint32(ecmd.Speed_hi) << 16) |
+		(uint32(ecmd.Speed) & 0xffff)
+	result["speed"] = uint64(speedval)
+
+	return result, nil
+}
+
+func CmdGetMapped(intf string) (map[string]uint64, error) {
+	e, err := NewEthtool()
+	if err != nil {
+		return nil, err
+	}
+	defer e.Close()
+	return e.CmdGetMapped(intf)
+}
diff --git a/vendor/github.com/safchain/ethtool/ethtool_msglvl.go b/vendor/github.com/safchain/ethtool/ethtool_msglvl.go
new file mode 100644
index 000000000..1f6e338cf
--- /dev/null
+++ b/vendor/github.com/safchain/ethtool/ethtool_msglvl.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+// Package ethtool aims to provide a library giving simple access to the
+// Linux SIOCETHTOOL ioctl operations. It can be used to retrieve information
+// from a network device like statistics, driver related information or
+// even the peer of a VETH interface.
+package ethtool
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+type ethtoolValue struct { /* ethtool.c: struct ethtool_value */
+	cmd  uint32
+	data uint32
+}
+
+// MsglvlGet returns the msglvl of the given interface.
+func (e *Ethtool) MsglvlGet(intf string) (uint32, error) {
+	edata := ethtoolValue{
+		cmd: ETHTOOL_GMSGLVL,
+	}
+
+	var name [IFNAMSIZ]byte
+	copy(name[:], []byte(intf))
+
+	ifr := ifreq{
+		ifr_name: name,
+		ifr_data: uintptr(unsafe.Pointer(&edata)),
+	}
+
+	_, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd),
+		SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr)))
+	if ep != 0 {
+		return 0, ep
+	}
+
+	return edata.data, nil
+}
+
+// MsglvlSet returns the read-msglvl, post-set-msglvl of the given interface.
+func (e *Ethtool) MsglvlSet(intf string, valset uint32) (uint32, uint32, error) {
+	edata := ethtoolValue{
+		cmd: ETHTOOL_GMSGLVL,
+	}
+
+	var name [IFNAMSIZ]byte
+	copy(name[:], []byte(intf))
+
+	ifr := ifreq{
+		ifr_name: name,
+		ifr_data: uintptr(unsafe.Pointer(&edata)),
+	}
+
+	_, _, ep := unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd),
+		SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr)))
+	if ep != 0 {
+		return 0, 0, ep
+	}
+
+	readval := edata.data
+
+	edata.cmd = ETHTOOL_SMSGLVL
+	edata.data = valset
+
+	_, _, ep = unix.Syscall(unix.SYS_IOCTL, uintptr(e.fd),
+		SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr)))
+	if ep != 0 {
+		return 0, 0, ep
+	}
+
+	return readval, edata.data, nil
+}
+
+// MsglvlGet returns the msglvl of the given interface.
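+// Like the other package-level helpers, it opens a temporary Ethtool handle
+// for the single query and closes it before returning.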
+func MsglvlGet(intf string) (uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, err + } + defer e.Close() + return e.MsglvlGet(intf) +} + +// MsglvlSet returns the read-msglvl, post-set-msglvl of the given interface. +func MsglvlSet(intf string, valset uint32) (uint32, uint32, error) { + e, err := NewEthtool() + if err != nil { + return 0, 0, err + } + defer e.Close() + return e.MsglvlSet(intf, valset) +} diff --git a/vendor/github.com/segmentio/kafka-go/.gitattributes b/vendor/github.com/segmentio/kafka-go/.gitattributes new file mode 100644 index 000000000..0cf33618e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/.gitattributes @@ -0,0 +1 @@ +fixtures/*.hex binary diff --git a/vendor/github.com/segmentio/kafka-go/.gitignore b/vendor/github.com/segmentio/kafka-go/.gitignore new file mode 100644 index 000000000..f8b4085e3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/.gitignore @@ -0,0 +1,40 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/kafkacli + +# Emacs +*~ + +# VIM +*.swp + +# Goland +.idea + +#IntelliJ +*.iml + +# govendor +/vendor/*/ diff --git a/vendor/github.com/segmentio/kafka-go/.golangci.yml b/vendor/github.com/segmentio/kafka-go/.golangci.yml new file mode 100644 index 000000000..040910ab6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/.golangci.yml @@ -0,0 +1,18 @@ +linters: + enable: + - bodyclose + - errorlint + - goconst + - godot + - gofmt + - goimports + - prealloc + + disable: + # Temporarily disabling so it can be addressed in a dedicated PR. + - errcheck + - goerr113 + +linters-settings: + goconst: + ignore-tests: true diff --git a/vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md b/vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..359d39b9d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +Project maintainers are available at [#kafka-go](https://gophers.slack.com/archives/CG4H0N9PX) channel inside the [Gophers Slack](https://gophers.slack.com) + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at open-source@twilio.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md b/vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md new file mode 100644 index 000000000..a52ad6e91 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md @@ -0,0 +1,139 @@ +# Contributing to kafka-go + +kafka-go is an open source project. We welcome contributions to kafka-go of any kind including documentation, +organization, tutorials, bug reports, issues, feature requests, feature implementations, pull requests, etc. 
+ +## Table of Contents + +* [Reporting Issues](#reporting-issues) +* [Submitting Patches](#submitting-patches) + * [Code Contribution Guidelines](#code-contribution-guidelines) + * [Git Commit Message Guidelines](#git-commit-message-guidelines) + * [Fetching the Source From GitHub](#fetching-the-sources-from-github) + * [Building kafka-go with Your Changes](#building-kakfa-go-with-your-changes) + +## Reporting Issues + +If you believe you have found a defect in kafka-go, use the GitHub issue tracker to report +the problem to the maintainers. +When reporting the issue, please provide the version of kafka-go, what version(s) of Kafka +are you testing against, and your operating system. + + - [kafka-go Issues segmentio/kafka-go](https://github.com/segmentio/kafka-go/issues) + +## Submitting Patches + +kafka-go project welcomes all contributors and contributions regardless of skill or experience levels. If you are +interested in helping with the project, we will help you with your contribution. + +### Code Contribution + +To make contributions as seamless as possible, we ask the following: + +* Go ahead and fork the project and make your changes. We encourage pull requests to allow for review and discussion of code changes. +* When you’re ready to create a pull request, be sure to: + * Have test cases for the new code. If you have questions about how to do this, please ask in your pull request. + * Run `go fmt`. + * Squash your commits into a single commit. `git rebase -i`. It’s okay to force update your pull request with `git push -f`. + * Follow the **Git Commit Message Guidelines** below. + +### Git Commit Message Guidelines + +This [blog article](http://chris.beams.io/posts/git-commit/) is a good resource for learning how to write good commit messages, +the most important part being that each commit message should have a title/subject in imperative mood starting with a capital letter and no trailing period: +*"Return error on wrong use of the Reader"*, **NOT** *"returning some error."* + +Also, if your commit references one or more GitHub issues, always end your commit message body with *See #1234* or *Fixes #1234*. +Replace *1234* with the GitHub issue ID. The last example will close the issue when the commit is merged into *master*. + +Please use a short and descriptive branch name, e.g. NOT "patch-1". It's very common but creates a naming conflict each +time when a submission is pulled for a review. + +An example: + +```text +Add Code of Conduct and Code Contribution Guidelines + +Add a full Code of Conduct and Code Contribution Guidelines document. +Provide description on how best to retrieve code, fork, checkout, and commit changes. + +Fixes #688 +``` + +### Fetching the Sources From GitHub + +We use Go Modules support built into Go 1.11 to build. The easiest way is to clone kafka-go into a directory outside of +`GOPATH`, as in the following example: + +```bash +mkdir $HOME/src +cd $HOME/src +git clone https://github.com/segmentio/kafka-go.git +cd kafka-go +go build ./... +``` + +To make changes to kafka-go's source: + +1. Create a new branch for your changes (the branch name is arbitrary): + + ```bash + git checkout -b branch1234 + ``` + +1. After making your changes, commit them to your new branch: + + ```bash + git commit -a -v + ``` + +1. Fork kafka-go in GitHub + +1. Add your fork as a new remote (the remote name, "upstream" in this example, is arbitrary): + + ```bash + git remote add upstream git@github.com:USERNAME/kafka-go.git + ``` + +1. 
Push your branch (the remote name, "upstream" in this example, is arbitrary): + + ```bash + git push upstream + ``` + +1. You are now ready to submit a PR based upon the new branch in your forked repository. + +### Using the forked library + +To replace the original version of kafka-go library with a forked version is accomplished this way. + +1. Make sure your application already has a go.mod entry depending on kafka-go + + ```bash + module github.com/myusername/myapp + + require ( + ... + github.com/segmentio/kafka-go v1.2.3 + ... + ) + ``` + +1. Add the following entry to the beginning of the modules file. + + ```bash + module github.com/myusername/myapp + + replace github.com/segmentio/kafka-go v1.2.3 => ../local/directory + + require ( + ... + github.com/segmentio/kafka-go v1.2.3 + ... + ) + ``` +1. Depending on if you are using `vendor`ing or not you might need to run the following command to pull in the new bits. + + ```bash + > go mod vendor + ``` diff --git a/vendor/github.com/segmentio/kafka-go/LICENSE b/vendor/github.com/segmentio/kafka-go/LICENSE new file mode 100644 index 000000000..09e136c51 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/segmentio/kafka-go/Makefile b/vendor/github.com/segmentio/kafka-go/Makefile new file mode 100644 index 000000000..e2374f2e3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/Makefile @@ -0,0 +1,7 @@ +test: + KAFKA_SKIP_NETTEST=1 \ + KAFKA_VERSION=2.3.1 \ + go test -race -cover ./... + +docker: + docker-compose up -d diff --git a/vendor/github.com/segmentio/kafka-go/README.md b/vendor/github.com/segmentio/kafka-go/README.md new file mode 100644 index 000000000..e17878825 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/README.md @@ -0,0 +1,799 @@ +# kafka-go [![CircleCI](https://circleci.com/gh/segmentio/kafka-go.svg?style=shield)](https://circleci.com/gh/segmentio/kafka-go) [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/kafka-go)](https://goreportcard.com/report/github.com/segmentio/kafka-go) [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go) + +## Motivations + +We rely on both Go and Kafka a lot at Segment. Unfortunately, the state of the Go +client libraries for Kafka at the time of this writing was not ideal. 
The available
+options were:
+
+- [sarama](https://github.com/Shopify/sarama), which is by far the most popular
+but is quite difficult to work with. It is poorly documented, the API exposes
+low-level concepts of the Kafka protocol, and it doesn't support recent Go features
+like [contexts](https://golang.org/pkg/context/). It also passes all values as
+pointers, which causes large numbers of dynamic memory allocations, more frequent
+garbage collections, and higher memory usage.
+
+- [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) is a
+cgo based wrapper around [librdkafka](https://github.com/edenhill/librdkafka),
+which means it introduces a dependency on a C library for all Go code that uses
+the package. It has much better documentation than sarama but still lacks support
+for Go contexts.
+
+- [goka](https://github.com/lovoo/goka) is a more recent Kafka client for Go
+which focuses on a specific usage pattern. It provides abstractions for using Kafka
+as a message passing bus between services rather than an ordered log of events, but
+this is not the typical use case of Kafka for us at Segment. The package also
+depends on sarama for all interactions with Kafka.
+
+This is where `kafka-go` comes into play. It provides both low- and high-level
+APIs for interacting with Kafka, mirroring concepts and implementing interfaces of
+the Go standard library to make it easy to use and integrate with existing
+software.
+
+#### Note:
+
+In order to better align with our newly adopted Code of Conduct, the kafka-go
+project has renamed its default branch to `main`. For the full details of our
+Code of Conduct see [this](./CODE_OF_CONDUCT.md) document.
+
+## Kafka versions
+
+`kafka-go` is currently tested with Kafka versions 0.10.1.0 to 2.7.1.
+While it should also be compatible with later versions, newer features available
+in the Kafka API may not yet be implemented in the client.
+
+## Go versions
+
+`kafka-go` requires Go version 1.15 or later.
+
+## Connection [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Conn)
+
+The `Conn` type is the core of the `kafka-go` package. It wraps around a raw
+network connection to expose a low-level API to a Kafka server.
+
+Here are some examples showing typical use of a connection object:
+```go
+// to produce messages
+topic := "my-topic"
+partition := 0
+
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+if err != nil {
+	log.Fatal("failed to dial leader:", err)
+}
+
+conn.SetWriteDeadline(time.Now().Add(10*time.Second))
+_, err = conn.WriteMessages(
+	kafka.Message{Value: []byte("one!")},
+	kafka.Message{Value: []byte("two!")},
+	kafka.Message{Value: []byte("three!")},
+)
+if err != nil {
+	log.Fatal("failed to write messages:", err)
+}
+
+if err := conn.Close(); err != nil {
+	log.Fatal("failed to close connection:", err)
+}
+```
+```go
+// to consume messages
+topic := "my-topic"
+partition := 0
+
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+if err != nil {
+	log.Fatal("failed to dial leader:", err)
+}
+
+conn.SetReadDeadline(time.Now().Add(10*time.Second))
+batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
+
+b := make([]byte, 10e3) // 10KB max per message
+for {
+	n, err := batch.Read(b)
+	if err != nil {
+		break
+	}
+	fmt.Println(string(b[:n]))
+}
+
+if err := batch.Close(); err != nil {
+	log.Fatal("failed to close batch:", err)
+}
+
+if err := conn.Close(); err != nil {
+	log.Fatal("failed to close connection:", err)
+}
+```
+
+### To Create Topics
+By default, Kafka has `auto.create.topics.enable='true'` (`KAFKA_AUTO_CREATE_TOPICS_ENABLE='true'` in the wurstmeister/kafka Docker image). If this value is set to `'true'` then topics will be created as a side effect of `kafka.DialLeader` like so:
+```go
+// to create topics when auto.create.topics.enable='true'
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "my-topic", 0)
+if err != nil {
+	panic(err.Error())
+}
+```
+
+If `auto.create.topics.enable='false'` then you will need to create topics explicitly like so:
+```go
+// to create topics when auto.create.topics.enable='false'
+topic := "my-topic"
+
+conn, err := kafka.Dial("tcp", "localhost:9092")
+if err != nil {
+	panic(err.Error())
+}
+defer conn.Close()
+
+controller, err := conn.Controller()
+if err != nil {
+	panic(err.Error())
+}
+var controllerConn *kafka.Conn
+controllerConn, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+if err != nil {
+	panic(err.Error())
+}
+defer controllerConn.Close()
+
+topicConfigs := []kafka.TopicConfig{
+	{
+		Topic:             topic,
+		NumPartitions:     1,
+		ReplicationFactor: 1,
+	},
+}
+
+err = controllerConn.CreateTopics(topicConfigs...)
+if err != nil { + panic(err.Error()) +} +``` + +### To Connect To Leader Via a Non-leader Connection +```go +// to connect to the kafka leader via an existing non-leader connection rather than using DialLeader +conn, err := kafka.Dial("tcp", "localhost:9092") +if err != nil { + panic(err.Error()) +} +defer conn.Close() +controller, err := conn.Controller() +if err != nil { + panic(err.Error()) +} +var connLeader *kafka.Conn +connLeader, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port))) +if err != nil { + panic(err.Error()) +} +defer connLeader.Close() +``` + +### To list topics +```go +conn, err := kafka.Dial("tcp", "localhost:9092") +if err != nil { + panic(err.Error()) +} +defer conn.Close() + +partitions, err := conn.ReadPartitions() +if err != nil { + panic(err.Error()) +} + +m := map[string]struct{}{} + +for _, p := range partitions { + m[p.Topic] = struct{}{} +} +for k := range m { + fmt.Println(k) +} +``` + + +Because it is low level, the `Conn` type turns out to be a great building block +for higher level abstractions, like the `Reader` for example. + +## Reader [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Reader) + +A `Reader` is another concept exposed by the `kafka-go` package, which intends +to make it simpler to implement the typical use case of consuming from a single +topic-partition pair. +A `Reader` also automatically handles reconnections and offset management, and +exposes an API that supports asynchronous cancellations and timeouts using Go +contexts. + +Note that it is important to call `Close()` on a `Reader` when a process exits. +The kafka server needs a graceful disconnect to stop it from continuing to +attempt to send messages to the connected clients. The given example will not +call `Close()` if the process is terminated with SIGINT (ctrl-c at the shell) or +SIGTERM (as docker stop or a kubernetes restart does). This can result in a +delay when a new reader on the same topic connects (e.g. new process started +or new container running). Use a `signal.Notify` handler to close the reader on +process shutdown. + +```go +// make a new reader that consumes from topic-A, partition 0, at offset 42 +r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"localhost:9092","localhost:9093", "localhost:9094"}, + Topic: "topic-A", + Partition: 0, + MaxBytes: 10e6, // 10MB +}) +r.SetOffset(42) + +for { + m, err := r.ReadMessage(context.Background()) + if err != nil { + break + } + fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value)) +} + +if err := r.Close(); err != nil { + log.Fatal("failed to close reader:", err) +} +``` + +### Consumer Groups + +```kafka-go``` also supports Kafka consumer groups including broker managed offsets. +To enable consumer groups, simply specify the GroupID in the ReaderConfig. + +ReadMessage automatically commits offsets when using consumer groups. 
+
+```go
+// make a new reader that consumes from topic-A
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:  []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	GroupID:  "consumer-group-id",
+	Topic:    "topic-A",
+	MaxBytes: 10e6, // 10MB
+})
+
+for {
+	m, err := r.ReadMessage(context.Background())
+	if err != nil {
+		break
+	}
+	fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
+}
+
+if err := r.Close(); err != nil {
+	log.Fatal("failed to close reader:", err)
+}
+```
+
+There are a number of limitations when using consumer groups:
+
+* ```(*Reader).SetOffset``` will return an error when GroupID is set
+* ```(*Reader).Offset``` will always return ```-1``` when GroupID is set
+* ```(*Reader).Lag``` will always return ```-1``` when GroupID is set
+* ```(*Reader).ReadLag``` will return an error when GroupID is set
+* ```(*Reader).Stats``` will return a partition of ```-1``` when GroupID is set
+
+### Explicit Commits
+
+```kafka-go``` also supports explicit commits. Instead of calling ```ReadMessage```,
+call ```FetchMessage``` followed by ```CommitMessages```.
+
+```go
+ctx := context.Background()
+for {
+	m, err := r.FetchMessage(ctx)
+	if err != nil {
+		break
+	}
+	fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
+	if err := r.CommitMessages(ctx, m); err != nil {
+		log.Fatal("failed to commit messages:", err)
+	}
+}
+```
+
+When committing messages in consumer groups, the message with the highest offset
+for a given topic/partition determines the value of the committed offset for
+that partition. For example, if messages at offsets 1, 2, and 3 of a single
+partition were retrieved by calls to `FetchMessage`, calling `CommitMessages`
+with the message at offset 3 will also result in committing the messages at offsets 1
+and 2 for that partition.
+
+### Managing Commits
+
+By default, CommitMessages will synchronously commit offsets to Kafka. For
+improved performance, you can instead periodically commit offsets to Kafka
+by setting CommitInterval on the ReaderConfig.
+
+```go
+// make a new reader that consumes from topic-A
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:        []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	GroupID:        "consumer-group-id",
+	Topic:          "topic-A",
+	MaxBytes:       10e6,        // 10MB
+	CommitInterval: time.Second, // flushes commits to Kafka every second
+})
+```
+
+## Writer [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Writer)
+
+To produce messages to Kafka, a program may use the low-level `Conn` API, but
+the package also provides a higher-level `Writer` type which is more appropriate
+to use in most cases as it provides additional features:
+
+- Automatic retries and reconnections on errors.
+- Configurable distribution of messages across available partitions.
+- Synchronous or asynchronous writes of messages to Kafka.
+- Asynchronous cancellation using contexts.
+- Flushing of pending messages on close to support graceful shutdowns.
+- Creation of a missing topic before publishing a message. *Note:* this was the default behaviour up to version `v0.4.30`.
+ +```go +// make a writer that produces to topic-A, using the least-bytes distribution +w := &kafka.Writer{ + Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"), + Topic: "topic-A", + Balancer: &kafka.LeastBytes{}, +} + +err := w.WriteMessages(context.Background(), + kafka.Message{ + Key: []byte("Key-A"), + Value: []byte("Hello World!"), + }, + kafka.Message{ + Key: []byte("Key-B"), + Value: []byte("One!"), + }, + kafka.Message{ + Key: []byte("Key-C"), + Value: []byte("Two!"), + }, +) +if err != nil { + log.Fatal("failed to write messages:", err) +} + +if err := w.Close(); err != nil { + log.Fatal("failed to close writer:", err) +} +``` + +### Missing topic creation before publication + +```go +// Make a writer that publishes messages to topic-A. +// The topic will be created if it is missing. +w := &Writer{ + Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"), + Topic: "topic-A", + AllowAutoTopicCreation: true, +} + +messages := []kafka.Message{ + { + Key: []byte("Key-A"), + Value: []byte("Hello World!"), + }, + { + Key: []byte("Key-B"), + Value: []byte("One!"), + }, + { + Key: []byte("Key-C"), + Value: []byte("Two!"), + }, +} + +var err error +const retries = 3 +for i := 0; i < retries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // attempt to create topic prior to publishing the message + err = w.WriteMessages(ctx, messages...) + if errors.Is(err, kafka.LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) { + time.Sleep(time.Millisecond * 250) + continue + } + + if err != nil { + log.Fatalf("unexpected error %v", err) + } + break +} + +if err := w.Close(); err != nil { + log.Fatal("failed to close writer:", err) +} +``` + +### Writing to multiple topics + +Normally, the `WriterConfig.Topic` is used to initialize a single-topic writer. +By excluding that particular configuration, you are given the ability to define +the topic on a per-message basis by setting `Message.Topic`. + +```go +w := &kafka.Writer{ + Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"), + // NOTE: When Topic is not defined here, each Message must define it instead. + Balancer: &kafka.LeastBytes{}, +} + +err := w.WriteMessages(context.Background(), + // NOTE: Each Message has Topic defined, otherwise an error is returned. + kafka.Message{ + Topic: "topic-A", + Key: []byte("Key-A"), + Value: []byte("Hello World!"), + }, + kafka.Message{ + Topic: "topic-B", + Key: []byte("Key-B"), + Value: []byte("One!"), + }, + kafka.Message{ + Topic: "topic-C", + Key: []byte("Key-C"), + Value: []byte("Two!"), + }, +) +if err != nil { + log.Fatal("failed to write messages:", err) +} + +if err := w.Close(); err != nil { + log.Fatal("failed to close writer:", err) +} +``` + +**NOTE:** These 2 patterns are mutually exclusive, if you set `Writer.Topic`, +you must not also explicitly define `Message.Topic` on the messages you are +writing. The opposite applies when you do not define a topic for the writer. +The `Writer` will return an error if it detects this ambiguity. 
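+
+As a hedged illustration of that last point (the broker address and topic
+names below are placeholders), setting both `Writer.Topic` and `Message.Topic`
+causes `WriteMessages` to fail rather than silently pick one:
+
+```go
+w := &kafka.Writer{
+	Addr:  kafka.TCP("localhost:9092"),
+	Topic: "topic-A",
+}
+
+err := w.WriteMessages(context.Background(),
+	// This message names its own topic while the writer also has one,
+	// so the write reports the ambiguity as an error.
+	kafka.Message{Topic: "topic-B", Value: []byte("ambiguous")},
+)
+if err != nil {
+	log.Println("expected error:", err)
+}
+```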
+
+### Compatibility with other clients
+
+#### Sarama
+
+If you're switching from Sarama and need/want to use the same algorithm for message partitioning, you can either use
+the `kafka.Hash` balancer or the `kafka.ReferenceHash` balancer:
+* `kafka.Hash` = `sarama.NewHashPartitioner`
+* `kafka.ReferenceHash` = `sarama.NewReferenceHashPartitioner`
+
+The `kafka.Hash` and `kafka.ReferenceHash` balancers route messages to the same partitions that the two
+aforementioned Sarama partitioners would route them to.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: &kafka.Hash{},
+}
+```
+
+#### librdkafka and confluent-kafka-go
+
+Use the ```kafka.CRC32Balancer``` balancer to get the same behaviour as librdkafka's
+default ```consistent_random``` partition strategy.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: kafka.CRC32Balancer{},
+}
+```
+
+#### Java
+
+Use the ```kafka.Murmur2Balancer``` balancer to get the same behaviour as the canonical
+Java client's default partitioner. Note: the Java class allows you to directly specify
+the partition, which the kafka-go balancers do not permit.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: kafka.Murmur2Balancer{},
+}
+```
+
+### Compression
+
+Compression can be enabled on the `Writer` by setting the `Compression` field:
+
+```go
+w := &kafka.Writer{
+	Addr:        kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:       "topic-A",
+	Compression: kafka.Snappy,
+}
+```
+
+The `Reader` will determine if the consumed messages are compressed by
+examining the message attributes. However, the package(s) for all expected
+codecs must be imported so that they get loaded correctly.
+
+_Note: in versions prior to 0.4, programs had to import compression packages to
+install codecs and support reading compressed messages from kafka. This is no
+longer the case, and importing the compression packages is now a no-op._
+
+## TLS Support
+
+For a bare-bones Conn type, or in the Reader/Writer configs, you can specify a dialer option for TLS support. If the TLS field is nil, it will not connect with TLS.
+*Note:* Connecting to a Kafka cluster with TLS enabled without configuring TLS on the Conn/Reader/Writer can manifest in opaque io.ErrUnexpectedEOF errors.
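+
+The examples below elide the `tls.Config` contents. As a hedged sketch (the CA
+file path is hypothetical, and imports are omitted as in the other snippets),
+a minimal config trusting a custom CA can be built with the standard library:
+
+```go
+// Read the PEM-encoded CA certificate from disk (path is an example).
+caCert, err := os.ReadFile("/path/to/ca.pem")
+if err != nil {
+	log.Fatal(err)
+}
+
+// Build a certificate pool containing only that CA.
+pool := x509.NewCertPool()
+if !pool.AppendCertsFromPEM(caCert) {
+	log.Fatal("failed to parse CA certificate")
+}
+
+tlsConfig := &tls.Config{RootCAs: pool}
+```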
+ + +### Connection + +```go +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + TLS: &tls.Config{...tls config...}, +} + +conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093") +``` + +### Reader + +```go +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + TLS: &tls.Config{...tls config...}, +} + +r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"}, + GroupID: "consumer-group-id", + Topic: "topic-A", + Dialer: dialer, +}) +``` + +### Writer + + +Direct Writer creation + +```go +w := kafka.Writer{ + Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"), + Topic: "topic-A", + Balancer: &kafka.Hash{}, + Transport: &kafka.Transport{ + TLS: &tls.Config{}, + }, + } +``` + +Using `kafka.NewWriter` + +```go +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + TLS: &tls.Config{...tls config...}, +} + +w := kafka.NewWriter(kafka.WriterConfig{ + Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"}, + Topic: "topic-A", + Balancer: &kafka.Hash{}, + Dialer: dialer, +}) +``` +Note that `kafka.NewWriter` and `kafka.WriterConfig` are deprecated and will be removed in a future release. + +## SASL Support + +You can specify an option on the `Dialer` to use SASL authentication. The `Dialer` can be used directly to open a `Conn` or it can be passed to a `Reader` or `Writer` via their respective configs. If the `SASLMechanism` field is `nil`, it will not authenticate with SASL. + +### SASL Authentication Types + +#### [Plain](https://godoc.org/github.com/segmentio/kafka-go/sasl/plain#Mechanism) +```go +mechanism := plain.Mechanism{ + Username: "username", + Password: "password", +} +``` + +#### [SCRAM](https://godoc.org/github.com/segmentio/kafka-go/sasl/scram#Mechanism) +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} +``` + +### Connection + +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} + +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + SASLMechanism: mechanism, +} + +conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093") +``` + + +### Reader + +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} + +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + SASLMechanism: mechanism, +} + +r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"localhost:9092","localhost:9093", "localhost:9094"}, + GroupID: "consumer-group-id", + Topic: "topic-A", + Dialer: dialer, +}) +``` + +### Writer + +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} + +// Transports are responsible for managing connection pools and other resources, +// it's generally best to create a few of these and share them across your +// application. 
+sharedTransport := &kafka.Transport{
+	SASL: mechanism,
+}
+
+w := kafka.Writer{
+	Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:     "topic-A",
+	Balancer:  &kafka.Hash{},
+	Transport: sharedTransport,
+}
+```
+
+### Client
+
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+	panic(err)
+}
+
+// Transports are responsible for managing connection pools and other resources,
+// it's generally best to create a few of these and share them across your
+// application.
+sharedTransport := &kafka.Transport{
+	SASL: mechanism,
+}
+
+client := &kafka.Client{
+	Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Timeout:   10 * time.Second,
+	Transport: sharedTransport,
+}
+```
+
+#### Reading all messages within a time range
+
+```go
+startTime := time.Now().Add(-time.Hour)
+endTime := time.Now()
+batchSize := int(10e6) // 10MB
+
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	Topic:     "my-topic1",
+	Partition: 0,
+	MaxBytes:  batchSize,
+})
+
+r.SetOffsetAt(context.Background(), startTime)
+
+for {
+	m, err := r.ReadMessage(context.Background())
+
+	if err != nil {
+		break
+	}
+	if m.Time.After(endTime) {
+		break
+	}
+	// TODO: process message
+	fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
+}
+
+if err := r.Close(); err != nil {
+	log.Fatal("failed to close reader:", err)
+}
+```
+
+## Logging
+
+For visibility into the operations of the Reader/Writer types, configure a logger on creation.
+
+### Reader
+
+```go
+func logf(msg string, a ...interface{}) {
+	fmt.Printf(msg, a...)
+	fmt.Println()
+}
+
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:     []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	Topic:       "my-topic1",
+	Partition:   0,
+	Logger:      kafka.LoggerFunc(logf),
+	ErrorLogger: kafka.LoggerFunc(logf),
+})
+```
+
+### Writer
+
+```go
+func logf(msg string, a ...interface{}) {
+	fmt.Printf(msg, a...)
+	fmt.Println()
+}
+
+w := &kafka.Writer{
+	Addr:        kafka.TCP("localhost:9092"),
+	Topic:       "topic",
+	Logger:      kafka.LoggerFunc(logf),
+	ErrorLogger: kafka.LoggerFunc(logf),
+}
+```
+
+## Testing
+
+Subtle behavior changes in later Kafka versions have caused some historical tests to break. If you are running against Kafka 2.3.1 or later, exporting the `KAFKA_SKIP_NETTEST=1` environment variable will skip those tests.
+
+Run Kafka locally in Docker:
+
+```bash
+docker-compose up -d
+```
+
+Run tests:
+
+```bash
+KAFKA_VERSION=2.3.1 \
+  KAFKA_SKIP_NETTEST=1 \
+  go test -race ./...
+```
diff --git a/vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go b/vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go
new file mode 100644
index 000000000..dd83edb3a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go
@@ -0,0 +1,67 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/addoffsetstotxn"
+)
+
+// AddOffsetsToTxnRequest is the request structure for the AddOffsetsToTxn function.
+type AddOffsetsToTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// The unique group identifier.
+	GroupID string
+}
+
+// AddOffsetsToTxnResponse is the response structure for the AddOffsetsToTxn function.
+type AddOffsetsToTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred when attempting to add the offsets
+	// to a transaction.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+// AddOffsetsToTxn sends an add offsets to txn request to a kafka broker and returns the response.
+func (c *Client) AddOffsetsToTxn(
+	ctx context.Context,
+	req *AddOffsetsToTxnRequest,
+) (*AddOffsetsToTxnResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &addoffsetstotxn.Request{
+		TransactionalID: req.TransactionalID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+		GroupID:         req.GroupID,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AddOffsetsToTxn: %w", err)
+	}
+
+	r := m.(*addoffsetstotxn.Response)
+
+	res := &AddOffsetsToTxnResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Error:    makeError(r.ErrorCode, ""),
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go b/vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go
new file mode 100644
index 000000000..74792135e
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go
@@ -0,0 +1,108 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/addpartitionstotxn"
+)
+
+// AddPartitionToTxn represents a partition to be added
+// to a transaction.
+type AddPartitionToTxn struct {
+	// Partition is the ID of a partition to add to the transaction.
+	Partition int
+}
+
+// AddPartitionsToTxnRequest is the request structure for the AddPartitionsToTxn function.
+type AddPartitionsToTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// Mappings of topic names to lists of partitions.
+	Topics map[string][]AddPartitionToTxn
+}
+
+// AddPartitionsToTxnResponse is the response structure for the AddPartitionsToTxn function.
+type AddPartitionsToTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mappings of topic names to partitions being added to a transaction.
+	Topics map[string][]AddPartitionToTxnPartition
+}
+
+// AddPartitionToTxnPartition represents the state of a single partition
+// in response to adding to a transaction.
+type AddPartitionToTxnPartition struct {
+	// The ID of the partition.
+	Partition int
+
+	// An error that may have occurred when attempting to add the partition
+	// to a transaction.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+// AddPartitionsToTxn sends an add partitions to txn request to a kafka broker and returns the response.
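+//
+// A hedged usage sketch (the broker address, transactional id, and topic are
+// illustrative placeholders, not values mandated by this API):
+//
+//	resp, err := client.AddPartitionsToTxn(ctx, &kafka.AddPartitionsToTxnRequest{
+//		Addr:            kafka.TCP("localhost:9092"),
+//		TransactionalID: "my-transaction",
+//		ProducerID:      producerID,    // from an InitProducerID request
+//		ProducerEpoch:   producerEpoch, // from an InitProducerID request
+//		Topics: map[string][]kafka.AddPartitionToTxn{
+//			"my-topic": {{Partition: 0}},
+//		},
+//	})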
+func (c *Client) AddPartitionsToTxn( + ctx context.Context, + req *AddPartitionsToTxnRequest, +) (*AddPartitionsToTxnResponse, error) { + protoReq := &addpartitionstotxn.Request{ + TransactionalID: req.TransactionalID, + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + } + protoReq.Topics = make([]addpartitionstotxn.RequestTopic, 0, len(req.Topics)) + + for topic, partitions := range req.Topics { + reqTopic := addpartitionstotxn.RequestTopic{ + Name: topic, + Partitions: make([]int32, len(partitions)), + } + for i, partition := range partitions { + reqTopic.Partitions[i] = int32(partition.Partition) + } + protoReq.Topics = append(protoReq.Topics, reqTopic) + } + + m, err := c.roundTrip(ctx, req.Addr, protoReq) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).AddPartitionsToTxn: %w", err) + } + + r := m.(*addpartitionstotxn.Response) + + res := &AddPartitionsToTxnResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]AddPartitionToTxnPartition, len(r.Results)), + } + + for _, result := range r.Results { + partitions := make([]AddPartitionToTxnPartition, 0, len(result.Results)) + for _, rp := range result.Results { + partitions = append(partitions, AddPartitionToTxnPartition{ + Partition: int(rp.PartitionIndex), + Error: makeError(rp.ErrorCode, ""), + }) + } + res.Topics[result.Name] = partitions + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/address.go b/vendor/github.com/segmentio/kafka-go/address.go new file mode 100644 index 000000000..f332b7b0b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/address.go @@ -0,0 +1,64 @@ +package kafka + +import ( + "net" + "strings" +) + +// TCP constructs an address with the network set to "tcp". +func TCP(address ...string) net.Addr { return makeNetAddr("tcp", address) } + +func makeNetAddr(network string, addresses []string) net.Addr { + switch len(addresses) { + case 0: + return nil // maybe panic instead? 
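+		// Note: returning a nil net.Addr here (rather than an error) means a
+		// client constructed with no addresses only fails when the address is
+		// first used.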
+ case 1: + return makeAddr(network, addresses[0]) + default: + return makeMultiAddr(network, addresses) + } +} + +func makeAddr(network, address string) net.Addr { + return &networkAddress{ + network: network, + address: canonicalAddress(address), + } +} + +func makeMultiAddr(network string, addresses []string) net.Addr { + multi := make(multiAddr, len(addresses)) + for i, address := range addresses { + multi[i] = makeAddr(network, address) + } + return multi +} + +type networkAddress struct { + network string + address string +} + +func (a *networkAddress) Network() string { return a.network } + +func (a *networkAddress) String() string { return a.address } + +type multiAddr []net.Addr + +func (m multiAddr) Network() string { return m.join(net.Addr.Network) } + +func (m multiAddr) String() string { return m.join(net.Addr.String) } + +func (m multiAddr) join(f func(net.Addr) string) string { + switch len(m) { + case 0: + return "" + case 1: + return f(m[0]) + } + s := make([]string, len(m)) + for i, a := range m { + s[i] = f(a) + } + return strings.Join(s, ",") +} diff --git a/vendor/github.com/segmentio/kafka-go/alterclientquotas.go b/vendor/github.com/segmentio/kafka-go/alterclientquotas.go new file mode 100644 index 000000000..7a926e5c4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/alterclientquotas.go @@ -0,0 +1,131 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/alterclientquotas" +) + +// AlterClientQuotasRequest represents a request sent to a kafka broker to +// alter client quotas. +type AlterClientQuotasRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of client quotas entries to alter. + Entries []AlterClientQuotaEntry + + // Whether the alteration should be validated, but not performed. + ValidateOnly bool +} + +type AlterClientQuotaEntry struct { + // The quota entities to alter. + Entities []AlterClientQuotaEntity + + // An individual quota configuration entry to alter. + Ops []AlterClientQuotaOps +} + +type AlterClientQuotaEntity struct { + // The quota entity type. + EntityType string + + // The name of the quota entity, or null if the default. + EntityName string +} + +type AlterClientQuotaOps struct { + // The quota configuration key. + Key string + + // The quota configuration value to set, otherwise ignored if the value is to be removed. + Value float64 + + // Whether the quota configuration value should be removed, otherwise set. + Remove bool +} + +type AlterClientQuotaResponseQuotas struct { + // Error is set to a non-nil value including the code and message if a top-level + // error was encountered when doing the update. + Error error + + // The altered quota entities. + Entities []AlterClientQuotaEntity +} + +// AlterClientQuotasResponse represents a response from a kafka broker to an alter client +// quotas request. +type AlterClientQuotasResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of altered client quotas responses. + Entries []AlterClientQuotaResponseQuotas +} + +// AlterClientQuotas sends client quotas alteration request to a kafka broker and returns +// the response. 
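+//
+// A hedged usage sketch (the entity and quota key shown are illustrative
+// examples of Kafka quota configuration, not required values):
+//
+//	resp, err := client.AlterClientQuotas(ctx, &kafka.AlterClientQuotasRequest{
+//		Entries: []kafka.AlterClientQuotaEntry{{
+//			Entities: []kafka.AlterClientQuotaEntity{
+//				{EntityType: "client-id", EntityName: "my-client"},
+//			},
+//			Ops: []kafka.AlterClientQuotaOps{
+//				{Key: "producer_byte_rate", Value: 500000},
+//			},
+//		}},
+//	})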
+func (c *Client) AlterClientQuotas(ctx context.Context, req *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+	entries := make([]alterclientquotas.Entry, len(req.Entries))
+
+	for entryIdx, entry := range req.Entries {
+		entities := make([]alterclientquotas.Entity, len(entry.Entities))
+		for entityIdx, entity := range entry.Entities {
+			entities[entityIdx] = alterclientquotas.Entity{
+				EntityType: entity.EntityType,
+				EntityName: entity.EntityName,
+			}
+		}
+
+		ops := make([]alterclientquotas.Ops, len(entry.Ops))
+		for opsIdx, op := range entry.Ops {
+			ops[opsIdx] = alterclientquotas.Ops{
+				Key:    op.Key,
+				Value:  op.Value,
+				Remove: op.Remove,
+			}
+		}
+
+		entries[entryIdx] = alterclientquotas.Entry{
+			Entities: entities,
+			Ops:      ops,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &alterclientquotas.Request{
+		Entries:      entries,
+		ValidateOnly: req.ValidateOnly,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AlterClientQuotas: %w", err)
+	}
+
+	res := m.(*alterclientquotas.Response)
+	responseEntries := make([]AlterClientQuotaResponseQuotas, len(res.Results))
+
+	for responseEntryIdx, responseEntry := range res.Results {
+		responseEntities := make([]AlterClientQuotaEntity, len(responseEntry.Entities))
+		for responseEntityIdx, responseEntity := range responseEntry.Entities {
+			responseEntities[responseEntityIdx] = AlterClientQuotaEntity{
+				EntityType: responseEntity.EntityType,
+				EntityName: responseEntity.EntityName,
+			}
+		}
+
+		responseEntries[responseEntryIdx] = AlterClientQuotaResponseQuotas{
+			Error:    makeError(responseEntry.ErrorCode, responseEntry.ErrorMessage),
+			Entities: responseEntities,
+		}
+	}
+	ret := &AlterClientQuotasResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Entries:  responseEntries,
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/alterconfigs.go b/vendor/github.com/segmentio/kafka-go/alterconfigs.go
new file mode 100644
index 000000000..c994506a1
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/alterconfigs.go
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/alterconfigs"
+)
+
+// AlterConfigsRequest represents a request sent to a kafka broker to alter configs.
+type AlterConfigsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of resources to update.
+	Resources []AlterConfigRequestResource
+
+	// When set to true, the configuration changes are validated, but not
+	// applied.
+	ValidateOnly bool
+}
+
+type AlterConfigRequestResource struct {
+	// Resource Type
+	ResourceType ResourceType
+
+	// Resource Name
+	ResourceName string
+
+	// Configs is a list of configuration updates.
+	Configs []AlterConfigRequestConfig
+}
+
+type AlterConfigRequestConfig struct {
+	// Configuration key name
+	Name string
+
+	// The value to set for the configuration key.
+	Value string
+}
+
+// AlterConfigsResponse represents a response from a kafka broker to an alter config request.
+type AlterConfigsResponse struct {
+	// Duration for which the request was throttled due to a quota violation.
+	Throttle time.Duration
+
+	// Mapping of resources to errors that occurred while attempting to alter
+	// their configuration.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+ Errors map[AlterConfigsResponseResource]error +} + +// AlterConfigsResponseResource helps map errors to specific resources in an +// alter config response. +type AlterConfigsResponseResource struct { + Type int8 + Name string +} + +// AlterConfigs sends a config altering request to a kafka broker and returns the +// response. +func (c *Client) AlterConfigs(ctx context.Context, req *AlterConfigsRequest) (*AlterConfigsResponse, error) { + resources := make([]alterconfigs.RequestResources, len(req.Resources)) + + for i, t := range req.Resources { + configs := make([]alterconfigs.RequestConfig, len(t.Configs)) + for j, v := range t.Configs { + configs[j] = alterconfigs.RequestConfig{ + Name: v.Name, + Value: v.Value, + } + } + resources[i] = alterconfigs.RequestResources{ + ResourceType: int8(t.ResourceType), + ResourceName: t.ResourceName, + Configs: configs, + } + } + + m, err := c.roundTrip(ctx, req.Addr, &alterconfigs.Request{ + Resources: resources, + ValidateOnly: req.ValidateOnly, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).AlterConfigs: %w", err) + } + + res := m.(*alterconfigs.Response) + ret := &AlterConfigsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Errors: make(map[AlterConfigsResponseResource]error, len(res.Responses)), + } + + for _, t := range res.Responses { + ret.Errors[AlterConfigsResponseResource{ + Type: t.ResourceType, + Name: t.ResourceName, + }] = makeError(t.ErrorCode, t.ErrorMessage) + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go b/vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go new file mode 100644 index 000000000..dd67d003b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go @@ -0,0 +1,134 @@ +package kafka + +import ( + "context" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/alterpartitionreassignments" +) + +// AlterPartitionReassignmentsRequest is a request to the AlterPartitionReassignments API. +type AlterPartitionReassignmentsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Topic is the name of the topic to alter partitions in. Keep this field empty and use Topic in AlterPartitionReassignmentsRequestAssignment to + // reassign to multiple topics. + Topic string + + // Assignments is the list of partition reassignments to submit to the API. + Assignments []AlterPartitionReassignmentsRequestAssignment + + // Timeout is the amount of time to wait for the request to complete. + Timeout time.Duration +} + +// AlterPartitionReassignmentsRequestAssignment contains the requested reassignments for a single +// partition. +type AlterPartitionReassignmentsRequestAssignment struct { + // Topic is the name of the topic to alter partitions in. If empty, the value of Topic in AlterPartitionReassignmentsRequest is used. + Topic string + + // PartitionID is the ID of the partition to make the reassignments in. + PartitionID int + + // BrokerIDs is a slice of brokers to set the partition replicas to, or null to cancel a pending reassignment for this partition. + BrokerIDs []int +} + +// AlterPartitionReassignmentsResponse is a response from the AlterPartitionReassignments API. +type AlterPartitionReassignmentsResponse struct { + // Error is set to a non-nil value including the code and message if a top-level + // error was encountered when doing the update. + Error error + + // PartitionResults contains the specific results for each partition. 
+ PartitionResults []AlterPartitionReassignmentsResponsePartitionResult +} + +// AlterPartitionReassignmentsResponsePartitionResult contains the detailed result of +// doing reassignments for a single partition. +type AlterPartitionReassignmentsResponsePartitionResult struct { + // Topic is the topic name. + Topic string + + // PartitionID is the ID of the partition that was altered. + PartitionID int + + // Error is set to a non-nil value including the code and message if an error was encountered + // during the update for this partition. + Error error +} + +func (c *Client) AlterPartitionReassignments( + ctx context.Context, + req *AlterPartitionReassignmentsRequest, +) (*AlterPartitionReassignmentsResponse, error) { + apiTopicMap := make(map[string]*alterpartitionreassignments.RequestTopic) + + for _, assignment := range req.Assignments { + topic := assignment.Topic + if topic == "" { + topic = req.Topic + } + + apiTopic := apiTopicMap[topic] + if apiTopic == nil { + apiTopic = &alterpartitionreassignments.RequestTopic{ + Name: topic, + } + apiTopicMap[topic] = apiTopic + } + + replicas := []int32{} + for _, brokerID := range assignment.BrokerIDs { + replicas = append(replicas, int32(brokerID)) + } + + apiTopic.Partitions = append( + apiTopic.Partitions, + alterpartitionreassignments.RequestPartition{ + PartitionIndex: int32(assignment.PartitionID), + Replicas: replicas, + }, + ) + } + + apiReq := &alterpartitionreassignments.Request{ + TimeoutMs: int32(req.Timeout.Milliseconds()), + } + + for _, apiTopic := range apiTopicMap { + apiReq.Topics = append(apiReq.Topics, *apiTopic) + } + + protoResp, err := c.roundTrip( + ctx, + req.Addr, + apiReq, + ) + if err != nil { + return nil, err + } + apiResp := protoResp.(*alterpartitionreassignments.Response) + + resp := &AlterPartitionReassignmentsResponse{ + Error: makeError(apiResp.ErrorCode, apiResp.ErrorMessage), + } + + for _, topicResult := range apiResp.Results { + for _, partitionResult := range topicResult.Partitions { + resp.PartitionResults = append( + resp.PartitionResults, + AlterPartitionReassignmentsResponsePartitionResult{ + Topic: topicResult.Name, + PartitionID: int(partitionResult.PartitionIndex), + Error: makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage), + }, + ) + } + } + + return resp, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/alteruserscramcredentials.go b/vendor/github.com/segmentio/kafka-go/alteruserscramcredentials.go new file mode 100644 index 000000000..6163e564e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/alteruserscramcredentials.go @@ -0,0 +1,107 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/alteruserscramcredentials" +) + +// AlterUserScramCredentialsRequest represents a request sent to a kafka broker to +// alter user scram credentials. +type AlterUserScramCredentialsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of credentials to delete. + Deletions []UserScramCredentialsDeletion + + // List of credentials to upsert. 
+ Upsertions []UserScramCredentialsUpsertion +} + +type ScramMechanism int8 + +const ( + ScramMechanismUnknown ScramMechanism = iota // 0 + ScramMechanismSha256 // 1 + ScramMechanismSha512 // 2 +) + +type UserScramCredentialsDeletion struct { + Name string + Mechanism ScramMechanism +} + +type UserScramCredentialsUpsertion struct { + Name string + Mechanism ScramMechanism + Iterations int + Salt []byte + SaltedPassword []byte +} + +// AlterUserScramCredentialsResponse represents a response from a kafka broker to an alter user +// credentials request. +type AlterUserScramCredentialsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of altered user scram credentials. + Results []AlterUserScramCredentialsResponseUser +} + +type AlterUserScramCredentialsResponseUser struct { + User string + Error error +} + +// AlterUserScramCredentials sends user scram credentials alteration request to a kafka broker and returns +// the response. +func (c *Client) AlterUserScramCredentials(ctx context.Context, req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) { + deletions := make([]alteruserscramcredentials.RequestUserScramCredentialsDeletion, len(req.Deletions)) + upsertions := make([]alteruserscramcredentials.RequestUserScramCredentialsUpsertion, len(req.Upsertions)) + + for deletionIdx, deletion := range req.Deletions { + deletions[deletionIdx] = alteruserscramcredentials.RequestUserScramCredentialsDeletion{ + Name: deletion.Name, + Mechanism: int8(deletion.Mechanism), + } + } + + for upsertionIdx, upsertion := range req.Upsertions { + upsertions[upsertionIdx] = alteruserscramcredentials.RequestUserScramCredentialsUpsertion{ + Name: upsertion.Name, + Mechanism: int8(upsertion.Mechanism), + Iterations: int32(upsertion.Iterations), + Salt: upsertion.Salt, + SaltedPassword: upsertion.SaltedPassword, + } + } + + m, err := c.roundTrip(ctx, req.Addr, &alteruserscramcredentials.Request{ + Deletions: deletions, + Upsertions: upsertions, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).AlterUserScramCredentials: %w", err) + } + + res := m.(*alteruserscramcredentials.Response) + responseEntries := make([]AlterUserScramCredentialsResponseUser, len(res.Results)) + + for responseIdx, responseResult := range res.Results { + responseEntries[responseIdx] = AlterUserScramCredentialsResponseUser{ + User: responseResult.User, + Error: makeError(responseResult.ErrorCode, responseResult.ErrorMessage), + } + } + ret := &AlterUserScramCredentialsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Results: responseEntries, + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/apiversions.go b/vendor/github.com/segmentio/kafka-go/apiversions.go new file mode 100644 index 000000000..52412b811 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/apiversions.go @@ -0,0 +1,72 @@ +package kafka + +import ( + "context" + "net" + + "github.com/segmentio/kafka-go/protocol" + "github.com/segmentio/kafka-go/protocol/apiversions" +) + +// ApiVersionsRequest is a request to the ApiVersions API. +type ApiVersionsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr +} + +// ApiVersionsResponse is a response from the ApiVersions API. +type ApiVersionsResponse struct { + // Error is set to a non-nil value if an error was encountered. + Error error + + // ApiKeys contains the specific details of each supported API. 
+	ApiKeys []ApiVersionsResponseApiKey
+}
+
+// ApiVersionsResponseApiKey includes the details of which versions are supported for a single API.
+type ApiVersionsResponseApiKey struct {
+	// ApiKey is the ID of the API.
+	ApiKey int
+
+	// ApiName is a human-friendly description of the API.
+	ApiName string
+
+	// MinVersion is the minimum API version supported by the broker.
+	MinVersion int
+
+	// MaxVersion is the maximum API version supported by the broker.
+	MaxVersion int
+}
+
+func (c *Client) ApiVersions(
+	ctx context.Context,
+	req *ApiVersionsRequest,
+) (*ApiVersionsResponse, error) {
+	apiReq := &apiversions.Request{}
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		apiReq,
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*apiversions.Response)
+
+	resp := &ApiVersionsResponse{
+		Error: makeError(apiResp.ErrorCode, ""),
+	}
+	for _, apiKey := range apiResp.ApiKeys {
+		resp.ApiKeys = append(
+			resp.ApiKeys,
+			ApiVersionsResponseApiKey{
+				ApiKey:     int(apiKey.ApiKey),
+				ApiName:    protocol.ApiKey(apiKey.ApiKey).String(),
+				MinVersion: int(apiKey.MinVersion),
+				MaxVersion: int(apiKey.MaxVersion),
+			},
+		)
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/segmentio/kafka-go/balancer.go b/vendor/github.com/segmentio/kafka-go/balancer.go
new file mode 100644
index 000000000..ee3a25885
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/balancer.go
@@ -0,0 +1,353 @@
+package kafka
+
+import (
+	"hash"
+	"hash/crc32"
+	"hash/fnv"
+	"math/rand"
+	"sort"
+	"sync"
+)
+
+// The Balancer interface provides an abstraction of the message distribution
+// logic used by Writer instances to route messages to the partitions available
+// on a kafka cluster.
+//
+// Balancers must be safe to use concurrently from multiple goroutines.
+type Balancer interface {
+	// Balance receives a message and a set of available partitions and
+	// returns the partition number that the message should be routed to.
+	//
+	// An application should refrain from using a balancer to manage multiple
+	// sets of partitions (from different topics, for example); use one balancer
+	// instance for each partition set, so the balancer can detect when the
+	// partitions change and assume that the kafka topic has been rebalanced.
+	Balance(msg Message, partitions ...int) (partition int)
+}
+
+// BalancerFunc is an implementation of the Balancer interface that makes it
+// possible to use regular functions to distribute messages across partitions.
+type BalancerFunc func(Message, ...int) int
+
+// Balance calls f, satisfies the Balancer interface.
+func (f BalancerFunc) Balance(msg Message, partitions ...int) int {
+	return f(msg, partitions...)
+}
+
+// RoundRobin is a Balancer implementation that equally distributes messages
+// across all available partitions. It can take an optional chunk size to send
+// ChunkSize messages to the same partition before moving to the next partition.
+// This can be used to improve batch sizes.
+type RoundRobin struct {
+	ChunkSize int
+	// Use a 32-bit integer so RoundRobin values don't need to be aligned to
+	// apply increments.
+	counter uint32
+
+	mutex sync.Mutex
+}
+
+// Balance satisfies the Balancer interface.
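+//
+// A hedged usage sketch (broker address, topic, and chunk size are
+// illustrative):
+//
+//	w := &kafka.Writer{
+//		Addr:     kafka.TCP("localhost:9092"),
+//		Topic:    "topic-A",
+//		Balancer: &kafka.RoundRobin{ChunkSize: 16},
+//	}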
+func (rr *RoundRobin) Balance(msg Message, partitions ...int) int { + return rr.balance(partitions) +} + +func (rr *RoundRobin) balance(partitions []int) int { + rr.mutex.Lock() + defer rr.mutex.Unlock() + + if rr.ChunkSize < 1 { + rr.ChunkSize = 1 + } + + length := len(partitions) + counterNow := rr.counter + offset := int(counterNow / uint32(rr.ChunkSize)) + rr.counter++ + return partitions[offset%length] +} + +// LeastBytes is a Balancer implementation that routes messages to the partition +// that has received the least amount of data. +// +// Note that no coordination is done between multiple producers, having good +// balancing relies on the fact that each producer using a LeastBytes balancer +// should produce well balanced messages. +type LeastBytes struct { + mutex sync.Mutex + counters []leastBytesCounter +} + +type leastBytesCounter struct { + partition int + bytes uint64 +} + +// Balance satisfies the Balancer interface. +func (lb *LeastBytes) Balance(msg Message, partitions ...int) int { + lb.mutex.Lock() + defer lb.mutex.Unlock() + + // partitions change + if len(partitions) != len(lb.counters) { + lb.counters = lb.makeCounters(partitions...) + } + + minBytes := lb.counters[0].bytes + minIndex := 0 + + for i, c := range lb.counters[1:] { + if c.bytes < minBytes { + minIndex = i + 1 + minBytes = c.bytes + } + } + + c := &lb.counters[minIndex] + c.bytes += uint64(len(msg.Key)) + uint64(len(msg.Value)) + return c.partition +} + +func (lb *LeastBytes) makeCounters(partitions ...int) (counters []leastBytesCounter) { + counters = make([]leastBytesCounter, len(partitions)) + + for i, p := range partitions { + counters[i].partition = p + } + + sort.Slice(counters, func(i int, j int) bool { + return counters[i].partition < counters[j].partition + }) + return +} + +var ( + fnv1aPool = &sync.Pool{ + New: func() interface{} { + return fnv.New32a() + }, + } +) + +// Hash is a Balancer that uses the provided hash function to determine which +// partition to route messages to. This ensures that messages with the same key +// are routed to the same partition. +// +// The logic to calculate the partition is: +// +// hasher.Sum32() % len(partitions) => partition +// +// By default, Hash uses the FNV-1a algorithm. This is the same algorithm used +// by the Sarama Producer and ensures that messages produced by kafka-go will +// be delivered to the same topics that the Sarama producer would be delivered to. +type Hash struct { + rr RoundRobin + Hasher hash.Hash32 + + // lock protects Hasher while calculating the hash code. It is assumed that + // the Hasher field is read-only once the Balancer is created, so as a + // performance optimization, reads of the field are not protected. + lock sync.Mutex +} + +func (h *Hash) Balance(msg Message, partitions ...int) int { + if msg.Key == nil { + return h.rr.Balance(msg, partitions...) + } + + hasher := h.Hasher + if hasher != nil { + h.lock.Lock() + defer h.lock.Unlock() + } else { + hasher = fnv1aPool.Get().(hash.Hash32) + defer fnv1aPool.Put(hasher) + } + + hasher.Reset() + if _, err := hasher.Write(msg.Key); err != nil { + panic(err) + } + + // uses same algorithm that Sarama's hashPartitioner uses + // note the type conversions here. if the uint32 hash code is not cast to + // an int32, we do not get the same result as sarama. 
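+	// Go's % operator keeps the sign of the left operand, so the cast hash
+	// can produce a negative remainder; the sign flip below keeps the index
+	// in range.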
+ partition := int32(hasher.Sum32()) % int32(len(partitions)) + if partition < 0 { + partition = -partition + } + + return int(partition) +} + +// ReferenceHash is a Balancer that uses the provided hash function to determine which +// partition to route messages to. This ensures that messages with the same key +// are routed to the same partition. +// +// The logic to calculate the partition is: +// +// (int32(hasher.Sum32()) & 0x7fffffff) % len(partitions) => partition +// +// By default, ReferenceHash uses the FNV-1a algorithm. This is the same algorithm as +// the Sarama NewReferenceHashPartitioner and ensures that messages produced by kafka-go will +// be delivered to the same topics that the Sarama producer would be delivered to. +type ReferenceHash struct { + rr randomBalancer + Hasher hash.Hash32 + + // lock protects Hasher while calculating the hash code. It is assumed that + // the Hasher field is read-only once the Balancer is created, so as a + // performance optimization, reads of the field are not protected. + lock sync.Mutex +} + +func (h *ReferenceHash) Balance(msg Message, partitions ...int) int { + if msg.Key == nil { + return h.rr.Balance(msg, partitions...) + } + + hasher := h.Hasher + if hasher != nil { + h.lock.Lock() + defer h.lock.Unlock() + } else { + hasher = fnv1aPool.Get().(hash.Hash32) + defer fnv1aPool.Put(hasher) + } + + hasher.Reset() + if _, err := hasher.Write(msg.Key); err != nil { + panic(err) + } + + // uses the same algorithm as the Sarama's referenceHashPartitioner. + // note the type conversions here. if the uint32 hash code is not cast to + // an int32, we do not get the same result as sarama. + partition := (int32(hasher.Sum32()) & 0x7fffffff) % int32(len(partitions)) + return int(partition) +} + +type randomBalancer struct { + mock int // mocked return value, used for testing +} + +func (b randomBalancer) Balance(msg Message, partitions ...int) (partition int) { + if b.mock != 0 { + return b.mock + } + return partitions[rand.Int()%len(partitions)] +} + +// CRC32Balancer is a Balancer that uses the CRC32 hash function to determine +// which partition to route messages to. This ensures that messages with the +// same key are routed to the same partition. This balancer is compatible with +// the built-in hash partitioners in librdkafka and the language bindings that +// are built on top of it, including the +// github.com/confluentinc/confluent-kafka-go Go package. +// +// With the Consistent field false (default), this partitioner is equivalent to +// the "consistent_random" setting in librdkafka. When Consistent is true, this +// partitioner is equivalent to the "consistent" setting. The latter will hash +// empty or nil keys into the same partition. +// +// Unless you are absolutely certain that all your messages will have keys, it's +// best to leave the Consistent flag off. Otherwise, you run the risk of +// creating a very hot partition. +type CRC32Balancer struct { + Consistent bool + random randomBalancer +} + +func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) { + // NOTE: the crc32 balancers in librdkafka don't differentiate between nil + // and empty keys. both cases are treated as unset. + if len(msg.Key) == 0 && !b.Consistent { + return b.random.Balance(msg, partitions...) + } + + idx := crc32.ChecksumIEEE(msg.Key) % uint32(len(partitions)) + return partitions[idx] +} + +// Murmur2Balancer is a Balancer that uses the Murmur2 hash function to +// determine which partition to route messages to. 
This ensures that messages
+// with the same key are routed to the same partition. This balancer is
+// compatible with the partitioner used by the Java library and by librdkafka's
+// "murmur2" and "murmur2_random" partitioners.
+//
+// With the Consistent field false (default), this partitioner is equivalent to
+// the "murmur2_random" setting in librdkafka. When Consistent is true, this
+// partitioner is equivalent to the "murmur2" setting. The latter will hash
+// nil keys into the same partition. Empty, non-nil keys are always hashed to
+// the same partition regardless of configuration.
+//
+// Unless you are absolutely certain that all your messages will have keys, it's
+// best to leave the Consistent flag off. Otherwise, you run the risk of
+// creating a very hot partition.
+//
+// Note that the librdkafka documentation states that the "murmur2_random" is
+// functionally equivalent to the default Java partitioner. That's because the
+// Java partitioner will use a round robin balancer instead of random on nil
+// keys. We choose librdkafka's implementation because it arguably has a larger
+// install base.
+type Murmur2Balancer struct {
+	Consistent bool
+	random     randomBalancer
+}
+
+func (b Murmur2Balancer) Balance(msg Message, partitions ...int) (partition int) {
+	// NOTE: the murmur2 balancers in java and librdkafka treat a nil key as
+	// non-existent while treating an empty slice as a defined value.
+	if msg.Key == nil && !b.Consistent {
+		return b.random.Balance(msg, partitions...)
+	}
+
+	idx := (murmur2(msg.Key) & 0x7fffffff) % uint32(len(partitions))
+	return partitions[idx]
+}
+
+// Go port of the Java library's murmur2 function.
+// https://github.com/apache/kafka/blob/1.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353
+func murmur2(data []byte) uint32 {
+	length := len(data)
+	const (
+		seed uint32 = 0x9747b28c
+		// 'm' and 'r' are mixing constants generated offline.
+		// They're not really 'magic', they just happen to work well.
+		m = 0x5bd1e995
+		r = 24
+	)
+
+	// Initialize the hash to a random value
+	h := seed ^ uint32(length)
+	length4 := length / 4
+
+	for i := 0; i < length4; i++ {
+		i4 := i * 4
+		k := (uint32(data[i4+0]) & 0xff) + ((uint32(data[i4+1]) & 0xff) << 8) + ((uint32(data[i4+2]) & 0xff) << 16) + ((uint32(data[i4+3]) & 0xff) << 24)
+		k *= m
+		k ^= k >> r
+		k *= m
+		h *= m
+		h ^= k
+	}
+
+	// Handle the last few bytes of the input array
+	extra := length % 4
+	if extra >= 3 {
+		h ^= (uint32(data[(length & ^3)+2]) & 0xff) << 16
+	}
+	if extra >= 2 {
+		h ^= (uint32(data[(length & ^3)+1]) & 0xff) << 8
+	}
+	if extra >= 1 {
+		h ^= uint32(data[length & ^3]) & 0xff
+		h *= m
+	}
+
+	h ^= h >> 13
+	h *= m
+	h ^= h >> 15
+
+	return h
+}
diff --git a/vendor/github.com/segmentio/kafka-go/batch.go b/vendor/github.com/segmentio/kafka-go/batch.go
new file mode 100644
index 000000000..19dcef8cd
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/batch.go
@@ -0,0 +1,313 @@
+package kafka
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"sync"
+	"time"
+)
+
+// A Batch is an iterator over a sequence of messages fetched from a kafka
+// server.
+//
+// Batches are created by calling (*Conn).ReadBatch. They hold an internal lock
+// on the connection, which is released when the batch is closed. Failing to
+// call a batch's Close method will likely result in a deadlock when trying to
+// use the connection.
+//
+// Batches are safe to use concurrently from multiple goroutines.
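+//
+// A hedged usage sketch, mirroring the README's consume example (the buffer
+// and batch sizes are illustrative):
+//
+//	batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
+//	b := make([]byte, 10e3)            // 10KB max per message
+//	for {
+//		n, err := batch.Read(b)
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(string(b[:n]))
+//	}
+//	if err := batch.Close(); err != nil {
+//		log.Fatal("failed to close batch:", err)
+//	}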
+type Batch struct {
+	mutex         sync.Mutex
+	conn          *Conn
+	lock          *sync.Mutex
+	msgs          *messageSetReader
+	deadline      time.Time
+	throttle      time.Duration
+	topic         string
+	partition     int
+	offset        int64
+	highWaterMark int64
+	err           error
+	// The last offset in the batch.
+	//
+	// We use lastOffset to skip offsets that have been compacted away.
+	//
+	// We store lastOffset because we get lastOffset when we read a new message
+	// but only try to handle compaction when we receive an EOF. However, when
+	// we get an EOF we do not get the lastOffset. So there is a mismatch
+	// between when we receive it and need to use it.
+	lastOffset int64
+}
+
+// Throttle gives the throttling duration applied by the kafka server on the
+// connection.
+func (batch *Batch) Throttle() time.Duration {
+	return batch.throttle
+}
+
+// HighWaterMark returns the current highest watermark in a partition.
+func (batch *Batch) HighWaterMark() int64 {
+	return batch.highWaterMark
+}
+
+// Partition returns the batch partition.
+func (batch *Batch) Partition() int {
+	return batch.partition
+}
+
+// Offset returns the offset of the next message in the batch.
+func (batch *Batch) Offset() int64 {
+	batch.mutex.Lock()
+	offset := batch.offset
+	batch.mutex.Unlock()
+	return offset
+}
+
+// Close closes the batch, releasing the connection lock and returning an error
+// if reading the batch failed for any reason.
+func (batch *Batch) Close() error {
+	batch.mutex.Lock()
+	err := batch.close()
+	batch.mutex.Unlock()
+	return err
+}
+
+func (batch *Batch) close() (err error) {
+	conn := batch.conn
+	lock := batch.lock
+
+	batch.conn = nil
+	batch.lock = nil
+
+	if batch.msgs != nil {
+		batch.msgs.discard()
+	}
+
+	if batch.msgs != nil && batch.msgs.decompressed != nil {
+		releaseBuffer(batch.msgs.decompressed)
+		batch.msgs.decompressed = nil
+	}
+
+	if err = batch.err; errors.Is(batch.err, io.EOF) {
+		err = nil
+	}
+
+	if conn != nil {
+		conn.rdeadline.unsetConnReadDeadline()
+		conn.mutex.Lock()
+		conn.offset = batch.offset
+		conn.mutex.Unlock()
+
+		if err != nil {
+			var kafkaError Error
+			if !errors.As(err, &kafkaError) && !errors.Is(err, io.ErrShortBuffer) {
+				conn.Close()
+			}
+		}
+	}
+
+	if lock != nil {
+		lock.Unlock()
+	}
+
+	return
+}
+
+// Err returns a non-nil error if the batch is broken. This is the same error
+// that would be returned by Read, ReadMessage or Close (except in the case of
+// io.EOF which is never returned by Close).
+//
+// This method is useful when building retry mechanisms for (*Conn).ReadBatch:
+// the program can check whether the batch carried an error before attempting
+// to read the first message.
+//
+// Note that checking errors on a batch is optional, calling Read or ReadMessage
+// is always valid and can be used to either read a message or an error in cases
+// where that's convenient.
+func (batch *Batch) Err() error { return batch.err }
+
+// Read reads the value of the next message from the batch into b, returning the
+// number of bytes read, or an error if the next message couldn't be read.
+//
+// If an error is returned the batch cannot be used anymore and calling Read
+// again will keep returning that error. All errors except io.EOF (indicating
+// that the program consumed all messages from the batch) are also returned by
+// Close.
+//
+// The method fails with io.ErrShortBuffer if the buffer passed as argument is
+// too small to hold the message value.
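The io.ErrShortBuffer case documented above deserves a concrete sketch; broker and topic are again assumed:

package main

import (
	"context"
	"errors"
	"io"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "events", 0) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
	batch := conn.ReadBatch(1, 1e6)
	defer batch.Close()

	buf := make([]byte, 256)
	for {
		n, err := batch.Read(buf)
		if errors.Is(err, io.ErrShortBuffer) {
			// A message value was larger than buf. The batch is now broken
			// (every subsequent Read returns the same error), so a real
			// program would retry with a larger buffer on a fresh batch.
			log.Printf("value does not fit in %d bytes", len(buf))
			break
		}
		if err != nil {
			break // io.EOF: all messages consumed
		}
		log.Printf("read %d bytes", n)
	}
}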
+func (batch *Batch) Read(b []byte) (int, error) {
+	n := 0
+
+	batch.mutex.Lock()
+	offset := batch.offset
+
+	_, _, _, err := batch.readMessage(
+		func(r *bufio.Reader, size int, nbytes int) (int, error) {
+			if nbytes < 0 {
+				return size, nil
+			}
+			return discardN(r, size, nbytes)
+		},
+		func(r *bufio.Reader, size int, nbytes int) (int, error) {
+			if nbytes < 0 {
+				return size, nil
+			}
+			// make sure there are enough bytes for the message value. return
+			// errShortRead if the message is truncated.
+			if nbytes > size {
+				return size, errShortRead
+			}
+			n = nbytes // return value
+			if nbytes > cap(b) {
+				nbytes = cap(b)
+			}
+			if nbytes > len(b) {
+				b = b[:nbytes]
+			}
+			nbytes, err := io.ReadFull(r, b[:nbytes])
+			if err != nil {
+				return size - nbytes, err
+			}
+			return discardN(r, size-nbytes, n-nbytes)
+		},
+	)
+
+	if err == nil && n > len(b) {
+		n, err = len(b), io.ErrShortBuffer
+		batch.err = io.ErrShortBuffer
+		batch.offset = offset // rollback
+	}
+
+	batch.mutex.Unlock()
+	return n, err
+}
+
+// ReadMessage reads and returns the next message from the batch.
+//
+// Because this method allocates memory buffers for the message key and value
+// it is less memory-efficient than Read, but has the advantage of never
+// failing with io.ErrShortBuffer.
+func (batch *Batch) ReadMessage() (Message, error) {
+	msg := Message{}
+	batch.mutex.Lock()
+
+	var offset, timestamp int64
+	var headers []Header
+	var err error
+
+	offset, timestamp, headers, err = batch.readMessage(
+		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+			msg.Key, remain, err = readNewBytes(r, size, nbytes)
+			return
+		},
+		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+			msg.Value, remain, err = readNewBytes(r, size, nbytes)
+			return
+		},
+	)
+	// A batch may start before the requested offset so skip messages
+	// until the requested offset is reached.
+	for batch.conn != nil && offset < batch.conn.offset {
+		if err != nil {
+			break
+		}
+		offset, timestamp, headers, err = batch.readMessage(
+			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+				msg.Key, remain, err = readNewBytes(r, size, nbytes)
+				return
+			},
+			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+				msg.Value, remain, err = readNewBytes(r, size, nbytes)
+				return
+			},
+		)
+	}
+
+	batch.mutex.Unlock()
+	msg.Topic = batch.topic
+	msg.Partition = batch.partition
+	msg.Offset = offset
+	msg.HighWaterMark = batch.highWaterMark
+	msg.Time = makeTime(timestamp)
+	msg.Headers = headers
+
+	return msg, err
+}
+
+func (batch *Batch) readMessage(
+	key func(*bufio.Reader, int, int) (int, error),
+	val func(*bufio.Reader, int, int) (int, error),
+) (offset int64, timestamp int64, headers []Header, err error) {
+	if err = batch.err; err != nil {
+		return
+	}
+
+	var lastOffset int64
+	offset, lastOffset, timestamp, headers, err = batch.msgs.readMessage(batch.offset, key, val)
+	switch {
+	case err == nil:
+		batch.offset = offset + 1
+		batch.lastOffset = lastOffset
+	case errors.Is(err, errShortRead):
+		// As an "optimization" kafka truncates the returned response after
+		// producing MaxBytes, which could then cause the code to return
+		// errShortRead.
+		err = batch.msgs.discard()
+		switch {
+		case err != nil:
+			// Since io.EOF is used by the batch to indicate that there are
+			// no more messages to consume, it is crucial that any io.EOF errors
+			// on the underlying connection are repackaged. Otherwise, the
+			// caller can't tell the difference between a batch that was fully
+			// consumed or a batch whose connection is in an error state.
+			batch.err = dontExpectEOF(err)
+		case batch.msgs.remaining() == 0:
+			// Because we use the adjusted deadline we could end up returning
+			// before the actual deadline occurred. This is necessary: otherwise,
+			// timing out the connection for real could end up leaving it in an
+			// unpredictable state, which would require closing it.
+			// This design decision was made to maximize the chances of keeping
+			// the connection open, the trade-off being to lose precision on the
+			// read deadline management.
+			err = checkTimeoutErr(batch.deadline)
+			batch.err = err
+
+			// Checks the following:
+			// - `batch.err` for a "success" from the previous timeout check
+			// - `batch.msgs.lengthRemain` to ensure that this EOF is not due
+			//   to MaxBytes truncation
+			// - `batch.lastOffset` to ensure that the message format contains
+			//   `lastOffset`
+			if errors.Is(batch.err, io.EOF) && batch.msgs.lengthRemain == 0 && batch.lastOffset != -1 {
+				// Log compaction can create batches that end with compacted
+				// records so the normal strategy that increments the "next"
+				// offset as records are read doesn't work as the compacted
+				// records are "missing" and never get "read".
+				//
+				// In order to reliably reach the next non-compacted offset we
+				// jump past the saved lastOffset.
+				batch.offset = batch.lastOffset + 1
+			}
+		}
+	default:
+		// Since io.EOF is used by the batch to indicate that there are
+		// no more messages to consume, it is crucial that any io.EOF errors
+		// on the underlying connection are repackaged. Otherwise, the
+		// caller can't tell the difference between a batch that was fully
+		// consumed or a batch whose connection is in an error state.
+		batch.err = dontExpectEOF(err)
+	}
+
+	return
+}
+
+func checkTimeoutErr(deadline time.Time) (err error) {
+	if !deadline.IsZero() && time.Now().After(deadline) {
+		err = RequestTimedOut
+	} else {
+		err = io.EOF
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/buffer.go b/vendor/github.com/segmentio/kafka-go/buffer.go
new file mode 100644
index 000000000..5bf50c05f
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/buffer.go
@@ -0,0 +1,27 @@
+package kafka
+
+import (
+	"bytes"
+	"sync"
+)
+
+var bufferPool = sync.Pool{
+	New: func() interface{} { return newBuffer() },
+}
+
+func newBuffer() *bytes.Buffer {
+	b := new(bytes.Buffer)
+	b.Grow(65536)
+	return b
+}
+
+func acquireBuffer() *bytes.Buffer {
+	return bufferPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(b *bytes.Buffer) {
+	if b != nil {
+		b.Reset()
+		bufferPool.Put(b)
+	}
+}
diff --git a/vendor/github.com/segmentio/kafka-go/client.go b/vendor/github.com/segmentio/kafka-go/client.go
new file mode 100644
index 000000000..d965040e8
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/client.go
@@ -0,0 +1,146 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+const (
+	defaultCreateTopicsTimeout     = 2 * time.Second
+	defaultDeleteTopicsTimeout     = 2 * time.Second
+	defaultCreatePartitionsTimeout = 2 * time.Second
+	defaultProduceTimeout          = 500 * time.Millisecond
+	defaultMaxWait                 = 500 * time.Millisecond
+)
+
+// Client is a high-level API to interact with kafka brokers.
+//
+// All methods of the Client type accept a context as first argument, which may
+// be used to asynchronously cancel the requests.
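buffer.go above is the standard sync.Pool idiom for recycling pre-grown buffers; a standalone sketch of the same pattern, independent of kafka-go:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// Pool buffers that are pre-grown to a typical working size, and reset them
// on release so the backing arrays get reused across acquisitions.
var pool = sync.Pool{
	New: func() interface{} {
		b := new(bytes.Buffer)
		b.Grow(65536)
		return b
	},
}

func main() {
	b := pool.Get().(*bytes.Buffer)
	b.WriteString("hello")
	fmt.Println(b.Len()) // 5

	b.Reset() // reset before returning so the next Get sees an empty buffer
	pool.Put(b)
}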
+// +// Clients are safe to use concurrently from multiple goroutines, as long as +// their configuration is not changed after first use. +type Client struct { + // Address of the kafka cluster (or specific broker) that the client will be + // sending requests to. + // + // This field is optional, the address may be provided in each request + // instead. The request address takes precedence if both were specified. + Addr net.Addr + + // Time limit for requests sent by this client. + // + // If zero, no timeout is applied. + Timeout time.Duration + + // A transport used to communicate with the kafka brokers. + // + // If nil, DefaultTransport is used. + Transport RoundTripper +} + +// A ConsumerGroup and Topic as these are both strings we define a type for +// clarity when passing to the Client as a function argument +// +// N.B TopicAndGroup is currently experimental! Therefore, it is subject to +// change, including breaking changes between MINOR and PATCH releases. +// +// DEPRECATED: this type will be removed in version 1.0, programs should +// migrate to use kafka.(*Client).OffsetFetch instead. +type TopicAndGroup struct { + Topic string + GroupId string +} + +// ConsumerOffsets returns a map[int]int64 of partition to committed offset for +// a consumer group id and topic. +// +// DEPRECATED: this method will be removed in version 1.0, programs should +// migrate to use kafka.(*Client).OffsetFetch instead. +func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) { + metadata, err := c.Metadata(ctx, &MetadataRequest{ + Topics: []string{tg.Topic}, + }) + + if err != nil { + return nil, fmt.Errorf("failed to get topic metadata :%w", err) + } + + topic := metadata.Topics[0] + partitions := make([]int, len(topic.Partitions)) + + for i := range topic.Partitions { + partitions[i] = topic.Partitions[i].ID + } + + offsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{ + GroupID: tg.GroupId, + Topics: map[string][]int{ + tg.Topic: partitions, + }, + }) + + if err != nil { + return nil, fmt.Errorf("failed to get offsets: %w", err) + } + + topicOffsets := offsets.Topics[topic.Name] + partitionOffsets := make(map[int]int64, len(topicOffsets)) + + for _, off := range topicOffsets { + partitionOffsets[off.Partition] = off.CommittedOffset + } + + return partitionOffsets, nil +} + +func (c *Client) roundTrip(ctx context.Context, addr net.Addr, msg protocol.Message) (protocol.Message, error) { + if c.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, c.Timeout) + defer cancel() + } + + if addr == nil { + if addr = c.Addr; addr == nil { + return nil, errors.New("no address was given for the kafka cluster in the request or on the client") + } + } + + return c.transport().RoundTrip(ctx, addr, msg) +} + +func (c *Client) transport() RoundTripper { + if c.Transport != nil { + return c.Transport + } + return DefaultTransport +} + +func (c *Client) timeout(ctx context.Context, defaultTimeout time.Duration) time.Duration { + timeout := c.Timeout + + if deadline, ok := ctx.Deadline(); ok { + if remain := time.Until(deadline); remain < timeout { + timeout = remain + } + } + + if timeout > 0 { + // Half the timeout because it is communicated to kafka in multiple + // requests (e.g. Fetch, Produce, etc...), this adds buffer to account + // for network latency when waiting for the response from kafka. 
+ return timeout / 2 + } + + return defaultTimeout +} + +func (c *Client) timeoutMs(ctx context.Context, defaultTimeout time.Duration) int32 { + return milliseconds(c.timeout(ctx, defaultTimeout)) +} diff --git a/vendor/github.com/segmentio/kafka-go/commit.go b/vendor/github.com/segmentio/kafka-go/commit.go new file mode 100644 index 000000000..e7740d58a --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/commit.go @@ -0,0 +1,39 @@ +package kafka + +// A commit represents the instruction of publishing an update of the last +// offset read by a program for a topic and partition. +type commit struct { + topic string + partition int + offset int64 +} + +// makeCommit builds a commit value from a message, the resulting commit takes +// its topic, partition, and offset from the message. +func makeCommit(msg Message) commit { + return commit{ + topic: msg.Topic, + partition: msg.Partition, + offset: msg.Offset + 1, + } +} + +// makeCommits generates a slice of commits from a list of messages, it extracts +// the topic, partition, and offset of each message and builds the corresponding +// commit slice. +func makeCommits(msgs ...Message) []commit { + commits := make([]commit, len(msgs)) + + for i, m := range msgs { + commits[i] = makeCommit(m) + } + + return commits +} + +// commitRequest is the data type exchanged between the CommitMessages method +// and internals of the reader's implementation. +type commitRequest struct { + commits []commit + errch chan<- error +} diff --git a/vendor/github.com/segmentio/kafka-go/compress/compress.go b/vendor/github.com/segmentio/kafka-go/compress/compress.go new file mode 100644 index 000000000..054bf03d0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/compress.go @@ -0,0 +1,124 @@ +package compress + +import ( + "encoding" + "fmt" + "io" + "strconv" + "strings" + + "github.com/segmentio/kafka-go/compress/gzip" + "github.com/segmentio/kafka-go/compress/lz4" + "github.com/segmentio/kafka-go/compress/snappy" + "github.com/segmentio/kafka-go/compress/zstd" +) + +// Compression represents the compression applied to a record set. +type Compression int8 + +const ( + None Compression = 0 + Gzip Compression = 1 + Snappy Compression = 2 + Lz4 Compression = 3 + Zstd Compression = 4 +) + +func (c Compression) Codec() Codec { + if i := int(c); i >= 0 && i < len(Codecs) { + return Codecs[i] + } + return nil +} + +func (c Compression) String() string { + if codec := c.Codec(); codec != nil { + return codec.Name() + } + return "uncompressed" +} + +func (c Compression) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +} + +func (c *Compression) UnmarshalText(b []byte) error { + switch string(b) { + case "none", "uncompressed": + *c = None + return nil + } + + for _, codec := range Codecs[None+1:] { + if codec.Name() == string(b) { + *c = Compression(codec.Code()) + return nil + } + } + + i, err := strconv.ParseInt(string(b), 10, 64) + if err == nil && i >= 0 && i < int64(len(Codecs)) { + *c = Compression(i) + return nil + } + + s := &strings.Builder{} + s.WriteString("none, uncompressed") + + for i, codec := range Codecs[None+1:] { + if i < (len(Codecs) - 1) { + s.WriteString(", ") + } else { + s.WriteString(", or ") + } + s.WriteString(codec.Name()) + } + + return fmt.Errorf("compression format must be one of %s, not %q", s, b) +} + +var ( + _ encoding.TextMarshaler = Compression(0) + _ encoding.TextUnmarshaler = (*Compression)(nil) +) + +// Codec represents a compression codec to encode and decode the messages. 
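The offset+1 convention in makeCommit above is easy to miss: Kafka commits record the next offset to read, not the last offset read. A minimal sketch with the high-level Reader (broker, group, and topic names assumed):

package main

import (
	"context"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers: []string{"localhost:9092"}, // assumed broker
		GroupID: "example-group",            // assumed consumer group
		Topic:   "events",                   // assumed topic
	})
	defer r.Close()

	ctx := context.Background()
	msg, err := r.FetchMessage(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Internally this becomes commit{topic, partition, offset: msg.Offset + 1},
	// so the group resumes at the message after this one.
	if err := r.CommitMessages(ctx, msg); err != nil {
		log.Fatal(err)
	}
}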
+// See : https://cwiki.apache.org/confluence/display/KAFKA/Compression +// +// A Codec must be safe for concurrent access by multiple go routines. +type Codec interface { + // Code returns the compression codec code + Code() int8 + + // Human-readable name for the codec. + Name() string + + // Constructs a new reader which decompresses data from r. + NewReader(r io.Reader) io.ReadCloser + + // Constructs a new writer which writes compressed data to w. + NewWriter(w io.Writer) io.WriteCloser +} + +var ( + // The global gzip codec installed on the Codecs table. + GzipCodec gzip.Codec + + // The global snappy codec installed on the Codecs table. + SnappyCodec snappy.Codec + + // The global lz4 codec installed on the Codecs table. + Lz4Codec lz4.Codec + + // The global zstd codec installed on the Codecs table. + ZstdCodec zstd.Codec + + // The global table of compression codecs supported by the kafka protocol. + Codecs = [...]Codec{ + None: nil, + Gzip: &GzipCodec, + Snappy: &SnappyCodec, + Lz4: &Lz4Codec, + Zstd: &ZstdCodec, + } +) diff --git a/vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go b/vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go new file mode 100644 index 000000000..ad5009c39 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go @@ -0,0 +1,123 @@ +package gzip + +import ( + "io" + "sync" + + "github.com/klauspost/compress/gzip" +) + +var ( + readerPool sync.Pool +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with gzip. +type Codec struct { + // The compression level to configure on writers created by this codec. + // Acceptable values are defined in the standard gzip package. + // + // Default to gzip.DefaultCompressionLevel. + Level int + + writerPool sync.Pool +} + +// Code implements the compress.Codec interface. +func (c *Codec) Code() int8 { return 1 } + +// Name implements the compress.Codec interface. +func (c *Codec) Name() string { return "gzip" } + +// NewReader implements the compress.Codec interface. +func (c *Codec) NewReader(r io.Reader) io.ReadCloser { + var err error + z, _ := readerPool.Get().(*gzip.Reader) + if z != nil { + err = z.Reset(r) + } else { + z, err = gzip.NewReader(r) + } + if err != nil { + if z != nil { + readerPool.Put(z) + } + return &errorReader{err: err} + } + return &reader{Reader: z} +} + +// NewWriter implements the compress.Codec interface. +func (c *Codec) NewWriter(w io.Writer) io.WriteCloser { + x := c.writerPool.Get() + z, _ := x.(*gzip.Writer) + if z == nil { + x, err := gzip.NewWriterLevel(w, c.level()) + if err != nil { + return &errorWriter{err: err} + } + z = x + } else { + z.Reset(w) + } + return &writer{codec: c, Writer: z} +} + +func (c *Codec) level() int { + if c.Level != 0 { + return c.Level + } + return gzip.DefaultCompression +} + +type reader struct{ *gzip.Reader } + +func (r *reader) Close() (err error) { + if z := r.Reader; z != nil { + r.Reader = nil + err = z.Close() + // Pass it an empty reader, which is a zero-size value implementing the + // flate.Reader interface to avoid the construction of a bufio.Reader in + // the call to Reset. + // + // Note: we could also not reset the reader at all, but that would cause + // the underlying reader to be retained until the gzip.Reader is freed, + // which may not be desirable. 
+ z.Reset(emptyReader{}) + readerPool.Put(z) + } + return +} + +type writer struct { + codec *Codec + *gzip.Writer +} + +func (w *writer) Close() (err error) { + if z := w.Writer; z != nil { + w.Writer = nil + err = z.Close() + z.Reset(nil) + w.codec.writerPool.Put(z) + } + return +} + +type emptyReader struct{} + +func (emptyReader) ReadByte() (byte, error) { return 0, io.EOF } + +func (emptyReader) Read([]byte) (int, error) { return 0, io.EOF } + +type errorReader struct{ err error } + +func (r *errorReader) Close() error { return r.err } + +func (r *errorReader) Read([]byte) (int, error) { return 0, r.err } + +type errorWriter struct{ err error } + +func (w *errorWriter) Close() error { return w.err } + +func (w *errorWriter) Write([]byte) (int, error) { return 0, w.err } diff --git a/vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go b/vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go new file mode 100644 index 000000000..1aa8289b8 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go @@ -0,0 +1,68 @@ +package lz4 + +import ( + "io" + "sync" + + "github.com/pierrec/lz4/v4" +) + +var ( + readerPool sync.Pool + writerPool sync.Pool +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with lz4. +type Codec struct{} + +// Code implements the compress.Codec interface. +func (c *Codec) Code() int8 { return 3 } + +// Name implements the compress.Codec interface. +func (c *Codec) Name() string { return "lz4" } + +// NewReader implements the compress.Codec interface. +func (c *Codec) NewReader(r io.Reader) io.ReadCloser { + z, _ := readerPool.Get().(*lz4.Reader) + if z != nil { + z.Reset(r) + } else { + z = lz4.NewReader(r) + } + return &reader{Reader: z} +} + +// NewWriter implements the compress.Codec interface. +func (c *Codec) NewWriter(w io.Writer) io.WriteCloser { + z, _ := writerPool.Get().(*lz4.Writer) + if z != nil { + z.Reset(w) + } else { + z = lz4.NewWriter(w) + } + return &writer{Writer: z} +} + +type reader struct{ *lz4.Reader } + +func (r *reader) Close() (err error) { + if z := r.Reader; z != nil { + r.Reader = nil + z.Reset(nil) + readerPool.Put(z) + } + return +} + +type writer struct{ *lz4.Writer } + +func (w *writer) Close() (err error) { + if z := w.Writer; z != nil { + w.Writer = nil + err = z.Close() + z.Reset(nil) + writerPool.Put(z) + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go b/vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go new file mode 100644 index 000000000..5bc6194f1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go @@ -0,0 +1,110 @@ +package snappy + +import ( + "io" + "sync" + + "github.com/klauspost/compress/s2" + "github.com/klauspost/compress/snappy" +) + +// Framing is an enumeration type used to enable or disable xerial framing of +// snappy messages. +type Framing int + +const ( + Framed Framing = iota + Unframed +) + +// Compression level. +type Compression int + +const ( + DefaultCompression Compression = iota + FasterCompression + BetterCompression + BestCompression +) + +var ( + readerPool sync.Pool + writerPool sync.Pool +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with snappy. +type Codec struct { + // An optional framing to apply to snappy compression. + // + // Default to Framed. + Framing Framing + + // Compression level. 
+	Compression Compression
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 2 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "snappy" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	x, _ := readerPool.Get().(*xerialReader)
+	if x != nil {
+		x.Reset(r)
+	} else {
+		x = &xerialReader{
+			reader: r,
+			decode: snappy.Decode,
+		}
+	}
+	return &reader{xerialReader: x}
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	x, _ := writerPool.Get().(*xerialWriter)
+	if x != nil {
+		x.Reset(w)
+	} else {
+		x = &xerialWriter{writer: w}
+	}
+	x.framed = c.Framing == Framed
+	switch c.Compression {
+	case FasterCompression:
+		x.encode = s2.EncodeSnappy
+	case BetterCompression:
+		x.encode = s2.EncodeSnappyBetter
+	case BestCompression:
+		x.encode = s2.EncodeSnappyBest
+	default:
+		x.encode = snappy.Encode // aka. s2.EncodeSnappyBetter
+	}
+	return &writer{xerialWriter: x}
+}
+
+type reader struct{ *xerialReader }
+
+func (r *reader) Close() (err error) {
+	if x := r.xerialReader; x != nil {
+		r.xerialReader = nil
+		x.Reset(nil)
+		readerPool.Put(x)
+	}
+	return
+}
+
+type writer struct{ *xerialWriter }
+
+func (w *writer) Close() (err error) {
+	if x := w.xerialWriter; x != nil {
+		w.xerialWriter = nil
+		err = x.Flush()
+		x.Reset(nil)
+		writerPool.Put(x)
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go b/vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go
new file mode 100644
index 000000000..e2725af9c
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go
@@ -0,0 +1,330 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/klauspost/compress/snappy"
+)
+
+const defaultBufferSize = 32 * 1024
+
+// An implementation of io.Reader which consumes a stream of xerial-framed
+// snappy-encoded data. The framing is optional: if no framing is detected,
+// the reader will simply forward the bytes from its underlying stream.
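All implementations in this tree share the compress.Codec reader/writer contract, so a round-trip looks the same for every algorithm. A minimal sketch using the gzip codec from earlier in this diff (the payload is made up):

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/segmentio/kafka-go/compress/gzip"
)

func main() {
	codec := new(gzip.Codec) // any compress.Codec behaves the same here

	var buf bytes.Buffer
	w := codec.NewWriter(&buf)
	if _, err := w.Write([]byte("hello, kafka")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // flushes and returns the writer to its pool
		log.Fatal(err)
	}

	r := codec.NewReader(&buf)
	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	r.Close() // returns the pooled reader
	fmt.Printf("%s\n", out) // hello, kafka
}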
+type xerialReader struct { + reader io.Reader + header [16]byte + input []byte + output []byte + offset int64 + nbytes int64 + decode func([]byte, []byte) ([]byte, error) +} + +func (x *xerialReader) Reset(r io.Reader) { + x.reader = r + x.input = x.input[:0] + x.output = x.output[:0] + x.header = [16]byte{} + x.offset = 0 + x.nbytes = 0 +} + +func (x *xerialReader) Read(b []byte) (int, error) { + for { + if x.offset < int64(len(x.output)) { + n := copy(b, x.output[x.offset:]) + x.offset += int64(n) + return n, nil + } + + n, err := x.readChunk(b) + if err != nil { + return 0, err + } + if n > 0 { + return n, nil + } + } +} + +func (x *xerialReader) WriteTo(w io.Writer) (int64, error) { + wn := int64(0) + + for { + for x.offset < int64(len(x.output)) { + n, err := w.Write(x.output[x.offset:]) + wn += int64(n) + x.offset += int64(n) + if err != nil { + return wn, err + } + } + + if _, err := x.readChunk(nil); err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return wn, err + } + } +} + +func (x *xerialReader) readChunk(dst []byte) (int, error) { + x.output = x.output[:0] + x.offset = 0 + prefix := 0 + + if x.nbytes == 0 { + n, err := x.readFull(x.header[:]) + if err != nil && n == 0 { + return 0, err + } + prefix = n + } + + if isXerialHeader(x.header[:]) { + if cap(x.input) < 4 { + x.input = make([]byte, 4, defaultBufferSize) + } else { + x.input = x.input[:4] + } + + _, err := x.readFull(x.input) + if err != nil { + return 0, err + } + + frame := int(binary.BigEndian.Uint32(x.input)) + if cap(x.input) < frame { + x.input = make([]byte, frame, align(frame, defaultBufferSize)) + } else { + x.input = x.input[:frame] + } + + if _, err := x.readFull(x.input); err != nil { + return 0, err + } + } else { + if cap(x.input) == 0 { + x.input = make([]byte, 0, defaultBufferSize) + } else { + x.input = x.input[:0] + } + + if prefix > 0 { + x.input = append(x.input, x.header[:prefix]...) + } + + for { + if len(x.input) == cap(x.input) { + b := make([]byte, len(x.input), 2*cap(x.input)) + copy(b, x.input) + x.input = b + } + + n, err := x.read(x.input[len(x.input):cap(x.input)]) + x.input = x.input[:len(x.input)+n] + if err != nil { + if errors.Is(err, io.EOF) && len(x.input) > 0 { + break + } + return 0, err + } + } + } + + var n int + var err error + + if x.decode == nil { + x.output, x.input, err = x.input, x.output, nil + } else if n, err = snappy.DecodedLen(x.input); n <= len(dst) && err == nil { + // If the output buffer is large enough to hold the decode value, + // write it there directly instead of using the intermediary output + // buffer. + _, err = x.decode(dst, x.input) + } else { + var b []byte + n = 0 + b, err = x.decode(x.output[:cap(x.output)], x.input) + if err == nil { + x.output = b + } + } + + return n, err +} + +func (x *xerialReader) read(b []byte) (int, error) { + n, err := x.reader.Read(b) + x.nbytes += int64(n) + return n, err +} + +func (x *xerialReader) readFull(b []byte) (int, error) { + n, err := io.ReadFull(x.reader, b) + x.nbytes += int64(n) + return n, err +} + +// An implementation of a xerial-framed snappy-encoded output stream. +// Each Write made to the writer is framed with a xerial header. 
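The framing this reader detects is a 16-byte header (8 magic bytes plus 8 version bytes) followed by big-endian length-prefixed blocks; the exact values appear as xerialHeader and xerialVersionInfo further down this file. A hedged sketch that frames raw blocks by hand:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var (
	xerialMagic   = []byte{130, 83, 78, 65, 80, 80, 89, 0} // "\x82SNAPPY\x00"
	xerialVersion = []byte{0, 0, 0, 1, 0, 0, 0, 1}         // version, min compatible version
)

// frame prepends the 16-byte xerial header, then emits each block as a
// big-endian uint32 length followed by the (already compressed) payload.
func frame(blocks ...[]byte) []byte {
	var buf bytes.Buffer
	buf.Write(xerialMagic)
	buf.Write(xerialVersion)
	for _, b := range blocks {
		var size [4]byte
		binary.BigEndian.PutUint32(size[:], uint32(len(b)))
		buf.Write(size[:])
		buf.Write(b)
	}
	return buf.Bytes()
}

func main() {
	out := frame([]byte("block-1"), []byte("block-2"))
	fmt.Println(len(out), bytes.HasPrefix(out, xerialMagic)) // 38 true
}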
+type xerialWriter struct { + writer io.Writer + header [16]byte + input []byte + output []byte + nbytes int64 + framed bool + encode func([]byte, []byte) []byte +} + +func (x *xerialWriter) Reset(w io.Writer) { + x.writer = w + x.input = x.input[:0] + x.output = x.output[:0] + x.nbytes = 0 +} + +func (x *xerialWriter) ReadFrom(r io.Reader) (int64, error) { + wn := int64(0) + + if cap(x.input) == 0 { + x.input = make([]byte, 0, defaultBufferSize) + } + + for { + if x.full() { + x.grow() + } + + n, err := r.Read(x.input[len(x.input):cap(x.input)]) + wn += int64(n) + x.input = x.input[:len(x.input)+n] + + if x.fullEnough() { + if err := x.Flush(); err != nil { + return wn, err + } + } + + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return wn, err + } + } +} + +func (x *xerialWriter) Write(b []byte) (int, error) { + wn := 0 + + if cap(x.input) == 0 { + x.input = make([]byte, 0, defaultBufferSize) + } + + for len(b) > 0 { + if x.full() { + x.grow() + } + + n := copy(x.input[len(x.input):cap(x.input)], b) + b = b[n:] + wn += n + x.input = x.input[:len(x.input)+n] + + if x.fullEnough() { + if err := x.Flush(); err != nil { + return wn, err + } + } + } + + return wn, nil +} + +func (x *xerialWriter) Flush() error { + if len(x.input) == 0 { + return nil + } + + var b []byte + if x.encode == nil { + b = x.input + } else { + x.output = x.encode(x.output[:cap(x.output)], x.input) + b = x.output + } + + x.input = x.input[:0] + x.output = x.output[:0] + + if x.framed && x.nbytes == 0 { + writeXerialHeader(x.header[:]) + _, err := x.write(x.header[:]) + if err != nil { + return err + } + } + + if x.framed { + writeXerialFrame(x.header[:4], len(b)) + _, err := x.write(x.header[:4]) + if err != nil { + return err + } + } + + _, err := x.write(b) + return err +} + +func (x *xerialWriter) write(b []byte) (int, error) { + n, err := x.writer.Write(b) + x.nbytes += int64(n) + return n, err +} + +func (x *xerialWriter) full() bool { + return len(x.input) == cap(x.input) +} + +func (x *xerialWriter) fullEnough() bool { + return x.framed && (cap(x.input)-len(x.input)) < 1024 +} + +func (x *xerialWriter) grow() { + tmp := make([]byte, len(x.input), 2*cap(x.input)) + copy(tmp, x.input) + x.input = tmp +} + +func align(n, a int) int { + if (n % a) == 0 { + return n + } + return ((n / a) + 1) * a +} + +var ( + xerialHeader = [...]byte{130, 83, 78, 65, 80, 80, 89, 0} + xerialVersionInfo = [...]byte{0, 0, 0, 1, 0, 0, 0, 1} +) + +func isXerialHeader(src []byte) bool { + return len(src) >= 16 && bytes.Equal(src[:8], xerialHeader[:]) +} + +func writeXerialHeader(b []byte) { + copy(b[:8], xerialHeader[:]) + copy(b[8:], xerialVersionInfo[:]) +} + +func writeXerialFrame(b []byte, n int) { + binary.BigEndian.PutUint32(b, uint32(n)) +} diff --git a/vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go b/vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go new file mode 100644 index 000000000..1cc5e8490 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go @@ -0,0 +1,168 @@ +// Package zstd implements Zstandard compression. +package zstd + +import ( + "io" + "sync" + + "github.com/klauspost/compress/zstd" +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with zstd. +type Codec struct { + // The compression level configured on writers created by the codec. + // + // Default to 3. + Level int + + encoderPool sync.Pool // *encoder +} + +// Code implements the compress.Codec interface. 
+func (c *Codec) Code() int8 { return 4 } + +// Name implements the compress.Codec interface. +func (c *Codec) Name() string { return "zstd" } + +// NewReader implements the compress.Codec interface. +func (c *Codec) NewReader(r io.Reader) io.ReadCloser { + p := new(reader) + if p.dec, _ = decoderPool.Get().(*zstd.Decoder); p.dec != nil { + p.dec.Reset(r) + } else { + z, err := zstd.NewReader(r, + zstd.WithDecoderConcurrency(1), + ) + if err != nil { + p.err = err + } else { + p.dec = z + } + } + return p +} + +func (c *Codec) level() int { + if c.Level != 0 { + return c.Level + } + return 3 +} + +func (c *Codec) zstdLevel() zstd.EncoderLevel { + return zstd.EncoderLevelFromZstd(c.level()) +} + +var decoderPool sync.Pool // *zstd.Decoder + +type reader struct { + dec *zstd.Decoder + err error +} + +// Close implements the io.Closer interface. +func (r *reader) Close() error { + if r.dec != nil { + r.dec.Reset(devNull{}) // don't retain the underlying reader + decoderPool.Put(r.dec) + r.dec = nil + r.err = io.ErrClosedPipe + } + return nil +} + +// Read implements the io.Reader interface. +func (r *reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + if r.dec == nil { + return 0, io.EOF + } + return r.dec.Read(p) +} + +// WriteTo implements the io.WriterTo interface. +func (r *reader) WriteTo(w io.Writer) (int64, error) { + if r.err != nil { + return 0, r.err + } + if r.dec == nil { + return 0, io.ErrClosedPipe + } + return r.dec.WriteTo(w) +} + +// NewWriter implements the compress.Codec interface. +func (c *Codec) NewWriter(w io.Writer) io.WriteCloser { + p := new(writer) + if enc, _ := c.encoderPool.Get().(*zstd.Encoder); enc == nil { + z, err := zstd.NewWriter(w, + zstd.WithEncoderLevel(c.zstdLevel()), + zstd.WithEncoderConcurrency(1), + zstd.WithZeroFrames(true), + ) + if err != nil { + p.err = err + } else { + p.enc = z + } + } else { + p.enc = enc + p.enc.Reset(w) + } + p.c = c + return p +} + +type writer struct { + c *Codec + enc *zstd.Encoder + err error +} + +// Close implements the io.Closer interface. +func (w *writer) Close() error { + if w.enc != nil { + // Close needs to be called to write the end of stream marker and flush + // the buffers. The zstd package documents that the encoder is re-usable + // after being closed. + err := w.enc.Close() + if err != nil { + w.err = err + } + w.enc.Reset(devNull{}) // don't retain the underlying writer + w.c.encoderPool.Put(w.enc) + w.enc = nil + return err + } + return w.err +} + +// WriteTo implements the io.WriterTo interface. +func (w *writer) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + if w.enc == nil { + return 0, io.ErrClosedPipe + } + return w.enc.Write(p) +} + +// ReadFrom implements the io.ReaderFrom interface. 
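Since Level feeds zstd.EncoderLevelFromZstd (above), standard zstd levels can be passed straight through. A minimal round-trip sketch with a non-default level; the payload is made up:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/segmentio/kafka-go/compress/zstd"
)

func main() {
	codec := &zstd.Codec{Level: 9} // zero means the default level of 3

	var buf bytes.Buffer
	w := codec.NewWriter(&buf)
	if _, err := w.Write(bytes.Repeat([]byte("kafka "), 1000)); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil { // writes the end-of-stream marker, pools the encoder
		log.Fatal(err)
	}
	fmt.Println("compressed:", buf.Len(), "bytes")

	r := codec.NewReader(&buf)
	defer r.Close()
	n, err := io.Copy(io.Discard, r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decompressed:", n, "bytes") // 6000
}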
+func (w *writer) ReadFrom(r io.Reader) (int64, error) { + if w.err != nil { + return 0, w.err + } + if w.enc == nil { + return 0, io.ErrClosedPipe + } + return w.enc.ReadFrom(r) +} + +type devNull struct{} + +func (devNull) Read([]byte) (int, error) { return 0, io.EOF } +func (devNull) Write([]byte) (int, error) { return 0, nil } diff --git a/vendor/github.com/segmentio/kafka-go/compression.go b/vendor/github.com/segmentio/kafka-go/compression.go new file mode 100644 index 000000000..411fe87a1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compression.go @@ -0,0 +1,31 @@ +package kafka + +import ( + "errors" + + "github.com/segmentio/kafka-go/compress" +) + +type Compression = compress.Compression + +const ( + Gzip Compression = compress.Gzip + Snappy Compression = compress.Snappy + Lz4 Compression = compress.Lz4 + Zstd Compression = compress.Zstd +) + +type CompressionCodec = compress.Codec + +var ( + errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported") +) + +// resolveCodec looks up a codec by Code(). +func resolveCodec(code int8) (CompressionCodec, error) { + codec := compress.Compression(code).Codec() + if codec == nil { + return nil, errUnknownCodec + } + return codec, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/conn.go b/vendor/github.com/segmentio/kafka-go/conn.go new file mode 100644 index 000000000..2b51afbd5 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/conn.go @@ -0,0 +1,1645 @@ +package kafka + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "net" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" +) + +var ( + errInvalidWriteTopic = errors.New("writes must NOT set Topic on kafka.Message") + errInvalidWritePartition = errors.New("writes must NOT set Partition on kafka.Message") +) + +// Conn represents a connection to a kafka broker. +// +// Instances of Conn are safe to use concurrently from multiple goroutines. +type Conn struct { + // base network connection + conn net.Conn + + // number of inflight requests on the connection. + inflight int32 + + // offset management (synchronized on the mutex field) + mutex sync.Mutex + offset int64 + + // read buffer (synchronized on rlock) + rlock sync.Mutex + rbuf bufio.Reader + + // write buffer (synchronized on wlock) + wlock sync.Mutex + wbuf bufio.Writer + wb writeBuffer + + // deadline management + wdeadline connDeadline + rdeadline connDeadline + + // immutable values of the connection object + clientID string + topic string + partition int32 + fetchMaxBytes int32 + fetchMinSize int32 + broker int32 + rack string + + // correlation ID generator (synchronized on wlock) + correlationID int32 + + // number of replica acks required when publishing to a partition + requiredAcks int32 + + // lazily loaded API versions used by this connection + apiVersions atomic.Value // apiVersionMap + + transactionalID *string +} + +type apiVersionMap map[apiKey]ApiVersion + +func (v apiVersionMap) negotiate(key apiKey, sortedSupportedVersions ...apiVersion) apiVersion { + x := v[key] + + for i := len(sortedSupportedVersions) - 1; i >= 0; i-- { + s := sortedSupportedVersions[i] + + if apiVersion(x.MaxVersion) >= s { + return s + } + } + + return -1 +} + +// ConnConfig is a configuration object used to create new instances of Conn. +type ConnConfig struct { + ClientID string + Topic string + Partition int + Broker int + Rack string + + // The transactional id to use for transactional delivery. 
Idempotent
+	// delivery should be enabled if the transactional id is configured.
+	// For more details, see the transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
+	// Empty string means that this connection can't be transactional.
+	TransactionalID string
+}
+
+// ReadBatchConfig is a configuration object used for reading batches of messages.
+type ReadBatchConfig struct {
+	// MinBytes indicates to the broker the minimum batch size that the consumer
+	// will accept. Setting a high minimum when consuming from a low-volume topic
+	// may result in delayed delivery when the broker does not have enough data to
+	// satisfy the defined minimum.
+	MinBytes int
+
+	// MaxBytes indicates to the broker the maximum batch size that the consumer
+	// will accept. The broker will truncate a message to satisfy this maximum, so
+	// choose a value that is high enough for your largest message size.
+	MaxBytes int
+
+	// IsolationLevel controls the visibility of transactional records.
+	// ReadUncommitted makes all records visible. With ReadCommitted only
+	// non-transactional and committed records are visible.
+	IsolationLevel IsolationLevel
+
+	// MaxWait is the amount of time the broker is allowed to wait while trying
+	// to hit the min/max byte targets. This setting is independent of any
+	// network-level timeouts or deadlines.
+	//
+	// For backward compatibility, when this field is left zero, kafka-go will
+	// infer the max wait from the connection's read deadline.
+	MaxWait time.Duration
+}
+
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = 0
+	ReadCommitted   IsolationLevel = 1
+)
+
+var (
+	// DefaultClientID is the default value used as ClientID of kafka
+	// connections.
+	DefaultClientID string
+)
+
+func init() {
+	progname := filepath.Base(os.Args[0])
+	hostname, _ := os.Hostname()
+	DefaultClientID = fmt.Sprintf("%s@%s (github.com/segmentio/kafka-go)", progname, hostname)
+}
+
+// NewConn returns a new kafka connection for the given topic and partition.
+func NewConn(conn net.Conn, topic string, partition int) *Conn {
+	return NewConnWith(conn, ConnConfig{
+		Topic:     topic,
+		Partition: partition,
+	})
+}
+
+func emptyToNullable(transactionalID string) (result *string) {
+	if transactionalID != "" {
+		result = &transactionalID
+	}
+	return result
+}
+
+// NewConnWith returns a new kafka connection configured with config.
+// The offset is initialized to FirstOffset.
+func NewConnWith(conn net.Conn, config ConnConfig) *Conn {
+	if len(config.ClientID) == 0 {
+		config.ClientID = DefaultClientID
+	}
+
+	if config.Partition < 0 || config.Partition > math.MaxInt32 {
+		panic(fmt.Sprintf("invalid partition number: %d", config.Partition))
+	}
+
+	c := &Conn{
+		conn:            conn,
+		rbuf:            *bufio.NewReader(conn),
+		wbuf:            *bufio.NewWriter(conn),
+		clientID:        config.ClientID,
+		topic:           config.Topic,
+		partition:       int32(config.Partition),
+		broker:          int32(config.Broker),
+		rack:            config.Rack,
+		offset:          FirstOffset,
+		requiredAcks:    -1,
+		transactionalID: emptyToNullable(config.TransactionalID),
+	}
+
+	c.wb.w = &c.wbuf
+
+	// The fetch request needs to ask for a MaxBytes value that is at least
+	// enough to load the control data of the response. To avoid having to
+	// recompute it on every read, it is cached here in the Conn value.
+ c.fetchMinSize = (fetchResponseV2{ + Topics: []fetchResponseTopicV2{{ + TopicName: config.Topic, + Partitions: []fetchResponsePartitionV2{{ + Partition: int32(config.Partition), + MessageSet: messageSet{{}}, + }}, + }}, + }).size() + c.fetchMaxBytes = math.MaxInt32 - c.fetchMinSize + return c +} + +func (c *Conn) negotiateVersion(key apiKey, sortedSupportedVersions ...apiVersion) (apiVersion, error) { + v, err := c.loadVersions() + if err != nil { + return -1, err + } + a := v.negotiate(key, sortedSupportedVersions...) + if a < 0 { + return -1, fmt.Errorf("no matching versions were found between the client and the broker for API key %d", key) + } + return a, nil +} + +func (c *Conn) loadVersions() (apiVersionMap, error) { + v, _ := c.apiVersions.Load().(apiVersionMap) + if v != nil { + return v, nil + } + + brokerVersions, err := c.ApiVersions() + if err != nil { + return nil, err + } + + v = make(apiVersionMap, len(brokerVersions)) + + for _, a := range brokerVersions { + v[apiKey(a.ApiKey)] = a + } + + c.apiVersions.Store(v) + return v, nil +} + +// Broker returns a Broker value representing the kafka broker that this +// connection was established to. +func (c *Conn) Broker() Broker { + addr := c.conn.RemoteAddr() + host, port, _ := splitHostPortNumber(addr.String()) + return Broker{ + Host: host, + Port: port, + ID: int(c.broker), + Rack: c.rack, + } +} + +// Controller requests kafka for the current controller and returns its URL. +func (c *Conn) Controller() (broker Broker, err error) { + err = c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{})) + }, + func(deadline time.Time, size int) error { + var res metadataResponseV1 + + if err := c.readResponse(size, &res); err != nil { + return err + } + for _, brokerMeta := range res.Brokers { + if brokerMeta.NodeID == res.ControllerID { + broker = Broker{ID: int(brokerMeta.NodeID), + Port: int(brokerMeta.Port), + Host: brokerMeta.Host, + Rack: brokerMeta.Rack} + break + } + } + return nil + }, + ) + return broker, err +} + +// Brokers retrieve the broker list from the Kafka metadata. +func (c *Conn) Brokers() ([]Broker, error) { + var brokers []Broker + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{})) + }, + func(deadline time.Time, size int) error { + var res metadataResponseV1 + + if err := c.readResponse(size, &res); err != nil { + return err + } + + brokers = make([]Broker, len(res.Brokers)) + for i, brokerMeta := range res.Brokers { + brokers[i] = Broker{ + ID: int(brokerMeta.NodeID), + Port: int(brokerMeta.Port), + Host: brokerMeta.Host, + Rack: brokerMeta.Rack, + } + } + return nil + }, + ) + return brokers, err +} + +// DeleteTopics deletes the specified topics. 
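A minimal sketch of the metadata helpers above, against an assumed broker address:

package main

import (
	"fmt"
	"log"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.Dial("tcp", "localhost:9092") // assumed broker
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	controller, err := conn.Controller()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("controller: %s:%d (id %d)\n", controller.Host, controller.Port, controller.ID)

	brokers, err := conn.Brokers()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range brokers {
		fmt.Printf("broker %d: %s:%d rack=%q\n", b.ID, b.Host, b.Port, b.Rack)
	}
}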
+func (c *Conn) DeleteTopics(topics ...string) error { + _, err := c.deleteTopics(deleteTopicsRequestV0{ + Topics: topics, + }) + return err +} + +// findCoordinator finds the coordinator for the specified group or transaction +// +// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator +func (c *Conn) findCoordinator(request findCoordinatorRequestV0) (findCoordinatorResponseV0, error) { + var response findCoordinatorResponseV0 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(findCoordinator, v0, id, request) + + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return findCoordinatorResponseV0{}, err + } + if response.ErrorCode != 0 { + return findCoordinatorResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// heartbeat sends a heartbeat message required by consumer groups +// +// See http://kafka.apache.org/protocol.html#The_Messages_Heartbeat +func (c *Conn) heartbeat(request heartbeatRequestV0) (heartbeatResponseV0, error) { + var response heartbeatResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(heartbeat, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return heartbeatResponseV0{}, err + } + if response.ErrorCode != 0 { + return heartbeatResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// joinGroup attempts to join a consumer group +// +// See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup +func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) { + var response joinGroupResponseV1 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(joinGroup, v1, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return joinGroupResponseV1{}, err + } + if response.ErrorCode != 0 { + return joinGroupResponseV1{}, Error(response.ErrorCode) + } + + return response, nil +} + +// leaveGroup leaves the consumer from the consumer group +// +// See http://kafka.apache.org/protocol.html#The_Messages_LeaveGroup +func (c *Conn) leaveGroup(request leaveGroupRequestV0) (leaveGroupResponseV0, error) { + var response leaveGroupResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(leaveGroup, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return leaveGroupResponseV0{}, err + } + if response.ErrorCode != 0 { + return leaveGroupResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// listGroups lists all the consumer groups +// +// See http://kafka.apache.org/protocol.html#The_Messages_ListGroups +func (c *Conn) listGroups(request listGroupsRequestV1) (listGroupsResponseV1, error) { + var response listGroupsResponseV1 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(listGroups, v1, id, request) + }, + func(deadline time.Time, size int) error { + 
return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return listGroupsResponseV1{}, err + } + if response.ErrorCode != 0 { + return listGroupsResponseV1{}, Error(response.ErrorCode) + } + + return response, nil +} + +// offsetCommit commits the specified topic partition offsets +// +// See http://kafka.apache.org/protocol.html#The_Messages_OffsetCommit +func (c *Conn) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) { + var response offsetCommitResponseV2 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(offsetCommit, v2, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return offsetCommitResponseV2{}, err + } + for _, r := range response.Responses { + for _, pr := range r.PartitionResponses { + if pr.ErrorCode != 0 { + return offsetCommitResponseV2{}, Error(pr.ErrorCode) + } + } + } + + return response, nil +} + +// offsetFetch fetches the offsets for the specified topic partitions. +// -1 indicates that there is no offset saved for the partition. +// +// See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch +func (c *Conn) offsetFetch(request offsetFetchRequestV1) (offsetFetchResponseV1, error) { + var response offsetFetchResponseV1 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(offsetFetch, v1, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return offsetFetchResponseV1{}, err + } + for _, r := range response.Responses { + for _, pr := range r.PartitionResponses { + if pr.ErrorCode != 0 { + return offsetFetchResponseV1{}, Error(pr.ErrorCode) + } + } + } + + return response, nil +} + +// syncGroup completes the handshake to join a consumer group +// +// See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup +func (c *Conn) syncGroup(request syncGroupRequestV0) (syncGroupResponseV0, error) { + var response syncGroupResponseV0 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(syncGroup, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return syncGroupResponseV0{}, err + } + if response.ErrorCode != 0 { + return syncGroupResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// Close closes the kafka connection. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// SetDeadline sets the read and write deadlines associated with the connection. +// It is equivalent to calling both SetReadDeadline and SetWriteDeadline. +// +// A deadline is an absolute time after which I/O operations fail with a timeout +// (see type Error) instead of blocking. The deadline applies to all future and +// pending I/O, not just the immediately following call to Read or Write. 
After +// a deadline has been exceeded, the connection may be closed if it was found to +// be in an unrecoverable state. +// +// A zero value for t means I/O operations will not time out. +func (c *Conn) SetDeadline(t time.Time) error { + c.rdeadline.setDeadline(t) + c.wdeadline.setDeadline(t) + return nil +} + +// SetReadDeadline sets the deadline for future Read calls and any +// currently-blocked Read call. +// A zero value for t means Read will not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + c.rdeadline.setDeadline(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls and any +// currently-blocked Write call. +// Even if write times out, it may return n > 0, indicating that some of the +// data was successfully written. +// A zero value for t means Write will not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.wdeadline.setDeadline(t) + return nil +} + +// Offset returns the current offset of the connection as pair of integers, +// where the first one is an offset value and the second one indicates how +// to interpret it. +// +// See Seek for more details about the offset and whence values. +func (c *Conn) Offset() (offset int64, whence int) { + c.mutex.Lock() + offset = c.offset + c.mutex.Unlock() + + switch offset { + case FirstOffset: + offset = 0 + whence = SeekStart + case LastOffset: + offset = 0 + whence = SeekEnd + default: + whence = SeekAbsolute + } + return +} + +const ( + SeekStart = 0 // Seek relative to the first offset available in the partition. + SeekAbsolute = 1 // Seek to an absolute offset. + SeekEnd = 2 // Seek relative to the last offset available in the partition. + SeekCurrent = 3 // Seek relative to the current offset. + + // This flag may be combined to any of the SeekAbsolute and SeekCurrent + // constants to skip the bound check that the connection would do otherwise. + // Programs can use this flag to avoid making a metadata request to the kafka + // broker to read the current first and last offsets of the partition. + SeekDontCheck = 1 << 30 +) + +// Seek sets the offset for the next read or write operation according to whence, which +// should be one of SeekStart, SeekAbsolute, SeekEnd, or SeekCurrent. +// When seeking relative to the end, the offset is subtracted from the current offset. +// Note that for historical reasons, these do not align with the usual whence constants +// as in lseek(2) or os.Seek. +// The method returns the new absolute offset of the connection. +func (c *Conn) Seek(offset int64, whence int) (int64, error) { + seekDontCheck := (whence & SeekDontCheck) != 0 + whence &= ^SeekDontCheck + + switch whence { + case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent: + default: + return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. 
(whence = %d)", whence) + } + + if seekDontCheck { + if whence == SeekAbsolute { + c.mutex.Lock() + c.offset = offset + c.mutex.Unlock() + return offset, nil + } + + if whence == SeekCurrent { + c.mutex.Lock() + c.offset += offset + offset = c.offset + c.mutex.Unlock() + return offset, nil + } + } + + if whence == SeekAbsolute { + c.mutex.Lock() + unchanged := offset == c.offset + c.mutex.Unlock() + if unchanged { + return offset, nil + } + } + + if whence == SeekCurrent { + c.mutex.Lock() + offset = c.offset + offset + c.mutex.Unlock() + } + + first, last, err := c.ReadOffsets() + if err != nil { + return 0, err + } + + switch whence { + case SeekStart: + offset = first + offset + case SeekEnd: + offset = last - offset + } + + if offset < first || offset > last { + return 0, OffsetOutOfRange + } + + c.mutex.Lock() + c.offset = offset + c.mutex.Unlock() + return offset, nil +} + +// Read reads the message at the current offset from the connection, advancing +// the offset on success so the next call to a read method will produce the next +// message. +// The method returns the number of bytes read, or an error if something went +// wrong. +// +// While it is safe to call Read concurrently from multiple goroutines it may +// be hard for the program to predict the results as the connection offset will +// be read and written by multiple goroutines, they could read duplicates, or +// messages may be seen by only some of the goroutines. +// +// The method fails with io.ErrShortBuffer if the buffer passed as argument is +// too small to hold the message value. +// +// This method is provided to satisfy the net.Conn interface but is much less +// efficient than using the more general purpose ReadBatch method. +func (c *Conn) Read(b []byte) (int, error) { + batch := c.ReadBatch(1, len(b)) + n, err := batch.Read(b) + return n, coalesceErrors(silentEOF(err), batch.Close()) +} + +// ReadMessage reads the message at the current offset from the connection, +// advancing the offset on success so the next call to a read method will +// produce the next message. +// +// Because this method allocate memory buffers for the message key and value +// it is less memory-efficient than Read, but has the advantage of never +// failing with io.ErrShortBuffer. +// +// While it is safe to call Read concurrently from multiple goroutines it may +// be hard for the program to predict the results as the connection offset will +// be read and written by multiple goroutines, they could read duplicates, or +// messages may be seen by only some of the goroutines. +// +// This method is provided for convenience purposes but is much less efficient +// than using the more general purpose ReadBatch method. +func (c *Conn) ReadMessage(maxBytes int) (Message, error) { + batch := c.ReadBatch(1, maxBytes) + msg, err := batch.ReadMessage() + return msg, coalesceErrors(silentEOF(err), batch.Close()) +} + +// ReadBatch reads a batch of messages from the kafka server. The method always +// returns a non-nil Batch value. If an error occurred, either sending the fetch +// request or reading the response, the error will be made available by the +// returned value of the batch's Close method. +// +// While it is safe to call ReadBatch concurrently from multiple goroutines it +// may be hard for the program to predict the results as the connection offset +// will be read and written by multiple goroutines, they could read duplicates, +// or messages may be seen by only some of the goroutines. 
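Because the whence constants intentionally differ from the os.Seek values, a short sketch may help; it assumes a broker at localhost:9092 and a partition holding at least 10 messages:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "events", 0) // assumed
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// SeekEnd subtracts from the last offset, so this positions the cursor
	// 10 messages before the end of the partition.
	offset, err := conn.Seek(10, kafka.SeekEnd)
	if err != nil {
		log.Fatal(err) // OffsetOutOfRange if the partition is shorter
	}
	fmt.Println("absolute offset:", offset)

	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
	msg, err := conn.ReadMessage(1e6)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d: %s\n", msg.Offset, msg.Value)
}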
+//
+// A program doesn't specify the number of messages it wants from a batch, but
+// gives the minimum and maximum number of bytes that it wants to receive from
+// the kafka server.
+func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch {
+	return c.ReadBatchWith(ReadBatchConfig{
+		MinBytes: minBytes,
+		MaxBytes: maxBytes,
+	})
+}
+
+// ReadBatchWith is similar to ReadBatch in every way; calling ReadBatch is
+// equivalent to calling ReadBatchWith with the default values of
+// ReadBatchConfig for everything except MinBytes and MaxBytes.
+func (c *Conn) ReadBatchWith(cfg ReadBatchConfig) *Batch {
+
+	var adjustedDeadline time.Time
+	var maxFetch = int(c.fetchMaxBytes)
+
+	if cfg.MinBytes < 0 || cfg.MinBytes > maxFetch {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", cfg.MinBytes, maxFetch)}
+	}
+	if cfg.MaxBytes < 0 || cfg.MaxBytes > maxFetch {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", cfg.MaxBytes, maxFetch)}
+	}
+	if cfg.MinBytes > cfg.MaxBytes {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", cfg.MinBytes, cfg.MaxBytes)}
+	}
+
+	offset, whence := c.Offset()
+
+	offset, err := c.Seek(offset, whence|SeekDontCheck)
+	if err != nil {
+		return &Batch{err: dontExpectEOF(err)}
+	}
+
+	fetchVersion, err := c.negotiateVersion(fetch, v2, v5, v10)
+	if err != nil {
+		return &Batch{err: dontExpectEOF(err)}
+	}
+
+	id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error {
+		now := time.Now()
+		var timeout time.Duration
+		if cfg.MaxWait > 0 {
+			// explicitly-configured case: no changes are made to the deadline,
+			// and the timeout is sent exactly as specified.
+			timeout = cfg.MaxWait
+		} else {
+			// default case: use the original logic to adjust the conn's
+			// deadline.
+			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
+			timeout = deadlineToTimeout(deadline, now)
+		}
+		// save this variable outside of the closure for later use in detecting
+		// truncated messages.
+ adjustedDeadline = deadline + switch fetchVersion { + case v10: + return c.wb.writeFetchRequestV10( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + timeout, + int8(cfg.IsolationLevel), + ) + case v5: + return c.wb.writeFetchRequestV5( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + timeout, + int8(cfg.IsolationLevel), + ) + default: + return c.wb.writeFetchRequestV2( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + timeout, + ) + } + }) + if err != nil { + return &Batch{err: dontExpectEOF(err)} + } + + _, size, lock, err := c.waitResponse(&c.rdeadline, id) + if err != nil { + return &Batch{err: dontExpectEOF(err)} + } + + var throttle int32 + var highWaterMark int64 + var remain int + + switch fetchVersion { + case v10: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV10(&c.rbuf, size) + case v5: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV5(&c.rbuf, size) + default: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV2(&c.rbuf, size) + } + if errors.Is(err, errShortRead) { + err = checkTimeoutErr(adjustedDeadline) + } + + var msgs *messageSetReader + if err == nil { + if highWaterMark == offset { + msgs = &messageSetReader{empty: true} + } else { + msgs, err = newMessageSetReader(&c.rbuf, remain) + } + } + if errors.Is(err, errShortRead) { + err = checkTimeoutErr(adjustedDeadline) + } + + return &Batch{ + conn: c, + msgs: msgs, + deadline: adjustedDeadline, + throttle: makeDuration(throttle), + lock: lock, + topic: c.topic, // topic is copied to Batch to prevent race with Batch.close + partition: int(c.partition), // partition is copied to Batch to prevent race with Batch.close + offset: offset, + highWaterMark: highWaterMark, + // there shouldn't be a short read on initially setting up the batch. + // as such, any io.EOF is re-mapped to an io.ErrUnexpectedEOF so that we + // don't accidentally signal that we successfully reached the end of the + // batch. + err: dontExpectEOF(err), + } +} + +// ReadOffset returns the offset of the first message with a timestamp equal or +// greater to t. +func (c *Conn) ReadOffset(t time.Time) (int64, error) { + return c.readOffset(timestamp(t)) +} + +// ReadFirstOffset returns the first offset available on the connection. +func (c *Conn) ReadFirstOffset() (int64, error) { + return c.readOffset(FirstOffset) +} + +// ReadLastOffset returns the last offset available on the connection. +func (c *Conn) ReadLastOffset() (int64, error) { + return c.readOffset(LastOffset) +} + +// ReadOffsets returns the absolute first and last offsets of the topic used by +// the connection. +func (c *Conn) ReadOffsets() (first, last int64, err error) { + // We have to submit two different requests to fetch the first and last + // offsets because kafka refuses requests that ask for multiple offsets + // on the same topic and partition. 
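+	// (Editorial note: as a consequence, every ReadOffsets call costs two
+	// round-trips to the broker, one ListOffsets request per bound.)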
+ if first, err = c.ReadFirstOffset(); err != nil { + return + } + if last, err = c.ReadLastOffset(); err != nil { + first = 0 // don't leak the value on error + return + } + return +} + +func (c *Conn) readOffset(t int64) (offset int64, err error) { + err = c.readOperation( + func(deadline time.Time, id int32) error { + return c.wb.writeListOffsetRequestV1(id, c.clientID, c.topic, c.partition, t) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { + // We skip the topic name because we've made a request for + // a single topic. + size, err := discardString(r, size) + if err != nil { + return size, err + } + + // Reading the array of partitions, there will be only one + // partition which gives the offset we're looking for. + return readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { + var p partitionOffsetV1 + size, err := p.readFrom(r, size) + if err != nil { + return size, err + } + if p.ErrorCode != 0 { + return size, Error(p.ErrorCode) + } + offset = p.Offset + return size, nil + }) + })) + }, + ) + return +} + +// ReadPartitions returns the list of available partitions for the given list of +// topics. +// +// If the method is called with no topic, it uses the topic configured on the +// connection. If there are none, the method fetches all partitions of the kafka +// cluster. +func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err error) { + + if len(topics) == 0 { + if len(c.topic) != 0 { + defaultTopics := [...]string{c.topic} + topics = defaultTopics[:] + } else { + // topics needs to be explicitly nil-ed out or the broker will + // interpret it as a request for 0 partitions instead of all. + topics = nil + } + } + metadataVersion, err := c.negotiateVersion(metadata, v1, v6) + if err != nil { + return nil, err + } + + err = c.readOperation( + func(deadline time.Time, id int32) error { + switch metadataVersion { + case v6: + return c.writeRequest(metadata, v6, id, topicMetadataRequestV6{Topics: topics, AllowAutoTopicCreation: true}) + default: + return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics)) + } + }, + func(deadline time.Time, size int) error { + partitions, err = c.readPartitionsResponse(metadataVersion, size) + return err + }, + ) + return +} + +func (c *Conn) readPartitionsResponse(metadataVersion apiVersion, size int) ([]Partition, error) { + switch metadataVersion { + case v6: + var res metadataResponseV6 + if err := c.readResponse(size, &res); err != nil { + return nil, err + } + brokers := readBrokerMetadata(res.Brokers) + return c.readTopicMetadatav6(brokers, res.Topics) + default: + var res metadataResponseV1 + if err := c.readResponse(size, &res); err != nil { + return nil, err + } + brokers := readBrokerMetadata(res.Brokers) + return c.readTopicMetadatav1(brokers, res.Topics) + } +} + +func readBrokerMetadata(brokerMetadata []brokerMetadataV1) map[int32]Broker { + brokers := make(map[int32]Broker, len(brokerMetadata)) + for _, b := range brokerMetadata { + brokers[b.NodeID] = Broker{ + Host: b.Host, + Port: int(b.Port), + ID: int(b.NodeID), + Rack: b.Rack, + } + } + return brokers +} + +func (c *Conn) readTopicMetadatav1(brokers map[int32]Broker, topicMetadata []topicMetadataV1) (partitions []Partition, err error) { + for _, t := range topicMetadata { + if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) { + // We only report errors if they happened for the topic of + // the connection, 
otherwise the topic will simply have no + // partitions in the result set. + return nil, Error(t.TopicErrorCode) + } + for _, p := range t.Partitions { + partitions = append(partitions, Partition{ + Topic: t.TopicName, + Leader: brokers[p.Leader], + Replicas: makeBrokers(brokers, p.Replicas...), + Isr: makeBrokers(brokers, p.Isr...), + ID: int(p.PartitionID), + OfflineReplicas: []Broker{}, + }) + } + } + return +} + +func (c *Conn) readTopicMetadatav6(brokers map[int32]Broker, topicMetadata []topicMetadataV6) (partitions []Partition, err error) { + for _, t := range topicMetadata { + if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) { + // We only report errors if they happened for the topic of + // the connection, otherwise the topic will simply have no + // partitions in the result set. + return nil, Error(t.TopicErrorCode) + } + for _, p := range t.Partitions { + partitions = append(partitions, Partition{ + Topic: t.TopicName, + Leader: brokers[p.Leader], + Replicas: makeBrokers(brokers, p.Replicas...), + Isr: makeBrokers(brokers, p.Isr...), + ID: int(p.PartitionID), + OfflineReplicas: makeBrokers(brokers, p.OfflineReplicas...), + }) + } + } + return +} + +func makeBrokers(brokers map[int32]Broker, ids ...int32) []Broker { + b := make([]Broker, len(ids)) + for i, id := range ids { + br, ok := brokers[id] + if !ok { + // When the broker id isn't found in the current list of known + // brokers, use a placeholder to report that the cluster has + // logical knowledge of the broker but no information about the + // physical host where it is running. + br.ID = int(id) + } + b[i] = br + } + return b +} + +// Write writes a message to the kafka broker that this connection was +// established to. The method returns the number of bytes written, or an error +// if something went wrong. +// +// The operation either succeeds or fail, it never partially writes the message. +// +// This method is exposed to satisfy the net.Conn interface but is less efficient +// than the more general purpose WriteMessages method. +func (c *Conn) Write(b []byte) (int, error) { + return c.WriteCompressedMessages(nil, Message{Value: b}) +} + +// WriteMessages writes a batch of messages to the connection's topic and +// partition, returning the number of bytes written. The write is an atomic +// operation, it either fully succeeds or fails. +func (c *Conn) WriteMessages(msgs ...Message) (int, error) { + return c.WriteCompressedMessages(nil, msgs...) +} + +// WriteCompressedMessages writes a batch of messages to the connection's topic +// and partition, returning the number of bytes written. The write is an atomic +// operation, it either fully succeeds or fails. +// +// If the compression codec is not nil, the messages will be compressed. +func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, err error) { + nbytes, _, _, _, err = c.writeCompressedMessages(codec, msgs...) + return +} + +// WriteCompressedMessagesAt writes a batch of messages to the connection's topic +// and partition, returning the number of bytes written, partition and offset numbers +// and timestamp assigned by the kafka broker to the message set. The write is an atomic +// operation, it either fully succeeds or fails. +// +// If the compression codec is not nil, the messages will be compressed. 
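+//
+// For illustration only, a hedged sketch of producing compressed messages,
+// assuming `conn` is a *Conn and the codec comes from the package's snappy
+// sub-package:
+//
+//	n, partition, offset, ts, err := conn.WriteCompressedMessagesAt(
+//		snappy.NewCompressionCodec(),
+//		kafka.Message{Value: []byte("one!")},
+//		kafka.Message{Value: []byte("two!")},
+//	)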
+func (c *Conn) WriteCompressedMessagesAt(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) { + return c.writeCompressedMessages(codec, msgs...) +} + +func (c *Conn) writeCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) { + if len(msgs) == 0 { + return + } + + writeTime := time.Now() + for i, msg := range msgs { + // users may believe they can set the Topic and/or Partition + // on the kafka message. + if msg.Topic != "" && msg.Topic != c.topic { + err = errInvalidWriteTopic + return + } + if msg.Partition != 0 { + err = errInvalidWritePartition + return + } + + if msg.Time.IsZero() { + msgs[i].Time = writeTime + } + + nbytes += len(msg.Key) + len(msg.Value) + } + + var produceVersion apiVersion + if produceVersion, err = c.negotiateVersion(produce, v2, v3, v7); err != nil { + return + } + + err = c.writeOperation( + func(deadline time.Time, id int32) error { + now := time.Now() + deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) + switch produceVersion { + case v7: + recordBatch, err := + newRecordBatch( + codec, + msgs..., + ) + if err != nil { + return err + } + return c.wb.writeProduceRequestV7( + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + c.transactionalID, + recordBatch, + ) + case v3: + recordBatch, err := + newRecordBatch( + codec, + msgs..., + ) + if err != nil { + return err + } + return c.wb.writeProduceRequestV3( + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + c.transactionalID, + recordBatch, + ) + default: + return c.wb.writeProduceRequestV2( + codec, + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + msgs..., + ) + } + }, + func(deadline time.Time, size int) error { + return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { + // Skip the topic, we've produced the message to only one topic, + // no need to waste resources loading it in memory. + size, err := discardString(r, size) + if err != nil { + return size, err + } + + // Read the list of partitions, there should be only one since + // we've produced a message to a single partition. + size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { + switch produceVersion { + case v7: + var p produceResponsePartitionV7 + size, err := p.readFrom(r, size) + if err == nil && p.ErrorCode != 0 { + err = Error(p.ErrorCode) + } + if err == nil { + partition = p.Partition + offset = p.Offset + appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond)) + } + return size, err + default: + var p produceResponsePartitionV2 + size, err := p.readFrom(r, size) + if err == nil && p.ErrorCode != 0 { + err = Error(p.ErrorCode) + } + if err == nil { + partition = p.Partition + offset = p.Offset + appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond)) + } + return size, err + } + + }) + if err != nil { + return size, err + } + + // The response is trailed by the throttle time, also skipping + // since it's not interesting here. + return discardInt32(r, size) + })) + }, + ) + + if err != nil { + nbytes = 0 + } + + return +} + +// SetRequiredAcks sets the number of acknowledges from replicas that the +// connection requests when producing messages. 
+func (c *Conn) SetRequiredAcks(n int) error { + switch n { + case -1, 1: + atomic.StoreInt32(&c.requiredAcks, int32(n)) + return nil + default: + return InvalidRequiredAcks + } +} + +func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error { + hdr := c.requestHeader(apiKey, apiVersion, correlationID) + hdr.Size = (hdr.size() + req.size()) - 4 + hdr.writeTo(&c.wb) + req.writeTo(&c.wb) + return c.wbuf.Flush() +} + +func (c *Conn) readResponse(size int, res interface{}) error { + size, err := read(&c.rbuf, size, res) + if err != nil { + var kafkaError Error + if errors.As(err, &kafkaError) { + size, err = discardN(&c.rbuf, size, size) + } + } + return expectZeroSize(size, err) +} + +func (c *Conn) peekResponseSizeAndID() (int32, int32, error) { + b, err := c.rbuf.Peek(8) + if err != nil { + return 0, 0, err + } + size, id := makeInt32(b[:4]), makeInt32(b[4:]) + return size, id, nil +} + +func (c *Conn) skipResponseSizeAndID() { + c.rbuf.Discard(8) +} + +func (c *Conn) readDeadline() time.Time { + return c.rdeadline.deadline() +} + +func (c *Conn) writeDeadline() time.Time { + return c.wdeadline.deadline() +} + +func (c *Conn) readOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { + return c.do(&c.rdeadline, write, read) +} + +func (c *Conn) writeOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { + return c.do(&c.wdeadline, write, read) +} + +func (c *Conn) enter() { + atomic.AddInt32(&c.inflight, +1) +} + +func (c *Conn) leave() { + atomic.AddInt32(&c.inflight, -1) +} + +func (c *Conn) concurrency() int { + return int(atomic.LoadInt32(&c.inflight)) +} + +func (c *Conn) do(d *connDeadline, write func(time.Time, int32) error, read func(time.Time, int) error) error { + id, err := c.doRequest(d, write) + if err != nil { + return err + } + + deadline, size, lock, err := c.waitResponse(d, id) + if err != nil { + return err + } + + if err = read(deadline, size); err != nil { + var kafkaError Error + if !errors.As(err, &kafkaError) { + c.conn.Close() + } + } + + d.unsetConnReadDeadline() + lock.Unlock() + return err +} + +func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (id int32, err error) { + c.enter() + c.wlock.Lock() + c.correlationID++ + id = c.correlationID + err = write(d.setConnWriteDeadline(c.conn), id) + d.unsetConnWriteDeadline() + + if err != nil { + // When an error occurs there's no way to know if the connection is in a + // recoverable state so we're better off just giving up at this point to + // avoid any risk of corrupting the following operations. + c.conn.Close() + c.leave() + } + + c.wlock.Unlock() + return +} + +func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size int, lock *sync.Mutex, err error) { + for { + var rsz int32 + var rid int32 + + c.rlock.Lock() + deadline = d.setConnReadDeadline(c.conn) + rsz, rid, err = c.peekResponseSizeAndID() + + if err != nil { + d.unsetConnReadDeadline() + c.conn.Close() + c.rlock.Unlock() + break + } + + if id == rid { + c.skipResponseSizeAndID() + size, lock = int(rsz-4), &c.rlock + // Don't unlock the read mutex to yield ownership to the caller. + break + } + + if c.concurrency() == 1 { + // If the goroutine is the only one waiting on this connection it + // should be impossible to read a correlation id different from the + // one it expects. This is a sign that the data we are reading on + // the wire is corrupted and the connection needs to be closed. 
+ err = io.ErrNoProgress + c.rlock.Unlock() + break + } + + // Optimistically release the read lock if a response has already + // been received but the current operation is not the target for it. + c.rlock.Unlock() + } + + c.leave() + return +} + +func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader { + return requestHeader{ + ApiKey: int16(apiKey), + ApiVersion: int16(apiVersion), + CorrelationID: correlationID, + ClientID: c.clientID, + } +} + +func (c *Conn) ApiVersions() ([]ApiVersion, error) { + deadline := &c.rdeadline + + if deadline.deadline().IsZero() { + // ApiVersions is called automatically when API version negotiation + // needs to happen, so we are not guaranteed that a read deadline has + // been set yet. Fallback to use the write deadline in case it was + // set, for example when version negotiation is initiated during a + // produce request. + deadline = &c.wdeadline + } + + id, err := c.doRequest(deadline, func(_ time.Time, id int32) error { + h := requestHeader{ + ApiKey: int16(apiVersions), + ApiVersion: int16(v0), + CorrelationID: id, + ClientID: c.clientID, + } + h.Size = (h.size() - 4) + h.writeTo(&c.wb) + return c.wbuf.Flush() + }) + if err != nil { + return nil, err + } + + _, size, lock, err := c.waitResponse(deadline, id) + if err != nil { + return nil, err + } + defer lock.Unlock() + + var errorCode int16 + if size, err = readInt16(&c.rbuf, size, &errorCode); err != nil { + return nil, err + } + var arrSize int32 + if size, err = readInt32(&c.rbuf, size, &arrSize); err != nil { + return nil, err + } + r := make([]ApiVersion, arrSize) + for i := 0; i < int(arrSize); i++ { + if size, err = readInt16(&c.rbuf, size, &r[i].ApiKey); err != nil { + return nil, err + } + if size, err = readInt16(&c.rbuf, size, &r[i].MinVersion); err != nil { + return nil, err + } + if size, err = readInt16(&c.rbuf, size, &r[i].MaxVersion); err != nil { + return nil, err + } + } + + if errorCode != 0 { + return r, Error(errorCode) + } + + return r, nil +} + +// connDeadline is a helper type to implement read/write deadline management on +// the kafka connection. +type connDeadline struct { + mutex sync.Mutex + value time.Time + rconn net.Conn + wconn net.Conn +} + +func (d *connDeadline) deadline() time.Time { + d.mutex.Lock() + t := d.value + d.mutex.Unlock() + return t +} + +func (d *connDeadline) setDeadline(t time.Time) { + d.mutex.Lock() + d.value = t + + if d.rconn != nil { + d.rconn.SetReadDeadline(t) + } + + if d.wconn != nil { + d.wconn.SetWriteDeadline(t) + } + + d.mutex.Unlock() +} + +func (d *connDeadline) setConnReadDeadline(conn net.Conn) time.Time { + d.mutex.Lock() + deadline := d.value + d.rconn = conn + d.rconn.SetReadDeadline(deadline) + d.mutex.Unlock() + return deadline +} + +func (d *connDeadline) setConnWriteDeadline(conn net.Conn) time.Time { + d.mutex.Lock() + deadline := d.value + d.wconn = conn + d.wconn.SetWriteDeadline(deadline) + d.mutex.Unlock() + return deadline +} + +func (d *connDeadline) unsetConnReadDeadline() { + d.mutex.Lock() + d.rconn = nil + d.mutex.Unlock() +} + +func (d *connDeadline) unsetConnWriteDeadline() { + d.mutex.Lock() + d.wconn = nil + d.mutex.Unlock() +} + +// saslHandshake sends the SASL handshake message. This will determine whether +// the Mechanism is supported by the cluster. If it's not, this function will +// error out with UnsupportedSASLMechanism. 
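+//
+// For illustration only, the client-side flow implemented below is roughly:
+//
+//	if err := c.saslHandshake("PLAIN"); err != nil { ... }
+//	resp, err := c.saslAuthenticate(payload) // payload is mechanism-specific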
+// +// If the mechanism is unsupported, the handshake request will reply with the +// list of the cluster's configured mechanisms, which could potentially be used +// to facilitate negotiation. At the moment, we are not negotiating the +// mechanism as we believe that brokers are usually known to the client, and +// therefore the client should already know which mechanisms are supported. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake +func (c *Conn) saslHandshake(mechanism string) error { + // The wire format for V0 and V1 is identical, but the version + // number will affect how the SASL authentication + // challenge/responses are sent + var resp saslHandshakeResponseV0 + + version, err := c.negotiateVersion(saslHandshake, v0, v1) + if err != nil { + return err + } + + err = c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(saslHandshake, version, id, &saslHandshakeRequestV0{Mechanism: mechanism}) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (int, error) { + return (&resp).readFrom(&c.rbuf, size) + }()) + }, + ) + if err == nil && resp.ErrorCode != 0 { + err = Error(resp.ErrorCode) + } + return err +} + +// saslAuthenticate sends the SASL authenticate message. This function must +// be immediately preceded by a successful saslHandshake. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate +func (c *Conn) saslAuthenticate(data []byte) ([]byte, error) { + // if we sent a v1 handshake, then we must encapsulate the authentication + // request in a saslAuthenticateRequest. otherwise, we read and write raw + // bytes. + version, err := c.negotiateVersion(saslHandshake, v0, v1) + if err != nil { + return nil, err + } + if version == v1 { + var request = saslAuthenticateRequestV0{Data: data} + var response saslAuthenticateResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(saslAuthenticate, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err == nil && response.ErrorCode != 0 { + err = Error(response.ErrorCode) + } + return response.Data, err + } + + // fall back to opaque bytes on the wire. the broker is expecting these if + // it just processed a v0 sasl handshake. + c.wb.writeInt32(int32(len(data))) + if _, err := c.wb.Write(data); err != nil { + return nil, err + } + if err := c.wb.Flush(); err != nil { + return nil, err + } + + var respLen int32 + if _, err := readInt32(&c.rbuf, 4, &respLen); err != nil { + return nil, err + } + + resp, _, err := readNewBytes(&c.rbuf, int(respLen), int(respLen)) + return resp, err +} diff --git a/vendor/github.com/segmentio/kafka-go/consumergroup.go b/vendor/github.com/segmentio/kafka-go/consumergroup.go new file mode 100644 index 000000000..b9d0a7e2e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/consumergroup.go @@ -0,0 +1,1252 @@ +package kafka + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// ErrGroupClosed is returned by ConsumerGroup.Next when the group has already +// been closed. +var ErrGroupClosed = errors.New("consumer group is closed") + +// ErrGenerationEnded is returned by the context.Context issued by the +// Generation's Start function when the context has been closed. 
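+//
+// For illustration only: a function launched via Generation.Start can detect
+// the end of its generation by inspecting ctx.Err() once the context closes:
+//
+//	gen.Start(func(ctx context.Context) {
+//		<-ctx.Done()
+//		if errors.Is(ctx.Err(), ErrGenerationEnded) {
+//			// clean up and return promptly
+//		}
+//	})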
+var ErrGenerationEnded = errors.New("consumer group generation has ended")
+
+const (
+	// defaultProtocolType holds the default protocol type documented in the
+	// kafka protocol.
+	//
+	// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
+	defaultProtocolType = "consumer"
+
+	// defaultHeartbeatInterval contains the default time between heartbeats. If
+	// the coordinator does not receive a heartbeat within the session timeout interval,
+	// the consumer will be considered dead and the coordinator will rebalance the
+	// group.
+	//
+	// As a rule, the heartbeat interval should be no greater than 1/3 the session timeout.
+	defaultHeartbeatInterval = 3 * time.Second
+
+	// defaultSessionTimeout contains the default interval the coordinator will wait
+	// for a heartbeat before marking a consumer as dead.
+	defaultSessionTimeout = 30 * time.Second
+
+	// defaultRebalanceTimeout contains the amount of time the coordinator will wait
+	// for consumers to issue a join group once a rebalance has been requested.
+	defaultRebalanceTimeout = 30 * time.Second
+
+	// defaultJoinGroupBackoff is the amount of time to wait after a failed
+	// consumer group generation before attempting to re-join.
+	defaultJoinGroupBackoff = 5 * time.Second
+
+	// defaultRetentionTime holds the length of time the consumer group will be
+	// saved by kafka. The default value of -1 tells the broker to use its
+	// configured value.
+	defaultRetentionTime = -1 * time.Millisecond
+
+	// defaultPartitionWatchTime contains the amount of time kafka-go will wait to
+	// query the brokers looking for partition changes.
+	defaultPartitionWatchTime = 5 * time.Second
+
+	// defaultTimeout is the deadline to set when interacting with the
+	// consumer group coordinator.
+	defaultTimeout = 5 * time.Second
+)
+
+// ConsumerGroupConfig is a configuration object used to create new instances of
+// ConsumerGroup.
+type ConsumerGroupConfig struct {
+	// ID is the consumer group ID. It must not be empty.
+	ID string
+
+	// The list of broker addresses used to connect to the kafka cluster. It
+	// must not be empty.
+	Brokers []string
+
+	// A dialer used to open connections to the kafka server. This field is
+	// optional; if nil, the default dialer is used instead.
+	Dialer *Dialer
+
+	// Topics is the list of topics that will be consumed by this group. It
+	// will usually have a single value, but it is permitted to have multiple
+	// for more complex use cases.
+	Topics []string
+
+	// GroupBalancers is the priority-ordered list of client-side consumer group
+	// balancing strategies that will be offered to the coordinator. The first
+	// strategy that all group members support will be chosen by the leader.
+	//
+	// Default: [Range, RoundRobin]
+	GroupBalancers []GroupBalancer
+
+	// HeartbeatInterval sets the optional frequency at which the reader sends the consumer
+	// group heartbeat update.
+	//
+	// Default: 3s
+	HeartbeatInterval time.Duration
+
+	// PartitionWatchInterval indicates how often a reader checks for partition changes.
+	// If a reader sees a partition change (such as a partition add) it will rebalance the group
+	// picking up new partitions.
+	//
+	// Default: 5s
+	PartitionWatchInterval time.Duration
+
+	// WatchPartitionChanges is used to inform kafka-go that a consumer group should be
+	// polling the brokers and rebalancing if any partition changes happen to the topic.
+	WatchPartitionChanges bool
+
+	// SessionTimeout optionally sets the length of time that may pass without a heartbeat
+	// before the coordinator considers the consumer dead and initiates a rebalance.
+	//
+	// Default: 30s
+	SessionTimeout time.Duration
+
+	// RebalanceTimeout optionally sets the length of time the coordinator will wait
+	// for members to join as part of a rebalance. For kafka servers under higher
+	// load, it may be useful to set this value higher.
+	//
+	// Default: 30s
+	RebalanceTimeout time.Duration
+
+	// JoinGroupBackoff optionally sets the length of time to wait before re-joining
+	// the consumer group after an error.
+	//
+	// Default: 5s
+	JoinGroupBackoff time.Duration
+
+	// RetentionTime optionally sets the length of time the consumer group will
+	// be saved by the broker. -1 will disable the setting and leave the
+	// retention up to the broker's offsets.retention.minutes property. By
+	// default, that setting is 1 day for kafka < 2.0 and 7 days for kafka >= 2.0.
+	//
+	// Default: -1
+	RetentionTime time.Duration
+
+	// StartOffset determines from whence the consumer group should begin
+	// consuming when it finds a partition without a committed offset. If
+	// non-zero, it must be set to one of FirstOffset or LastOffset.
+	//
+	// Default: FirstOffset
+	StartOffset int64
+
+	// If not nil, specifies a logger used to report internal changes within the
+	// reader.
+	Logger Logger
+
+	// ErrorLogger is the logger used to report errors. If nil, the reader falls
+	// back to using Logger instead.
+	ErrorLogger Logger
+
+	// Timeout is the network timeout used when communicating with the consumer
+	// group coordinator. This value should not be too small since errors
+	// communicating with the broker will generally cause a consumer group
+	// rebalance, and it's undesirable that a transient network error introduce
+	// that overhead. Similarly, it should not be too large or the consumer
+	// group may be slow to respond to the coordinator failing over to another
+	// broker.
+	//
+	// Default: 5s
+	Timeout time.Duration
+
+	// connect is a function for dialing the coordinator. This is provided for
+	// unit testing to mock broker connections.
+	connect func(dialer *Dialer, brokers ...string) (coordinator, error)
+}
+
+// Validate validates the ConsumerGroupConfig properties and sets relevant
+// defaults.
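+//
+// For illustration only (broker address and names are placeholders), a
+// minimal configuration that passes validation:
+//
+//	group, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
+//		ID:      "my-group",
+//		Brokers: []string{"localhost:9092"},
+//		Topics:  []string{"my-topic"},
+//	})
+//
+// NewConsumerGroup calls Validate internally, which fills in the defaults
+// documented above.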
+func (config *ConsumerGroupConfig) Validate() error { + + if len(config.Brokers) == 0 { + return errors.New("cannot create a consumer group with an empty list of broker addresses") + } + + if len(config.Topics) == 0 { + return errors.New("cannot create a consumer group without a topic") + } + + if config.ID == "" { + return errors.New("cannot create a consumer group without an ID") + } + + if config.Dialer == nil { + config.Dialer = DefaultDialer + } + + if len(config.GroupBalancers) == 0 { + config.GroupBalancers = []GroupBalancer{ + RangeGroupBalancer{}, + RoundRobinGroupBalancer{}, + } + } + + if config.HeartbeatInterval == 0 { + config.HeartbeatInterval = defaultHeartbeatInterval + } + + if config.SessionTimeout == 0 { + config.SessionTimeout = defaultSessionTimeout + } + + if config.PartitionWatchInterval == 0 { + config.PartitionWatchInterval = defaultPartitionWatchTime + } + + if config.RebalanceTimeout == 0 { + config.RebalanceTimeout = defaultRebalanceTimeout + } + + if config.JoinGroupBackoff == 0 { + config.JoinGroupBackoff = defaultJoinGroupBackoff + } + + if config.RetentionTime == 0 { + config.RetentionTime = defaultRetentionTime + } + + if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 { + return fmt.Errorf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval) + } + + if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 { + return fmt.Errorf("SessionTimeout out of bounds: %d", config.SessionTimeout) + } + + if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 { + return fmt.Errorf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout) + } + + if config.JoinGroupBackoff < 0 || (config.JoinGroupBackoff/time.Millisecond) >= math.MaxInt32 { + return fmt.Errorf("JoinGroupBackoff out of bounds: %d", config.JoinGroupBackoff) + } + + if config.RetentionTime < 0 && config.RetentionTime != defaultRetentionTime { + return fmt.Errorf("RetentionTime out of bounds: %d", config.RetentionTime) + } + + if config.PartitionWatchInterval < 0 || (config.PartitionWatchInterval/time.Millisecond) >= math.MaxInt32 { + return fmt.Errorf("PartitionWachInterval out of bounds %d", config.PartitionWatchInterval) + } + + if config.StartOffset == 0 { + config.StartOffset = FirstOffset + } + + if config.StartOffset != FirstOffset && config.StartOffset != LastOffset { + return fmt.Errorf("StartOffset is not valid %d", config.StartOffset) + } + + if config.Timeout == 0 { + config.Timeout = defaultTimeout + } + + if config.connect == nil { + config.connect = makeConnect(*config) + } + + return nil +} + +// PartitionAssignment represents the starting state of a partition that has +// been assigned to a consumer. +type PartitionAssignment struct { + // ID is the partition ID. + ID int + + // Offset is the initial offset at which this assignment begins. It will + // either be an absolute offset if one has previously been committed for + // the consumer group or a relative offset such as FirstOffset when this + // is the first time the partition have been assigned to a member of the + // group. + Offset int64 +} + +// genCtx adapts the done channel of the generation to a context.Context. This +// is used by Generation.Start so that we can pass a context to go routines +// instead of passing around channels. 
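+//
+// (Editorial note: genCtx implements just enough of context.Context to be
+// useful here; as the methods below show, Deadline and Value are no-ops.)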
+type genCtx struct {
+	gen *Generation
+}
+
+func (c genCtx) Done() <-chan struct{} {
+	return c.gen.done
+}
+
+func (c genCtx) Err() error {
+	select {
+	case <-c.gen.done:
+		return ErrGenerationEnded
+	default:
+		return nil
+	}
+}
+
+func (c genCtx) Deadline() (time.Time, bool) {
+	return time.Time{}, false
+}
+
+func (c genCtx) Value(interface{}) interface{} {
+	return nil
+}
+
+// Generation represents a single consumer group generation. The generation
+// carries the topic+partition assignments for the given member. It also provides
+// facilities for committing offsets and for running functions whose lifecycles
+// are bound to the generation.
+type Generation struct {
+	// ID is the generation ID as assigned by the consumer group coordinator.
+	ID int32
+
+	// GroupID is the name of the consumer group.
+	GroupID string
+
+	// MemberID is the ID assigned to this consumer by the consumer group
+	// coordinator.
+	MemberID string
+
+	// Assignments is the initial state of this Generation. The partition
+	// assignments are grouped by topic.
+	Assignments map[string][]PartitionAssignment
+
+	conn coordinator
+
+	// the following fields are used for process accounting to synchronize
+	// between Start and close. lock protects all of them. done is closed
+	// when the generation is ending in order to signal that the generation
+	// should start self-destructing. closed protects against double-closing
+	// the done chan. routines is a count of running go routines that have been
+	// launched by Start. joined will be closed by the last go routine to exit.
+	lock sync.Mutex
+	done chan struct{}
+	closed bool
+	routines int
+	joined chan struct{}
+
+	retentionMillis int64
+	log func(func(Logger))
+	logError func(func(Logger))
+}
+
+// close stops the generation and waits for all functions launched via Start to
+// terminate.
+func (g *Generation) close() {
+	g.lock.Lock()
+	if !g.closed {
+		close(g.done)
+		g.closed = true
+	}
+	// determine whether any go routines are running that we need to wait for.
+	// waiting needs to happen outside of the critical section.
+	r := g.routines
+	g.lock.Unlock()
+
+	// NOTE: r will be zero if no go routines were ever launched. no need to
+	// wait in that case.
+	if r > 0 {
+		<-g.joined
+	}
+}
+
+// Start launches the provided function in a go routine and adds accounting such
+// that when the function exits, it stops the current generation (if not
+// already in the process of doing so).
+//
+// The provided function MUST support cancellation via the ctx argument and exit
+// in a timely manner once the ctx is complete. When the context is closed, the
+// context's Err() function will return ErrGenerationEnded.
+//
+// When closing out a generation, the consumer group will wait for all functions
+// launched by Start to exit before the group can move on and join the next
+// generation. If the function does not exit promptly, it will stop forward
+// progress for this consumer and potentially cause consumer group membership
+// churn.
+func (g *Generation) Start(fn func(ctx context.Context)) {
+	g.lock.Lock()
+	defer g.lock.Unlock()
+
+	// this is an edge case: if the generation has already closed, then it's
+	// possible that the close func has already waited on outstanding go
+	// routines and exited.
+	//
+	// nonetheless, it's important to honor that the fn is invoked in case the
+	// calling function is waiting e.g. on a channel send or a WaitGroup. in
+	// such a case, fn should immediately exit because ctx.Err() will return
+	// ErrGenerationEnded.
+ if g.closed { + go fn(genCtx{g}) + return + } + + // register that there is one more go routine that's part of this gen. + g.routines++ + + go func() { + fn(genCtx{g}) + g.lock.Lock() + // shut down the generation as soon as one function exits. this is + // different from close() in that it doesn't wait for all go routines in + // the generation to exit. + if !g.closed { + close(g.done) + g.closed = true + } + g.routines-- + // if this was the last go routine in the generation, close the joined + // chan so that close() can exit if it's waiting. + if g.routines == 0 { + close(g.joined) + } + g.lock.Unlock() + }() +} + +// CommitOffsets commits the provided topic+partition+offset combos to the +// consumer group coordinator. This can be used to reset the consumer to +// explicit offsets. +func (g *Generation) CommitOffsets(offsets map[string]map[int]int64) error { + if len(offsets) == 0 { + return nil + } + + topics := make([]offsetCommitRequestV2Topic, 0, len(offsets)) + for topic, partitions := range offsets { + t := offsetCommitRequestV2Topic{Topic: topic} + for partition, offset := range partitions { + t.Partitions = append(t.Partitions, offsetCommitRequestV2Partition{ + Partition: int32(partition), + Offset: offset, + }) + } + topics = append(topics, t) + } + + request := offsetCommitRequestV2{ + GroupID: g.GroupID, + GenerationID: g.ID, + MemberID: g.MemberID, + RetentionTime: g.retentionMillis, + Topics: topics, + } + + _, err := g.conn.offsetCommit(request) + if err == nil { + // if logging is enabled, print out the partitions that were committed. + g.log(func(l Logger) { + var report []string + for _, t := range request.Topics { + report = append(report, fmt.Sprintf("\ttopic: %s", t.Topic)) + for _, p := range t.Partitions { + report = append(report, fmt.Sprintf("\t\tpartition %d: %d", p.Partition, p.Offset)) + } + } + l.Printf("committed offsets for group %s: \n%s", g.GroupID, strings.Join(report, "\n")) + }) + } + + return err +} + +// heartbeatLoop checks in with the consumer group coordinator at the provided +// interval. It exits if it ever encounters an error, which would signal the +// end of the generation. +func (g *Generation) heartbeatLoop(interval time.Duration) { + g.Start(func(ctx context.Context) { + g.log(func(l Logger) { + l.Printf("started heartbeat for group, %v [%v]", g.GroupID, interval) + }) + defer g.log(func(l Logger) { + l.Printf("stopped heartbeat for group %s\n", g.GroupID) + }) + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + _, err := g.conn.heartbeat(heartbeatRequestV0{ + GroupID: g.GroupID, + GenerationID: g.ID, + MemberID: g.MemberID, + }) + if err != nil { + return + } + } + } + }) +} + +// partitionWatcher queries kafka and watches for partition changes, triggering +// a rebalance if changes are found. Similar to heartbeat it's okay to return on +// error here as if you are unable to ask a broker for basic metadata you're in +// a bad spot and should rebalance. Commonly you will see an error here if there +// is a problem with the connection to the coordinator and a rebalance will +// establish a new connection to the coordinator. 
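+//
+// For illustration only: partition watching is opt-in through the consumer
+// group configuration, e.g.
+//
+//	cfg := kafka.ConsumerGroupConfig{
+//		// ...
+//		WatchPartitionChanges:  true,
+//		PartitionWatchInterval: 5 * time.Second,
+//	}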
+func (g *Generation) partitionWatcher(interval time.Duration, topic string) {
+	g.Start(func(ctx context.Context) {
+		g.log(func(l Logger) {
+			l.Printf("started partition watcher for group, %v, topic %v [%v]", g.GroupID, topic, interval)
+		})
+		defer g.log(func(l Logger) {
+			l.Printf("stopped partition watcher for group, %v, topic %v", g.GroupID, topic)
+		})
+
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+
+		ops, err := g.conn.readPartitions(topic)
+		if err != nil {
+			g.logError(func(l Logger) {
+				l.Printf("Problem getting partitions during startup: %v. Returning and setting up nextGeneration", err)
+			})
+			return
+		}
+		oParts := len(ops)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+				ops, err := g.conn.readPartitions(topic)
+				switch {
+				case err == nil, errors.Is(err, UnknownTopicOrPartition):
+					if len(ops) != oParts {
+						g.log(func(l Logger) {
+							l.Printf("Partition changes found, rebalancing group: %v.", g.GroupID)
+						})
+						return
+					}
+
+				default:
+					g.logError(func(l Logger) {
+						l.Printf("Problem getting partitions while checking for changes, %v", err)
+					})
+					var kafkaError Error
+					if errors.As(err, &kafkaError) {
+						continue
+					}
+					// other errors imply that we lost the connection to the coordinator, so we
+					// should abort and reconnect.
+					return
+				}
+			}
+		}
+	})
+}
+
+// coordinator is a subset of the functionality in Conn in order to facilitate
+// testing the consumer group...especially for error conditions that are
+// difficult to instigate with a live broker running in docker.
+type coordinator interface {
+	io.Closer
+	findCoordinator(findCoordinatorRequestV0) (findCoordinatorResponseV0, error)
+	joinGroup(joinGroupRequestV1) (joinGroupResponseV1, error)
+	syncGroup(syncGroupRequestV0) (syncGroupResponseV0, error)
+	leaveGroup(leaveGroupRequestV0) (leaveGroupResponseV0, error)
+	heartbeat(heartbeatRequestV0) (heartbeatResponseV0, error)
+	offsetFetch(offsetFetchRequestV1) (offsetFetchResponseV1, error)
+	offsetCommit(offsetCommitRequestV2) (offsetCommitResponseV2, error)
+	readPartitions(...string) ([]Partition, error)
+}
+
+// timeoutCoordinator wraps the Conn to ensure that every operation has a
+// deadline. Otherwise, it would be possible for requests to block indefinitely
+// if the remote server never responds. There are many spots where the consumer
+// group needs to interact with the broker, so it feels less error prone to
+// factor all of the deadline management into this shared location as opposed to
+// peppering it all through where the code actually interacts with the broker.
+type timeoutCoordinator struct {
+	timeout time.Duration
+	sessionTimeout time.Duration
+	rebalanceTimeout time.Duration
+	conn *Conn
+}
+
+func (t *timeoutCoordinator) Close() error {
+	return t.conn.Close()
+}
+
+func (t *timeoutCoordinator) findCoordinator(req findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+	if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+		return findCoordinatorResponseV0{}, err
+	}
+	return t.conn.findCoordinator(req)
+}
+
+func (t *timeoutCoordinator) joinGroup(req joinGroupRequestV1) (joinGroupResponseV1, error) {
+	// in the case of join group, the consumer group coordinator may wait up
+	// to rebalance timeout in order to wait for all members to join.
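+	// With the defaults, that is Timeout (5s) + RebalanceTimeout (30s) = 35s.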
+ if err := t.conn.SetDeadline(time.Now().Add(t.timeout + t.rebalanceTimeout)); err != nil { + return joinGroupResponseV1{}, err + } + return t.conn.joinGroup(req) +} + +func (t *timeoutCoordinator) syncGroup(req syncGroupRequestV0) (syncGroupResponseV0, error) { + // in the case of sync group, the consumer group leader is given up to + // the session timeout to respond before the coordinator will give up. + if err := t.conn.SetDeadline(time.Now().Add(t.timeout + t.sessionTimeout)); err != nil { + return syncGroupResponseV0{}, err + } + return t.conn.syncGroup(req) +} + +func (t *timeoutCoordinator) leaveGroup(req leaveGroupRequestV0) (leaveGroupResponseV0, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return leaveGroupResponseV0{}, err + } + return t.conn.leaveGroup(req) +} + +func (t *timeoutCoordinator) heartbeat(req heartbeatRequestV0) (heartbeatResponseV0, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return heartbeatResponseV0{}, err + } + return t.conn.heartbeat(req) +} + +func (t *timeoutCoordinator) offsetFetch(req offsetFetchRequestV1) (offsetFetchResponseV1, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return offsetFetchResponseV1{}, err + } + return t.conn.offsetFetch(req) +} + +func (t *timeoutCoordinator) offsetCommit(req offsetCommitRequestV2) (offsetCommitResponseV2, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return offsetCommitResponseV2{}, err + } + return t.conn.offsetCommit(req) +} + +func (t *timeoutCoordinator) readPartitions(topics ...string) ([]Partition, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return nil, err + } + return t.conn.ReadPartitions(topics...) +} + +// NewConsumerGroup creates a new ConsumerGroup. It returns an error if the +// provided configuration is invalid. It does not attempt to connect to the +// Kafka cluster. That happens asynchronously, and any errors will be reported +// by Next. +func NewConsumerGroup(config ConsumerGroupConfig) (*ConsumerGroup, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + cg := &ConsumerGroup{ + config: config, + next: make(chan *Generation), + errs: make(chan error), + done: make(chan struct{}), + } + cg.wg.Add(1) + go func() { + cg.run() + cg.wg.Done() + }() + return cg, nil +} + +// ConsumerGroup models a Kafka consumer group. A caller doesn't interact with +// the group directly. Rather, they interact with a Generation. Every time a +// member enters or exits the group, it results in a new Generation. The +// Generation is where partition assignments and offset management occur. +// Callers will use Next to get a handle to the Generation. +type ConsumerGroup struct { + config ConsumerGroupConfig + next chan *Generation + errs chan error + + closeOnce sync.Once + wg sync.WaitGroup + done chan struct{} +} + +// Close terminates the current generation by causing this member to leave and +// releases all local resources used to participate in the consumer group. +// Close will also end the current generation if it is still active. +func (cg *ConsumerGroup) Close() error { + cg.closeOnce.Do(func() { + close(cg.done) + }) + cg.wg.Wait() + return nil +} + +// Next waits for the next consumer group generation. There will never be two +// active generations. Next will never return a new generation until the +// previous one has completed. 
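+//
+// For illustration only, a typical consume loop looks roughly like:
+//
+//	for {
+//		gen, err := group.Next(context.TODO())
+//		if err != nil {
+//			break
+//		}
+//		for topic, assignments := range gen.Assignments {
+//			for _, assignment := range assignments {
+//				partition, offset := assignment.ID, assignment.Offset
+//				gen.Start(func(ctx context.Context) {
+//					// consume partition from offset until ctx is done
+//					_, _ = partition, offset
+//				})
+//			}
+//		}
+//	}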
+// +// If there are errors setting up the next generation, they will be surfaced +// here. +// +// If the ConsumerGroup has been closed, then Next will return ErrGroupClosed. +func (cg *ConsumerGroup) Next(ctx context.Context) (*Generation, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-cg.done: + return nil, ErrGroupClosed + case err := <-cg.errs: + return nil, err + case next := <-cg.next: + return next, nil + } +} + +func (cg *ConsumerGroup) run() { + // the memberID is the only piece of information that is maintained across + // generations. it starts empty and will be assigned on the first nextGeneration + // when the joinGroup request is processed. it may change again later if + // the CG coordinator fails over or if the member is evicted. otherwise, it + // will be constant for the lifetime of this group. + var memberID string + var err error + for { + memberID, err = cg.nextGeneration(memberID) + + // backoff will be set if this go routine should sleep before continuing + // to the next generation. it will be non-nil in the case of an error + // joining or syncing the group. + var backoff <-chan time.Time + + switch { + case err == nil: + // no error...the previous generation finished normally. + continue + + case errors.Is(err, ErrGroupClosed): + // the CG has been closed...leave the group and exit loop. + _ = cg.leaveGroup(memberID) + return + + case errors.Is(err, RebalanceInProgress): + // in case of a RebalanceInProgress, don't leave the group or + // change the member ID, but report the error. the next attempt + // to join the group will then be subject to the rebalance + // timeout, so the broker will be responsible for throttling + // this loop. + + default: + // leave the group and report the error if we had gotten far + // enough so as to have a member ID. also clear the member id + // so we don't attempt to use it again. in order to avoid + // a tight error loop, backoff before the next attempt to join + // the group. + _ = cg.leaveGroup(memberID) + memberID = "" + backoff = time.After(cg.config.JoinGroupBackoff) + } + // ensure that we exit cleanly in case the CG is done and no one is + // waiting to receive on the unbuffered error channel. + select { + case <-cg.done: + return + case cg.errs <- err: + } + // backoff if needed, being sure to exit cleanly if the CG is done. + if backoff != nil { + select { + case <-cg.done: + // exit cleanly if the group is closed. + return + case <-backoff: + } + } + } +} + +func (cg *ConsumerGroup) nextGeneration(memberID string) (string, error) { + // get a new connection to the coordinator on each loop. the previous + // generation could have exited due to losing the connection, so this + // ensures that we always have a clean starting point. it means we will + // re-connect in certain cases, but that shouldn't be an issue given that + // rebalances are relatively infrequent under normal operating + // conditions. + conn, err := cg.coordinator() + if err != nil { + cg.withErrorLogger(func(log Logger) { + log.Printf("Unable to establish connection to consumer group coordinator for group %s: %v", cg.config.ID, err) + }) + return memberID, err // a prior memberID may still be valid, so don't return "" + } + defer conn.Close() + + var generationID int32 + var groupAssignments GroupMemberAssignments + var assignments map[string][]int32 + + // join group. this will join the group and prepare assignments if our + // consumer is elected leader. it may also change or assign the member ID. 
+ memberID, generationID, groupAssignments, err = cg.joinGroup(conn, memberID) + if err != nil { + cg.withErrorLogger(func(log Logger) { + log.Printf("Failed to join group %s: %v", cg.config.ID, err) + }) + return memberID, err + } + cg.withLogger(func(log Logger) { + log.Printf("Joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID) + }) + + // sync group + assignments, err = cg.syncGroup(conn, memberID, generationID, groupAssignments) + if err != nil { + cg.withErrorLogger(func(log Logger) { + log.Printf("Failed to sync group %s: %v", cg.config.ID, err) + }) + return memberID, err + } + + // fetch initial offsets. + var offsets map[string]map[int]int64 + offsets, err = cg.fetchOffsets(conn, assignments) + if err != nil { + cg.withErrorLogger(func(log Logger) { + log.Printf("Failed to fetch offsets for group %s: %v", cg.config.ID, err) + }) + return memberID, err + } + + // create the generation. + gen := Generation{ + ID: generationID, + GroupID: cg.config.ID, + MemberID: memberID, + Assignments: cg.makeAssignments(assignments, offsets), + conn: conn, + done: make(chan struct{}), + joined: make(chan struct{}), + retentionMillis: int64(cg.config.RetentionTime / time.Millisecond), + log: cg.withLogger, + logError: cg.withErrorLogger, + } + + // spawn all of the go routines required to facilitate this generation. if + // any of these functions exit, then the generation is determined to be + // complete. + gen.heartbeatLoop(cg.config.HeartbeatInterval) + if cg.config.WatchPartitionChanges { + for _, topic := range cg.config.Topics { + gen.partitionWatcher(cg.config.PartitionWatchInterval, topic) + } + } + + // make this generation available for retrieval. if the CG is closed before + // we can send it on the channel, exit. that case is required b/c the next + // channel is unbuffered. if the caller to Next has already bailed because + // it's own teardown logic has been invoked, this would deadlock otherwise. + select { + case <-cg.done: + gen.close() + return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic. + case cg.next <- &gen: + } + + // wait for generation to complete. if the CG is closed before the + // generation is finished, exit and leave the group. + select { + case <-cg.done: + gen.close() + return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic. + case <-gen.done: + // time for next generation! make sure all the current go routines exit + // before continuing onward. + gen.close() + return memberID, nil + } +} + +// connect returns a connection to ANY broker. +func makeConnect(config ConsumerGroupConfig) func(dialer *Dialer, brokers ...string) (coordinator, error) { + return func(dialer *Dialer, brokers ...string) (coordinator, error) { + var err error + for _, broker := range brokers { + var conn *Conn + if conn, err = dialer.Dial("tcp", broker); err == nil { + return &timeoutCoordinator{ + conn: conn, + timeout: config.Timeout, + sessionTimeout: config.SessionTimeout, + rebalanceTimeout: config.RebalanceTimeout, + }, nil + } + } + return nil, err // err will be non-nil + } +} + +// coordinator establishes a connection to the coordinator for this consumer +// group. +func (cg *ConsumerGroup) coordinator() (coordinator, error) { + // NOTE : could try to cache the coordinator to avoid the double connect + // here. since consumer group balances happen infrequently and are + // an expensive operation, we're not currently optimizing that case + // in order to keep the code simpler. 
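+	// The resulting flow: dial any bootstrap broker, ask it which broker
+	// coordinates this group, then dial the coordinator itself.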
+	conn, err := cg.config.connect(cg.config.Dialer, cg.config.Brokers...)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	out, err := conn.findCoordinator(findCoordinatorRequestV0{
+		CoordinatorKey: cg.config.ID,
+	})
+	if err == nil && out.ErrorCode != 0 {
+		err = Error(out.ErrorCode)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	address := net.JoinHostPort(out.Coordinator.Host, strconv.Itoa(int(out.Coordinator.Port)))
+	return cg.config.connect(cg.config.Dialer, address)
+}
+
+// joinGroup attempts to join the reader to the consumer group.
+// Returns GroupMemberAssignments if this Reader was selected as
+// the leader. Otherwise, GroupMemberAssignments will be nil.
+//
+// Possible kafka error codes returned:
+//  * GroupLoadInProgress:
+//  * GroupCoordinatorNotAvailable:
+//  * NotCoordinatorForGroup:
+//  * InconsistentGroupProtocol:
+//  * InvalidSessionTimeout:
+//  * GroupAuthorizationFailed:
+func (cg *ConsumerGroup) joinGroup(conn coordinator, memberID string) (string, int32, GroupMemberAssignments, error) {
+	request, err := cg.makeJoinGroupRequestV1(memberID)
+	if err != nil {
+		return "", 0, nil, err
+	}
+
+	response, err := conn.joinGroup(request)
+	if err == nil && response.ErrorCode != 0 {
+		err = Error(response.ErrorCode)
+	}
+	if err != nil {
+		return "", 0, nil, err
+	}
+
+	memberID = response.MemberID
+	generationID := response.GenerationID
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID)
+	})
+
+	var assignments GroupMemberAssignments
+	if iAmLeader := response.MemberID == response.LeaderID; iAmLeader {
+		v, err := cg.assignTopicPartitions(conn, response)
+		if err != nil {
+			return memberID, 0, nil, err
+		}
+		assignments = v
+
+		cg.withLogger(func(l Logger) {
+			for memberID, assignment := range assignments {
+				for topic, partitions := range assignment {
+					l.Printf("assigned member/topic/partitions %v/%v/%v", memberID, topic, partitions)
+				}
+			}
+		})
+	}
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("joinGroup succeeded for group, %v. generationID=%v, memberID=%v", cg.config.ID, response.GenerationID, response.MemberID)
+	})
+
+	return memberID, generationID, assignments, nil
+}
+
+// makeJoinGroupRequestV1 handles the logic of constructing a joinGroup
+// request.
+func (cg *ConsumerGroup) makeJoinGroupRequestV1(memberID string) (joinGroupRequestV1, error) {
+	request := joinGroupRequestV1{
+		GroupID: cg.config.ID,
+		MemberID: memberID,
+		SessionTimeout: int32(cg.config.SessionTimeout / time.Millisecond),
+		RebalanceTimeout: int32(cg.config.RebalanceTimeout / time.Millisecond),
+		ProtocolType: defaultProtocolType,
+	}
+
+	for _, balancer := range cg.config.GroupBalancers {
+		userData, err := balancer.UserData()
+		if err != nil {
+			return joinGroupRequestV1{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %w", balancer.ProtocolName(), err)
+		}
+		request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV1{
+			ProtocolName: balancer.ProtocolName(),
+			ProtocolMetadata: groupMetadata{
+				Version: 1,
+				Topics: cg.config.Topics,
+				UserData: userData,
+			}.bytes(),
+		})
+	}
+
+	return request, nil
+}
+
+// assignTopicPartitions uses the selected GroupBalancer to assign members to
+// their various partitions.
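+//
+// For illustration only (member names are placeholders), the result is a
+// member -> topic -> partitions mapping such as:
+//
+//	GroupMemberAssignments{
+//		"member-1": {"my-topic": []int{0, 1}},
+//		"member-2": {"my-topic": []int{2, 3}},
+//	}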
+func (cg *ConsumerGroup) assignTopicPartitions(conn coordinator, group joinGroupResponseV1) (GroupMemberAssignments, error) {
+	cg.withLogger(func(l Logger) {
+		l.Printf("selected as leader for group, %s\n", cg.config.ID)
+	})
+
+	balancer, ok := findGroupBalancer(group.GroupProtocol, cg.config.GroupBalancers)
+	if !ok {
+		// NOTE : this shouldn't happen in practice...the broker should not
+		//        return successfully from joinGroup unless all members support
+		//        at least one common protocol.
+		return nil, fmt.Errorf("unable to find selected balancer, %v, for group, %v", group.GroupProtocol, cg.config.ID)
+	}
+
+	members, err := cg.makeMemberProtocolMetadata(group.Members)
+	if err != nil {
+		return nil, err
+	}
+
+	topics := extractTopics(members)
+	partitions, err := conn.readPartitions(topics...)
+
+	// it's not a failure if the topic doesn't exist yet. it results in no
+	// assignments for the topic. this matches the behavior of the official
+	// clients: java, python, and librdkafka.
+	// a topic watcher can trigger a rebalance when the topic comes into being.
+	if err != nil && !errors.Is(err, UnknownTopicOrPartition) {
+		return nil, err
+	}
+
+	cg.withLogger(func(l Logger) {
+		l.Printf("using '%v' balancer to assign group, %v", group.GroupProtocol, cg.config.ID)
+		for _, member := range members {
+			l.Printf("found member: %v/%#v", member.ID, member.UserData)
+		}
+		for _, partition := range partitions {
+			l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID)
+		}
+	})
+
+	return balancer.AssignGroups(members, partitions), nil
+}
+
+// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into []GroupMember.
+func (cg *ConsumerGroup) makeMemberProtocolMetadata(in []joinGroupResponseMemberV1) ([]GroupMember, error) {
+	members := make([]GroupMember, 0, len(in))
+	for _, item := range in {
+		metadata := groupMetadata{}
+		reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata))
+		if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 {
+			return nil, fmt.Errorf("unable to read metadata for member, %v: %w", item.MemberID, err)
+		}
+
+		members = append(members, GroupMember{
+			ID:       item.MemberID,
+			Topics:   metadata.Topics,
+			UserData: metadata.UserData,
+		})
+	}
+	return members, nil
+}
+
+// syncGroup completes the consumer group's next generation by accepting the
+// memberAssignments (if this Reader is the leader) and returning this
+// Reader's subscriptions (topic => partitions).
+//
+// Possible kafka error codes returned:
+//  * GroupCoordinatorNotAvailable:
+//  * NotCoordinatorForGroup:
+//  * IllegalGeneration:
+//  * RebalanceInProgress:
+//  * GroupAuthorizationFailed:
+func (cg *ConsumerGroup) syncGroup(conn coordinator, memberID string, generationID int32, memberAssignments GroupMemberAssignments) (map[string][]int32, error) {
+	request := cg.makeSyncGroupRequestV0(memberID, generationID, memberAssignments)
+	response, err := conn.syncGroup(request)
+	if err == nil && response.ErrorCode != 0 {
+		err = Error(response.ErrorCode)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	assignments := groupAssignment{}
+	reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments))
+	if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil {
+		return nil, err
+	}
+
+	if len(assignments.Topics) == 0 {
+		cg.withLogger(func(l Logger) {
+			l.Printf("received empty assignments for group, %v as member %s for generation %d", cg.config.ID, memberID, generationID)
+		})
+	}
+
+	
cg.withLogger(func(l Logger) { + l.Printf("sync group finished for group, %v", cg.config.ID) + }) + + return assignments.Topics, nil +} + +func (cg *ConsumerGroup) makeSyncGroupRequestV0(memberID string, generationID int32, memberAssignments GroupMemberAssignments) syncGroupRequestV0 { + request := syncGroupRequestV0{ + GroupID: cg.config.ID, + GenerationID: generationID, + MemberID: memberID, + } + + if memberAssignments != nil { + request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV0, 0, 1) + + for memberID, topics := range memberAssignments { + topics32 := make(map[string][]int32) + for topic, partitions := range topics { + partitions32 := make([]int32, len(partitions)) + for i := range partitions { + partitions32[i] = int32(partitions[i]) + } + topics32[topic] = partitions32 + } + request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV0{ + MemberID: memberID, + MemberAssignments: groupAssignment{ + Version: 1, + Topics: topics32, + }.bytes(), + }) + } + + cg.withLogger(func(logger Logger) { + logger.Printf("Syncing %d assignments for generation %d as member %s", len(request.GroupAssignments), generationID, memberID) + }) + } + + return request +} + +func (cg *ConsumerGroup) fetchOffsets(conn coordinator, subs map[string][]int32) (map[string]map[int]int64, error) { + req := offsetFetchRequestV1{ + GroupID: cg.config.ID, + Topics: make([]offsetFetchRequestV1Topic, 0, len(cg.config.Topics)), + } + for _, topic := range cg.config.Topics { + req.Topics = append(req.Topics, offsetFetchRequestV1Topic{ + Topic: topic, + Partitions: subs[topic], + }) + } + offsets, err := conn.offsetFetch(req) + if err != nil { + return nil, err + } + + offsetsByTopic := make(map[string]map[int]int64) + for _, res := range offsets.Responses { + offsetsByPartition := map[int]int64{} + offsetsByTopic[res.Topic] = offsetsByPartition + for _, pr := range res.PartitionResponses { + for _, partition := range subs[res.Topic] { + if partition == pr.Partition { + offset := pr.Offset + if offset < 0 { + offset = cg.config.StartOffset + } + offsetsByPartition[int(partition)] = offset + } + } + } + } + + return offsetsByTopic, nil +} + +func (cg *ConsumerGroup) makeAssignments(assignments map[string][]int32, offsets map[string]map[int]int64) map[string][]PartitionAssignment { + topicAssignments := make(map[string][]PartitionAssignment) + for _, topic := range cg.config.Topics { + topicPartitions := assignments[topic] + topicAssignments[topic] = make([]PartitionAssignment, 0, len(topicPartitions)) + for _, partition := range topicPartitions { + var offset int64 + partitionOffsets, ok := offsets[topic] + if ok { + offset, ok = partitionOffsets[int(partition)] + } + if !ok { + offset = cg.config.StartOffset + } + topicAssignments[topic] = append(topicAssignments[topic], PartitionAssignment{ + ID: int(partition), + Offset: offset, + }) + } + } + return topicAssignments +} + +func (cg *ConsumerGroup) leaveGroup(memberID string) error { + // don't attempt to leave the group if no memberID was ever assigned. + if memberID == "" { + return nil + } + + cg.withLogger(func(log Logger) { + log.Printf("Leaving group %s, member %s", cg.config.ID, memberID) + }) + + // IMPORTANT : leaveGroup establishes its own connection to the coordinator + // because it is often called after some other operation failed. + // said failure could be the result of connection-level issues, + // so we want to re-establish the connection to ensure that we + // are able to process the cleanup step. 
+ coordinator, err := cg.coordinator() + if err != nil { + return err + } + + _, err = coordinator.leaveGroup(leaveGroupRequestV0{ + GroupID: cg.config.ID, + MemberID: memberID, + }) + if err != nil { + cg.withErrorLogger(func(log Logger) { + log.Printf("leave group failed for group, %v, and member, %v: %v", cg.config.ID, memberID, err) + }) + } + + _ = coordinator.Close() + + return err +} + +func (cg *ConsumerGroup) withLogger(do func(Logger)) { + if cg.config.Logger != nil { + do(cg.config.Logger) + } +} + +func (cg *ConsumerGroup) withErrorLogger(do func(Logger)) { + if cg.config.ErrorLogger != nil { + do(cg.config.ErrorLogger) + } else { + cg.withLogger(do) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/crc32.go b/vendor/github.com/segmentio/kafka-go/crc32.go new file mode 100644 index 000000000..fef683428 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/crc32.go @@ -0,0 +1,55 @@ +package kafka + +import ( + "encoding/binary" + "hash/crc32" +) + +type crc32Writer struct { + table *crc32.Table + buffer [8]byte + crc32 uint32 +} + +func (w *crc32Writer) update(b []byte) { + w.crc32 = crc32.Update(w.crc32, w.table, b) +} + +func (w *crc32Writer) writeInt8(i int8) { + w.buffer[0] = byte(i) + w.update(w.buffer[:1]) +} + +func (w *crc32Writer) writeInt16(i int16) { + binary.BigEndian.PutUint16(w.buffer[:2], uint16(i)) + w.update(w.buffer[:2]) +} + +func (w *crc32Writer) writeInt32(i int32) { + binary.BigEndian.PutUint32(w.buffer[:4], uint32(i)) + w.update(w.buffer[:4]) +} + +func (w *crc32Writer) writeInt64(i int64) { + binary.BigEndian.PutUint64(w.buffer[:8], uint64(i)) + w.update(w.buffer[:8]) +} + +func (w *crc32Writer) writeBytes(b []byte) { + n := len(b) + if b == nil { + n = -1 + } + w.writeInt32(int32(n)) + w.update(b) +} + +func (w *crc32Writer) Write(b []byte) (int, error) { + w.update(b) + return len(b), nil +} + +func (w *crc32Writer) WriteString(s string) (int, error) { + w.update([]byte(s)) + return len(s), nil +} diff --git a/vendor/github.com/segmentio/kafka-go/createacls.go b/vendor/github.com/segmentio/kafka-go/createacls.go new file mode 100644 index 000000000..601974171 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/createacls.go @@ -0,0 +1,202 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "github.com/segmentio/kafka-go/protocol/createacls" +) + +// CreateACLsRequest represents a request sent to a kafka broker to add +// new ACLs. +type CreateACLsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of ACL to create. + ACLs []ACLEntry +} + +// CreateACLsResponse represents a response from a kafka broker to an ACL +// creation request. +type CreateACLsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of errors that occurred while attempting to create + // the ACLs. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. 
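+	//
+	// For example (editor's sketch):
+	//
+	//	for _, err := range res.Errors {
+	//		if errors.Is(err, TopicAuthorizationFailed) {
+	//			// the requesting principal lacks permission
+	//		}
+	//	}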
+	Errors []error
+}
+
+type ACLPermissionType int8
+
+const (
+	ACLPermissionTypeUnknown ACLPermissionType = 0
+	ACLPermissionTypeAny     ACLPermissionType = 1
+	ACLPermissionTypeDeny    ACLPermissionType = 2
+	ACLPermissionTypeAllow   ACLPermissionType = 3
+)
+
+func (apt ACLPermissionType) String() string {
+	mapping := map[ACLPermissionType]string{
+		ACLPermissionTypeUnknown: "Unknown",
+		ACLPermissionTypeAny:     "Any",
+		ACLPermissionTypeDeny:    "Deny",
+		ACLPermissionTypeAllow:   "Allow",
+	}
+	s, ok := mapping[apt]
+	if !ok {
+		s = mapping[ACLPermissionTypeUnknown]
+	}
+	return s
+}
+
+// MarshalText transforms an ACLPermissionType into its string representation.
+func (apt ACLPermissionType) MarshalText() ([]byte, error) {
+	return []byte(apt.String()), nil
+}
+
+// UnmarshalText takes a string representation of the permission type and converts it to an ACLPermissionType.
+func (apt *ACLPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]ACLPermissionType{
+		"unknown": ACLPermissionTypeUnknown,
+		"any":     ACLPermissionTypeAny,
+		"deny":    ACLPermissionTypeDeny,
+		"allow":   ACLPermissionTypeAllow,
+	}
+	parsed, ok := mapping[normalized]
+	if !ok {
+		*apt = ACLPermissionTypeUnknown
+		return fmt.Errorf("cannot parse %s as an ACLPermissionType", normalized)
+	}
+	*apt = parsed
+	return nil
+}
+
+type ACLOperationType int8
+
+const (
+	ACLOperationTypeUnknown         ACLOperationType = 0
+	ACLOperationTypeAny             ACLOperationType = 1
+	ACLOperationTypeAll             ACLOperationType = 2
+	ACLOperationTypeRead            ACLOperationType = 3
+	ACLOperationTypeWrite           ACLOperationType = 4
+	ACLOperationTypeCreate          ACLOperationType = 5
+	ACLOperationTypeDelete          ACLOperationType = 6
+	ACLOperationTypeAlter           ACLOperationType = 7
+	ACLOperationTypeDescribe        ACLOperationType = 8
+	ACLOperationTypeClusterAction   ACLOperationType = 9
+	ACLOperationTypeDescribeConfigs ACLOperationType = 10
+	ACLOperationTypeAlterConfigs    ACLOperationType = 11
+	ACLOperationTypeIdempotentWrite ACLOperationType = 12
+)
+
+func (aot ACLOperationType) String() string {
+	mapping := map[ACLOperationType]string{
+		ACLOperationTypeUnknown:         "Unknown",
+		ACLOperationTypeAny:             "Any",
+		ACLOperationTypeAll:             "All",
+		ACLOperationTypeRead:            "Read",
+		ACLOperationTypeWrite:           "Write",
+		ACLOperationTypeCreate:          "Create",
+		ACLOperationTypeDelete:          "Delete",
+		ACLOperationTypeAlter:           "Alter",
+		ACLOperationTypeDescribe:        "Describe",
+		ACLOperationTypeClusterAction:   "ClusterAction",
+		ACLOperationTypeDescribeConfigs: "DescribeConfigs",
+		ACLOperationTypeAlterConfigs:    "AlterConfigs",
+		ACLOperationTypeIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[aot]
+	if !ok {
+		s = mapping[ACLOperationTypeUnknown]
+	}
+	return s
+}
+
+// MarshalText transforms an ACLOperationType into its string representation.
+func (aot ACLOperationType) MarshalText() ([]byte, error) {
+	return []byte(aot.String()), nil
+}
+
+// UnmarshalText takes a string representation of the operation type and converts it to an ACLOperationType.
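+// Parsing is case-insensitive; for example (editor's sketch):
+//
+//	var op ACLOperationType
+//	_ = op.UnmarshalText([]byte("DescribeConfigs")) // op == ACLOperationTypeDescribeConfigs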
+func (aot *ACLOperationType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]ACLOperationType{
+		"unknown":         ACLOperationTypeUnknown,
+		"any":             ACLOperationTypeAny,
+		"all":             ACLOperationTypeAll,
+		"read":            ACLOperationTypeRead,
+		"write":           ACLOperationTypeWrite,
+		"create":          ACLOperationTypeCreate,
+		"delete":          ACLOperationTypeDelete,
+		"alter":           ACLOperationTypeAlter,
+		"describe":        ACLOperationTypeDescribe,
+		"clusteraction":   ACLOperationTypeClusterAction,
+		"describeconfigs": ACLOperationTypeDescribeConfigs,
+		"alterconfigs":    ACLOperationTypeAlterConfigs,
+		"idempotentwrite": ACLOperationTypeIdempotentWrite,
+	}
+	parsed, ok := mapping[normalized]
+	if !ok {
+		*aot = ACLOperationTypeUnknown
+		return fmt.Errorf("cannot parse %s as an ACLOperationType", normalized)
+	}
+	*aot = parsed
+	return nil
+}
+
+type ACLEntry struct {
+	ResourceType        ResourceType
+	ResourceName        string
+	ResourcePatternType PatternType
+	Principal           string
+	Host                string
+	Operation           ACLOperationType
+	PermissionType      ACLPermissionType
+}
+
+// CreateACLs sends an ACLs creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreateACLs(ctx context.Context, req *CreateACLsRequest) (*CreateACLsResponse, error) {
+	acls := make([]createacls.RequestACLs, 0, len(req.ACLs))
+
+	for _, acl := range req.ACLs {
+		acls = append(acls, createacls.RequestACLs{
+			ResourceType:        int8(acl.ResourceType),
+			ResourceName:        acl.ResourceName,
+			ResourcePatternType: int8(acl.ResourcePatternType),
+			Principal:           acl.Principal,
+			Host:                acl.Host,
+			Operation:           int8(acl.Operation),
+			PermissionType:      int8(acl.PermissionType),
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &createacls.Request{
+		Creations: acls,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).CreateACLs: %w", err)
+	}
+
+	res := m.(*createacls.Response)
+	ret := &CreateACLsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make([]error, 0, len(res.Results)),
+	}
+
+	for _, t := range res.Results {
+		ret.Errors = append(ret.Errors, makeError(t.ErrorCode, t.ErrorMessage))
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/createpartitions.go b/vendor/github.com/segmentio/kafka-go/createpartitions.go
new file mode 100644
index 000000000..d4c0d0e70
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/createpartitions.go
@@ -0,0 +1,103 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/createpartitions"
+)
+
+// CreatePartitionsRequest represents a request sent to a kafka broker to create
+// and update topic partitions.
+type CreatePartitionsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of topics to create and their configuration.
+	Topics []TopicPartitionsConfig
+
+	// When set to true, partitions are not created but the configuration is
+	// validated as if they were.
+	ValidateOnly bool
+}
+
+// CreatePartitionsResponse represents a response from a kafka broker to a partition
+// creation request.
+type CreatePartitionsResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mapping of topic names to errors that occurred while attempting to create
+	// the partitions.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[string]error
+}
+
+// CreatePartitions sends a partitions creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreatePartitions(ctx context.Context, req *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	topics := make([]createpartitions.RequestTopic, len(req.Topics))
+
+	for i, t := range req.Topics {
+		topics[i] = createpartitions.RequestTopic{
+			Name:        t.Name,
+			Count:       t.Count,
+			Assignments: t.assignments(),
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &createpartitions.Request{
+		Topics:       topics,
+		TimeoutMs:    c.timeoutMs(ctx, defaultCreatePartitionsTimeout),
+		ValidateOnly: req.ValidateOnly,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).CreatePartitions: %w", err)
+	}
+
+	res := m.(*createpartitions.Response)
+	ret := &CreatePartitionsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make(map[string]error, len(res.Results)),
+	}
+
+	for _, t := range res.Results {
+		ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
+	}
+
+	return ret, nil
+}
+
+type TopicPartitionsConfig struct {
+	// Topic name
+	Name string
+
+	// Topic partition count.
+	Count int32
+
+	// TopicPartitionAssignments among kafka brokers for this topic's partitions.
+	TopicPartitionAssignments []TopicPartitionAssignment
+}
+
+func (t *TopicPartitionsConfig) assignments() []createpartitions.RequestAssignment {
+	if len(t.TopicPartitionAssignments) == 0 {
+		return nil
+	}
+	assignments := make([]createpartitions.RequestAssignment, len(t.TopicPartitionAssignments))
+	for i, a := range t.TopicPartitionAssignments {
+		assignments[i] = createpartitions.RequestAssignment{
+			BrokerIDs: a.BrokerIDs,
+		}
+	}
+	return assignments
+}
+
+type TopicPartitionAssignment struct {
+	// Broker IDs
+	BrokerIDs []int32
+}
diff --git a/vendor/github.com/segmentio/kafka-go/createtopics.go b/vendor/github.com/segmentio/kafka-go/createtopics.go
new file mode 100644
index 000000000..8ad9ebf44
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/createtopics.go
@@ -0,0 +1,390 @@
+package kafka
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/createtopics"
+)
+
+// CreateTopicsRequest represents a request sent to a kafka broker to create
+// new topics.
+type CreateTopicsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of topics to create and their configuration.
+	Topics []TopicConfig
+
+	// When set to true, topics are not created but the configuration is
+	// validated as if they were.
+	//
+	// This field will be ignored if the kafka broker did not support the
+	// CreateTopics API in version 1 or above.
+	ValidateOnly bool
+}
+
+// CreateTopicsResponse represents a response from a kafka broker to a topic
+// creation request.
+type CreateTopicsResponse struct {
+	// The amount of time that the broker throttled the request.
+	//
+	// This field will be zero if the kafka broker did not support the
+	// CreateTopics API in version 2 or above.
+	Throttle time.Duration
+
+	// Mapping of topic names to errors that occurred while attempting to create
+	// the topics.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Errors map[string]error
+}
+
+// CreateTopics sends a topic creation request to a kafka broker and returns the
+// response.
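+//
+// A minimal call looks roughly like this (editor's sketch; the client value,
+// topic name, and sizing are placeholders):
+//
+//	res, err := client.CreateTopics(ctx, &CreateTopicsRequest{
+//		Topics: []TopicConfig{{
+//			Topic:             "example-topic",
+//			NumPartitions:     3,
+//			ReplicationFactor: 1,
+//		}},
+//	})
+//	// On success, res.Errors["example-topic"] is nil.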
+func (c *Client) CreateTopics(ctx context.Context, req *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	topics := make([]createtopics.RequestTopic, len(req.Topics))
+
+	for i, t := range req.Topics {
+		topics[i] = createtopics.RequestTopic{
+			Name:              t.Topic,
+			NumPartitions:     int32(t.NumPartitions),
+			ReplicationFactor: int16(t.ReplicationFactor),
+			Assignments:       t.assignments(),
+			Configs:           t.configs(),
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &createtopics.Request{
+		Topics:       topics,
+		TimeoutMs:    c.timeoutMs(ctx, defaultCreateTopicsTimeout),
+		ValidateOnly: req.ValidateOnly,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).CreateTopics: %w", err)
+	}
+
+	res := m.(*createtopics.Response)
+	ret := &CreateTopicsResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Errors:   make(map[string]error, len(res.Topics)),
+	}
+
+	for _, t := range res.Topics {
+		ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
+	}
+
+	return ret, nil
+}
+
+type ConfigEntry struct {
+	ConfigName  string
+	ConfigValue string
+}
+
+func (c ConfigEntry) toCreateTopicsRequestV0ConfigEntry() createTopicsRequestV0ConfigEntry {
+	return createTopicsRequestV0ConfigEntry(c)
+}
+
+type createTopicsRequestV0ConfigEntry struct {
+	ConfigName  string
+	ConfigValue string
+}
+
+func (t createTopicsRequestV0ConfigEntry) size() int32 {
+	return sizeofString(t.ConfigName) +
+		sizeofString(t.ConfigValue)
+}
+
+func (t createTopicsRequestV0ConfigEntry) writeTo(wb *writeBuffer) {
+	wb.writeString(t.ConfigName)
+	wb.writeString(t.ConfigValue)
+}
+
+type ReplicaAssignment struct {
+	Partition int
+	// The list of brokers where the partition should be allocated. There must
+	// be as many entries in the list as there are replicas of the partition.
+	// The first entry represents the broker that will be the preferred leader
+	// for the partition.
+	//
+	// This field changed in 0.4 from `int` to `[]int`. It was invalid to pass
+	// a single integer as this is supposed to be a list. While this introduces
+	// a breaking change, it probably never worked before.
+	Replicas []int
+}
+
+func (a *ReplicaAssignment) partitionIndex() int32 {
+	return int32(a.Partition)
+}
+
+func (a *ReplicaAssignment) brokerIDs() []int32 {
+	if len(a.Replicas) == 0 {
+		return nil
+	}
+	replicas := make([]int32, len(a.Replicas))
+	for i, r := range a.Replicas {
+		replicas[i] = int32(r)
+	}
+	return replicas
+}
+
+func (a ReplicaAssignment) toCreateTopicsRequestV0ReplicaAssignment() createTopicsRequestV0ReplicaAssignment {
+	return createTopicsRequestV0ReplicaAssignment{
+		Partition: int32(a.Partition),
+		Replicas:  a.brokerIDs(),
+	}
+}
+
+type createTopicsRequestV0ReplicaAssignment struct {
+	Partition int32
+	Replicas  []int32
+}
+
+func (t createTopicsRequestV0ReplicaAssignment) size() int32 {
+	return sizeofInt32(t.Partition) +
+		(int32(len(t.Replicas)+1) * sizeofInt32(0)) // N+1 because the array length is an int32
+}
+
+func (t createTopicsRequestV0ReplicaAssignment) writeTo(wb *writeBuffer) {
+	wb.writeInt32(t.Partition)
+	wb.writeInt32(int32(len(t.Replicas)))
+	for _, r := range t.Replicas {
+		wb.writeInt32(int32(r))
+	}
+}
+
+type TopicConfig struct {
+	// Topic name
+	Topic string
+
+	// NumPartitions created. -1 indicates unset.
+	NumPartitions int
+
+	// ReplicationFactor for the topic. -1 indicates unset.
+	ReplicationFactor int
+
+	// ReplicaAssignments among kafka brokers for this topic's partitions. If this
+	// is set, num_partitions and replication_factor must be unset.
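+	//
+	// For example (editor's sketch; broker IDs are placeholders):
+	//
+	//	ReplicaAssignments: []ReplicaAssignment{
+	//		{Partition: 0, Replicas: []int{1, 2}},
+	//		{Partition: 1, Replicas: []int{2, 3}},
+	//	},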
+	ReplicaAssignments []ReplicaAssignment
+
+	// ConfigEntries holds topic level configuration for topic to be set.
+	ConfigEntries []ConfigEntry
+}
+
+func (t *TopicConfig) assignments() []createtopics.RequestAssignment {
+	if len(t.ReplicaAssignments) == 0 {
+		return nil
+	}
+	assignments := make([]createtopics.RequestAssignment, len(t.ReplicaAssignments))
+	for i, a := range t.ReplicaAssignments {
+		assignments[i] = createtopics.RequestAssignment{
+			PartitionIndex: a.partitionIndex(),
+			BrokerIDs:      a.brokerIDs(),
+		}
+	}
+	return assignments
+}
+
+func (t *TopicConfig) configs() []createtopics.RequestConfig {
+	if len(t.ConfigEntries) == 0 {
+		return nil
+	}
+	configs := make([]createtopics.RequestConfig, len(t.ConfigEntries))
+	for i, c := range t.ConfigEntries {
+		configs[i] = createtopics.RequestConfig{
+			Name:  c.ConfigName,
+			Value: c.ConfigValue,
+		}
+	}
+	return configs
+}
+
+func (t TopicConfig) toCreateTopicsRequestV0Topic() createTopicsRequestV0Topic {
+	requestV0ReplicaAssignments := make([]createTopicsRequestV0ReplicaAssignment, 0, len(t.ReplicaAssignments))
+	for _, a := range t.ReplicaAssignments {
+		requestV0ReplicaAssignments = append(
+			requestV0ReplicaAssignments,
+			a.toCreateTopicsRequestV0ReplicaAssignment())
+	}
+	requestV0ConfigEntries := make([]createTopicsRequestV0ConfigEntry, 0, len(t.ConfigEntries))
+	for _, c := range t.ConfigEntries {
+		requestV0ConfigEntries = append(
+			requestV0ConfigEntries,
+			c.toCreateTopicsRequestV0ConfigEntry())
+	}
+
+	return createTopicsRequestV0Topic{
+		Topic:              t.Topic,
+		NumPartitions:      int32(t.NumPartitions),
+		ReplicationFactor:  int16(t.ReplicationFactor),
+		ReplicaAssignments: requestV0ReplicaAssignments,
+		ConfigEntries:      requestV0ConfigEntries,
+	}
+}
+
+type createTopicsRequestV0Topic struct {
+	// Topic name
+	Topic string
+
+	// NumPartitions created. -1 indicates unset.
+	NumPartitions int32
+
+	// ReplicationFactor for the topic. -1 indicates unset.
+	ReplicationFactor int16
+
+	// ReplicaAssignments among kafka brokers for this topic's partitions. If this
+	// is set, num_partitions and replication_factor must be unset.
+	ReplicaAssignments []createTopicsRequestV0ReplicaAssignment
+
+	// ConfigEntries holds topic level configuration for topic to be set.
+	ConfigEntries []createTopicsRequestV0ConfigEntry
+}
+
+func (t createTopicsRequestV0Topic) size() int32 {
+	return sizeofString(t.Topic) +
+		sizeofInt32(t.NumPartitions) +
+		sizeofInt16(t.ReplicationFactor) +
+		sizeofArray(len(t.ReplicaAssignments), func(i int) int32 { return t.ReplicaAssignments[i].size() }) +
+		sizeofArray(len(t.ConfigEntries), func(i int) int32 { return t.ConfigEntries[i].size() })
}
+
+func (t createTopicsRequestV0Topic) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt32(t.NumPartitions)
+	wb.writeInt16(t.ReplicationFactor)
+	wb.writeArray(len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(wb) })
+	wb.writeArray(len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(wb) })
+}
+
+// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics
+type createTopicsRequestV0 struct {
+	// Topics contains an array of single topic creation requests. Cannot
+	// have multiple entries for the same topic.
+	Topics []createTopicsRequestV0Topic
+
+	// Timeout ms to wait for a topic to be completely created on the
+	// controller node.
+	// Values <= 0 will trigger topic creation and return immediately.
+	Timeout int32
+}
+
+func (t createTopicsRequestV0) size() int32 {
+	return sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +
+		sizeofInt32(t.Timeout)
+}
+
+func (t createTopicsRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
+	wb.writeInt32(t.Timeout)
+}
+
+type createTopicsResponseV0TopicError struct {
+	// Topic name
+	Topic string
+
+	// ErrorCode holds response error code
+	ErrorCode int16
+}
+
+func (t createTopicsResponseV0TopicError) size() int32 {
+	return sizeofString(t.Topic) +
+		sizeofInt16(t.ErrorCode)
+}
+
+func (t createTopicsResponseV0TopicError) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt16(t.ErrorCode)
+}
+
+func (t *createTopicsResponseV0TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+	if remain, err = readString(r, size, &t.Topic); err != nil {
+		return
+	}
+	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
+		return
+	}
+	return
+}
+
+// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics
+type createTopicsResponseV0 struct {
+	TopicErrors []createTopicsResponseV0TopicError
+}
+
+func (t createTopicsResponseV0) size() int32 {
+	return sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() })
+}
+
+func (t createTopicsResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(wb) })
+}
+
+func (t *createTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
+		var topic createTopicsResponseV0TopicError
+		if fnRemain, fnErr = (&topic).readFrom(r, size); fnErr != nil {
+			return
+		}
+		t.TopicErrors = append(t.TopicErrors, topic)
+		return
+	}
+	if remain, err = readArrayWith(r, size, fn); err != nil {
+		return
+	}
+
+	return
+}
+
+func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponseV0, error) {
+	var response createTopicsResponseV0
+
+	err := c.writeOperation(
+		func(deadline time.Time, id int32) error {
+			if request.Timeout == 0 {
+				now := time.Now()
+				deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
+				request.Timeout = milliseconds(deadlineToTimeout(deadline, now))
+			}
+			return c.writeRequest(createTopics, v0, id, request)
+		},
+		func(deadline time.Time, size int) error {
+			return expectZeroSize(func() (remain int, err error) {
+				return (&response).readFrom(&c.rbuf, size)
+			}())
+		},
+	)
+	if err != nil {
+		return response, err
+	}
+	for _, tr := range response.TopicErrors {
+		if tr.ErrorCode == int16(TopicAlreadyExists) {
+			continue
+		}
+		if tr.ErrorCode != 0 {
+			return response, Error(tr.ErrorCode)
+		}
+	}
+
+	return response, nil
+}
+
+// CreateTopics creates one topic per provided configuration with idempotent
+// operational semantics. In other words, if CreateTopics is invoked with a
+// configuration for an existing topic, it will have no effect.
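+//
+// For example (editor's sketch; the connection value and topic name are
+// placeholders):
+//
+//	err := conn.CreateTopics(TopicConfig{
+//		Topic:             "example-topic",
+//		NumPartitions:     1,
+//		ReplicationFactor: 1,
+//	})
+//	// err is nil both when the topic was created and when it already
+//	// existed, since TopicAlreadyExists is filtered out above.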
+func (c *Conn) CreateTopics(topics ...TopicConfig) error { + requestV0Topics := make([]createTopicsRequestV0Topic, 0, len(topics)) + for _, t := range topics { + requestV0Topics = append( + requestV0Topics, + t.toCreateTopicsRequestV0Topic()) + } + + _, err := c.createTopics(createTopicsRequestV0{ + Topics: requestV0Topics, + }) + return err +} diff --git a/vendor/github.com/segmentio/kafka-go/deleteacls.go b/vendor/github.com/segmentio/kafka-go/deleteacls.go new file mode 100644 index 000000000..64cbd26d1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/deleteacls.go @@ -0,0 +1,114 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/deleteacls" +) + +// DeleteACLsRequest represents a request sent to a kafka broker to delete +// ACLs. +type DeleteACLsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of ACL filters to use for deletion. + Filters []DeleteACLsFilter +} + +type DeleteACLsFilter struct { + ResourceTypeFilter ResourceType + ResourceNameFilter string + ResourcePatternTypeFilter PatternType + PrincipalFilter string + HostFilter string + Operation ACLOperationType + PermissionType ACLPermissionType +} + +// DeleteACLsResponse represents a response from a kafka broker to an ACL +// deletion request. +type DeleteACLsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of the results from the deletion request. + Results []DeleteACLsResult +} + +type DeleteACLsResult struct { + Error error + MatchingACLs []DeleteACLsMatchingACLs +} + +type DeleteACLsMatchingACLs struct { + Error error + ResourceType ResourceType + ResourceName string + ResourcePatternType PatternType + Principal string + Host string + Operation ACLOperationType + PermissionType ACLPermissionType +} + +// DeleteACLs sends ACLs deletion request to a kafka broker and returns the +// response. 
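+//
+// For example (editor's sketch; names are placeholders):
+//
+//	res, err := client.DeleteACLs(ctx, &DeleteACLsRequest{
+//		Filters: []DeleteACLsFilter{{
+//			ResourceTypeFilter:        ResourceTypeTopic,
+//			ResourceNameFilter:        "example-topic",
+//			ResourcePatternTypeFilter: PatternTypeLiteral,
+//			Operation:                 ACLOperationTypeRead,
+//			PermissionType:            ACLPermissionTypeAllow,
+//		}},
+//	})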
+func (c *Client) DeleteACLs(ctx context.Context, req *DeleteACLsRequest) (*DeleteACLsResponse, error) { + filters := make([]deleteacls.RequestFilter, 0, len(req.Filters)) + + for _, filter := range req.Filters { + filters = append(filters, deleteacls.RequestFilter{ + ResourceTypeFilter: int8(filter.ResourceTypeFilter), + ResourceNameFilter: filter.ResourceNameFilter, + ResourcePatternTypeFilter: int8(filter.ResourcePatternTypeFilter), + PrincipalFilter: filter.PrincipalFilter, + HostFilter: filter.HostFilter, + Operation: int8(filter.Operation), + PermissionType: int8(filter.PermissionType), + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &deleteacls.Request{ + Filters: filters, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DeleteACLs: %w", err) + } + + res := m.(*deleteacls.Response) + + results := make([]DeleteACLsResult, 0, len(res.FilterResults)) + + for _, result := range res.FilterResults { + matchingACLs := make([]DeleteACLsMatchingACLs, 0, len(result.MatchingACLs)) + + for _, matchingACL := range result.MatchingACLs { + matchingACLs = append(matchingACLs, DeleteACLsMatchingACLs{ + Error: makeError(matchingACL.ErrorCode, matchingACL.ErrorMessage), + ResourceType: ResourceType(matchingACL.ResourceType), + ResourceName: matchingACL.ResourceName, + ResourcePatternType: PatternType(matchingACL.ResourcePatternType), + Principal: matchingACL.Principal, + Host: matchingACL.Host, + Operation: ACLOperationType(matchingACL.Operation), + PermissionType: ACLPermissionType(matchingACL.PermissionType), + }) + } + + results = append(results, DeleteACLsResult{ + Error: makeError(result.ErrorCode, result.ErrorMessage), + MatchingACLs: matchingACLs, + }) + } + + ret := &DeleteACLsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Results: results, + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/deletegroups.go b/vendor/github.com/segmentio/kafka-go/deletegroups.go new file mode 100644 index 000000000..6317ae7fa --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/deletegroups.go @@ -0,0 +1,60 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/deletegroups" +) + +// DeleteGroupsRequest represents a request sent to a kafka broker to delete +// consumer groups. +type DeleteGroupsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Identifiers of groups to delete. + GroupIDs []string +} + +// DeleteGroupsResponse represents a response from a kafka broker to a consumer group +// deletion request. +type DeleteGroupsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Mapping of group ids to errors that occurred while attempting to delete those groups. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Errors map[string]error +} + +// DeleteGroups sends a delete groups request and returns the response. The request is sent to the group coordinator of the first group +// of the request. All deleted groups must be managed by the same group coordinator. 
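+//
+// For example (editor's sketch; the group ID is a placeholder):
+//
+//	res, err := client.DeleteGroups(ctx, &DeleteGroupsRequest{
+//		GroupIDs: []string{"example-group"},
+//	})
+//	// res.Errors["example-group"] reports the per-group outcome.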
+func (c *Client) DeleteGroups( + ctx context.Context, + req *DeleteGroupsRequest, +) (*DeleteGroupsResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &deletegroups.Request{ + GroupIDs: req.GroupIDs, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DeleteGroups: %w", err) + } + + r := m.(*deletegroups.Response) + + ret := &DeleteGroupsResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Errors: make(map[string]error, len(r.Responses)), + } + + for _, t := range r.Responses { + ret.Errors[t.GroupID] = makeError(t.ErrorCode, "") + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/deletetopics.go b/vendor/github.com/segmentio/kafka-go/deletetopics.go new file mode 100644 index 000000000..d758d9fd6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/deletetopics.go @@ -0,0 +1,175 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/deletetopics" +) + +// DeleteTopicsRequest represents a request sent to a kafka broker to delete +// topics. +type DeleteTopicsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Names of topics to delete. + Topics []string +} + +// DeleteTopicsResponse represents a response from a kafka broker to a topic +// deletion request. +type DeleteTopicsResponse struct { + // The amount of time that the broker throttled the request. + // + // This field will be zero if the kafka broker did not support the + // DeleteTopics API in version 1 or above. + Throttle time.Duration + + // Mapping of topic names to errors that occurred while attempting to delete + // the topics. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Errors map[string]error +} + +// DeleteTopics sends a topic deletion request to a kafka broker and returns the +// response. +func (c *Client) DeleteTopics(ctx context.Context, req *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &deletetopics.Request{ + TopicNames: req.Topics, + TimeoutMs: c.timeoutMs(ctx, defaultDeleteTopicsTimeout), + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DeleteTopics: %w", err) + } + + res := m.(*deletetopics.Response) + ret := &DeleteTopicsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Errors: make(map[string]error, len(res.Responses)), + } + + for _, t := range res.Responses { + if t.ErrorCode == 0 { + ret.Errors[t.Name] = nil + } else { + ret.Errors[t.Name] = Error(t.ErrorCode) + } + } + + return ret, nil +} + +// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics +type deleteTopicsRequestV0 struct { + // Topics holds the topic names + Topics []string + + // Timeout holds the time in ms to wait for a topic to be completely deleted + // on the controller node. Values <= 0 will trigger topic deletion and return + // immediately. 
+	Timeout int32
+}
+
+func (t deleteTopicsRequestV0) size() int32 {
+	return sizeofStringArray(t.Topics) +
+		sizeofInt32(t.Timeout)
+}
+
+func (t deleteTopicsRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeStringArray(t.Topics)
+	wb.writeInt32(t.Timeout)
+}
+
+type deleteTopicsResponseV0 struct {
+	// TopicErrorCodes holds per topic error codes
+	TopicErrorCodes []deleteTopicsResponseV0TopicErrorCode
+}
+
+func (t deleteTopicsResponseV0) size() int32 {
+	return sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() })
+}
+
+func (t *deleteTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+	fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
+		var item deleteTopicsResponseV0TopicErrorCode
+		if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil {
+			return
+		}
+		t.TopicErrorCodes = append(t.TopicErrorCodes, item)
+		return
+	}
+	if remain, err = readArrayWith(r, size, fn); err != nil {
+		return
+	}
+	return
+}
+
+func (t deleteTopicsResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeArray(len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(wb) })
+}
+
+type deleteTopicsResponseV0TopicErrorCode struct {
+	// Topic holds the topic name
+	Topic string
+
+	// ErrorCode holds the error code
+	ErrorCode int16
+}
+
+func (t deleteTopicsResponseV0TopicErrorCode) size() int32 {
+	return sizeofString(t.Topic) +
+		sizeofInt16(t.ErrorCode)
+}
+
+func (t *deleteTopicsResponseV0TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+	if remain, err = readString(r, size, &t.Topic); err != nil {
+		return
+	}
+	if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
+		return
+	}
+	return
+}
+
+func (t deleteTopicsResponseV0TopicErrorCode) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Topic)
+	wb.writeInt16(t.ErrorCode)
+}
+
+// deleteTopics deletes the specified topics.
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics
+func (c *Conn) deleteTopics(request deleteTopicsRequestV0) (deleteTopicsResponseV0, error) {
+	var response deleteTopicsResponseV0
+	err := c.writeOperation(
+		func(deadline time.Time, id int32) error {
+			if request.Timeout == 0 {
+				now := time.Now()
+				deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
+				request.Timeout = milliseconds(deadlineToTimeout(deadline, now))
+			}
+			return c.writeRequest(deleteTopics, v0, id, request)
+		},
+		func(deadline time.Time, size int) error {
+			return expectZeroSize(func() (remain int, err error) {
+				return (&response).readFrom(&c.rbuf, size)
+			}())
+		},
+	)
+	if err != nil {
+		return deleteTopicsResponseV0{}, err
+	}
+	for _, c := range response.TopicErrorCodes {
+		if c.ErrorCode != 0 {
+			return response, Error(c.ErrorCode)
+		}
+	}
+	return response, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/describeacls.go b/vendor/github.com/segmentio/kafka-go/describeacls.go
new file mode 100644
index 000000000..d1093bbed
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/describeacls.go
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/describeacls"
+)
+
+// DescribeACLsRequest represents a request sent to a kafka broker to describe
+// existing ACLs.
+type DescribeACLsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Filter to filter ACLs on.
+ Filter ACLFilter +} + +type ACLFilter struct { + ResourceTypeFilter ResourceType + ResourceNameFilter string + // ResourcePatternTypeFilter was added in v1 and is not available prior to that. + ResourcePatternTypeFilter PatternType + PrincipalFilter string + HostFilter string + Operation ACLOperationType + PermissionType ACLPermissionType +} + +// DescribeACLsResponse represents a response from a kafka broker to an ACL +// describe request. +type DescribeACLsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Error that occurred while attempting to describe + // the ACLs. + Error error + + // ACL resources returned from the describe request. + Resources []ACLResource +} + +type ACLResource struct { + ResourceType ResourceType + ResourceName string + PatternType PatternType + ACLs []ACLDescription +} + +type ACLDescription struct { + Principal string + Host string + Operation ACLOperationType + PermissionType ACLPermissionType +} + +func (c *Client) DescribeACLs(ctx context.Context, req *DescribeACLsRequest) (*DescribeACLsResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &describeacls.Request{ + Filter: describeacls.ACLFilter{ + ResourceTypeFilter: int8(req.Filter.ResourceTypeFilter), + ResourceNameFilter: req.Filter.ResourceNameFilter, + ResourcePatternTypeFilter: int8(req.Filter.ResourcePatternTypeFilter), + PrincipalFilter: req.Filter.PrincipalFilter, + HostFilter: req.Filter.HostFilter, + Operation: int8(req.Filter.Operation), + PermissionType: int8(req.Filter.PermissionType), + }, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DescribeACLs: %w", err) + } + + res := m.(*describeacls.Response) + resources := make([]ACLResource, len(res.Resources)) + + for resourceIdx, respResource := range res.Resources { + descriptions := make([]ACLDescription, len(respResource.ACLs)) + + for descriptionIdx, respDescription := range respResource.ACLs { + descriptions[descriptionIdx] = ACLDescription{ + Principal: respDescription.Principal, + Host: respDescription.Host, + Operation: ACLOperationType(respDescription.Operation), + PermissionType: ACLPermissionType(respDescription.PermissionType), + } + } + + resources[resourceIdx] = ACLResource{ + ResourceType: ResourceType(respResource.ResourceType), + ResourceName: respResource.ResourceName, + PatternType: PatternType(respResource.PatternType), + ACLs: descriptions, + } + } + + ret := &DescribeACLsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(res.ErrorCode, res.ErrorMessage), + Resources: resources, + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/describeclientquotas.go b/vendor/github.com/segmentio/kafka-go/describeclientquotas.go new file mode 100644 index 000000000..6291dcd98 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/describeclientquotas.go @@ -0,0 +1,126 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/describeclientquotas" +) + +// DescribeClientQuotasRequest represents a request sent to a kafka broker to +// describe client quotas. +type DescribeClientQuotasRequest struct { + // Address of the kafka broker to send the request to + Addr net.Addr + + // List of quota components to describe. + Components []DescribeClientQuotasRequestComponent + + // Whether the match is strict, i.e. should exclude entities with + // unspecified entity types. 
+	Strict bool
+}
+
+type DescribeClientQuotasRequestComponent struct {
+	// The entity type that the filter component applies to.
+	EntityType string
+
+	// How to match the entity (0 = exact name, 1 = default name,
+	// 2 = any specified name).
+	MatchType int8
+
+	// The string to match against, or null if unused for the match type.
+	Match string
+}
+
+// DescribeClientQuotasResponse represents a response from a kafka broker to a describe client quota request.
+type DescribeClientQuotasResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Error is set to a non-nil value including the code and message if a top-level
+	// error was encountered when doing the update.
+	Error error
+
+	// List of describe client quota responses.
+	Entries []DescribeClientQuotasResponseQuotas
+}
+
+type DescribeClientQuotasEntity struct {
+	// The quota entity type.
+	EntityType string
+
+	// The name of the quota entity, or null if the default.
+	EntityName string
+}
+
+type DescribeClientQuotasValue struct {
+	// The quota configuration key.
+	Key string
+
+	// The quota configuration value.
+	Value float64
+}
+
+type DescribeClientQuotasResponseQuotas struct {
+	// List of client quota entities and their descriptions.
+	Entities []DescribeClientQuotasEntity
+
+	// The client quota configuration values.
+	Values []DescribeClientQuotasValue
+}
+
+// DescribeClientQuotas sends a describe client quotas request to a kafka broker and returns
+// the response.
+func (c *Client) DescribeClientQuotas(ctx context.Context, req *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) {
+	components := make([]describeclientquotas.Component, len(req.Components))
+
+	for componentIdx, component := range req.Components {
+		components[componentIdx] = describeclientquotas.Component{
+			EntityType: component.EntityType,
+			MatchType:  component.MatchType,
+			Match:      component.Match,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &describeclientquotas.Request{
+		Components: components,
+		Strict:     req.Strict,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DescribeClientQuotas: %w", err)
+	}
+
+	res := m.(*describeclientquotas.Response)
+	responseEntries := make([]DescribeClientQuotasResponseQuotas, len(res.Entries))
+
+	for responseEntryIdx, responseEntry := range res.Entries {
+		responseEntities := make([]DescribeClientQuotasEntity, len(responseEntry.Entities))
+		for responseEntityIdx, responseEntity := range responseEntry.Entities {
+			responseEntities[responseEntityIdx] = DescribeClientQuotasEntity{
+				EntityType: responseEntity.EntityType,
+				EntityName: responseEntity.EntityName,
+			}
+		}
+
+		responseValues := make([]DescribeClientQuotasValue, len(responseEntry.Values))
+		for responseValueIdx, responseValue := range responseEntry.Values {
+			responseValues[responseValueIdx] = DescribeClientQuotasValue{
+				Key:   responseValue.Key,
+				Value: responseValue.Value,
+			}
+		}
+		responseEntries[responseEntryIdx] = DescribeClientQuotasResponseQuotas{
+			Entities: responseEntities,
+			Values:   responseValues,
+		}
+	}
+	ret := &DescribeClientQuotasResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Entries:  responseEntries,
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/describeconfigs.go b/vendor/github.com/segmentio/kafka-go/describeconfigs.go
new file mode 100644
index 000000000..17f4f305f
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/describeconfigs.go
@@ -0,0 +1,162 @@
+package kafka
+
+import (
+	"context"
+ "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/describeconfigs" +) + +// DescribeConfigsRequest represents a request sent to a kafka broker to describe configs. +type DescribeConfigsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of resources to get details for. + Resources []DescribeConfigRequestResource + + // Ignored if API version is less than v1 + IncludeSynonyms bool + + // Ignored if API version is less than v3 + IncludeDocumentation bool +} + +type DescribeConfigRequestResource struct { + // Resource Type + ResourceType ResourceType + + // Resource Name + ResourceName string + + // ConfigNames is a list of configurations to update. + ConfigNames []string +} + +// DescribeConfigsResponse represents a response from a kafka broker to a describe config request. +type DescribeConfigsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Resources + Resources []DescribeConfigResponseResource +} + +// DescribeConfigResponseResource. +type DescribeConfigResponseResource struct { + // Resource Type + ResourceType int8 + + // Resource Name + ResourceName string + + // Error + Error error + + // ConfigEntries + ConfigEntries []DescribeConfigResponseConfigEntry +} + +// DescribeConfigResponseConfigEntry. +type DescribeConfigResponseConfigEntry struct { + ConfigName string + ConfigValue string + ReadOnly bool + + // Ignored if API version is greater than v0 + IsDefault bool + + // Ignored if API version is less than v1 + ConfigSource int8 + + IsSensitive bool + + // Ignored if API version is less than v1 + ConfigSynonyms []DescribeConfigResponseConfigSynonym + + // Ignored if API version is less than v3 + ConfigType int8 + + // Ignored if API version is less than v3 + ConfigDocumentation string +} + +// DescribeConfigResponseConfigSynonym. +type DescribeConfigResponseConfigSynonym struct { + // Ignored if API version is less than v1 + ConfigName string + + // Ignored if API version is less than v1 + ConfigValue string + + // Ignored if API version is less than v1 + ConfigSource int8 +} + +// DescribeConfigs sends a config altering request to a kafka broker and returns the +// response. 
+func (c *Client) DescribeConfigs(ctx context.Context, req *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	resources := make([]describeconfigs.RequestResource, len(req.Resources))
+
+	for i, t := range req.Resources {
+		resources[i] = describeconfigs.RequestResource{
+			ResourceType: int8(t.ResourceType),
+			ResourceName: t.ResourceName,
+			ConfigNames:  t.ConfigNames,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &describeconfigs.Request{
+		Resources:            resources,
+		IncludeSynonyms:      req.IncludeSynonyms,
+		IncludeDocumentation: req.IncludeDocumentation,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).DescribeConfigs: %w", err)
+	}
+
+	res := m.(*describeconfigs.Response)
+	ret := &DescribeConfigsResponse{
+		Throttle:  makeDuration(res.ThrottleTimeMs),
+		Resources: make([]DescribeConfigResponseResource, len(res.Resources)),
+	}
+
+	for i, t := range res.Resources {
+		configEntries := make([]DescribeConfigResponseConfigEntry, len(t.ConfigEntries))
+		for j, v := range t.ConfigEntries {
+			configSynonyms := make([]DescribeConfigResponseConfigSynonym, len(v.ConfigSynonyms))
+			for k, cs := range v.ConfigSynonyms {
+				configSynonyms[k] = DescribeConfigResponseConfigSynonym{
+					ConfigName:   cs.ConfigName,
+					ConfigValue:  cs.ConfigValue,
+					ConfigSource: cs.ConfigSource,
+				}
+			}
+
+			configEntries[j] = DescribeConfigResponseConfigEntry{
+				ConfigName:          v.ConfigName,
+				ConfigValue:         v.ConfigValue,
+				ReadOnly:            v.ReadOnly,
+				ConfigSource:        v.ConfigSource,
+				IsDefault:           v.IsDefault,
+				IsSensitive:         v.IsSensitive,
+				ConfigSynonyms:      configSynonyms,
+				ConfigType:          v.ConfigType,
+				ConfigDocumentation: v.ConfigDocumentation,
+			}
+		}
+
+		ret.Resources[i] = DescribeConfigResponseResource{
+			ResourceType:  t.ResourceType,
+			ResourceName:  t.ResourceName,
+			Error:         makeError(t.ErrorCode, t.ErrorMessage),
+			ConfigEntries: configEntries,
+		}
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/describegroups.go b/vendor/github.com/segmentio/kafka-go/describegroups.go
new file mode 100644
index 000000000..4faf7a01b
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/describegroups.go
@@ -0,0 +1,298 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+
+	"github.com/segmentio/kafka-go/protocol/describegroups"
+)
+
+// DescribeGroupsRequest is a request to the DescribeGroups API.
+type DescribeGroupsRequest struct {
+	// Addr is the address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupIDs is a slice of groups to get details for.
+	GroupIDs []string
+}
+
+// DescribeGroupsResponse is a response from the DescribeGroups API.
+type DescribeGroupsResponse struct {
+	// Groups is a slice of details for the requested groups.
+	Groups []DescribeGroupsResponseGroup
+}
+
+// DescribeGroupsResponseGroup contains the response details for a single group.
+type DescribeGroupsResponseGroup struct {
+	// Error is set to a non-nil value if there was an error fetching the details
+	// for this group.
+	Error error
+
+	// GroupID is the ID of the group.
+	GroupID string
+
+	// GroupState is a description of the group state.
+	GroupState string
+
+	// Members contains details about each member of the group.
+	Members []DescribeGroupsResponseMember
+}
+
+// DescribeGroupsResponseMember represents the membership information for a single group member.
+type DescribeGroupsResponseMember struct {
+	// MemberID is the ID of the group member.
+	MemberID string
+
+	// ClientID is the ID of the client that the group member is using.
+	ClientID string
+
+	// ClientHost is the host of the client that the group member is connecting from.
+	ClientHost string
+
+	// MemberMetadata contains metadata about this group member.
+	MemberMetadata DescribeGroupsResponseMemberMetadata
+
+	// MemberAssignments contains the topic partitions that this member is assigned to.
+	MemberAssignments DescribeGroupsResponseAssignments
+}
+
+// DescribeGroupsResponseMemberMetadata stores metadata associated with a group member.
+type DescribeGroupsResponseMemberMetadata struct {
+	// Version is the version of the metadata.
+	Version int
+
+	// Topics is the list of topics that the member is assigned to.
+	Topics []string
+
+	// UserData is the user data for the member.
+	UserData []byte
+
+	// OwnedPartitions contains the partitions owned by this group member; only set if
+	// consumers are using a cooperative rebalancing assignor protocol.
+	OwnedPartitions []DescribeGroupsResponseMemberMetadataOwnedPartition
+}
+
+type DescribeGroupsResponseMemberMetadataOwnedPartition struct {
+	// Topic is the name of the topic.
+	Topic string
+
+	// Partitions is the partitions that are owned by the group in the topic.
+	Partitions []int
+}
+
+// DescribeGroupsResponseAssignments stores the topic partition assignment data for a group member.
+type DescribeGroupsResponseAssignments struct {
+	// Version is the version of the assignments data.
+	Version int
+
+	// Topics contains the details of the partition assignments for each topic.
+	Topics []GroupMemberTopic
+
+	// UserData is the user data for the member.
+	UserData []byte
+}
+
+// GroupMemberTopic is a mapping from a topic to a list of partitions in the topic. It is used
+// to represent the topic partitions that have been assigned to a group member.
+type GroupMemberTopic struct {
+	// Topic is the name of the topic.
+	Topic string
+
+	// Partitions is a slice of partition IDs that this member is assigned to in the topic.
+	Partitions []int
+}
+
+// DescribeGroups calls the Kafka DescribeGroups API to get information about one or more
+// consumer groups. See https://kafka.apache.org/protocol#The_Messages_DescribeGroups for
+// more information.
+func (c *Client) DescribeGroups(
+	ctx context.Context,
+	req *DescribeGroupsRequest,
+) (*DescribeGroupsResponse, error) {
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		&describegroups.Request{
+			Groups: req.GroupIDs,
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*describegroups.Response)
+	resp := &DescribeGroupsResponse{}
+
+	for _, apiGroup := range apiResp.Groups {
+		group := DescribeGroupsResponseGroup{
+			Error:      makeError(apiGroup.ErrorCode, ""),
+			GroupID:    apiGroup.GroupID,
+			GroupState: apiGroup.GroupState,
+		}
+
+		for _, member := range apiGroup.Members {
+			decodedMetadata, err := decodeMemberMetadata(member.MemberMetadata)
+			if err != nil {
+				return nil, err
+			}
+			decodedAssignments, err := decodeMemberAssignments(member.MemberAssignment)
+			if err != nil {
+				return nil, err
+			}
+
+			group.Members = append(group.Members, DescribeGroupsResponseMember{
+				MemberID:          member.MemberID,
+				ClientID:          member.ClientID,
+				ClientHost:        member.ClientHost,
+				MemberAssignments: decodedAssignments,
+				MemberMetadata:    decodedMetadata,
+			})
+		}
+		resp.Groups = append(resp.Groups, group)
+	}
+
+	return resp, nil
+}
+
+// decodeMemberMetadata converts raw metadata bytes to a
+// DescribeGroupsResponseMemberMetadata struct.
+//
+// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49
+// for protocol details.
+func decodeMemberMetadata(rawMetadata []byte) (DescribeGroupsResponseMemberMetadata, error) {
+	mm := DescribeGroupsResponseMemberMetadata{}
+
+	if len(rawMetadata) == 0 {
+		return mm, nil
+	}
+
+	buf := bytes.NewBuffer(rawMetadata)
+	bufReader := bufio.NewReader(buf)
+	remain := len(rawMetadata)
+
+	var err error
+	var version16 int16
+
+	if remain, err = readInt16(bufReader, remain, &version16); err != nil {
+		return mm, err
+	}
+	mm.Version = int(version16)
+
+	if remain, err = readStringArray(bufReader, remain, &mm.Topics); err != nil {
+		return mm, err
+	}
+	if remain, err = readBytes(bufReader, remain, &mm.UserData); err != nil {
+		return mm, err
+	}
+
+	if mm.Version == 1 && remain > 0 {
+		fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
+			op := DescribeGroupsResponseMemberMetadataOwnedPartition{}
+			if fnRemain, fnErr = readString(r, size, &op.Topic); fnErr != nil {
+				return
+			}
+
+			ps := []int32{}
+			if fnRemain, fnErr = readInt32Array(r, fnRemain, &ps); fnErr != nil {
+				return
+			}
+
+			for _, p := range ps {
+				op.Partitions = append(op.Partitions, int(p))
+			}
+
+			mm.OwnedPartitions = append(mm.OwnedPartitions, op)
+			return
+		}
+
+		if remain, err = readArrayWith(bufReader, remain, fn); err != nil {
+			return mm, err
+		}
+	}
+
+	if remain != 0 {
+		return mm, fmt.Errorf("got non-zero number of bytes remaining: %d", remain)
+	}
+
+	return mm, nil
+}
+
+// decodeMemberAssignments converts raw assignment bytes to a DescribeGroupsResponseAssignments
+// struct.
+//
+// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49
+// for protocol details.
+func decodeMemberAssignments(rawAssignments []byte) (DescribeGroupsResponseAssignments, error) {
+	ma := DescribeGroupsResponseAssignments{}
+
+	if len(rawAssignments) == 0 {
+		return ma, nil
+	}
+
+	buf := bytes.NewBuffer(rawAssignments)
+	bufReader := bufio.NewReader(buf)
+	remain := len(rawAssignments)
+
+	var err error
+	var version16 int16
+
+	if remain, err = readInt16(bufReader, remain, &version16); err != nil {
+		return ma, err
+	}
+	ma.Version = int(version16)
+
+	fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
+		item := GroupMemberTopic{}
+
+		if fnRemain, fnErr = readString(r, size, &item.Topic); fnErr != nil {
+			return
+		}
+
+		partitions := []int32{}
+
+		if fnRemain, fnErr = readInt32Array(r, fnRemain, &partitions); fnErr != nil {
+			return
+		}
+		for _, partition := range partitions {
+			item.Partitions = append(item.Partitions, int(partition))
+		}
+
+		ma.Topics = append(ma.Topics, item)
+		return
+	}
+	if remain, err = readArrayWith(bufReader, remain, fn); err != nil {
+		return ma, err
+	}
+
+	if remain, err = readBytes(bufReader, remain, &ma.UserData); err != nil {
+		return ma, err
+	}
+
+	if remain != 0 {
+		return ma, fmt.Errorf("got non-zero number of bytes remaining: %d", remain)
+	}
+
+	return ma, nil
+}
+
+// readInt32Array reads an array of int32s. It's adapted from the implementation of
+// readStringArray.
+func readInt32Array(r *bufio.Reader, sz int, v *[]int32) (remain int, err error) { + var content []int32 + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value int32 + if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil { + return + } + content = append(content, value) + return + } + if remain, err = readArrayWith(r, sz, fn); err != nil { + return + } + + *v = content + return +} diff --git a/vendor/github.com/segmentio/kafka-go/describeuserscramcredentials.go b/vendor/github.com/segmentio/kafka-go/describeuserscramcredentials.go new file mode 100644 index 000000000..7194ea1e0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/describeuserscramcredentials.go @@ -0,0 +1,97 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/describeuserscramcredentials" +) + +// DescribeUserScramCredentialsRequest represents a request sent to a kafka broker to +// describe user scram credentials. +type DescribeUserScramCredentialsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of Scram users to describe + Users []UserScramCredentialsUser +} + +type UserScramCredentialsUser struct { + Name string +} + +// DescribeUserScramCredentialsResponse represents a response from a kafka broker to a describe user +// credentials request. +type DescribeUserScramCredentialsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Top level error that occurred while attempting to describe + // the user scram credentials. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error + + // List of described user scram credentials. + Results []DescribeUserScramCredentialsResponseResult +} + +type DescribeUserScramCredentialsResponseResult struct { + User string + CredentialInfos []DescribeUserScramCredentialsCredentialInfo + Error error +} + +type DescribeUserScramCredentialsCredentialInfo struct { + Mechanism ScramMechanism + Iterations int +} + +// DescribeUserScramCredentials sends a user scram credentials describe request to a kafka broker and returns +// the response. 
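+//
+// A minimal usage sketch (editor's illustration, not part of the upstream
+// library docs; the broker address and user name are placeholders):
+//
+//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
+//	resp, err := client.DescribeUserScramCredentials(ctx,
+//		&kafka.DescribeUserScramCredentialsRequest{
+//			Users: []kafka.UserScramCredentialsUser{{Name: "my-user"}},
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, r := range resp.Results {
+//		fmt.Println(r.User, r.Error)
+//	}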
+func (c *Client) DescribeUserScramCredentials(ctx context.Context, req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) { + users := make([]describeuserscramcredentials.RequestUser, len(req.Users)) + + for userIdx, user := range req.Users { + users[userIdx] = describeuserscramcredentials.RequestUser{ + Name: user.Name, + } + } + + m, err := c.roundTrip(ctx, req.Addr, &describeuserscramcredentials.Request{ + Users: users, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DescribeUserScramCredentials: %w", err) + } + + res := m.(*describeuserscramcredentials.Response) + responseResults := make([]DescribeUserScramCredentialsResponseResult, len(res.Results)) + + for responseIdx, responseResult := range res.Results { + credentialInfos := make([]DescribeUserScramCredentialsCredentialInfo, len(responseResult.CredentialInfos)) + + for credentialInfoIdx, credentialInfo := range responseResult.CredentialInfos { + credentialInfos[credentialInfoIdx] = DescribeUserScramCredentialsCredentialInfo{ + Mechanism: ScramMechanism(credentialInfo.Mechanism), + Iterations: int(credentialInfo.Iterations), + } + } + responseResults[responseIdx] = DescribeUserScramCredentialsResponseResult{ + User: responseResult.User, + CredentialInfos: credentialInfos, + Error: makeError(responseResult.ErrorCode, responseResult.ErrorMessage), + } + } + ret := &DescribeUserScramCredentialsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(res.ErrorCode, res.ErrorMessage), + Results: responseResults, + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/dialer.go b/vendor/github.com/segmentio/kafka-go/dialer.go new file mode 100644 index 000000000..7786ed320 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/dialer.go @@ -0,0 +1,493 @@ +package kafka + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/segmentio/kafka-go/sasl" +) + +// The Dialer type mirrors the net.Dialer API but is designed to open kafka +// connections instead of raw network connections. +type Dialer struct { + // Unique identifier for client connections established by this Dialer. + ClientID string + + // Optionally specifies the function that the dialer uses to establish + // network connections. If nil, net.(*Dialer).DialContext is used instead. + // + // When DialFunc is set, LocalAddr, DualStack, FallbackDelay, and KeepAlive + // are ignored. + DialFunc func(ctx context.Context, network string, address string) (net.Conn, error) + + // Timeout is the maximum amount of time a dial will wait for a connect to + // complete. If Deadline is also set, it may fail earlier. + // + // The default is no timeout. + // + // When dialing a name with multiple IP addresses, the timeout may be + // divided between them. + // + // With or without a timeout, the operating system may impose its own + // earlier timeout. For instance, TCP timeouts are often around 3 minutes. + Timeout time.Duration + + // Deadline is the absolute point in time after which dials will fail. + // If Timeout is set, it may fail earlier. + // Zero means no deadline, or dependent on the operating system as with the + // Timeout option. + Deadline time.Time + + // LocalAddr is the local address to use when dialing an address. + // The address must be of a compatible type for the network being dialed. + // If nil, a local address is automatically chosen. 
+	LocalAddr net.Addr
+
+	// DualStack enables RFC 6555-compliant "Happy Eyeballs" dialing when the
+	// network is "tcp" and the destination is a host name with both IPv4 and
+	// IPv6 addresses. This allows a client to tolerate networks where one
+	// address family is silently broken.
+	DualStack bool
+
+	// FallbackDelay specifies the length of time to wait before spawning a
+	// fallback connection, when DualStack is enabled.
+	// If zero, a default delay of 300ms is used.
+	FallbackDelay time.Duration
+
+	// KeepAlive specifies the keep-alive period for an active network
+	// connection.
+	// If zero, keep-alives are not enabled. Network protocols that do not
+	// support keep-alives ignore this field.
+	KeepAlive time.Duration
+
+	// Resolver optionally gives a hook to convert the broker address into an
+	// alternate host or IP address which is useful for custom service discovery.
+	// If a custom resolver returns any possible hosts, the first one will be
+	// used and the original discarded. If a port number is included with the
+	// resolved host, it will only be used if a port number was not previously
+	// specified. If no port is specified or resolved, the default of 9092 will be
+	// used.
+	Resolver Resolver
+
+	// TLS enables Dialer to open secure connections. If nil, standard net.Conn
+	// will be used.
+	TLS *tls.Config
+
+	// SASLMechanism configures the Dialer to use SASL authentication. If nil,
+	// no authentication will be performed.
+	SASLMechanism sasl.Mechanism
+
+	// The transactional ID to use for transactional delivery. Idempotent
+	// delivery should be enabled if a transactional ID is configured.
+	// For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
+	// Empty string means that the connection will be non-transactional.
+	TransactionalID string
+}
+
+// Dial connects to the address on the named network.
+func (d *Dialer) Dial(network string, address string) (*Conn, error) {
+	return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext connects to the address on the named network using the provided
+// context.
+//
+// The provided Context must be non-nil. If the context expires before the
+// connection is complete, an error is returned. Once successfully connected,
+// any expiration of the context will not affect the connection.
+//
+// When using TCP, and the host in the address parameter resolves to multiple
+// network addresses, any dial timeout (from d.Timeout or ctx) is spread over
+// each consecutive dial, such that each is given an appropriate fraction of the
+// time to connect. For example, if a host has 4 IP addresses and the timeout is
+// 1 minute, the connect to each single address will be given 15 seconds to
+// complete before trying the next one.
+func (d *Dialer) DialContext(ctx context.Context, network string, address string) (*Conn, error) {
+	return d.connect(
+		ctx,
+		network,
+		address,
+		ConnConfig{
+			ClientID:        d.ClientID,
+			TransactionalID: d.TransactionalID,
+		},
+	)
+}
+
+// DialLeader opens a connection to the leader of the partition for a given
+// topic.
+//
+// The address given to the DialContext method may not be the one that the
+// connection will end up being established to, because the dialer will look up
+// the partition leader for the topic and return a connection to that server.
+// The original address is only used as a mechanism to discover the
+// configuration of the kafka cluster that we're connecting to.
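+//
+// A minimal usage sketch (editor's illustration; the address, topic, and
+// partition are placeholders):
+//
+//	conn, err := kafka.DefaultDialer.DialLeader(ctx, "tcp",
+//		"localhost:9092", "my-topic", 0)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer conn.Close()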
+func (d *Dialer) DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { + p, err := d.LookupPartition(ctx, network, address, topic, partition) + if err != nil { + return nil, err + } + return d.DialPartition(ctx, network, address, p) +} + +// DialPartition opens a connection to the leader of the partition specified by partition +// descriptor. It's strongly advised to use descriptor of the partition that comes out of +// functions LookupPartition or LookupPartitions. +func (d *Dialer) DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { + return d.connect(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port)), ConnConfig{ + ClientID: d.ClientID, + Topic: partition.Topic, + Partition: partition.ID, + Broker: partition.Leader.ID, + Rack: partition.Leader.Rack, + TransactionalID: d.TransactionalID, + }) +} + +// LookupLeader searches for the kafka broker that is the leader of the +// partition for a given topic, returning a Broker value representing it. +func (d *Dialer) LookupLeader(ctx context.Context, network string, address string, topic string, partition int) (Broker, error) { + p, err := d.LookupPartition(ctx, network, address, topic, partition) + return p.Leader, err +} + +// LookupPartition searches for the description of specified partition id. +func (d *Dialer) LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { + c, err := d.DialContext(ctx, network, address) + if err != nil { + return Partition{}, err + } + defer c.Close() + + brkch := make(chan Partition, 1) + errch := make(chan error, 1) + + go func() { + for attempt := 0; true; attempt++ { + if attempt != 0 { + if !sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) { + errch <- ctx.Err() + return + } + } + + partitions, err := c.ReadPartitions(topic) + if err != nil { + if isTemporary(err) { + continue + } + errch <- err + return + } + + for _, p := range partitions { + if p.ID == partition { + brkch <- p + return + } + } + } + + errch <- UnknownTopicOrPartition + }() + + var prt Partition + select { + case prt = <-brkch: + case err = <-errch: + case <-ctx.Done(): + err = ctx.Err() + } + return prt, err +} + +// LookupPartitions returns the list of partitions that exist for the given topic. +func (d *Dialer) LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { + conn, err := d.DialContext(ctx, network, address) + if err != nil { + return nil, err + } + defer conn.Close() + + prtch := make(chan []Partition, 1) + errch := make(chan error, 1) + + go func() { + if prt, err := conn.ReadPartitions(topic); err != nil { + errch <- err + } else { + prtch <- prt + } + }() + + var prt []Partition + select { + case prt = <-prtch: + case err = <-errch: + case <-ctx.Done(): + err = ctx.Err() + } + return prt, err +} + +// connectTLS returns a tls.Conn that has already completed the Handshake. 
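+//
+// Callers normally do not invoke this directly; setting the TLS field on the
+// Dialer is enough to get a secure connection (editor's note, with a sketch
+// using placeholder values):
+//
+//	d := &kafka.Dialer{Timeout: 10 * time.Second, TLS: &tls.Config{}}
+//	conn, err := d.Dial("tcp", "localhost:9093")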
+func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn, config *tls.Config) (tlsConn *tls.Conn, err error) { + tlsConn = tls.Client(conn, config) + errch := make(chan error) + + go func() { + defer close(errch) + errch <- tlsConn.Handshake() + }() + + select { + case <-ctx.Done(): + conn.Close() + tlsConn.Close() + <-errch // ignore possible error from Handshake + err = ctx.Err() + + case err = <-errch: + } + + return +} + +// connect opens a socket connection to the broker, wraps it to create a +// kafka connection, and performs SASL authentication if configured to do so. +func (d *Dialer) connect(ctx context.Context, network, address string, connCfg ConnConfig) (*Conn, error) { + if d.Timeout != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.Timeout) + defer cancel() + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + c, err := d.dialContext(ctx, network, address) + if err != nil { + return nil, fmt.Errorf("failed to dial: %w", err) + } + + conn := NewConnWith(c, connCfg) + + if d.SASLMechanism != nil { + host, port, err := splitHostPortNumber(address) + if err != nil { + return nil, fmt.Errorf("could not determine host/port for SASL authentication: %w", err) + } + metadata := &sasl.Metadata{ + Host: host, + Port: port, + } + if err := d.authenticateSASL(sasl.WithMetadata(ctx, metadata), conn); err != nil { + _ = conn.Close() + return nil, fmt.Errorf("could not successfully authenticate to %s:%d with SASL: %w", host, port, err) + } + } + + return conn, nil +} + +// authenticateSASL performs all of the required requests to authenticate this +// connection. If any step fails, this function returns with an error. A nil +// error indicates successful authentication. +// +// In case of error, this function *does not* close the connection. That is the +// responsibility of the caller. +func (d *Dialer) authenticateSASL(ctx context.Context, conn *Conn) error { + if err := conn.saslHandshake(d.SASLMechanism.Name()); err != nil { + return fmt.Errorf("SASL handshake failed: %w", err) + } + + sess, state, err := d.SASLMechanism.Start(ctx) + if err != nil { + return fmt.Errorf("SASL authentication process could not be started: %w", err) + } + + for completed := false; !completed; { + challenge, err := conn.saslAuthenticate(state) + switch { + case err == nil: + case errors.Is(err, io.EOF): + // the broker may communicate a failed exchange by closing the + // connection (esp. in the case where we're passing opaque sasl + // data over the wire since there's no protocol info). 
+ return SASLAuthenticationFailed + default: + return err + } + + completed, state, err = sess.Next(ctx, challenge) + if err != nil { + return fmt.Errorf("SASL authentication process has failed: %w", err) + } + } + + return nil +} + +func (d *Dialer) dialContext(ctx context.Context, network string, addr string) (net.Conn, error) { + address, err := lookupHost(ctx, addr, d.Resolver) + if err != nil { + return nil, fmt.Errorf("failed to resolve host: %w", err) + } + + dial := d.DialFunc + if dial == nil { + dial = (&net.Dialer{ + LocalAddr: d.LocalAddr, + DualStack: d.DualStack, + FallbackDelay: d.FallbackDelay, + KeepAlive: d.KeepAlive, + }).DialContext + } + + conn, err := dial(ctx, network, address) + if err != nil { + return nil, fmt.Errorf("failed to open connection to %s: %w", address, err) + } + + if d.TLS != nil { + c := d.TLS + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if c.ServerName == "" { + c = d.TLS.Clone() + // Copied from tls.go in the standard library. + colonPos := strings.LastIndex(address, ":") + if colonPos == -1 { + colonPos = len(address) + } + hostname := address[:colonPos] + c.ServerName = hostname + } + return d.connectTLS(ctx, conn, c) + } + + return conn, nil +} + +// DefaultDialer is the default dialer used when none is specified. +var DefaultDialer = &Dialer{ + Timeout: 10 * time.Second, + DualStack: true, +} + +// Dial is a convenience wrapper for DefaultDialer.Dial. +func Dial(network string, address string) (*Conn, error) { + return DefaultDialer.Dial(network, address) +} + +// DialContext is a convenience wrapper for DefaultDialer.DialContext. +func DialContext(ctx context.Context, network string, address string) (*Conn, error) { + return DefaultDialer.DialContext(ctx, network, address) +} + +// DialLeader is a convenience wrapper for DefaultDialer.DialLeader. +func DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { + return DefaultDialer.DialLeader(ctx, network, address, topic, partition) +} + +// DialPartition is a convenience wrapper for DefaultDialer.DialPartition. +func DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { + return DefaultDialer.DialPartition(ctx, network, address, partition) +} + +// LookupPartition is a convenience wrapper for DefaultDialer.LookupPartition. +func LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { + return DefaultDialer.LookupPartition(ctx, network, address, topic, partition) +} + +// LookupPartitions is a convenience wrapper for DefaultDialer.LookupPartitions. 
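+//
+// For example (editor's illustration; the address and topic are placeholders):
+//
+//	partitions, err := kafka.LookupPartitions(ctx, "tcp", "localhost:9092", "my-topic")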
+func LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { + return DefaultDialer.LookupPartitions(ctx, network, address, topic) +} + +func sleep(ctx context.Context, duration time.Duration) bool { + if duration == 0 { + select { + default: + return true + case <-ctx.Done(): + return false + } + } + timer := time.NewTimer(duration) + defer timer.Stop() + select { + case <-timer.C: + return true + case <-ctx.Done(): + return false + } +} + +func backoff(attempt int, min time.Duration, max time.Duration) time.Duration { + d := time.Duration(attempt*attempt) * min + if d > max { + d = max + } + return d +} + +func canonicalAddress(s string) string { + return net.JoinHostPort(splitHostPort(s)) +} + +func splitHostPort(s string) (host string, port string) { + host, port, _ = net.SplitHostPort(s) + if len(host) == 0 && len(port) == 0 { + host = s + port = "9092" + } + return +} + +func splitHostPortNumber(s string) (host string, portNumber int, err error) { + host, port := splitHostPort(s) + portNumber, err = strconv.Atoi(port) + if err != nil { + return host, 0, fmt.Errorf("%s: %w", s, err) + } + return host, portNumber, nil +} + +func lookupHost(ctx context.Context, address string, resolver Resolver) (string, error) { + host, port := splitHostPort(address) + + if resolver != nil { + resolved, err := resolver.LookupHost(ctx, host) + if err != nil { + return "", fmt.Errorf("failed to resolve host %s: %w", host, err) + } + + // if the resolver doesn't return anything, we'll fall back on the provided + // address instead + if len(resolved) > 0 { + resolvedHost, resolvedPort := splitHostPort(resolved[0]) + + // we'll always prefer the resolved host + host = resolvedHost + + // in the case of port though, the provided address takes priority, and we + // only use the resolved address to set the port when not specified + if port == "" { + port = resolvedPort + } + } + } + + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/github.com/segmentio/kafka-go/discard.go b/vendor/github.com/segmentio/kafka-go/discard.go new file mode 100644 index 000000000..0cb1be9d0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/discard.go @@ -0,0 +1,38 @@ +package kafka + +import "bufio" + +func discardN(r *bufio.Reader, sz int, n int) (int, error) { + var err error + if n <= sz { + n, err = r.Discard(n) + } else { + n, err = r.Discard(sz) + if err == nil { + err = errShortRead + } + } + return sz - n, err +} + +func discardInt32(r *bufio.Reader, sz int) (int, error) { + return discardN(r, sz, 4) +} + +func discardString(r *bufio.Reader, sz int) (int, error) { + return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { + if n < 0 { + return sz, nil + } + return discardN(r, sz, n) + }) +} + +func discardBytes(r *bufio.Reader, sz int) (int, error) { + return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { + if n < 0 { + return sz, nil + } + return discardN(r, sz, n) + }) +} diff --git a/vendor/github.com/segmentio/kafka-go/docker-compose-241.yml b/vendor/github.com/segmentio/kafka-go/docker-compose-241.yml new file mode 100644 index 000000000..6feb1844b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/docker-compose-241.yml @@ -0,0 +1,32 @@ +version: "3" +services: + kafka: + image: wurstmeister/kafka:2.12-2.4.1 + restart: on-failure:3 + links: + - zookeeper + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_VERSION: '2.4.1' + KAFKA_BROKER_ID: '1' + KAFKA_CREATE_TOPICS: 
'test-writer-0:3:1,test-writer-1:3:1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' + KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' + KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" + CUSTOM_INIT_SCRIPT: |- + echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf; + /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram + + zookeeper: + image: wurstmeister/zookeeper + ports: + - 2181:2181 diff --git a/vendor/github.com/segmentio/kafka-go/docker-compose.010.yml b/vendor/github.com/segmentio/kafka-go/docker-compose.010.yml new file mode 100644 index 000000000..56123f85c --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/docker-compose.010.yml @@ -0,0 +1,29 @@ +version: "3" +services: + kafka: + image: wurstmeister/kafka:0.10.1.1 + links: + - zookeeper + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_BROKER_ID: '1' + KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' + KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN' + KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" + CUSTOM_INIT_SCRIPT: |- + echo -e 'KafkaServer {\norg.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf; + + zookeeper: + image: wurstmeister/zookeeper + ports: + - 2181:2181 diff --git a/vendor/github.com/segmentio/kafka-go/docker-compose.yml b/vendor/github.com/segmentio/kafka-go/docker-compose.yml new file mode 100644 index 000000000..dc0c2e85e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3" +services: + kafka: + image: wurstmeister/kafka:2.12-2.3.1 + restart: on-failure:3 + links: + - zookeeper + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_VERSION: '2.3.1' + KAFKA_BROKER_ID: '1' + KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' + 
KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512'
+      KAFKA_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer'
+      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf"
+      CUSTOM_INIT_SCRIPT: |-
+        echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf;
+        /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram
+
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+      - 2181:2181
diff --git a/vendor/github.com/segmentio/kafka-go/electleaders.go b/vendor/github.com/segmentio/kafka-go/electleaders.go
new file mode 100644
index 000000000..2dd63b73d
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/electleaders.go
@@ -0,0 +1,89 @@
+package kafka
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/electleaders"
+)
+
+// ElectLeadersRequest is a request to the ElectLeaders API.
+type ElectLeadersRequest struct {
+	// Addr is the address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// Topic is the name of the topic to do the leader elections in.
+	Topic string
+
+	// Partitions is the list of partitions to run leader elections for.
+	Partitions []int
+
+	// Timeout is the amount of time to wait for the election to run.
+	Timeout time.Duration
+}
+
+// ElectLeadersResponse is a response from the ElectLeaders API.
+type ElectLeadersResponse struct {
+	// Error is set to a non-nil value if a top-level error occurred.
+	Error error
+
+	// PartitionResults contains the results for each partition leader election.
+	PartitionResults []ElectLeadersResponsePartitionResult
+}
+
+// ElectLeadersResponsePartitionResult contains the response details for a single partition.
+type ElectLeadersResponsePartitionResult struct {
+	// Partition is the ID of the partition.
+	Partition int
+
+	// Error is set to a non-nil value if an error occurred electing leaders
+	// for this partition.
+	Error error
+}
+
+func (c *Client) ElectLeaders(
+	ctx context.Context,
+	req *ElectLeadersRequest,
+) (*ElectLeadersResponse, error) {
+	partitions32 := []int32{}
+	for _, partition := range req.Partitions {
+		partitions32 = append(partitions32, int32(partition))
+	}
+
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		&electleaders.Request{
+			TopicPartitions: []electleaders.RequestTopicPartitions{
+				{
+					Topic:        req.Topic,
+					PartitionIDs: partitions32,
+				},
+			},
+			TimeoutMs: int32(req.Timeout.Milliseconds()),
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*electleaders.Response)
+
+	resp := &ElectLeadersResponse{
+		Error: makeError(apiResp.ErrorCode, ""),
+	}
+
+	for _, topicResult := range apiResp.ReplicaElectionResults {
+		for _, partitionResult := range topicResult.PartitionResults {
+			resp.PartitionResults = append(
+				resp.PartitionResults,
+				ElectLeadersResponsePartitionResult{
+					Partition: int(partitionResult.PartitionID),
+					Error:     makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage),
+				},
+			)
+		}
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/endtxn.go b/vendor/github.com/segmentio/kafka-go/endtxn.go
new file mode 100644
index 000000000..ebfeab2ee
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/endtxn.go
@@ -0,0 +1,61 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/endtxn"
+)
+
+// EndTxnRequest represents a request sent to a kafka broker to end a transaction.
+type EndTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key.
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// Committed should be set to true if the transaction was committed, false otherwise.
+	Committed bool
+}
+
+// EndTxnResponse represents a response from a kafka broker to an end transaction request.
+type EndTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Error is non-nil if an error occurred and contains the kafka error code.
+	// Programs may use the standard errors.Is function to test the error
+	// against kafka error codes.
+	Error error
+}
+
+// EndTxn sends an EndTxn request to a kafka broker and returns its response.
+func (c *Client) EndTxn(ctx context.Context, req *EndTxnRequest) (*EndTxnResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &endtxn.Request{
+		TransactionalID: req.TransactionalID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+		Committed:       req.Committed,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).EndTxn: %w", err)
+	}
+
+	r := m.(*endtxn.Response)
+
+	res := &EndTxnResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Error:    makeError(r.ErrorCode, ""),
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/error.go b/vendor/github.com/segmentio/kafka-go/error.go
new file mode 100644
index 000000000..4a7a8a278
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/error.go
@@ -0,0 +1,712 @@
+package kafka
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"syscall"
+)
+
+// Error represents the different error codes that may be returned by kafka.
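+// Programs can test an error against these codes with the standard errors.Is
+// function, for example (editor's illustration; createTopic is a hypothetical
+// helper):
+//
+//	if err := createTopic(ctx); errors.Is(err, kafka.TopicAlreadyExists) {
+//		// the topic already exists; safe to ignore for idempotent creates
+//	}
+//
+// The full list of error codes is documented at: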
+// https://kafka.apache.org/protocol#protocol_error_codes +type Error int + +const ( + Unknown Error = -1 + OffsetOutOfRange Error = 1 + InvalidMessage Error = 2 + UnknownTopicOrPartition Error = 3 + InvalidMessageSize Error = 4 + LeaderNotAvailable Error = 5 + NotLeaderForPartition Error = 6 + RequestTimedOut Error = 7 + BrokerNotAvailable Error = 8 + ReplicaNotAvailable Error = 9 + MessageSizeTooLarge Error = 10 + StaleControllerEpoch Error = 11 + OffsetMetadataTooLarge Error = 12 + NetworkException Error = 13 + GroupLoadInProgress Error = 14 + GroupCoordinatorNotAvailable Error = 15 + NotCoordinatorForGroup Error = 16 + InvalidTopic Error = 17 + RecordListTooLarge Error = 18 + NotEnoughReplicas Error = 19 + NotEnoughReplicasAfterAppend Error = 20 + InvalidRequiredAcks Error = 21 + IllegalGeneration Error = 22 + InconsistentGroupProtocol Error = 23 + InvalidGroupId Error = 24 + UnknownMemberId Error = 25 + InvalidSessionTimeout Error = 26 + RebalanceInProgress Error = 27 + InvalidCommitOffsetSize Error = 28 + TopicAuthorizationFailed Error = 29 + GroupAuthorizationFailed Error = 30 + ClusterAuthorizationFailed Error = 31 + InvalidTimestamp Error = 32 + UnsupportedSASLMechanism Error = 33 + IllegalSASLState Error = 34 + UnsupportedVersion Error = 35 + TopicAlreadyExists Error = 36 + InvalidPartitionNumber Error = 37 + InvalidReplicationFactor Error = 38 + InvalidReplicaAssignment Error = 39 + InvalidConfiguration Error = 40 + NotController Error = 41 + InvalidRequest Error = 42 + UnsupportedForMessageFormat Error = 43 + PolicyViolation Error = 44 + OutOfOrderSequenceNumber Error = 45 + DuplicateSequenceNumber Error = 46 + InvalidProducerEpoch Error = 47 + InvalidTransactionState Error = 48 + InvalidProducerIDMapping Error = 49 + InvalidTransactionTimeout Error = 50 + ConcurrentTransactions Error = 51 + TransactionCoordinatorFenced Error = 52 + TransactionalIDAuthorizationFailed Error = 53 + SecurityDisabled Error = 54 + BrokerAuthorizationFailed Error = 55 + KafkaStorageError Error = 56 + LogDirNotFound Error = 57 + SASLAuthenticationFailed Error = 58 + UnknownProducerId Error = 59 + ReassignmentInProgress Error = 60 + DelegationTokenAuthDisabled Error = 61 + DelegationTokenNotFound Error = 62 + DelegationTokenOwnerMismatch Error = 63 + DelegationTokenRequestNotAllowed Error = 64 + DelegationTokenAuthorizationFailed Error = 65 + DelegationTokenExpired Error = 66 + InvalidPrincipalType Error = 67 + NonEmptyGroup Error = 68 + GroupIdNotFound Error = 69 + FetchSessionIDNotFound Error = 70 + InvalidFetchSessionEpoch Error = 71 + ListenerNotFound Error = 72 + TopicDeletionDisabled Error = 73 + FencedLeaderEpoch Error = 74 + UnknownLeaderEpoch Error = 75 + UnsupportedCompressionType Error = 76 + StaleBrokerEpoch Error = 77 + OffsetNotAvailable Error = 78 + MemberIDRequired Error = 79 + PreferredLeaderNotAvailable Error = 80 + GroupMaxSizeReached Error = 81 + FencedInstanceID Error = 82 + EligibleLeadersNotAvailable Error = 83 + ElectionNotNeeded Error = 84 + NoReassignmentInProgress Error = 85 + GroupSubscribedToTopic Error = 86 + InvalidRecord Error = 87 + UnstableOffsetCommit Error = 88 + ThrottlingQuotaExceeded Error = 89 + ProducerFenced Error = 90 + ResourceNotFound Error = 91 + DuplicateResource Error = 92 + UnacceptableCredential Error = 93 + InconsistentVoterSet Error = 94 + InvalidUpdateVersion Error = 95 + FeatureUpdateFailed Error = 96 + PrincipalDeserializationFailure Error = 97 + SnapshotNotFound Error = 98 + PositionOutOfRange Error = 99 + UnknownTopicID Error = 100 + 
DuplicateBrokerRegistration Error = 101 + BrokerIDNotRegistered Error = 102 + InconsistentTopicID Error = 103 + InconsistentClusterID Error = 104 + TransactionalIDNotFound Error = 105 + FetchSessionTopicIDError Error = 106 +) + +// Error satisfies the error interface. +func (e Error) Error() string { + return fmt.Sprintf("[%d] %s: %s", e, e.Title(), e.Description()) +} + +// Timeout returns true if the error was due to a timeout. +func (e Error) Timeout() bool { + return e == RequestTimedOut +} + +// Temporary returns true if the operation that generated the error may succeed +// if retried at a later time. +// Kafka error documentation specifies these as "retriable" +// https://kafka.apache.org/protocol#protocol_error_codes +func (e Error) Temporary() bool { + switch e { + case InvalidMessage, + UnknownTopicOrPartition, + LeaderNotAvailable, + NotLeaderForPartition, + RequestTimedOut, + NetworkException, + GroupLoadInProgress, + GroupCoordinatorNotAvailable, + NotCoordinatorForGroup, + NotEnoughReplicas, + NotEnoughReplicasAfterAppend, + NotController, + KafkaStorageError, + FetchSessionIDNotFound, + InvalidFetchSessionEpoch, + ListenerNotFound, + FencedLeaderEpoch, + UnknownLeaderEpoch, + OffsetNotAvailable, + PreferredLeaderNotAvailable, + EligibleLeadersNotAvailable, + ElectionNotNeeded, + NoReassignmentInProgress, + GroupSubscribedToTopic, + UnstableOffsetCommit, + ThrottlingQuotaExceeded, + UnknownTopicID, + InconsistentTopicID, + FetchSessionTopicIDError: + return true + default: + return false + } +} + +// Title returns a human readable title for the error. +func (e Error) Title() string { + switch e { + case Unknown: + return "Unknown" + case OffsetOutOfRange: + return "Offset Out Of Range" + case InvalidMessage: + return "Invalid Message" + case UnknownTopicOrPartition: + return "Unknown Topic Or Partition" + case InvalidMessageSize: + return "Invalid Message Size" + case LeaderNotAvailable: + return "Leader Not Available" + case NotLeaderForPartition: + return "Not Leader For Partition" + case RequestTimedOut: + return "Request Timed Out" + case BrokerNotAvailable: + return "Broker Not Available" + case ReplicaNotAvailable: + return "Replica Not Available" + case MessageSizeTooLarge: + return "Message Size Too Large" + case StaleControllerEpoch: + return "Stale Controller Epoch" + case OffsetMetadataTooLarge: + return "Offset Metadata Too Large" + case GroupLoadInProgress: + return "Group Load In Progress" + case GroupCoordinatorNotAvailable: + return "Group Coordinator Not Available" + case NotCoordinatorForGroup: + return "Not Coordinator For Group" + case InvalidTopic: + return "Invalid Topic" + case RecordListTooLarge: + return "Record List Too Large" + case NotEnoughReplicas: + return "Not Enough Replicas" + case NotEnoughReplicasAfterAppend: + return "Not Enough Replicas After Append" + case InvalidRequiredAcks: + return "Invalid Required Acks" + case IllegalGeneration: + return "Illegal Generation" + case InconsistentGroupProtocol: + return "Inconsistent Group Protocol" + case InvalidGroupId: + return "Invalid Group ID" + case UnknownMemberId: + return "Unknown Member ID" + case InvalidSessionTimeout: + return "Invalid Session Timeout" + case RebalanceInProgress: + return "Rebalance In Progress" + case InvalidCommitOffsetSize: + return "Invalid Commit Offset Size" + case TopicAuthorizationFailed: + return "Topic Authorization Failed" + case GroupAuthorizationFailed: + return "Group Authorization Failed" + case ClusterAuthorizationFailed: + return "Cluster Authorization 
Failed" + case InvalidTimestamp: + return "Invalid Timestamp" + case UnsupportedSASLMechanism: + return "Unsupported SASL Mechanism" + case IllegalSASLState: + return "Illegal SASL State" + case UnsupportedVersion: + return "Unsupported Version" + case TopicAlreadyExists: + return "Topic Already Exists" + case InvalidPartitionNumber: + return "Invalid Partition Number" + case InvalidReplicationFactor: + return "Invalid Replication Factor" + case InvalidReplicaAssignment: + return "Invalid Replica Assignment" + case InvalidConfiguration: + return "Invalid Configuration" + case NotController: + return "Not Controller" + case InvalidRequest: + return "Invalid Request" + case UnsupportedForMessageFormat: + return "Unsupported For Message Format" + case PolicyViolation: + return "Policy Violation" + case OutOfOrderSequenceNumber: + return "Out Of Order Sequence Number" + case DuplicateSequenceNumber: + return "Duplicate Sequence Number" + case InvalidProducerEpoch: + return "Invalid Producer Epoch" + case InvalidTransactionState: + return "Invalid Transaction State" + case InvalidProducerIDMapping: + return "Invalid Producer ID Mapping" + case InvalidTransactionTimeout: + return "Invalid Transaction Timeout" + case ConcurrentTransactions: + return "Concurrent Transactions" + case TransactionCoordinatorFenced: + return "Transaction Coordinator Fenced" + case TransactionalIDAuthorizationFailed: + return "Transactional ID Authorization Failed" + case SecurityDisabled: + return "Security Disabled" + case BrokerAuthorizationFailed: + return "Broker Authorization Failed" + case KafkaStorageError: + return "Kafka Storage Error" + case LogDirNotFound: + return "Log Dir Not Found" + case SASLAuthenticationFailed: + return "SASL Authentication Failed" + case UnknownProducerId: + return "Unknown Producer ID" + case ReassignmentInProgress: + return "Reassignment In Progress" + case DelegationTokenAuthDisabled: + return "Delegation Token Auth Disabled" + case DelegationTokenNotFound: + return "Delegation Token Not Found" + case DelegationTokenOwnerMismatch: + return "Delegation Token Owner Mismatch" + case DelegationTokenRequestNotAllowed: + return "Delegation Token Request Not Allowed" + case DelegationTokenAuthorizationFailed: + return "Delegation Token Authorization Failed" + case DelegationTokenExpired: + return "Delegation Token Expired" + case InvalidPrincipalType: + return "Invalid Principal Type" + case NonEmptyGroup: + return "Non Empty Group" + case GroupIdNotFound: + return "Group ID Not Found" + case FetchSessionIDNotFound: + return "Fetch Session ID Not Found" + case InvalidFetchSessionEpoch: + return "Invalid Fetch Session Epoch" + case ListenerNotFound: + return "Listener Not Found" + case TopicDeletionDisabled: + return "Topic Deletion Disabled" + case FencedLeaderEpoch: + return "Fenced Leader Epoch" + case UnknownLeaderEpoch: + return "Unknown Leader Epoch" + case UnsupportedCompressionType: + return "Unsupported Compression Type" + case MemberIDRequired: + return "Member ID Required" + case EligibleLeadersNotAvailable: + return "Eligible Leader Not Available" + case ElectionNotNeeded: + return "Election Not Needed" + case NoReassignmentInProgress: + return "No Reassignment In Progress" + case GroupSubscribedToTopic: + return "Group Subscribed To Topic" + case InvalidRecord: + return "Invalid Record" + case UnstableOffsetCommit: + return "Unstable Offset Commit" + case ThrottlingQuotaExceeded: + return "Throttling Quota Exceeded" + case ProducerFenced: + return "Producer Fenced" + case 
ResourceNotFound: + return "Resource Not Found" + case DuplicateResource: + return "Duplicate Resource" + case UnacceptableCredential: + return "Unacceptable Credential" + case InconsistentVoterSet: + return "Inconsistent Voter Set" + case InvalidUpdateVersion: + return "Invalid Update Version" + case FeatureUpdateFailed: + return "Feature Update Failed" + case PrincipalDeserializationFailure: + return "Principal Deserialization Failure" + case SnapshotNotFound: + return "Snapshot Not Found" + case PositionOutOfRange: + return "Position Out Of Range" + case UnknownTopicID: + return "Unknown Topic ID" + case DuplicateBrokerRegistration: + return "Duplicate Broker Registration" + case BrokerIDNotRegistered: + return "Broker ID Not Registered" + case InconsistentTopicID: + return "Inconsistent Topic ID" + case InconsistentClusterID: + return "Inconsistent Cluster ID" + case TransactionalIDNotFound: + return "Transactional ID Not Found" + case FetchSessionTopicIDError: + return "Fetch Session Topic ID Error" + } + return "" +} + +// Description returns a human readable description of cause of the error. +func (e Error) Description() string { + switch e { + case Unknown: + return "an unexpected server error occurred" + case OffsetOutOfRange: + return "the requested offset is outside the range of offsets maintained by the server for the given topic/partition" + case InvalidMessage: + return "the message contents does not match its CRC" + case UnknownTopicOrPartition: + return "the request is for a topic or partition that does not exist on this broker" + case InvalidMessageSize: + return "the message has a negative size" + case LeaderNotAvailable: + return "the cluster is in the middle of a leadership election and there is currently no leader for this partition and hence it is unavailable for writes" + case NotLeaderForPartition: + return "the client attempted to send messages to a replica that is not the leader for some partition, the client's metadata are likely out of date" + case RequestTimedOut: + return "the request exceeded the user-specified time limit in the request" + case BrokerNotAvailable: + return "not a client facing error and is used mostly by tools when a broker is not alive" + case ReplicaNotAvailable: + return "a replica is expected on a broker, but is not (this can be safely ignored)" + case MessageSizeTooLarge: + return "the server has a configurable maximum message size to avoid unbounded memory allocation and the client attempted to produce a message larger than this maximum" + case StaleControllerEpoch: + return "internal error code for broker-to-broker communication" + case OffsetMetadataTooLarge: + return "the client specified a string larger than configured maximum for offset metadata" + case GroupLoadInProgress: + return "the broker returns this error code for an offset fetch request if it is still loading offsets (after a leader change for that offsets topic partition), or in response to group membership requests (such as heartbeats) when group metadata is being loaded by the coordinator" + case GroupCoordinatorNotAvailable: + return "the broker returns this error code for group coordinator requests, offset commits, and most group management requests if the offsets topic has not yet been created, or if the group coordinator is not active" + case NotCoordinatorForGroup: + return "the broker returns this error code if it receives an offset fetch or commit request for a group that it is not a coordinator for" + case InvalidTopic: + return "a request which attempted to 
access an invalid topic (e.g. one which has an illegal name), or if an attempt was made to write to an internal topic (such as the consumer offsets topic)" + case RecordListTooLarge: + return "a message batch in a produce request exceeds the maximum configured segment size" + case NotEnoughReplicas: + return "the number of in-sync replicas is lower than the configured minimum and requiredAcks is -1" + case NotEnoughReplicasAfterAppend: + return "the message was written to the log, but with fewer in-sync replicas than required." + case InvalidRequiredAcks: + return "the requested requiredAcks is invalid (anything other than -1, 1, or 0)" + case IllegalGeneration: + return "the generation id provided in the request is not the current generation" + case InconsistentGroupProtocol: + return "the member provided a protocol type or set of protocols which is not compatible with the current group" + case InvalidGroupId: + return "the group id is empty or null" + case UnknownMemberId: + return "the member id is not in the current generation" + case InvalidSessionTimeout: + return "the requested session timeout is outside of the allowed range on the broker" + case RebalanceInProgress: + return "the coordinator has begun rebalancing the group, the client should rejoin the group" + case InvalidCommitOffsetSize: + return "an offset commit was rejected because of oversize metadata" + case TopicAuthorizationFailed: + return "the client is not authorized to access the requested topic" + case GroupAuthorizationFailed: + return "the client is not authorized to access a particular group id" + case ClusterAuthorizationFailed: + return "the client is not authorized to use an inter-broker or administrative API" + case InvalidTimestamp: + return "the timestamp of the message is out of acceptable range" + case UnsupportedSASLMechanism: + return "the broker does not support the requested SASL mechanism" + case IllegalSASLState: + return "the request is not valid given the current SASL state" + case UnsupportedVersion: + return "the version of API is not supported" + case TopicAlreadyExists: + return "a topic with this name already exists" + case InvalidPartitionNumber: + return "the number of partitions is invalid" + case InvalidReplicationFactor: + return "the replication-factor is invalid" + case InvalidReplicaAssignment: + return "the replica assignment is invalid" + case InvalidConfiguration: + return "the configuration is invalid" + case NotController: + return "this is not the correct controller for this cluster" + case InvalidRequest: + return "this most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker, se the broker logs for more details" + case UnsupportedForMessageFormat: + return "the message format version on the broker does not support the request" + case PolicyViolation: + return "the request parameters do not satisfy the configured policy" + case OutOfOrderSequenceNumber: + return "the broker received an out of order sequence number" + case DuplicateSequenceNumber: + return "the broker received a duplicate sequence number" + case InvalidProducerEpoch: + return "the producer attempted an operation with an old epoch, either there is a newer producer with the same transactional ID, or the producer's transaction has been expired by the broker" + case InvalidTransactionState: + return "the producer attempted a transactional operation in an invalid state" + case InvalidProducerIDMapping: + return "the producer attempted to use a 
producer id which is not currently assigned to its transactional ID" + case InvalidTransactionTimeout: + return "the transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)" + case ConcurrentTransactions: + return "the producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing" + case TransactionCoordinatorFenced: + return "the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer" + case TransactionalIDAuthorizationFailed: + return "the transactional ID authorization failed" + case SecurityDisabled: + return "the security features are disabled" + case BrokerAuthorizationFailed: + return "the broker authorization failed" + case KafkaStorageError: + return "disk error when trying to access log file on the disk" + case LogDirNotFound: + return "the user-specified log directory is not found in the broker config" + case SASLAuthenticationFailed: + return "SASL Authentication failed" + case UnknownProducerId: + return "the broker could not locate the producer metadata associated with the producer ID" + case ReassignmentInProgress: + return "a partition reassignment is in progress" + case DelegationTokenAuthDisabled: + return "delegation token feature is not enabled" + case DelegationTokenNotFound: + return "delegation token is not found on server" + case DelegationTokenOwnerMismatch: + return "specified principal is not valid owner/renewer" + case DelegationTokenRequestNotAllowed: + return "delegation token requests are not allowed on plaintext/1-way ssl channels and on delegation token authenticated channels" + case DelegationTokenAuthorizationFailed: + return "delegation token authorization failed" + case DelegationTokenExpired: + return "delegation token is expired" + case InvalidPrincipalType: + return "supplied principaltype is not supported" + case NonEmptyGroup: + return "the group is not empty" + case GroupIdNotFound: + return "the group ID does not exist" + case FetchSessionIDNotFound: + return "the fetch session ID was not found" + case InvalidFetchSessionEpoch: + return "the fetch session epoch is invalid" + case ListenerNotFound: + return "there is no listener on the leader broker that matches the listener on which metadata request was processed" + case TopicDeletionDisabled: + return "topic deletion is disabled" + case FencedLeaderEpoch: + return "the leader epoch in the request is older than the epoch on the broker" + case UnknownLeaderEpoch: + return "the leader epoch in the request is newer than the epoch on the broker" + case UnsupportedCompressionType: + return "the requesting client does not support the compression type of given partition" + case MemberIDRequired: + return "the group member needs to have a valid member id before actually entering a consumer group" + case EligibleLeadersNotAvailable: + return "eligible topic partition leaders are not available" + case ElectionNotNeeded: + return "leader election not needed for topic partition" + case NoReassignmentInProgress: + return "no partition reassignment is in progress" + case GroupSubscribedToTopic: + return "deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it" + case InvalidRecord: + return "this record has failed the validation on broker and hence be rejected" + case UnstableOffsetCommit: + return "there are unstable offsets that need to be cleared" + case ThrottlingQuotaExceeded: + return "The throttling 
quota has been exceeded" + case ProducerFenced: + return "There is a newer producer with the same transactionalId which fences the current one" + case ResourceNotFound: + return "A request illegally referred to a resource that does not exist" + case DuplicateResource: + return "A request illegally referred to the same resource twice" + case UnacceptableCredential: + return "Requested credential would not meet criteria for acceptability" + case InconsistentVoterSet: + return "Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters" + case InvalidUpdateVersion: + return "The given update version was invalid" + case FeatureUpdateFailed: + return "Unable to update finalized features due to an unexpected server error" + case PrincipalDeserializationFailure: + return "Request principal deserialization failed during forwarding. This indicates an internal error on the broker cluster security setup" + case SnapshotNotFound: + return "Requested snapshot was not found" + case PositionOutOfRange: + return "Requested position is not greater than or equal to zero, and less than the size of the snapshot" + case UnknownTopicID: + return "This server does not host this topic ID" + case DuplicateBrokerRegistration: + return "This broker ID is already in use" + case BrokerIDNotRegistered: + return "The given broker ID was not registered" + case InconsistentTopicID: + return "The log's topic ID did not match the topic ID in the request" + case InconsistentClusterID: + return "The clusterId in the request does not match that found on the server" + case TransactionalIDNotFound: + return "The transactionalId could not be found" + case FetchSessionTopicIDError: + return "The fetch session encountered inconsistent topic ID usage" + } + return "" +} + +func isTimeout(err error) bool { + var timeoutError interface{ Timeout() bool } + if errors.As(err, &timeoutError) { + return timeoutError.Timeout() + } + return false +} + +func isTemporary(err error) bool { + var tempError interface{ Temporary() bool } + if errors.As(err, &tempError) { + return tempError.Temporary() + } + return false +} + +func isTransientNetworkError(err error) bool { + return errors.Is(err, io.ErrUnexpectedEOF) || + errors.Is(err, syscall.ECONNREFUSED) || + errors.Is(err, syscall.ECONNRESET) || + errors.Is(err, syscall.EPIPE) +} + +func silentEOF(err error) error { + if errors.Is(err, io.EOF) { + err = nil + } + return err +} + +func dontExpectEOF(err error) error { + if errors.Is(err, io.EOF) { + return io.ErrUnexpectedEOF + } + return err +} + +func coalesceErrors(errs ...error) error { + for _, err := range errs { + if err != nil { + return err + } + } + return nil +} + +type MessageTooLargeError struct { + Message Message + Remaining []Message +} + +func messageTooLarge(msgs []Message, i int) MessageTooLargeError { + remain := make([]Message, 0, len(msgs)-1) + remain = append(remain, msgs[:i]...) + remain = append(remain, msgs[i+1:]...) + return MessageTooLargeError{ + Message: msgs[i], + Remaining: remain, + } +} + +func (e MessageTooLargeError) Error() string { + return MessageSizeTooLarge.Error() +} + +func makeError(code int16, message string) error { + if code == 0 { + return nil + } + if message == "" { + return Error(code) + } + return fmt.Errorf("%w: %s", Error(code), message) +} + +// WriteError is returned by kafka.(*Writer).WriteMessages when the writer is +// not configured to write messages asynchronously. 
WriteErrors values contain +// a list of errors where each entry matches the position of a message in the +// WriteMessages call. The program can determine the status of each message by +// looping over the error: +// +// switch err := w.WriteMessages(ctx, msgs...).(type) { +// case nil: +// case kafka.WriteErrors: +// for i := range msgs { +// if err[i] != nil { +// // handle the error writing msgs[i] +// ... +// } +// } +// default: +// // handle other errors +// ... +// } +type WriteErrors []error + +// Count counts the number of non-nil errors in err. +func (err WriteErrors) Count() int { + n := 0 + + for _, e := range err { + if e != nil { + n++ + } + } + + return n +} + +func (err WriteErrors) Error() string { + errCount := err.Count() + errors := make([]string, 0, errCount) + for _, writeError := range err { + if writeError == nil { + continue + } + errors = append(errors, writeError.Error()) + } + return fmt.Sprintf("Kafka write errors (%d/%d), errors: %v", errCount, len(err), errors) +} diff --git a/vendor/github.com/segmentio/kafka-go/fetch.go b/vendor/github.com/segmentio/kafka-go/fetch.go new file mode 100644 index 000000000..eafd0de88 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/fetch.go @@ -0,0 +1,289 @@ +package kafka + +import ( + "context" + "fmt" + "math" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol" + fetchAPI "github.com/segmentio/kafka-go/protocol/fetch" +) + +// FetchRequest represents a request sent to a kafka broker to retrieve records +// from a topic partition. +type FetchRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Topic, partition, and offset to retrieve records from. The offset may be + // one of the special FirstOffset or LastOffset constants, in which case the + // request will automatically discover the first or last offset of the + // partition and submit the request for these. + Topic string + Partition int + Offset int64 + + // Size and time limits of the response returned by the broker. + MinBytes int64 + MaxBytes int64 + MaxWait time.Duration + + // The isolation level for the request. + // + // Defaults to ReadUncommitted. + // + // This field requires the kafka broker to support the Fetch API in version + // 4 or above (otherwise the value is ignored). + IsolationLevel IsolationLevel +} + +// FetchResponse represents a response from a kafka broker to a fetch request. +type FetchResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // The topic and partition that the response came for (will match the values + // in the request). + Topic string + Partition int + + // Information about the topic partition layout returned from the broker. + // + // LastStableOffset requires the kafka broker to support the Fetch API in + // version 4 or above (otherwise the value is zero). + // + // LogStartOffset requires the kafka broker to support the Fetch API in + // version 5 or above (otherwise the value is zero). + HighWatermark int64 + LastStableOffset int64 + LogStartOffset int64 + + // An error that may have occurred while attempting to fetch the records. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error + + // The set of records returned in the response.
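+ // + // A minimal drain loop (sketch; assumes ReadRecord from the protocol.RecordReader interface and elides payload handling): + // + // for { + // rec, err := res.Records.ReadRecord() + // if err != nil { + // break // io.EOF marks the end of the record set + // } + // _ = rec + // }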
+ // + // The program is expected to call the RecordSet's Close method when it + // has finished reading the records. + // + // Note that kafka may return record batches that start at an offset before + // the one that was requested. It is the program's responsibility to skip + // the offsets that it is not interested in. + Records RecordReader +} + +// Fetch sends a fetch request to a kafka broker and returns the response. +// +// If the broker returned an invalid response with no topics, an error wrapping +// protocol.ErrNoTopic is returned. +// +// If the broker returned an invalid response with no partitions, an error +// wrapping protocol.ErrNoPartition is returned. +func (c *Client) Fetch(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { + timeout := c.timeout(ctx, math.MaxInt64) + maxWait := req.maxWait() + + if maxWait < timeout { + timeout = maxWait + } + + offset := req.Offset + switch offset { + case FirstOffset, LastOffset: + topic, partition := req.Topic, req.Partition + + r, err := c.ListOffsets(ctx, &ListOffsetsRequest{ + Addr: req.Addr, + Topics: map[string][]OffsetRequest{ + topic: {{ + Partition: partition, + Timestamp: offset, + }}, + }, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err) + } + + for _, p := range r.Topics[topic] { + if p.Partition == partition { + if p.Error != nil { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", p.Error) + } + switch offset { + case FirstOffset: + offset = p.FirstOffset + case LastOffset: + offset = p.LastOffset + } + break + } + } + } + + m, err := c.roundTrip(ctx, req.Addr, &fetchAPI.Request{ + ReplicaID: -1, + MaxWaitTime: milliseconds(timeout), + MinBytes: int32(req.MinBytes), + MaxBytes: int32(req.MaxBytes), + IsolationLevel: int8(req.IsolationLevel), + SessionID: -1, + SessionEpoch: -1, + Topics: []fetchAPI.RequestTopic{{ + Topic: req.Topic, + Partitions: []fetchAPI.RequestPartition{{ + Partition: int32(req.Partition), + CurrentLeaderEpoch: -1, + FetchOffset: offset, + LogStartOffset: -1, + PartitionMaxBytes: int32(req.MaxBytes), + }}, + }}, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err) + } + + res := m.(*fetchAPI.Response) + if len(res.Topics) == 0 { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoTopic) + } + topic := &res.Topics[0] + if len(topic.Partitions) == 0 { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoPartition) + } + partition := &topic.Partitions[0] + + ret := &FetchResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Topic: topic.Topic, + Partition: int(partition.Partition), + Error: makeError(res.ErrorCode, ""), + HighWatermark: partition.HighWatermark, + LastStableOffset: partition.LastStableOffset, + LogStartOffset: partition.LogStartOffset, + Records: partition.RecordSet.Records, + } + + if partition.ErrorCode != 0 { + ret.Error = makeError(partition.ErrorCode, "") + } + + if ret.Records == nil { + ret.Records = NewRecordReader() + } + + return ret, nil +} + +func (req *FetchRequest) maxWait() time.Duration { + if req.MaxWait > 0 { + return req.MaxWait + } + return defaultMaxWait +} + +type fetchRequestV2 struct { + ReplicaID int32 + MaxWaitTime int32 + MinBytes int32 + Topics []fetchRequestTopicV2 +} + +func (r fetchRequestV2) size() int32 { + return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r fetchRequestV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ReplicaID) + wb.writeInt32(r.MaxWaitTime) + wb.writeInt32(r.MinBytes) +
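// The topic array is written last, after the fixed-size header fields. +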
wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type fetchRequestTopicV2 struct { + TopicName string + Partitions []fetchRequestPartitionV2 +} + +func (t fetchRequestTopicV2) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t fetchRequestTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type fetchRequestPartitionV2 struct { + Partition int32 + FetchOffset int64 + MaxBytes int32 +} + +func (p fetchRequestPartitionV2) size() int32 { + return 4 + 8 + 4 +} + +func (p fetchRequestPartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt64(p.FetchOffset) + wb.writeInt32(p.MaxBytes) +} + +type fetchResponseV2 struct { + ThrottleTime int32 + Topics []fetchResponseTopicV2 +} + +func (r fetchResponseV2) size() int32 { + return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r fetchResponseV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ThrottleTime) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type fetchResponseTopicV2 struct { + TopicName string + Partitions []fetchResponsePartitionV2 +} + +func (t fetchResponseTopicV2) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t fetchResponseTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type fetchResponsePartitionV2 struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + MessageSetSize int32 + MessageSet messageSet +} + +func (p fetchResponsePartitionV2) size() int32 { + return 4 + 2 + 8 + 4 + p.MessageSet.size() +} + +func (p fetchResponsePartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.HighwaterMarkOffset) + wb.writeInt32(p.MessageSetSize) + p.MessageSet.writeTo(wb) +} diff --git a/vendor/github.com/segmentio/kafka-go/findcoordinator.go b/vendor/github.com/segmentio/kafka-go/findcoordinator.go new file mode 100644 index 000000000..cbf07153d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/findcoordinator.go @@ -0,0 +1,170 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/findcoordinator" +) + +// CoordinatorKeyType is used to specify the type of coordinator to look for. +type CoordinatorKeyType int8 + +const ( + // CoordinatorKeyTypeConsumer type is used when looking for a Group coordinator. + CoordinatorKeyTypeConsumer CoordinatorKeyType = 0 + + // CoordinatorKeyTypeTransaction type is used when looking for a Transaction coordinator. + CoordinatorKeyTypeTransaction CoordinatorKeyType = 1 +) + +// FindCoordinatorRequest is the request structure for the FindCoordinator function. +type FindCoordinatorRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The coordinator key. + Key string + + // The coordinator key type. (Group, transaction, etc.) + KeyType CoordinatorKeyType +} + +// FindCoordinatorResponseCoordinator contains details about the found coordinator. +type FindCoordinatorResponseCoordinator struct { + // NodeID holds the broker id. 
+ NodeID int + + // Host of the broker + Host string + + // Port on which broker accepts requests + Port int +} + +// FindCoordinatorResponse is the response structure for the FindCoordinator function. +type FindCoordinatorResponse struct { + // The Transaction/Group Coordinator details + Coordinator *FindCoordinatorResponseCoordinator + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // An error that may have occurred while attempting to retrieve the coordinator. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. + Error error +} + +// FindCoordinator sends a findCoordinator request to a kafka broker and returns the +// response. +func (c *Client) FindCoordinator(ctx context.Context, req *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { + + m, err := c.roundTrip(ctx, req.Addr, &findcoordinator.Request{ + Key: req.Key, + KeyType: int8(req.KeyType), + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).FindCoordinator: %w", err) + } + + res := m.(*findcoordinator.Response) + coordinator := &FindCoordinatorResponseCoordinator{ + NodeID: int(res.NodeID), + Host: res.Host, + Port: int(res.Port), + } + ret := &FindCoordinatorResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(res.ErrorCode, res.ErrorMessage), + Coordinator: coordinator, + } + + return ret, nil +} + +// findCoordinatorRequestV0 requests the coordinator for the specified group or transaction +// +// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator +type findCoordinatorRequestV0 struct { + // CoordinatorKey holds id to use for finding the coordinator (for groups, this is + // the groupId, for transactional producers, this is the transactional id) + CoordinatorKey string +} + +func (t findCoordinatorRequestV0) size() int32 { + return sizeofString(t.CoordinatorKey) +} + +func (t findCoordinatorRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.CoordinatorKey) +} + +type findCoordinatorResponseCoordinatorV0 struct { + // NodeID holds the broker id.
+ NodeID int32 + + // Host of the broker + Host string + + // Port on which broker accepts requests + Port int32 +} + +func (t findCoordinatorResponseCoordinatorV0) size() int32 { + return sizeofInt32(t.NodeID) + + sizeofString(t.Host) + + sizeofInt32(t.Port) +} + +func (t findCoordinatorResponseCoordinatorV0) writeTo(wb *writeBuffer) { + wb.writeInt32(t.NodeID) + wb.writeString(t.Host) + wb.writeInt32(t.Port) +} + +func (t *findCoordinatorResponseCoordinatorV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.NodeID); err != nil { + return + } + if remain, err = readString(r, remain, &t.Host); err != nil { + return + } + if remain, err = readInt32(r, remain, &t.Port); err != nil { + return + } + return +} + +type findCoordinatorResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // Coordinator holds host and port information for the coordinator + Coordinator findCoordinatorResponseCoordinatorV0 +} + +func (t findCoordinatorResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + + t.Coordinator.size() +} + +func (t findCoordinatorResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + t.Coordinator.writeTo(wb) +} + +func (t *findCoordinatorResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { + return + } + if remain, err = (&t.Coordinator).readFrom(r, remain); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/groupbalancer.go b/vendor/github.com/segmentio/kafka-go/groupbalancer.go new file mode 100644 index 000000000..9491bc501 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/groupbalancer.go @@ -0,0 +1,339 @@ +package kafka + +import ( + "sort" +) + +// GroupMember describes a single participant in a consumer group. +type GroupMember struct { + // ID is the unique ID for this member as taken from the JoinGroup response. + ID string + + // Topics is a list of topics that this member is consuming. + Topics []string + + // UserData contains any information that the GroupBalancer sent to the + // consumer group coordinator. + UserData []byte +} + +// GroupMemberAssignments holds MemberID => topic => partitions. +type GroupMemberAssignments map[string]map[string][]int + +// GroupBalancer encapsulates the client side rebalancing logic. +type GroupBalancer interface { + // ProtocolName of the GroupBalancer + ProtocolName() string + + // UserData provides the GroupBalancer an opportunity to embed custom + // UserData into the metadata. + // + // Will be used by JoinGroup to begin the consumer group handshake. 
+ // + // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-JoinGroupRequest + UserData() ([]byte, error) + + // AssignGroups returns which members will be consuming + // which topic partitions + AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments +} + +// RangeGroupBalancer groups consumers by partition +// +// Example: 5 partitions, 2 consumers +// C0: [0, 1, 2] +// C1: [3, 4] +// +// Example: 6 partitions, 3 consumers +// C0: [0, 1] +// C1: [2, 3] +// C2: [4, 5] +// +type RangeGroupBalancer struct{} + +func (r RangeGroupBalancer) ProtocolName() string { + return "range" +} + +func (r RangeGroupBalancer) UserData() ([]byte, error) { + return nil, nil +} + +func (r RangeGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments { + groupAssignments := GroupMemberAssignments{} + membersByTopic := findMembersByTopic(members) + + for topic, members := range membersByTopic { + partitions := findPartitions(topic, topicPartitions) + partitionCount := len(partitions) + memberCount := len(members) + + for memberIndex, member := range members { + assignmentsByTopic, ok := groupAssignments[member.ID] + if !ok { + assignmentsByTopic = map[string][]int{} + groupAssignments[member.ID] = assignmentsByTopic + } + + minIndex := memberIndex * partitionCount / memberCount + maxIndex := (memberIndex + 1) * partitionCount / memberCount + + for partitionIndex, partition := range partitions { + if partitionIndex >= minIndex && partitionIndex < maxIndex { + assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) + } + } + } + } + + return groupAssignments +} + +// RoundRobinGroupBalancer divides partitions evenly among consumers +// +// Example: 5 partitions, 2 consumers +// C0: [0, 2, 4] +// C1: [1, 3] +// +// Example: 6 partitions, 3 consumers +// C0: [0, 3] +// C1: [1, 4] +// C2: [2, 5] +// +type RoundRobinGroupBalancer struct{} + +func (r RoundRobinGroupBalancer) ProtocolName() string { + return "roundrobin" +} + +func (r RoundRobinGroupBalancer) UserData() ([]byte, error) { + return nil, nil +} + +func (r RoundRobinGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments { + groupAssignments := GroupMemberAssignments{} + membersByTopic := findMembersByTopic(members) + for topic, members := range membersByTopic { + partitionIDs := findPartitions(topic, topicPartitions) + memberCount := len(members) + + for memberIndex, member := range members { + assignmentsByTopic, ok := groupAssignments[member.ID] + if !ok { + assignmentsByTopic = map[string][]int{} + groupAssignments[member.ID] = assignmentsByTopic + } + + for partitionIndex, partition := range partitionIDs { + if (partitionIndex % memberCount) == memberIndex { + assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) + } + } + } + } + + return groupAssignments +} + +// RackAffinityGroupBalancer makes a best effort to pair up consumers with +// partitions whose leader is in the same rack. This strategy can have +// performance benefits by minimizing round trip latency between the consumer +// and the broker. In environments where network traffic across racks incurs +// charges (such as cross AZ data transfer in AWS), this strategy is also a cost +// optimization measure because it keeps network traffic within the local rack +// where possible.
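+// +// A typical construction (sketch; the rack name is deployment-specific): +// +// balancer := RackAffinityGroupBalancer{Rack: "us-east-1a"}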
+// +// The primary objective is to spread partitions evenly across consumers with a +// secondary focus on maximizing the number of partitions where the leader and +// the consumer are in the same rack. For best affinity, it's recommended to +// have a balanced spread of consumers and partition leaders across racks. +// +// This balancer requires Kafka version 0.10.0.0 or later. Earlier versions do +// not return the brokers' racks in the metadata request. +type RackAffinityGroupBalancer struct { + // Rack is the name of the rack where this consumer is running. It will be + // communicated to the consumer group leader via the UserData so that + // assignments can be made with affinity to the partition leader. + Rack string +} + +func (r RackAffinityGroupBalancer) ProtocolName() string { + return "rack-affinity" +} + +func (r RackAffinityGroupBalancer) AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments { + membersByTopic := make(map[string][]GroupMember) + for _, m := range members { + for _, t := range m.Topics { + membersByTopic[t] = append(membersByTopic[t], m) + } + } + + partitionsByTopic := make(map[string][]Partition) + for _, p := range partitions { + partitionsByTopic[p.Topic] = append(partitionsByTopic[p.Topic], p) + } + + assignments := GroupMemberAssignments{} + for topic := range membersByTopic { + topicAssignments := r.assignTopic(membersByTopic[topic], partitionsByTopic[topic]) + for member, parts := range topicAssignments { + memberAssignments, ok := assignments[member] + if !ok { + memberAssignments = make(map[string][]int) + assignments[member] = memberAssignments + } + memberAssignments[topic] = parts + } + } + return assignments +} + +func (r RackAffinityGroupBalancer) UserData() ([]byte, error) { + return []byte(r.Rack), nil +} + +func (r *RackAffinityGroupBalancer) assignTopic(members []GroupMember, partitions []Partition) map[string][]int { + zonedPartitions := make(map[string][]int) + for _, part := range partitions { + zone := part.Leader.Rack + zonedPartitions[zone] = append(zonedPartitions[zone], part.ID) + } + + zonedConsumers := make(map[string][]string) + for _, member := range members { + zone := string(member.UserData) + zonedConsumers[zone] = append(zonedConsumers[zone], member.ID) + } + + targetPerMember := len(partitions) / len(members) + remainder := len(partitions) % len(members) + assignments := make(map[string][]int) + + // assign as many as possible in zone. this will assign up to partsPerMember + // to each consumer. it will also prefer to allocate remainder partitions + // in zone if possible. + for zone, parts := range zonedPartitions { + consumers := zonedConsumers[zone] + if len(consumers) == 0 { + continue + } + + // don't over-allocate. cap partition assignments at the calculated + // target. + partsPerMember := len(parts) / len(consumers) + if partsPerMember > targetPerMember { + partsPerMember = targetPerMember + } + + for _, consumer := range consumers { + assignments[consumer] = append(assignments[consumer], parts[:partsPerMember]...) + parts = parts[partsPerMember:] + } + + // if we had enough partitions for each consumer in this zone to hit its + // target, attempt to use any leftover partitions to satisfy the total + // remainder by adding at most 1 partition per consumer.
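+ // At this point parts holds only the partitions in this zone that were + // not covered by the even partsPerMember split above.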
+ leftover := len(parts) + if partsPerMember == targetPerMember { + if leftover > remainder { + leftover = remainder + } + if leftover > len(consumers) { + leftover = len(consumers) + } + remainder -= leftover + } + + // this loop covers the case where we're assigning extra partitions or + // if there weren't enough to satisfy the targetPerMember and the zoned + // partitions didn't divide evenly. + for i := 0; i < leftover; i++ { + assignments[consumers[i]] = append(assignments[consumers[i]], parts[i]) + } + parts = parts[leftover:] + + if len(parts) == 0 { + delete(zonedPartitions, zone) + } else { + zonedPartitions[zone] = parts + } + } + + // assign out remainders regardless of zone. + var remaining []int + for _, partitions := range zonedPartitions { + remaining = append(remaining, partitions...) + } + + for _, member := range members { + assigned := assignments[member.ID] + delta := targetPerMember - len(assigned) + // if it were possible to assign the remainder in zone, it's been taken + // care of already. now we will portion out any remainder to a member + // that can take it. + if delta >= 0 && remainder > 0 { + delta++ + remainder-- + } + if delta > 0 { + assignments[member.ID] = append(assigned, remaining[:delta]...) + remaining = remaining[delta:] + } + } + + return assignments +} + +// findPartitions extracts the partition ids associated with the topic from the +// list of Partitions provided. +func findPartitions(topic string, partitions []Partition) []int { + var ids []int + for _, partition := range partitions { + if partition.Topic == topic { + ids = append(ids, partition.ID) + } + } + return ids +} + +// findMembersByTopic groups the memberGroupMetadata by topic. +func findMembersByTopic(members []GroupMember) map[string][]GroupMember { + membersByTopic := map[string][]GroupMember{} + for _, member := range members { + for _, topic := range member.Topics { + membersByTopic[topic] = append(membersByTopic[topic], member) + } + } + + // normalize ordering of members to enable grouping across topics by partitions + // + // Want: + // C0 [T0/P0, T1/P0] + // C1 [T0/P1, T1/P1] + // + // Not: + // C0 [T0/P0, T1/P1] + // C1 [T0/P1, T1/P0] + // + // Even though the latter is still round robin, the partitions are crossed + // + for _, members := range membersByTopic { + sort.Slice(members, func(i, j int) bool { + return members[i].ID < members[j].ID + }) + } + + return membersByTopic +} + +// findGroupBalancer returns the GroupBalancer with the specified protocolName +// from the slice provided. +func findGroupBalancer(protocolName string, balancers []GroupBalancer) (GroupBalancer, bool) { + for _, balancer := range balancers { + if balancer.ProtocolName() == protocolName { + return balancer, true + } + } + return nil, false +} diff --git a/vendor/github.com/segmentio/kafka-go/heartbeat.go b/vendor/github.com/segmentio/kafka-go/heartbeat.go new file mode 100644 index 000000000..a0444dae1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/heartbeat.go @@ -0,0 +1,109 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + heartbeatAPI "github.com/segmentio/kafka-go/protocol/heartbeat" +) + +// HeartbeatRequest represents a heartbeat sent to kafka to indicate consumer liveness. +type HeartbeatRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // GroupID is the ID of the group. + GroupID string + + // GenerationID is the current generation for the group.
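+ // It is obtained from the JoinGroup response, and the broker rejects + // heartbeats whose generation does not match its current one.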
+ GenerationID int32 + + // MemberID is the ID of the group member. + MemberID string + + // GroupInstanceID is a unique identifier for the consumer. + GroupInstanceID string +} + +// HeartbeatResponse represents a response from a heartbeat request. +type HeartbeatResponse struct { + // Error is set to non-nil if an error occurred. + Error error + + // The amount of time that the broker throttled the request. + // + // This field will be zero if the kafka broker did not support the + // Heartbeat API in version 1 or above. + Throttle time.Duration +} + +type heartbeatRequestV0 struct { + // GroupID holds the unique group identifier + GroupID string + + // GenerationID holds the generation of the group. + GenerationID int32 + + // MemberID assigned by the group coordinator + MemberID string +} + +// Heartbeat sends a heartbeat request to a kafka broker and returns the response. +func (c *Client) Heartbeat(ctx context.Context, req *HeartbeatRequest) (*HeartbeatResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &heartbeatAPI.Request{ + GroupID: req.GroupID, + GenerationID: req.GenerationID, + MemberID: req.MemberID, + GroupInstanceID: req.GroupInstanceID, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Heartbeat: %w", err) + } + + res := m.(*heartbeatAPI.Response) + + ret := &HeartbeatResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + } + + if res.ErrorCode != 0 { + ret.Error = Error(res.ErrorCode) + } + + return ret, nil +} + +func (t heartbeatRequestV0) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.GenerationID) + + sizeofString(t.MemberID) +} + +func (t heartbeatRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) +} + +type heartbeatResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t heartbeatResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) +} + +func (t heartbeatResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) +} + +func (t *heartbeatResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go b/vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go new file mode 100644 index 000000000..a14714d87 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go @@ -0,0 +1,133 @@ +package kafka + +import ( + "context" + "net" + + "github.com/segmentio/kafka-go/protocol/incrementalalterconfigs" +) + +type ConfigOperation int8 + +const ( + ConfigOperationSet ConfigOperation = 0 + ConfigOperationDelete ConfigOperation = 1 + ConfigOperationAppend ConfigOperation = 2 + ConfigOperationSubtract ConfigOperation = 3 +) + +// IncrementalAlterConfigsRequest is a request to the IncrementalAlterConfigs API. +type IncrementalAlterConfigsRequest struct { + // Addr is the address of the kafka broker to send the request to. + Addr net.Addr + + // Resources contains the list of resources to update configs for. + Resources []IncrementalAlterConfigsRequestResource + + // ValidateOnly indicates whether Kafka should validate the changes without actually + // applying them. + ValidateOnly bool +} + +// IncrementalAlterConfigsRequestResource contains the details of a single resource type whose +// configs should be altered. 
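+// +// For example (sketch; the topic name and config values are illustrative): +// +// res := kafka.IncrementalAlterConfigsRequestResource{ +// ResourceType: kafka.ResourceTypeTopic, +// ResourceName: "my-topic", +// Configs: []kafka.IncrementalAlterConfigsRequestConfig{{ +// Name: "retention.ms", +// Value: "86400000", +// ConfigOperation: kafka.ConfigOperationSet, +// }}, +// }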
+type IncrementalAlterConfigsRequestResource struct { + // ResourceType is the type of resource to update. + ResourceType ResourceType + + // ResourceName is the name of the resource to update (for example, topic name or broker ID). + ResourceName string + + // Configs contains the list of config key/values to update. + Configs []IncrementalAlterConfigsRequestConfig +} + +// IncrementalAlterConfigsRequestConfig describes a single config key/value pair that should +// be altered. +type IncrementalAlterConfigsRequestConfig struct { + // Name is the name of the config. + Name string + + // Value is the value to set for this config. + Value string + + // ConfigOperation indicates how this config should be updated (e.g., add, delete, etc.). + ConfigOperation ConfigOperation +} + +// IncrementalAlterConfigsResponse is a response from the IncrementalAlterConfigs API. +type IncrementalAlterConfigsResponse struct { + // Resources contains details of each resource config that was updated. + Resources []IncrementalAlterConfigsResponseResource +} + +// IncrementalAlterConfigsResponseResource contains the response details for a single resource +// whose configs were updated. +type IncrementalAlterConfigsResponseResource struct { + // Error is set to a non-nil value if an error occurred while updating this specific + // config. + Error error + + // ResourceType is the type of resource that was updated. + ResourceType ResourceType + + // ResourceName is the name of the resource that was updated. + ResourceName string +} + +func (c *Client) IncrementalAlterConfigs( + ctx context.Context, + req *IncrementalAlterConfigsRequest, +) (*IncrementalAlterConfigsResponse, error) { + apiReq := &incrementalalterconfigs.Request{ + ValidateOnly: req.ValidateOnly, + } + + for _, res := range req.Resources { + apiRes := incrementalalterconfigs.RequestResource{ + ResourceType: int8(res.ResourceType), + ResourceName: res.ResourceName, + } + + for _, config := range res.Configs { + apiRes.Configs = append( + apiRes.Configs, + incrementalalterconfigs.RequestConfig{ + Name: config.Name, + Value: config.Value, + ConfigOperation: int8(config.ConfigOperation), + }, + ) + } + + apiReq.Resources = append( + apiReq.Resources, + apiRes, + ) + } + + protoResp, err := c.roundTrip( + ctx, + req.Addr, + apiReq, + ) + if err != nil { + return nil, err + } + + resp := &IncrementalAlterConfigsResponse{} + + apiResp := protoResp.(*incrementalalterconfigs.Response) + for _, res := range apiResp.Responses { + resp.Resources = append( + resp.Resources, + IncrementalAlterConfigsResponseResource{ + Error: makeError(res.ErrorCode, res.ErrorMessage), + ResourceType: ResourceType(res.ResourceType), + ResourceName: res.ResourceName, + }, + ) + } + + return resp, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/initproducerid.go b/vendor/github.com/segmentio/kafka-go/initproducerid.go new file mode 100644 index 000000000..5cc6b8f24 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/initproducerid.go @@ -0,0 +1,82 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/initproducerid" +) + +// InitProducerIDRequest is the request structure for the InitProducerID function. +type InitProducerIDRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The transactional id key. + TransactionalID string + + // Time after which a transaction should time out + TransactionTimeoutMs int + + // The Producer ID (PID).
+ // This is used to disambiguate requests if a transactional id is reused following its expiration. + // Only supported in version >=3 of the request, will be ignored otherwise. + ProducerID int + + // The producer's current epoch. + // This will be checked against the producer epoch on the broker, + // and the request will return an error if they do not match. + // Only supported in version >=3 of the request, will be ignored otherwise. + ProducerEpoch int +} + +// ProducerSession contains useful information about the producer session from the broker's response. +type ProducerSession struct { + // The Producer ID (PID) for the current producer session + ProducerID int + + // The epoch associated with the current producer session for the given PID + ProducerEpoch int +} + +// InitProducerIDResponse is the response structure for the InitProducerID function. +type InitProducerIDResponse struct { + // The Transaction/Group Coordinator details + Producer *ProducerSession + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // An error that may have occurred while attempting to retrieve initProducerId + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. + Error error +} + +// InitProducerID sends an InitProducerID request to a kafka broker and returns the +// response. +func (c *Client) InitProducerID(ctx context.Context, req *InitProducerIDRequest) (*InitProducerIDResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &initproducerid.Request{ + TransactionalID: req.TransactionalID, + TransactionTimeoutMs: int32(req.TransactionTimeoutMs), + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).InitProducerId: %w", err) + } + + res := m.(*initproducerid.Response) + + return &InitProducerIDResponse{ + Producer: &ProducerSession{ + ProducerID: int(res.ProducerID), + ProducerEpoch: int(res.ProducerEpoch), + }, + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(res.ErrorCode, ""), + }, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/joingroup.go b/vendor/github.com/segmentio/kafka-go/joingroup.go new file mode 100644 index 000000000..30823a69a --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/joingroup.go @@ -0,0 +1,377 @@ +package kafka + +import ( + "bufio" + "bytes" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol" + "github.com/segmentio/kafka-go/protocol/consumer" + "github.com/segmentio/kafka-go/protocol/joingroup" +) + +// JoinGroupRequest is the request structure for the JoinGroup function. +type JoinGroupRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // GroupID of the group to join. + GroupID string + + // The duration after which the coordinator considers the consumer dead + // if it has not received a heartbeat. + SessionTimeout time.Duration + + // The duration the coordinator will wait for each member to rejoin when rebalancing the group. + RebalanceTimeout time.Duration + + // The ID assigned by the group coordinator. + MemberID string + + // The unique identifier for the consumer instance. + GroupInstanceID string + + // The name for the class of protocols implemented by the group being joined. + ProtocolType string + + // The list of protocols the member supports. + Protocols []GroupProtocol +} + +// GroupProtocol represents a consumer group protocol.
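+// +// A subscription for the built-in "range" protocol might look like this (sketch; the topic name is illustrative): +// +// proto := kafka.GroupProtocol{ +// Name: "range", +// Metadata: kafka.GroupProtocolSubscription{ +// Topics: []string{"my-topic"}, +// }, +// }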
+type GroupProtocol struct { + // The protocol name. + Name string + + // The protocol metadata. + Metadata GroupProtocolSubscription +} + +type GroupProtocolSubscription struct { + // The Topics to subscribe to. + Topics []string + + // UserData associated with the subscription for the given protocol + UserData []byte + + // Partitions owned by this consumer. + OwnedPartitions map[string][]int +} + +// JoinGroupResponse is the response structure for the JoinGroup function. +type JoinGroupResponse struct { + // An error that may have occurred when attempting to join the group. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // The generation ID of the group. + GenerationID int + + // The group protocol selected by the coordinator. + ProtocolName string + + // The group protocol name. + ProtocolType string + + // The leader of the group. + LeaderID string + + // The group member ID. + MemberID string + + // The members of the group. + Members []JoinGroupResponseMember +} + +// JoinGroupResponseMember represents a group member in a response to a JoinGroup request. +type JoinGroupResponseMember struct { + // The group member ID. + ID string + + // The unique identifier of the consumer instance. + GroupInstanceID string + + // The group member metadata. + Metadata GroupProtocolSubscription +} + +// JoinGroup sends a join group request to the coordinator and returns the response. +func (c *Client) JoinGroup(ctx context.Context, req *JoinGroupRequest) (*JoinGroupResponse, error) { + joinGroup := joingroup.Request{ + GroupID: req.GroupID, + SessionTimeoutMS: int32(req.SessionTimeout.Milliseconds()), + RebalanceTimeoutMS: int32(req.RebalanceTimeout.Milliseconds()), + MemberID: req.MemberID, + GroupInstanceID: req.GroupInstanceID, + ProtocolType: req.ProtocolType, + Protocols: make([]joingroup.RequestProtocol, 0, len(req.Protocols)), + } + + for _, proto := range req.Protocols { + protoMeta := consumer.Subscription{ + Version: consumer.MaxVersionSupported, + Topics: proto.Metadata.Topics, + UserData: proto.Metadata.UserData, + OwnedPartitions: make([]consumer.TopicPartition, 0, len(proto.Metadata.OwnedPartitions)), + } + for topic, partitions := range proto.Metadata.OwnedPartitions { + tp := consumer.TopicPartition{ + Topic: topic, + Partitions: make([]int32, 0, len(partitions)), + } + for _, partition := range partitions { + tp.Partitions = append(tp.Partitions, int32(partition)) + } + protoMeta.OwnedPartitions = append(protoMeta.OwnedPartitions, tp) + } + + metaBytes, err := protocol.Marshal(consumer.MaxVersionSupported, protoMeta) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err) + } + + joinGroup.Protocols = append(joinGroup.Protocols, joingroup.RequestProtocol{ + Name: proto.Name, + Metadata: metaBytes, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &joinGroup) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err) + } + + r := m.(*joingroup.Response) + + res := &JoinGroupResponse{ + Error: makeError(r.ErrorCode, ""), + Throttle: makeDuration(r.ThrottleTimeMS), + GenerationID: int(r.GenerationID), + ProtocolName: r.ProtocolName, + ProtocolType: r.ProtocolType, + LeaderID: r.LeaderID, + MemberID: r.MemberID, + Members: make([]JoinGroupResponseMember, 0, len(r.Members)), + } + + for _, member := range r.Members { + var meta
consumer.Subscription + err = protocol.Unmarshal(member.Metadata, consumer.MaxVersionSupported, &meta) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err) + } + subscription := GroupProtocolSubscription{ + Topics: meta.Topics, + UserData: meta.UserData, + OwnedPartitions: make(map[string][]int, len(meta.OwnedPartitions)), + } + for _, owned := range meta.OwnedPartitions { + subscription.OwnedPartitions[owned.Topic] = make([]int, 0, len(owned.Partitions)) + for _, partition := range owned.Partitions { + subscription.OwnedPartitions[owned.Topic] = append(subscription.OwnedPartitions[owned.Topic], int(partition)) + } + } + res.Members = append(res.Members, JoinGroupResponseMember{ + ID: member.MemberID, + GroupInstanceID: member.GroupInstanceID, + Metadata: subscription, + }) + } + + return res, nil +} + +type groupMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (t groupMetadata) size() int32 { + return sizeofInt16(t.Version) + + sizeofStringArray(t.Topics) + + sizeofBytes(t.UserData) +} + +func (t groupMetadata) writeTo(wb *writeBuffer) { + wb.writeInt16(t.Version) + wb.writeStringArray(t.Topics) + wb.writeBytes(t.UserData) +} + +func (t groupMetadata) bytes() []byte { + buf := bytes.NewBuffer(nil) + t.writeTo(&writeBuffer{w: buf}) + return buf.Bytes() +} + +func (t *groupMetadata) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.Version); err != nil { + return + } + if remain, err = readStringArray(r, remain, &t.Topics); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.UserData); err != nil { + return + } + return +} + +type joinGroupRequestGroupProtocolV1 struct { + ProtocolName string + ProtocolMetadata []byte +} + +func (t joinGroupRequestGroupProtocolV1) size() int32 { + return sizeofString(t.ProtocolName) + + sizeofBytes(t.ProtocolMetadata) +} + +func (t joinGroupRequestGroupProtocolV1) writeTo(wb *writeBuffer) { + wb.writeString(t.ProtocolName) + wb.writeBytes(t.ProtocolMetadata) +} + +type joinGroupRequestV1 struct { + // GroupID holds the unique group identifier + GroupID string + + // SessionTimeout holds the timeout in ms; the coordinator considers the + // consumer dead if it receives no heartbeat within this timeout. + SessionTimeout int32 + + // RebalanceTimeout holds the maximum time that the coordinator will wait + // for each member to rejoin when rebalancing the group in ms + RebalanceTimeout int32 + + // MemberID assigned by the group coordinator or the zero string if joining + // for the first time.
+ MemberID string + + // ProtocolType holds the unique name for class of protocols implemented by group + ProtocolType string + + // GroupProtocols holds the list of protocols that the member supports + GroupProtocols []joinGroupRequestGroupProtocolV1 +} + +func (t joinGroupRequestV1) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.SessionTimeout) + + sizeofInt32(t.RebalanceTimeout) + + sizeofString(t.MemberID) + + sizeofString(t.ProtocolType) + + sizeofArray(len(t.GroupProtocols), func(i int) int32 { return t.GroupProtocols[i].size() }) +} + +func (t joinGroupRequestV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.SessionTimeout) + wb.writeInt32(t.RebalanceTimeout) + wb.writeString(t.MemberID) + wb.writeString(t.ProtocolType) + wb.writeArray(len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(wb) }) +} + +type joinGroupResponseMemberV1 struct { + // MemberID assigned by the group coordinator + MemberID string + MemberMetadata []byte +} + +func (t joinGroupResponseMemberV1) size() int32 { + return sizeofString(t.MemberID) + + sizeofBytes(t.MemberMetadata) +} + +func (t joinGroupResponseMemberV1) writeTo(wb *writeBuffer) { + wb.writeString(t.MemberID) + wb.writeBytes(t.MemberMetadata) +} + +func (t *joinGroupResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.MemberID); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.MemberMetadata); err != nil { + return + } + return +} + +type joinGroupResponseV1 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // GenerationID holds the generation of the group. + GenerationID int32 + + // GroupProtocol holds the group protocol selected by the coordinator + GroupProtocol string + + // LeaderID holds the leader of the group + LeaderID string + + // MemberID assigned by the group coordinator + MemberID string + Members []joinGroupResponseMemberV1 +} + +func (t joinGroupResponseV1) size() int32 { + return sizeofInt16(t.ErrorCode) + + sizeofInt32(t.GenerationID) + + sizeofString(t.GroupProtocol) + + sizeofString(t.LeaderID) + + sizeofString(t.MemberID) + + sizeofArray(len(t.Members), func(i int) int32 { return t.Members[i].size() }) +} + +func (t joinGroupResponseV1) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeInt32(t.GenerationID) + wb.writeString(t.GroupProtocol) + wb.writeString(t.LeaderID) + wb.writeString(t.MemberID) + wb.writeArray(len(t.Members), func(i int) { t.Members[i].writeTo(wb) }) +} + +func (t *joinGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { + return + } + if remain, err = readInt32(r, remain, &t.GenerationID); err != nil { + return + } + if remain, err = readString(r, remain, &t.GroupProtocol); err != nil { + return + } + if remain, err = readString(r, remain, &t.LeaderID); err != nil { + return + } + if remain, err = readString(r, remain, &t.MemberID); err != nil { + return + } + + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var item joinGroupResponseMemberV1 + if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil { + return + } + t.Members = append(t.Members, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} diff --git a/vendor/github.com/segmentio/kafka-go/kafka.go b/vendor/github.com/segmentio/kafka-go/kafka.go new file mode 100644 index 000000000..d2d36e413
--- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/kafka.go @@ -0,0 +1,100 @@ +package kafka + +import "github.com/segmentio/kafka-go/protocol" + +// Broker represents a kafka broker in a kafka cluster. +type Broker struct { + Host string + Port int + ID int + Rack string +} + +// Topic represents a topic in a kafka cluster. +type Topic struct { + // Name of the topic. + Name string + + // True if the topic is internal. + Internal bool + + // The list of partitions currently available on this topic. + Partitions []Partition + + // An error that may have occurred while attempting to read the topic + // metadata. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// Partition carries the metadata associated with a kafka partition. +type Partition struct { + // Name of the topic that the partition belongs to, and its index in the + // topic. + Topic string + ID int + + // Leader, replicas, and ISR for the partition. + // + // When no physical host is known to be running a broker, the Host and Port + // fields will be set to the zero values. The logical broker ID is always + // set to the value known to the kafka cluster, even if the broker is not + // currently backed by a physical host. + Leader Broker + Replicas []Broker + Isr []Broker + + // Available only with metadata API level >= 6: + OfflineReplicas []Broker + + // An error that may have occurred while attempting to read the partition + // metadata. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// Marshal encodes v into a binary representation of the value in the kafka data +// format. +// +// If v is, or contains, struct types, the kafka struct fields are interpreted +// and may contain one of these values: +// +// nullable valid on bytes and strings, encodes as a nullable value +// compact valid on strings, encodes as a compact string +// +// The kafka struct tags should not contain min and max versions. If you need to +// encode types based on specific versions of kafka APIs, use the Version type +// instead. +func Marshal(v interface{}) ([]byte, error) { + return protocol.Marshal(-1, v) +} + +// Unmarshal decodes a binary representation from b into v. +// +// See Marshal for details. +func Unmarshal(b []byte, v interface{}) error { + return protocol.Unmarshal(b, -1, v) +} + +// Version represents a version number for kafka APIs. +type Version int16 + +// Marshal is like the top-level Marshal function, but will only encode struct +// fields for which n falls within the min and max versions specified on the +// struct tag. +func (n Version) Marshal(v interface{}) ([]byte, error) { + return protocol.Marshal(int16(n), v) +} + +// Unmarshal is like the top-level Unmarshal function, but will only decode +// struct fields for which n falls within the min and max versions specified on +// the struct tag.
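+// +// For example (sketch; someStruct stands in for any type carrying kafka version-range struct tags, and payload for its encoded bytes): +// +// var v someStruct +// err := kafka.Version(2).Unmarshal(payload, &v)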
+func (n Version) Unmarshal(b []byte, v interface{}) error { + return protocol.Unmarshal(b, int16(n), v) +} diff --git a/vendor/github.com/segmentio/kafka-go/leavegroup.go b/vendor/github.com/segmentio/kafka-go/leavegroup.go new file mode 100644 index 000000000..ad59a55c0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/leavegroup.go @@ -0,0 +1,147 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/leavegroup" +) + +// LeaveGroupRequest is the request structure for the LeaveGroup function. +type LeaveGroupRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // GroupID of the group to leave. + GroupID string + + // List of leaving member identities. + Members []LeaveGroupRequestMember +} + +// LeaveGroupRequestMember represents the identity of a member leaving a group. +type LeaveGroupRequestMember struct { + // The member ID to remove from the group. + ID string + + // The group instance ID to remove from the group. + GroupInstanceID string +} + +// LeaveGroupResponse is the response structure for the LeaveGroup function. +type LeaveGroupResponse struct { + // An error that may have occurred when attempting to leave the group. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of leaving member responses. + Members []LeaveGroupResponseMember +} + +// LeaveGroupResponseMember represents a member leaving the group. +type LeaveGroupResponseMember struct { + // The member ID of the member leaving the group. + ID string + + // The group instance ID to remove from the group. + GroupInstanceID string + + // An error that may have occurred when attempting to remove the member from the group. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error +} + +func (c *Client) LeaveGroup(ctx context.Context, req *LeaveGroupRequest) (*LeaveGroupResponse, error) { + leaveGroup := leavegroup.Request{ + GroupID: req.GroupID, + Members: make([]leavegroup.RequestMember, 0, len(req.Members)), + } + + for _, member := range req.Members { + leaveGroup.Members = append(leaveGroup.Members, leavegroup.RequestMember{ + MemberID: member.ID, + GroupInstanceID: member.GroupInstanceID, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &leaveGroup) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).LeaveGroup: %w", err) + } + + r := m.(*leavegroup.Response) + + res := &LeaveGroupResponse{ + Error: makeError(r.ErrorCode, ""), + Throttle: makeDuration(r.ThrottleTimeMS), + } + + if len(r.Members) == 0 { + // If we're using a version of the api without the + // members array in the response, just add a member + // so the api is consistent across versions.
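+ // Note that this fallback indexes req.Members, so the request is + // expected to carry at least one member.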
+ r.Members = []leavegroup.ResponseMember{ + { + MemberID: req.Members[0].ID, + GroupInstanceID: req.Members[0].GroupInstanceID, + }, + } + } + + res.Members = make([]LeaveGroupResponseMember, 0, len(r.Members)) + for _, member := range r.Members { + res.Members = append(res.Members, LeaveGroupResponseMember{ + ID: member.MemberID, + GroupInstanceID: member.GroupInstanceID, + Error: makeError(member.ErrorCode, ""), + }) + } + + return res, nil +} + +type leaveGroupRequestV0 struct { + // GroupID holds the unique group identifier + GroupID string + + // MemberID assigned by the group coordinator or the zero string if joining + // for the first time. + MemberID string +} + +func (t leaveGroupRequestV0) size() int32 { + return sizeofString(t.GroupID) + sizeofString(t.MemberID) +} + +func (t leaveGroupRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeString(t.MemberID) +} + +type leaveGroupResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t leaveGroupResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) +} + +func (t leaveGroupResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) +} + +func (t *leaveGroupResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + remain, err = readInt16(r, size, &t.ErrorCode) + return +} diff --git a/vendor/github.com/segmentio/kafka-go/listgroups.go b/vendor/github.com/segmentio/kafka-go/listgroups.go new file mode 100644 index 000000000..229de9352 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/listgroups.go @@ -0,0 +1,139 @@ +package kafka + +import ( + "bufio" + "context" + "net" + + "github.com/segmentio/kafka-go/protocol/listgroups" +) + +// ListGroupsRequest is a request to the ListGroups API. +type ListGroupsRequest struct { + // Addr is the address of the kafka broker to send the request to. + Addr net.Addr +} + +// ListGroupsResponse is a response from the ListGroups API. +type ListGroupsResponse struct { + // Error is set to a non-nil value if a top-level error occurred while fetching + // groups. + Error error + + // Groups contains the list of groups. + Groups []ListGroupsResponseGroup +} + +// ListGroupsResponseGroup contains the response details for a single group. +type ListGroupsResponseGroup struct { + // GroupID is the ID of the group. + GroupID string + + // Coordinator is the ID of the coordinator broker for the group. + Coordinator int +} + +func (c *Client) ListGroups( + ctx context.Context, + req *ListGroupsRequest, +) (*ListGroupsResponse, error) { + protoResp, err := c.roundTrip(ctx, req.Addr, &listgroups.Request{}) + if err != nil { + return nil, err + } + apiResp := protoResp.(*listgroups.Response) + resp := &ListGroupsResponse{ + Error: makeError(apiResp.ErrorCode, ""), + } + + for _, apiGroupInfo := range apiResp.Groups { + resp.Groups = append(resp.Groups, ListGroupsResponseGroup{ + GroupID: apiGroupInfo.GroupID, + Coordinator: int(apiGroupInfo.BrokerID), + }) + } + + return resp, nil +} + +// TODO: Remove everything below and use protocol-based version above everywhere. 
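+ +// A minimal call through the protocol-based ListGroups above (sketch; assumes a broker reachable at localhost:9092): +// +// client := &kafka.Client{} +// resp, err := client.ListGroups(context.Background(), &kafka.ListGroupsRequest{ +// Addr: kafka.TCP("localhost:9092"), +// }) +// if err != nil { +// log.Fatal(err) +// } +// for _, g := range resp.Groups { +// log.Printf("group %s (coordinator broker %d)", g.GroupID, g.Coordinator) +// } +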
+type listGroupsRequestV1 struct { +} + +func (t listGroupsRequestV1) size() int32 { + return 0 +} + +func (t listGroupsRequestV1) writeTo(wb *writeBuffer) { +} + +type listGroupsResponseGroupV1 struct { + // GroupID holds the unique group identifier + GroupID string + ProtocolType string +} + +func (t listGroupsResponseGroupV1) size() int32 { + return sizeofString(t.GroupID) + sizeofString(t.ProtocolType) +} + +func (t listGroupsResponseGroupV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeString(t.ProtocolType) +} + +func (t *listGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.GroupID); err != nil { + return + } + if remain, err = readString(r, remain, &t.ProtocolType); err != nil { + return + } + return +} + +type listGroupsResponseV1 struct { + // ThrottleTimeMS holds the duration in milliseconds for which the request + // was throttled due to quota violation (Zero if the request did not violate + // any quota) + ThrottleTimeMS int32 + + // ErrorCode holds response error code + ErrorCode int16 + Groups []listGroupsResponseGroupV1 +} + +func (t listGroupsResponseV1) size() int32 { + return sizeofInt32(t.ThrottleTimeMS) + + sizeofInt16(t.ErrorCode) + + sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) +} + +func (t listGroupsResponseV1) writeTo(wb *writeBuffer) { + wb.writeInt32(t.ThrottleTimeMS) + wb.writeInt16(t.ErrorCode) + wb.writeArray(len(t.Groups), func(i int) { t.Groups[i].writeTo(wb) }) +} + +func (t *listGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + + fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + var item listGroupsResponseGroupV1 + if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil { + return + } + t.Groups = append(t.Groups, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} diff --git a/vendor/github.com/segmentio/kafka-go/listoffset.go b/vendor/github.com/segmentio/kafka-go/listoffset.go new file mode 100644 index 000000000..11c5d04b4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/listoffset.go @@ -0,0 +1,286 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/listoffsets" +) + +// OffsetRequest represents a request to retrieve a single partition offset. +type OffsetRequest struct { + Partition int + Timestamp int64 +} + +// FirstOffsetOf constructs an OffsetRequest which asks for the first offset of +// the partition given as argument. +func FirstOffsetOf(partition int) OffsetRequest { + return OffsetRequest{Partition: partition, Timestamp: FirstOffset} +} + +// LastOffsetOf constructs an OffsetRequest which asks for the last offset of +// the partition given as argument. +func LastOffsetOf(partition int) OffsetRequest { + return OffsetRequest{Partition: partition, Timestamp: LastOffset} +} + +// TimeOffsetOf constructs an OffsetRequest which asks for a partition offset +// at a given time. +func TimeOffsetOf(partition int, at time.Time) OffsetRequest { + return OffsetRequest{Partition: partition, Timestamp: timestamp(at)} +} + +// PartitionOffsets carries information about offsets available in a topic +// partition.
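+// +// FirstOffset and LastOffset remain -1 unless they were requested, and Offsets +// maps each offset resolved from a time-based query to its timestamp.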
+type PartitionOffsets struct { + Partition int + FirstOffset int64 + LastOffset int64 + Offsets map[int64]time.Time + Error error +} + +// ListOffsetsRequest represents a request sent to a kafka broker to list the +// offsets of topic partitions. +type ListOffsetsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // A mapping of topic names to the list of partitions that the program wishes + // to get the offsets for. + Topics map[string][]OffsetRequest + + // The isolation level for the request. + // + // Defaults to ReadUncommitted. + // + // This field requires the kafka broker to support the ListOffsets API in + // version 2 or above (otherwise the value is ignored). + IsolationLevel IsolationLevel +} + +// ListOffsetsResponse represents a response from a kafka broker to an offset +// listing request. +type ListOffsetsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Mapping of topic names to partition offsets; there will be one entry + // for each topic in the request. + Topics map[string][]PartitionOffsets +} + +// ListOffsets sends an offset request to a kafka broker and returns the +// response. +func (c *Client) ListOffsets(ctx context.Context, req *ListOffsetsRequest) (*ListOffsetsResponse, error) { + type topicPartition struct { + topic string + partition int + } + + partitionOffsets := make(map[topicPartition]PartitionOffsets) + + for topicName, requests := range req.Topics { + for _, r := range requests { + key := topicPartition{ + topic: topicName, + partition: r.Partition, + } + + partition, ok := partitionOffsets[key] + if !ok { + partition = PartitionOffsets{ + Partition: r.Partition, + FirstOffset: -1, + LastOffset: -1, + Offsets: make(map[int64]time.Time), + } + } + + switch r.Timestamp { + case FirstOffset: + partition.FirstOffset = 0 + case LastOffset: + partition.LastOffset = 0 + } + + partitionOffsets[topicPartition{ + topic: topicName, + partition: r.Partition, + }] = partition + } + } + + topics := make([]listoffsets.RequestTopic, 0, len(req.Topics)) + + for topicName, requests := range req.Topics { + partitions := make([]listoffsets.RequestPartition, len(requests)) + + for i, r := range requests { + partitions[i] = listoffsets.RequestPartition{ + Partition: int32(r.Partition), + CurrentLeaderEpoch: -1, + Timestamp: r.Timestamp, + } + } + + topics = append(topics, listoffsets.RequestTopic{ + Topic: topicName, + Partitions: partitions, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &listoffsets.Request{ + ReplicaID: -1, + IsolationLevel: int8(req.IsolationLevel), + Topics: topics, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).ListOffsets: %w", err) + } + + res := m.(*listoffsets.Response) + ret := &ListOffsetsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Topics: make(map[string][]PartitionOffsets, len(res.Topics)), + } + + for _, t := range res.Topics { + for _, p := range t.Partitions { + key := topicPartition{ + topic: t.Topic, + partition: int(p.Partition), + } + + partition := partitionOffsets[key] + + switch p.Timestamp { + case FirstOffset: + partition.FirstOffset = p.Offset + case LastOffset: + partition.LastOffset = p.Offset + default: + partition.Offsets[p.Offset] = makeTime(p.Timestamp) + } + + if p.ErrorCode != 0 { + partition.Error = Error(p.ErrorCode) + } + + partitionOffsets[key] = partition + } + } + + for key, partition := range partitionOffsets { + ret.Topics[key.topic] = append(ret.Topics[key.topic],
partition) + } + + return ret, nil +} + +type listOffsetRequestV1 struct { + ReplicaID int32 + Topics []listOffsetRequestTopicV1 +} + +func (r listOffsetRequestV1) size() int32 { + return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r listOffsetRequestV1) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ReplicaID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type listOffsetRequestTopicV1 struct { + TopicName string + Partitions []listOffsetRequestPartitionV1 +} + +func (t listOffsetRequestTopicV1) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t listOffsetRequestTopicV1) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type listOffsetRequestPartitionV1 struct { + Partition int32 + Time int64 +} + +func (p listOffsetRequestPartitionV1) size() int32 { + return 4 + 8 +} + +func (p listOffsetRequestPartitionV1) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt64(p.Time) +} + +type listOffsetResponseV1 []listOffsetResponseTopicV1 + +func (r listOffsetResponseV1) size() int32 { + return sizeofArray(len(r), func(i int) int32 { return r[i].size() }) +} + +func (r listOffsetResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(r), func(i int) { r[i].writeTo(wb) }) +} + +type listOffsetResponseTopicV1 struct { + TopicName string + PartitionOffsets []partitionOffsetV1 +} + +func (t listOffsetResponseTopicV1) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.PartitionOffsets), func(i int) int32 { return t.PartitionOffsets[i].size() }) +} + +func (t listOffsetResponseTopicV1) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(wb) }) +} + +type partitionOffsetV1 struct { + Partition int32 + ErrorCode int16 + Timestamp int64 + Offset int64 +} + +func (p partitionOffsetV1) size() int32 { + return 4 + 2 + 8 + 8 +} + +func (p partitionOffsetV1) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Timestamp) + wb.writeInt64(p.Offset) +} + +func (p *partitionOffsetV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/listpartitionreassignments.go b/vendor/github.com/segmentio/kafka-go/listpartitionreassignments.go new file mode 100644 index 000000000..aa01fff3f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/listpartitionreassignments.go @@ -0,0 +1,135 @@ +package kafka + +import ( + "context" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/listpartitionreassignments" +) + +// ListPartitionReassignmentsRequest is a request to the ListPartitionReassignments API. +type ListPartitionReassignmentsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Topics we want reassignments for, mapped by their name, or nil to list everything. 
+ Topics map[string]ListPartitionReassignmentsRequestTopic + + // Timeout is the amount of time to wait for the request to complete. + Timeout time.Duration +} + +// ListPartitionReassignmentsRequestTopic contains the requested partitions for a single +// topic. +type ListPartitionReassignmentsRequestTopic struct { + // The partitions to list partition reassignments for. + PartitionIndexes []int +} + +// ListPartitionReassignmentsResponse is a response from the ListPartitionReassignments API. +type ListPartitionReassignmentsResponse struct { + // Error is set to a non-nil value including the code and message if a top-level + // error was encountered. + Error error + + // Topics contains results for each topic, mapped by their name. + Topics map[string]ListPartitionReassignmentsResponseTopic +} + +// ListPartitionReassignmentsResponseTopic contains the detailed result of +// ongoing reassignments for a topic. +type ListPartitionReassignmentsResponseTopic struct { + // Partitions contains result for topic partitions. + Partitions []ListPartitionReassignmentsResponsePartition +} + +// ListPartitionReassignmentsResponsePartition contains the detailed result of +// ongoing reassignments for a single partition. +type ListPartitionReassignmentsResponsePartition struct { + // PartitionIndex contains index of the partition. + PartitionIndex int + + // Replicas contains the current replica set. + Replicas []int + + // AddingReplicas contains the set of replicas we are currently adding. + AddingReplicas []int + + // RemovingReplicas contains the set of replicas we are currently removing. + RemovingReplicas []int +} + +func (c *Client) ListPartitionReassignments( + ctx context.Context, + req *ListPartitionReassignmentsRequest, +) (*ListPartitionReassignmentsResponse, error) { + apiReq := &listpartitionreassignments.Request{ + TimeoutMs: int32(req.Timeout.Milliseconds()), + } + + for topicName, topicReq := range req.Topics { + apiReq.Topics = append( + apiReq.Topics, + listpartitionreassignments.RequestTopic{ + Name: topicName, + PartitionIndexes: intToInt32Array(topicReq.PartitionIndexes), + }, + ) + } + + protoResp, err := c.roundTrip( + ctx, + req.Addr, + apiReq, + ) + if err != nil { + return nil, err + } + apiResp := protoResp.(*listpartitionreassignments.Response) + + resp := &ListPartitionReassignmentsResponse{ + Error: makeError(apiResp.ErrorCode, apiResp.ErrorMessage), + Topics: make(map[string]ListPartitionReassignmentsResponseTopic), + } + + for _, topicResult := range apiResp.Topics { + respTopic := ListPartitionReassignmentsResponseTopic{} + for _, partitionResult := range topicResult.Partitions { + respTopic.Partitions = append( + respTopic.Partitions, + ListPartitionReassignmentsResponsePartition{ + PartitionIndex: int(partitionResult.PartitionIndex), + Replicas: int32ToIntArray(partitionResult.Replicas), + AddingReplicas: int32ToIntArray(partitionResult.AddingReplicas), + RemovingReplicas: int32ToIntArray(partitionResult.RemovingReplicas), + }, + ) + } + resp.Topics[topicResult.Name] = respTopic + } + + return resp, nil +} + +func intToInt32Array(arr []int) []int32 { + if arr == nil { + return nil + } + res := make([]int32, len(arr)) + for i := range arr { + res[i] = int32(arr[i]) + } + return res +} + +func int32ToIntArray(arr []int32) []int { + if arr == nil { + return nil + } + res := make([]int, len(arr)) + for i := range arr { + res[i] = int(arr[i]) + } + return res +} diff --git a/vendor/github.com/segmentio/kafka-go/logger.go b/vendor/github.com/segmentio/kafka-go/logger.go new 
file mode 100644 index 000000000..d359ab789 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/logger.go @@ -0,0 +1,17 @@ +package kafka + +// Logger is a minimal logging interface; the standard library's *log.Logger +// implements it. +type Logger interface { + Printf(string, ...interface{}) +} + +// LoggerFunc is a bridge between Logger and any third-party logger. +// Usage: +// l := NewLogger() // some logger +// r := kafka.NewReader(kafka.ReaderConfig{ +// Logger: kafka.LoggerFunc(l.Infof), +// ErrorLogger: kafka.LoggerFunc(l.Errorf), +// }) +type LoggerFunc func(string, ...interface{}) + +func (f LoggerFunc) Printf(msg string, args ...interface{}) { f(msg, args...) } diff --git a/vendor/github.com/segmentio/kafka-go/message.go b/vendor/github.com/segmentio/kafka-go/message.go new file mode 100644 index 000000000..0539e6038 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/message.go @@ -0,0 +1,132 @@ +package kafka + +import ( + "time" +) + +// Message is a data structure representing kafka messages. +type Message struct { + // Topic indicates which topic this message was consumed from via Reader. + // + // When being used with Writer, this can be used to configure the topic if + // not already specified on the writer itself. + Topic string + + // Partition is read-only and MUST NOT be set when writing messages + Partition int + Offset int64 + HighWaterMark int64 + Key []byte + Value []byte + Headers []Header + + // This field is used to hold arbitrary data you wish to include, so that it + // is available when handling the message in the Writer's `Completion` method; + // this lets the application run post-processing on each message. + WriterData interface{} + + // If not set at creation, Time will be automatically set when + // writing the message. + Time time.Time +} + +func (msg Message) message(cw *crc32Writer) message { + m := message{ + MagicByte: 1, + Key: msg.Key, + Value: msg.Value, + Timestamp: timestamp(msg.Time), + } + if cw != nil { + m.CRC = m.crc32(cw) + } + return m +} + +const timestampSize = 8 + +func (msg *Message) size() int32 { + return 4 + 1 + 1 + sizeofBytes(msg.Key) + sizeofBytes(msg.Value) + timestampSize +} + +func (msg *Message) headerSize() int { + return varArrayLen(len(msg.Headers), func(i int) int { + h := &msg.Headers[i] + return varStringLen(h.Key) + varBytesLen(h.Value) + }) +} + +func (msg *Message) totalSize() int32 { + return int32(msg.headerSize()) + msg.size() +} + +type message struct { + CRC int32 + MagicByte int8 + Attributes int8 + Timestamp int64 + Key []byte + Value []byte +} + +func (m message) crc32(cw *crc32Writer) int32 { + cw.crc32 = 0 + cw.writeInt8(m.MagicByte) + cw.writeInt8(m.Attributes) + if m.MagicByte != 0 { + cw.writeInt64(m.Timestamp) + } + cw.writeBytes(m.Key) + cw.writeBytes(m.Value) + return int32(cw.crc32) +} + +func (m message) size() int32 { + size := 4 + 1 + 1 + sizeofBytes(m.Key) + sizeofBytes(m.Value) + if m.MagicByte != 0 { + size += timestampSize + } + return size +} + +func (m message) writeTo(wb *writeBuffer) { + wb.writeInt32(m.CRC) + wb.writeInt8(m.MagicByte) + wb.writeInt8(m.Attributes) + if m.MagicByte != 0 { + wb.writeInt64(m.Timestamp) + } + wb.writeBytes(m.Key) + wb.writeBytes(m.Value) +} + +type messageSetItem struct { + Offset int64 + MessageSize int32 + Message message +} + +func (m messageSetItem) size() int32 { + return 8 + 4 + m.Message.size() +} + +func (m messageSetItem) writeTo(wb *writeBuffer) { + wb.writeInt64(m.Offset) + wb.writeInt32(m.MessageSize) + m.Message.writeTo(wb) +} + +type messageSet []messageSetItem + +func (s messageSet)
size() (size int32) { + for _, m := range s { + size += m.size() + } + return +} + +func (s messageSet) writeTo(wb *writeBuffer) { + for _, m := range s { + m.writeTo(wb) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/message_reader.go b/vendor/github.com/segmentio/kafka-go/message_reader.go new file mode 100644 index 000000000..a0a0385ef --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/message_reader.go @@ -0,0 +1,555 @@ +package kafka + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" +) + +type readBytesFunc func(*bufio.Reader, int, int) (int, error) + +// messageSetReader processes the messages encoded into a fetch response. +// The response may contain a mix of Record Batches (newer format) and Messages +// (older format). +type messageSetReader struct { + *readerStack // used for decompressing compressed messages and record batches + empty bool // if true, short-circuits messageSetReader methods + debug bool // enable debug log messages + // How many bytes are expected to remain in the response. + // + // This is used to detect truncation of the response. + lengthRemain int + + decompressed *bytes.Buffer +} + +type readerStack struct { + reader *bufio.Reader + remain int + base int64 + parent *readerStack + count int // how many messages are left in the current message set + header messagesHeader // the current header for a subset of messages within the set. +} + +// messagesHeader describes a set of records. There may be many of these headers +// in a message set. +type messagesHeader struct { + firstOffset int64 + length int32 + crc int32 + magic int8 + // v1 composes attributes specific to v0 and v1 message headers + v1 struct { + attributes int8 + timestamp int64 + } + // v2 composes attributes specific to v2 message headers + v2 struct { + leaderEpoch int32 + attributes int16 + lastOffsetDelta int32 + firstTimestamp int64 + lastTimestamp int64 + producerID int64 + producerEpoch int16 + baseSequence int32 + count int32 + } +} + +func (h messagesHeader) compression() (codec CompressionCodec, err error) { + const compressionCodecMask = 0x07 + var code int8 + switch h.magic { + case 0, 1: + code = h.v1.attributes & compressionCodecMask + case 2: + code = int8(h.v2.attributes & compressionCodecMask) + default: + err = h.badMagic() + return + } + if code != 0 { + codec, err = resolveCodec(code) + } + return +} + +func (h messagesHeader) badMagic() error { + return fmt.Errorf("unsupported magic byte %d in header", h.magic) +} + +func newMessageSetReader(reader *bufio.Reader, remain int) (*messageSetReader, error) { + res := &messageSetReader{ + readerStack: &readerStack{ + reader: reader, + remain: remain, + }, + decompressed: acquireBuffer(), + } + err := res.readHeader() + return res, err +} + +func (r *messageSetReader) remaining() (remain int) { + if r.empty { + return 0 + } + for s := r.readerStack; s != nil; s = s.parent { + remain += s.remain + } + return +} + +func (r *messageSetReader) discard() (err error) { + switch { + case r.empty: + case r.readerStack == nil: + default: + // rewind up to the top-most reader because it's the only one that's doing + // actual i/o. the rest are byte buffers that have been pushed on the stack + // while reading compressed message sets.
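+ // the parent links lead back to the original reader passed to
+ // newMessageSetReader, so discarding at the top of the stack consumes
+ // whatever bytes of the response are still owed on the wire.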
+ for r.parent != nil { + r.readerStack = r.parent + } + err = r.discardN(r.remain) + } + return +} + +func (r *messageSetReader) readMessage(min int64, key readBytesFunc, val readBytesFunc) ( + offset int64, lastOffset int64, timestamp int64, headers []Header, err error) { + + if r.empty { + err = RequestTimedOut + return + } + if err = r.readHeader(); err != nil { + return + } + switch r.header.magic { + case 0, 1: + offset, timestamp, headers, err = r.readMessageV1(min, key, val) + // Set an invalid value so that it can be ignored + lastOffset = -1 + case 2: + offset, lastOffset, timestamp, headers, err = r.readMessageV2(min, key, val) + default: + err = r.header.badMagic() + } + return +} + +func (r *messageSetReader) readMessageV1(min int64, key readBytesFunc, val readBytesFunc) ( + offset int64, timestamp int64, headers []Header, err error) { + + for r.readerStack != nil { + if r.remain == 0 { + r.readerStack = r.parent + continue + } + if err = r.readHeader(); err != nil { + return + } + offset = r.header.firstOffset + timestamp = r.header.v1.timestamp + var codec CompressionCodec + if codec, err = r.header.compression(); err != nil { + return + } + if r.debug { + r.log("Reading with codec=%T", codec) + } + if codec != nil { + // discard next four bytes...will be -1 to indicate null key + if err = r.discardN(4); err != nil { + return + } + + // read and decompress the contained message set. + r.decompressed.Reset() + if err = r.readBytesWith(func(br *bufio.Reader, sz int, n int) (remain int, err error) { + // x4 as a guess that the average compression ratio is near 75% + r.decompressed.Grow(4 * n) + limitReader := io.LimitedReader{R: br, N: int64(n)} + codecReader := codec.NewReader(&limitReader) + _, err = r.decompressed.ReadFrom(codecReader) + remain = sz - (n - int(limitReader.N)) + codecReader.Close() + return + }); err != nil { + return + } + + // the compressed message's offset will be equal to the offset of + // the last message in the set. within the compressed set, the + // offsets will be relative, so we have to scan through them to + // get the base offset. for example, if there are four compressed + // messages at offsets 10-13, then the container message will have + // offset 13 and the contained messages will be 0,1,2,3. the base + // offset for the container, then is 13-3=10. + if offset, err = extractOffset(offset, r.decompressed.Bytes()); err != nil { + return + } + + // mark the outer message as being read + r.markRead() + + // then push the decompressed bytes onto the stack. + r.readerStack = &readerStack{ + // Allocate a buffer of size 0, which gets capped at 16 bytes + // by the bufio package. We are already reading buffered data + // here, no need to reserve another 4KB buffer. + reader: bufio.NewReaderSize(r.decompressed, 0), + remain: r.decompressed.Len(), + base: offset, + parent: r.readerStack, + } + continue + } + + // adjust the offset in case we're reading compressed messages. the + // base will be zero otherwise. + offset += r.base + + // When the messages are compressed kafka may return messages at an + // earlier offset than the one that was requested, it's the client's + // responsibility to ignore those. + // + // At this point, the message header has been read, so discarding + // the rest of the message means we have to discard the key, and then + // the value. Each of those are preceded by a 4-byte length. Discarding + // them is then reading that length variable and then discarding that + // amount. 
+ if offset < min { + // discard the key + if err = r.discardBytes(); err != nil { + return + } + // discard the value + if err = r.discardBytes(); err != nil { + return + } + // since we have fully consumed the message, mark as read + r.markRead() + continue + } + if err = r.readBytesWith(key); err != nil { + return + } + if err = r.readBytesWith(val); err != nil { + return + } + r.markRead() + return + } + err = errShortRead + return +} + +func (r *messageSetReader) readMessageV2(_ int64, key readBytesFunc, val readBytesFunc) ( + offset int64, lastOffset int64, timestamp int64, headers []Header, err error) { + if err = r.readHeader(); err != nil { + return + } + if r.count == int(r.header.v2.count) { // first time reading this set, so check for compression headers. + var codec CompressionCodec + if codec, err = r.header.compression(); err != nil { + return + } + if codec != nil { + // 49 is the size of the v2 record batch header that follows the length field: + // leader epoch (4) + magic (1) + crc (4) + attributes (2) + last offset delta (4) + + // first timestamp (8) + max timestamp (8) + producer ID (8) + producer epoch (2) + + // base sequence (4) + record count (4) = 49. + batchRemain := int(r.header.length - 49) + if batchRemain > r.remain { + err = errShortRead + return + } + if batchRemain < 0 { + err = fmt.Errorf("batch remain < 0 (%d)", batchRemain) + return + } + r.decompressed.Reset() + // x4 as a guess that the average compression ratio is near 75% + r.decompressed.Grow(4 * batchRemain) + limitReader := io.LimitedReader{R: r.reader, N: int64(batchRemain)} + codecReader := codec.NewReader(&limitReader) + _, err = r.decompressed.ReadFrom(codecReader) + codecReader.Close() + if err != nil { + return + } + r.remain -= batchRemain - int(limitReader.N) + r.readerStack = &readerStack{ + reader: bufio.NewReaderSize(r.decompressed, 0), // the new stack reads from the decompressed buffer + remain: r.decompressed.Len(), + base: -1, // base is unused here + parent: r.readerStack, + header: r.header, + count: r.count, + } + // all of the messages in this set are in the decompressed set just pushed onto the reader + // stack.
here we set the parent count to 0 so that when the child set is exhausted, the + // reader will then try to read the header of the next message set + r.readerStack.parent.count = 0 + } + } + remainBefore := r.remain + var length int64 + if err = r.readVarInt(&length); err != nil { + return + } + lengthOfLength := remainBefore - r.remain + var attrs int8 + if err = r.readInt8(&attrs); err != nil { + return + } + var timestampDelta int64 + if err = r.readVarInt(×tampDelta); err != nil { + return + } + timestamp = r.header.v2.firstTimestamp + timestampDelta + var offsetDelta int64 + if err = r.readVarInt(&offsetDelta); err != nil { + return + } + offset = r.header.firstOffset + offsetDelta + if err = r.runFunc(key); err != nil { + return + } + if err = r.runFunc(val); err != nil { + return + } + var headerCount int64 + if err = r.readVarInt(&headerCount); err != nil { + return + } + if headerCount > 0 { + headers = make([]Header, headerCount) + for i := range headers { + if err = r.readMessageHeader(&headers[i]); err != nil { + return + } + } + } + lastOffset = r.header.firstOffset + int64(r.header.v2.lastOffsetDelta) + r.lengthRemain -= int(length) + lengthOfLength + r.markRead() + return +} + +func (r *messageSetReader) discardBytes() (err error) { + r.remain, err = discardBytes(r.reader, r.remain) + return +} + +func (r *messageSetReader) discardN(sz int) (err error) { + r.remain, err = discardN(r.reader, r.remain, sz) + return +} + +func (r *messageSetReader) markRead() { + if r.count == 0 { + panic("markRead: negative count") + } + r.count-- + r.unwindStack() + if r.debug { + r.log("Mark read remain=%d", r.remain) + } +} + +func (r *messageSetReader) unwindStack() { + for r.count == 0 { + if r.remain == 0 { + if r.parent != nil { + if r.debug { + r.log("Popped reader stack") + } + r.readerStack = r.parent + continue + } + } + break + } +} + +func (r *messageSetReader) readMessageHeader(header *Header) (err error) { + var keyLen int64 + if err = r.readVarInt(&keyLen); err != nil { + return + } + if header.Key, err = r.readNewString(int(keyLen)); err != nil { + return + } + var valLen int64 + if err = r.readVarInt(&valLen); err != nil { + return + } + if header.Value, err = r.readNewBytes(int(valLen)); err != nil { + return + } + return nil +} + +func (r *messageSetReader) runFunc(rbFunc readBytesFunc) (err error) { + var length int64 + if err = r.readVarInt(&length); err != nil { + return + } + if r.remain, err = rbFunc(r.reader, r.remain, int(length)); err != nil { + return + } + return +} + +func (r *messageSetReader) readHeader() (err error) { + if r.count > 0 { + // currently reading a set of messages, no need to read a header until they are exhausted. + return + } + r.header = messagesHeader{} + if err = r.readInt64(&r.header.firstOffset); err != nil { + return + } + if err = r.readInt32(&r.header.length); err != nil { + return + } + var crcOrLeaderEpoch int32 + if err = r.readInt32(&crcOrLeaderEpoch); err != nil { + return + } + if err = r.readInt8(&r.header.magic); err != nil { + return + } + switch r.header.magic { + case 0: + r.header.crc = crcOrLeaderEpoch + if err = r.readInt8(&r.header.v1.attributes); err != nil { + return + } + r.count = 1 + // Set arbitrary non-zero length so that we always assume the + // message is truncated since bytes remain. 
+ r.lengthRemain = 1 + if r.debug { + r.log("Read v0 header with offset=%d len=%d magic=%d attributes=%d", r.header.firstOffset, r.header.length, r.header.magic, r.header.v1.attributes) + } + case 1: + r.header.crc = crcOrLeaderEpoch + if err = r.readInt8(&r.header.v1.attributes); err != nil { + return + } + if err = r.readInt64(&r.header.v1.timestamp); err != nil { + return + } + r.count = 1 + // Set arbitrary non-zero length so that we always assume the + // message is truncated since bytes remain. + r.lengthRemain = 1 + if r.debug { + r.log("Read v1 header with remain=%d offset=%d magic=%d and attributes=%d", r.remain, r.header.firstOffset, r.header.magic, r.header.v1.attributes) + } + case 2: + r.header.v2.leaderEpoch = crcOrLeaderEpoch + if err = r.readInt32(&r.header.crc); err != nil { + return + } + if err = r.readInt16(&r.header.v2.attributes); err != nil { + return + } + if err = r.readInt32(&r.header.v2.lastOffsetDelta); err != nil { + return + } + if err = r.readInt64(&r.header.v2.firstTimestamp); err != nil { + return + } + if err = r.readInt64(&r.header.v2.lastTimestamp); err != nil { + return + } + if err = r.readInt64(&r.header.v2.producerID); err != nil { + return + } + if err = r.readInt16(&r.header.v2.producerEpoch); err != nil { + return + } + if err = r.readInt32(&r.header.v2.baseSequence); err != nil { + return + } + if err = r.readInt32(&r.header.v2.count); err != nil { + return + } + r.count = int(r.header.v2.count) + // Subtracts the header bytes from the length + r.lengthRemain = int(r.header.length) - 49 + if r.debug { + r.log("Read v2 header with count=%d offset=%d len=%d magic=%d attributes=%d", r.count, r.header.firstOffset, r.header.length, r.header.magic, r.header.v2.attributes) + } + default: + err = r.header.badMagic() + return + } + return +} + +func (r *messageSetReader) readNewBytes(len int) (res []byte, err error) { + res, r.remain, err = readNewBytes(r.reader, r.remain, len) + return +} + +func (r *messageSetReader) readNewString(len int) (res string, err error) { + res, r.remain, err = readNewString(r.reader, r.remain, len) + return +} + +func (r *messageSetReader) readInt8(val *int8) (err error) { + r.remain, err = readInt8(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readInt16(val *int16) (err error) { + r.remain, err = readInt16(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readInt32(val *int32) (err error) { + r.remain, err = readInt32(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readInt64(val *int64) (err error) { + r.remain, err = readInt64(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readVarInt(val *int64) (err error) { + r.remain, err = readVarInt(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readBytesWith(fn readBytesFunc) (err error) { + r.remain, err = readBytesWith(r.reader, r.remain, fn) + return +} + +func (r *messageSetReader) log(msg string, args ...interface{}) { + log.Printf("[DEBUG] "+msg, args...) 
+} + +func extractOffset(base int64, msgSet []byte) (offset int64, err error) { + r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet) + for remain > 0 { + if remain, err = readInt64(r, remain, &offset); err != nil { + return + } + var sz int32 + if remain, err = readInt32(r, remain, &sz); err != nil { + return + } + if remain, err = discardN(r, remain, int(sz)); err != nil { + return + } + } + offset = base - offset + return +} diff --git a/vendor/github.com/segmentio/kafka-go/metadata.go b/vendor/github.com/segmentio/kafka-go/metadata.go new file mode 100644 index 000000000..429a6a260 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/metadata.go @@ -0,0 +1,287 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + metadataAPI "github.com/segmentio/kafka-go/protocol/metadata" +) + +// MetadataRequest represents a request sent to a kafka broker to retrieve its +// cluster metadata. +type MetadataRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The list of topics to retrieve metadata for. + Topics []string +} + +// MetadataResponse represents a response from a kafka broker to a metadata +// request. +type MetadataResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Name of the kafka cluster that the client retrieved metadata from. + ClusterID string + + // The broker which is currently the controller for the cluster. + Controller Broker + + // The list of brokers registered to the cluster. + Brokers []Broker + + // The list of topics available on the cluster. + Topics []Topic +} + +// Metadata sends a metadata request to a kafka broker and returns the response. +func (c *Client) Metadata(ctx context.Context, req *MetadataRequest) (*MetadataResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &metadataAPI.Request{ + TopicNames: req.Topics, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Metadata: %w", err) + } + + res := m.(*metadataAPI.Response) + ret := &MetadataResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Brokers: make([]Broker, len(res.Brokers)), + Topics: make([]Topic, len(res.Topics)), + ClusterID: res.ClusterID, + } + + brokers := make(map[int32]Broker, len(res.Brokers)) + + for i, b := range res.Brokers { + broker := Broker{ + Host: b.Host, + Port: int(b.Port), + ID: int(b.NodeID), + Rack: b.Rack, + } + + ret.Brokers[i] = broker + brokers[b.NodeID] = broker + + if b.NodeID == res.ControllerID { + ret.Controller = broker + } + } + + for i, t := range res.Topics { + ret.Topics[i] = Topic{ + Name: t.Name, + Internal: t.IsInternal, + Partitions: make([]Partition, len(t.Partitions)), + Error: makeError(t.ErrorCode, ""), + } + + for j, p := range t.Partitions { + partition := Partition{ + Topic: t.Name, + ID: int(p.PartitionIndex), + Leader: brokers[p.LeaderID], + Replicas: make([]Broker, len(p.ReplicaNodes)), + Isr: make([]Broker, len(p.IsrNodes)), + Error: makeError(p.ErrorCode, ""), + } + + for i, id := range p.ReplicaNodes { + partition.Replicas[i] = brokers[id] + } + + for i, id := range p.IsrNodes { + partition.Isr[i] = brokers[id] + } + + ret.Topics[i].Partitions[j] = partition + } + } + + return ret, nil +} + +type topicMetadataRequestV1 []string + +func (r topicMetadataRequestV1) size() int32 { + return sizeofStringArray([]string(r)) +} + +func (r topicMetadataRequestV1) writeTo(wb *writeBuffer) { + // communicate nil-ness to the broker by passing -1 as the array length.
+ // for this particular request, the broker interprets a zero length array + // as a request for no topics whereas a nil array is for all topics. + if r == nil { + wb.writeArrayLen(-1) + } else { + wb.writeStringArray([]string(r)) + } +} + +type metadataResponseV1 struct { + Brokers []brokerMetadataV1 + ControllerID int32 + Topics []topicMetadataV1 +} + +func (r metadataResponseV1) size() int32 { + n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() }) + n2 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) + return 4 + n1 + n2 +} + +func (r metadataResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) }) + wb.writeInt32(r.ControllerID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type brokerMetadataV1 struct { + NodeID int32 + Host string + Port int32 + Rack string +} + +func (b brokerMetadataV1) size() int32 { + return 4 + 4 + sizeofString(b.Host) + sizeofString(b.Rack) +} + +func (b brokerMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt32(b.NodeID) + wb.writeString(b.Host) + wb.writeInt32(b.Port) + wb.writeString(b.Rack) +} + +type topicMetadataV1 struct { + TopicErrorCode int16 + TopicName string + Internal bool + Partitions []partitionMetadataV1 +} + +func (t topicMetadataV1) size() int32 { + return 2 + 1 + + sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t topicMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt16(t.TopicErrorCode) + wb.writeString(t.TopicName) + wb.writeBool(t.Internal) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type partitionMetadataV1 struct { + PartitionErrorCode int16 + PartitionID int32 + Leader int32 + Replicas []int32 + Isr []int32 +} + +func (p partitionMetadataV1) size() int32 { + return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) +} + +func (p partitionMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt16(p.PartitionErrorCode) + wb.writeInt32(p.PartitionID) + wb.writeInt32(p.Leader) + wb.writeInt32Array(p.Replicas) + wb.writeInt32Array(p.Isr) +} + +type topicMetadataRequestV6 struct { + Topics []string + AllowAutoTopicCreation bool +} + +func (r topicMetadataRequestV6) size() int32 { + return sizeofStringArray([]string(r.Topics)) + 1 +} + +func (r topicMetadataRequestV6) writeTo(wb *writeBuffer) { + // communicate nil-ness to the broker by passing -1 as the array length. + // for this particular request, the broker interprets a zero length array + // as a request for no topics whereas a nil array is for all topics.
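+ // for example, a nil slice is written as the int32 length -1 with no
+ // elements, while an empty non-nil slice is written as the int32 length 0.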
+ if r.Topics == nil { + wb.writeArrayLen(-1) + } else { + wb.writeStringArray([]string(r.Topics)) + } + wb.writeBool(r.AllowAutoTopicCreation) +} + +type metadataResponseV6 struct { + ThrottleTimeMs int32 + Brokers []brokerMetadataV1 + ClusterId string + ControllerID int32 + Topics []topicMetadataV6 +} + +func (r metadataResponseV6) size() int32 { + n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() }) + n2 := sizeofNullableString(&r.ClusterId) + n3 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) + return 4 + 4 + n1 + n2 + n3 +} + +func (r metadataResponseV6) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ThrottleTimeMs) + wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) }) + wb.writeString(r.ClusterId) + wb.writeInt32(r.ControllerID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type topicMetadataV6 struct { + TopicErrorCode int16 + TopicName string + Internal bool + Partitions []partitionMetadataV6 +} + +func (t topicMetadataV6) size() int32 { + return 2 + 1 + + sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t topicMetadataV6) writeTo(wb *writeBuffer) { + wb.writeInt16(t.TopicErrorCode) + wb.writeString(t.TopicName) + wb.writeBool(t.Internal) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type partitionMetadataV6 struct { + PartitionErrorCode int16 + PartitionID int32 + Leader int32 + Replicas []int32 + Isr []int32 + OfflineReplicas []int32 +} + +func (p partitionMetadataV6) size() int32 { + return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) + sizeofInt32Array(p.OfflineReplicas) +} + +func (p partitionMetadataV6) writeTo(wb *writeBuffer) { + wb.writeInt16(p.PartitionErrorCode) + wb.writeInt32(p.PartitionID) + wb.writeInt32(p.Leader) + wb.writeInt32Array(p.Replicas) + wb.writeInt32Array(p.Isr) + wb.writeInt32Array(p.OfflineReplicas) +} diff --git a/vendor/github.com/segmentio/kafka-go/offsetcommit.go b/vendor/github.com/segmentio/kafka-go/offsetcommit.go new file mode 100644 index 000000000..260fe4bc9 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/offsetcommit.go @@ -0,0 +1,302 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/offsetcommit" +) + +// OffsetCommit represents the commit of an offset to a partition. +// +// The extra metadata is opaque to the kafka protocol; it is intended to hold +// information like an identifier for the process that committed the offset, +// or the time at which the commit was made. +type OffsetCommit struct { + Partition int + Offset int64 + Metadata string +} + +// OffsetCommitRequest represents a request sent to a kafka broker to commit +// offsets for a partition. +type OffsetCommitRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // ID of the consumer group to publish the offsets for. + GroupID string + + // ID of the consumer group generation. + GenerationID int + + // ID of the group member submitting the offsets. + MemberID string + + // ID of the group instance. + InstanceID string + + // Set of topic partitions to publish the offsets for. + // + // Note that offset commits need to be submitted to the broker acting as the + // group coordinator. This will be automatically resolved by the transport.
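+ //
+ // For example (the topic name is illustrative only):
+ //
+ //	Topics: map[string][]kafka.OffsetCommit{
+ //		"my-topic": {{Partition: 0, Offset: 42}},
+ //	}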
+ Topics map[string][]OffsetCommit +} + +// OffsetCommitResponse represents a response from a kafka broker to an offset +// commit request. +type OffsetCommitResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Set of topic partitions that the kafka broker has accepted offset commits + // for. + Topics map[string][]OffsetCommitPartition +} + +// OffsetCommitPartition represents the state of a single partition in responses +// to committing offsets. +type OffsetCommitPartition struct { + // ID of the partition. + Partition int + + // An error that may have occurred while attempting to publish consumer + // group offsets for this partition. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// OffsetCommit sends an offset commit request to a kafka broker and returns the +// response. +func (c *Client) OffsetCommit(ctx context.Context, req *OffsetCommitRequest) (*OffsetCommitResponse, error) { + now := time.Now().UnixNano() / int64(time.Millisecond) + topics := make([]offsetcommit.RequestTopic, 0, len(req.Topics)) + + for topicName, commits := range req.Topics { + partitions := make([]offsetcommit.RequestPartition, len(commits)) + + for i, c := range commits { + partitions[i] = offsetcommit.RequestPartition{ + PartitionIndex: int32(c.Partition), + CommittedOffset: c.Offset, + CommittedMetadata: c.Metadata, + // This field existed in v1 of the OffsetCommit API; setting it + // to the current timestamp is probably a safe thing to do, but + // it is hard to tell. + CommitTimestamp: now, + } + } + + topics = append(topics, offsetcommit.RequestTopic{ + Name: topicName, + Partitions: partitions, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &offsetcommit.Request{ + GroupID: req.GroupID, + GenerationID: int32(req.GenerationID), + MemberID: req.MemberID, + GroupInstanceID: req.InstanceID, + Topics: topics, + // Hardcoded retention; this field existed between v2 and v4 of the + // OffsetCommit API; we would have to figure out a way to give the + // client control over the API version being used to support configuring + // it in the request object.
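+ // 24 hours expressed in milliseconds: 24 * 60 * 60 * 1000 = 86,400,000.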
+ RetentionTimeMs: int64((24 * time.Hour) / time.Millisecond), + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).OffsetCommit: %w", err) + } + r := m.(*offsetcommit.Response) + + res := &OffsetCommitResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]OffsetCommitPartition, len(r.Topics)), + } + + for _, topic := range r.Topics { + partitions := make([]OffsetCommitPartition, len(topic.Partitions)) + + for i, p := range topic.Partitions { + partitions[i] = OffsetCommitPartition{ + Partition: int(p.PartitionIndex), + Error: makeError(p.ErrorCode, ""), + } + } + + res.Topics[topic.Name] = partitions + } + + return res, nil +} + +type offsetCommitRequestV2Partition struct { + // Partition ID + Partition int32 + + // Offset to be committed + Offset int64 + + // Metadata holds any associated metadata the client wants to keep + Metadata string +} + +func (t offsetCommitRequestV2Partition) size() int32 { + return sizeofInt32(t.Partition) + + sizeofInt64(t.Offset) + + sizeofString(t.Metadata) +} + +func (t offsetCommitRequestV2Partition) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt64(t.Offset) + wb.writeString(t.Metadata) +} + +type offsetCommitRequestV2Topic struct { + // Topic name + Topic string + + // Partitions to commit offsets + Partitions []offsetCommitRequestV2Partition +} + +func (t offsetCommitRequestV2Topic) size() int32 { + return sizeofString(t.Topic) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t offsetCommitRequestV2Topic) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type offsetCommitRequestV2 struct { + // GroupID holds the unique group identifier + GroupID string + + // GenerationID holds the generation of the group. + GenerationID int32 + + // MemberID assigned by the group coordinator + MemberID string + + // RetentionTime holds the time period in ms to retain the offset. 
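+ // A value of -1 asks the broker to apply its default offsets retention.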
+ RetentionTime int64 + + // Topics to commit offsets + Topics []offsetCommitRequestV2Topic +} + +func (t offsetCommitRequestV2) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.GenerationID) + + sizeofString(t.MemberID) + + sizeofInt64(t.RetentionTime) + + sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +} + +func (t offsetCommitRequestV2) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) + wb.writeInt64(t.RetentionTime) + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) +} + +type offsetCommitResponseV2PartitionResponse struct { + Partition int32 + + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t offsetCommitResponseV2PartitionResponse) size() int32 { + return sizeofInt32(t.Partition) + + sizeofInt16(t.ErrorCode) +} + +func (t offsetCommitResponseV2PartitionResponse) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt16(t.ErrorCode) +} + +func (t *offsetCommitResponseV2PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + return +} + +type offsetCommitResponseV2Response struct { + Topic string + PartitionResponses []offsetCommitResponseV2PartitionResponse +} + +func (t offsetCommitResponseV2Response) size() int32 { + return sizeofString(t.Topic) + + sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) +} + +func (t offsetCommitResponseV2Response) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) }) +} + +func (t *offsetCommitResponseV2Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.Topic); err != nil { + return + } + + fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + item := offsetCommitResponseV2PartitionResponse{} + if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { + return + } + t.PartitionResponses = append(t.PartitionResponses, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} + +type offsetCommitResponseV2 struct { + Responses []offsetCommitResponseV2Response +} + +func (t offsetCommitResponseV2) size() int32 { + return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) +} + +func (t offsetCommitResponseV2) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) }) +} + +func (t *offsetCommitResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) { + fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + item := offsetCommitResponseV2Response{} + if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { + return + } + t.Responses = append(t.Responses, item) + return + } + if remain, err = readArrayWith(r, size, fn); err != nil { + return + } + + return +} diff --git a/vendor/github.com/segmentio/kafka-go/offsetdelete.go b/vendor/github.com/segmentio/kafka-go/offsetdelete.go new file mode 100644 index 000000000..ea526eb25 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/offsetdelete.go @@ -0,0 +1,106 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + 
"github.com/segmentio/kafka-go/protocol/offsetdelete" +) + +// OffsetDelete deletes the offset for a consumer group on a particular topic +// for a particular partition. +type OffsetDelete struct { + Topic string + Partition int +} + +// OffsetDeleteRequest represents a request sent to a kafka broker to delete +// the offsets for a partition on a given topic associated with a consumer group. +type OffsetDeleteRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // ID of the consumer group to delete the offsets for. + GroupID string + + // Set of topic partitions to delete offsets for. + Topics map[string][]int +} + +// OffsetDeleteResponse represents a response from a kafka broker to a delete +// offset request. +type OffsetDeleteResponse struct { + // An error that may have occurred while attempting to delete an offset + Error error + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Set of topic partitions that the kafka broker has additional info (error?) + // for. + Topics map[string][]OffsetDeletePartition +} + +// OffsetDeletePartition represents the state of a status of a partition in response +// to deleting offsets. +type OffsetDeletePartition struct { + // ID of the partition. + Partition int + + // An error that may have occurred while attempting to delete an offset for + // this partition. + Error error +} + +// OffsetDelete sends a delete offset request to a kafka broker and returns the +// response. +func (c *Client) OffsetDelete(ctx context.Context, req *OffsetDeleteRequest) (*OffsetDeleteResponse, error) { + topics := make([]offsetdelete.RequestTopic, 0, len(req.Topics)) + + for topicName, partitionIndexes := range req.Topics { + partitions := make([]offsetdelete.RequestPartition, len(partitionIndexes)) + + for i, c := range partitionIndexes { + partitions[i] = offsetdelete.RequestPartition{ + PartitionIndex: int32(c), + } + } + + topics = append(topics, offsetdelete.RequestTopic{ + Name: topicName, + Partitions: partitions, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &offsetdelete.Request{ + GroupID: req.GroupID, + Topics: topics, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).OffsetDelete: %w", err) + } + r := m.(*offsetdelete.Response) + + res := &OffsetDeleteResponse{ + Error: makeError(r.ErrorCode, ""), + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]OffsetDeletePartition, len(r.Topics)), + } + + for _, topic := range r.Topics { + partitions := make([]OffsetDeletePartition, len(topic.Partitions)) + + for i, p := range topic.Partitions { + partitions[i] = OffsetDeletePartition{ + Partition: int(p.PartitionIndex), + Error: makeError(p.ErrorCode, ""), + } + } + + res.Topics[topic.Name] = partitions + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/offsetfetch.go new file mode 100644 index 000000000..b85bc5c83 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/offsetfetch.go @@ -0,0 +1,272 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/offsetfetch" +) + +// OffsetFetchRequest represents a request sent to a kafka broker to read the +// currently committed offsets of topic partitions. +type OffsetFetchRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // ID of the consumer group to retrieve the offsets for. 
+ GroupID string + + // Set of topic partitions to retrieve the offsets for. + Topics map[string][]int +} + +// OffsetFetchResponse represents a response from a kafka broker to an offset +// fetch request. +type OffsetFetchResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Set of topic partitions that the kafka broker has returned offsets for. + Topics map[string][]OffsetFetchPartition + + // An error that may have occurred while attempting to retrieve consumer + // group offsets. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// OffsetFetchPartition represents the state of a single partition in a consumer +// group. +type OffsetFetchPartition struct { + // ID of the partition. + Partition int + + // Last committed offset on the partition when the request was served by + // the kafka broker. + CommittedOffset int64 + + // Consumer group metadata for this partition. + Metadata string + + // An error that may have occurred while attempting to retrieve consumer + // group offsets for this partition. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// OffsetFetch sends an offset fetch request to a kafka broker and returns the +// response. +func (c *Client) OffsetFetch(ctx context.Context, req *OffsetFetchRequest) (*OffsetFetchResponse, error) { + + // Kafka versions 0.10.2.x and above allow a null Topics map for the OffsetFetch API, + // which returns the result for all topics with the desired consumer group: + // https://kafka.apache.org/0102/protocol.html#The_Messages_OffsetFetch + // For Kafka versions below 0.10.2.x this call results in an error + var topics []offsetfetch.RequestTopic + + if len(req.Topics) > 0 { + topics = make([]offsetfetch.RequestTopic, 0, len(req.Topics)) + + for topicName, partitions := range req.Topics { + indexes := make([]int32, len(partitions)) + + for i, p := range partitions { + indexes[i] = int32(p) + } + + topics = append(topics, offsetfetch.RequestTopic{ + Name: topicName, + PartitionIndexes: indexes, + }) + } + } + + m, err := c.roundTrip(ctx, req.Addr, &offsetfetch.Request{ + GroupID: req.GroupID, + Topics: topics, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).OffsetFetch: %w", err) + } + + res := m.(*offsetfetch.Response) + ret := &OffsetFetchResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Topics: make(map[string][]OffsetFetchPartition, len(res.Topics)), + Error: makeError(res.ErrorCode, ""), + } + + for _, t := range res.Topics { + partitions := make([]OffsetFetchPartition, len(t.Partitions)) + + for i, p := range t.Partitions { + partitions[i] = OffsetFetchPartition{ + Partition: int(p.PartitionIndex), + CommittedOffset: p.CommittedOffset, + Metadata: p.Metadata, + Error: makeError(p.ErrorCode, ""), + } + } + + ret.Topics[t.Name] = partitions + } + + return ret, nil +} + +type offsetFetchRequestV1Topic struct { + // Topic name + Topic string + + // Partitions to fetch offsets + Partitions []int32 +} + +func (t offsetFetchRequestV1Topic) size() int32 { + return sizeofString(t.Topic) + + sizeofInt32Array(t.Partitions) +} + +func (t offsetFetchRequestV1Topic) writeTo(wb *writeBuffer) {
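+ // wire format: the topic name as a length-prefixed string, followed by
+ // the partition IDs as an int32 array.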
wb.writeString(t.Topic) + wb.writeInt32Array(t.Partitions) +} + +type offsetFetchRequestV1 struct { + // GroupID holds the unique group identifier + GroupID string + + // Topics to fetch offsets. + Topics []offsetFetchRequestV1Topic +} + +func (t offsetFetchRequestV1) size() int32 { + return sizeofString(t.GroupID) + + sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +} + +func (t offsetFetchRequestV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) +} + +type offsetFetchResponseV1PartitionResponse struct { + // Partition ID + Partition int32 + + // Offset of last committed message + Offset int64 + + // Metadata client wants to keep + Metadata string + + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t offsetFetchResponseV1PartitionResponse) size() int32 { + return sizeofInt32(t.Partition) + + sizeofInt64(t.Offset) + + sizeofString(t.Metadata) + + sizeofInt16(t.ErrorCode) +} + +func (t offsetFetchResponseV1PartitionResponse) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt64(t.Offset) + wb.writeString(t.Metadata) + wb.writeInt16(t.ErrorCode) +} + +func (t *offsetFetchResponseV1PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.Partition); err != nil { + return + } + if remain, err = readInt64(r, remain, &t.Offset); err != nil { + return + } + if remain, err = readString(r, remain, &t.Metadata); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + return +} + +type offsetFetchResponseV1Response struct { + // Topic name + Topic string + + // PartitionResponses holds offsets by partition + PartitionResponses []offsetFetchResponseV1PartitionResponse +} + +func (t offsetFetchResponseV1Response) size() int32 { + return sizeofString(t.Topic) + + sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) +} + +func (t offsetFetchResponseV1Response) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) }) +} + +func (t *offsetFetchResponseV1Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.Topic); err != nil { + return + } + + fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + item := offsetFetchResponseV1PartitionResponse{} + if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { + return + } + t.PartitionResponses = append(t.PartitionResponses, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} + +type offsetFetchResponseV1 struct { + // Responses holds topic partition offsets + Responses []offsetFetchResponseV1Response +} + +func (t offsetFetchResponseV1) size() int32 { + return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) +} + +func (t offsetFetchResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) }) +} + +func (t *offsetFetchResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + item := offsetFetchResponseV1Response{} + if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { + return + } + t.Responses = append(t.Responses, item) + return + } + if
+		return
+	}
+
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/produce.go b/vendor/github.com/segmentio/kafka-go/produce.go
new file mode 100644
index 000000000..72d1ed09b
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/produce.go
@@ -0,0 +1,323 @@
+package kafka
+
+import (
+	"bufio"
+	"context"
+	"encoding"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	produceAPI "github.com/segmentio/kafka-go/protocol/produce"
+)
+
+type RequiredAcks int
+
+const (
+	RequireNone RequiredAcks = 0
+	RequireOne  RequiredAcks = 1
+	RequireAll  RequiredAcks = -1
+)
+
+func (acks RequiredAcks) String() string {
+	switch acks {
+	case RequireNone:
+		return "none"
+	case RequireOne:
+		return "one"
+	case RequireAll:
+		return "all"
+	default:
+		return "unknown"
+	}
+}
+
+func (acks RequiredAcks) MarshalText() ([]byte, error) {
+	return []byte(acks.String()), nil
+}
+
+func (acks *RequiredAcks) UnmarshalText(b []byte) error {
+	switch string(b) {
+	case "none":
+		*acks = RequireNone
+	case "one":
+		*acks = RequireOne
+	case "all":
+		*acks = RequireAll
+	default:
+		x, err := strconv.ParseInt(string(b), 10, 64)
+		parsed := RequiredAcks(x)
+		if err != nil || (parsed != RequireNone && parsed != RequireOne && parsed != RequireAll) {
+			return fmt.Errorf("required acks must be one of none, one, or all, not %q", b)
+		}
+		*acks = parsed
+	}
+	return nil
+}
+
+var (
+	_ encoding.TextMarshaler   = RequiredAcks(0)
+	_ encoding.TextUnmarshaler = (*RequiredAcks)(nil)
+)
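
A short illustrative sketch (not part of the vendored file) of the text round-trip implemented above; note that the numeric wire values 0, 1, and -1 are accepted alongside the names:

```go
package main

import (
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	var acks kafka.RequiredAcks
	for _, s := range []string{"none", "one", "all", "-1", "2"} {
		err := acks.UnmarshalText([]byte(s))
		// "2" is rejected: only none/one/all (0, 1, -1) are valid.
		fmt.Printf("%q -> %s (err: %v)\n", s, acks, err)
	}
}
```
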
+
+// ProduceRequest represents a request sent to a kafka broker to produce records
+// to a topic partition.
+type ProduceRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The topic to produce the records to.
+	Topic string
+
+	// The partition to produce the records to.
+	Partition int
+
+	// The level of required acknowledgements to ask the kafka broker for.
+	RequiredAcks RequiredAcks
+
+	// The message format version used when encoding the records.
+	//
+	// By default, the client automatically determines which version should be
+	// used based on the version of the Produce API supported by the server.
+	MessageVersion int
+
+	// An optional transaction id, used when the produce request is part of
+	// a transaction.
+	TransactionalID string
+
+	// The sequence of records to produce to the topic partition.
+	Records RecordReader
+
+	// An optional compression algorithm to apply to the batch of records sent
+	// to the kafka broker.
+	Compression Compression
+}
+
+// ProduceResponse represents a response from a kafka broker to a produce
+// request.
+type ProduceResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred while attempting to produce the records.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+
+	// Offset of the first record that was written to the topic partition.
+	//
+	// This field will be zero if the kafka broker did not support Produce API
+	// version 3 or above.
+	BaseOffset int64
+
+	// Time at which the broker wrote the records to the topic partition.
+	//
+	// This field will be zero if the kafka broker did not support Produce API
+	// version 2 or above.
+	LogAppendTime time.Time
+
+	// First offset in the topic partition that the records were written to.
+	//
+	// This field will be zero if the kafka broker did not support Produce
+	// API version 5 or above (or if the first offset is zero).
+	LogStartOffset int64
+
+	// If errors occurred writing specific records, they will be reported in
+	// this map.
+	//
+	// This field will always be empty if the kafka broker did not support the
+	// Produce API in version 8 or above.
+	RecordErrors map[int]error
+}
+
+// Produce sends a produce request to a kafka broker and returns the response.
+//
+// If the request contains no records, the underlying protocol.ErrNoRecord is
+// swallowed and an empty, non-nil response is returned with a nil error.
+//
+// When the request is configured with RequiredAcks=none, both the response and
+// the error will be nil on success.
+func (c *Client) Produce(ctx context.Context, req *ProduceRequest) (*ProduceResponse, error) {
+	attributes := protocol.Attributes(req.Compression) & 0x7
+
+	m, err := c.roundTrip(ctx, req.Addr, &produceAPI.Request{
+		TransactionalID: req.TransactionalID,
+		Acks:            int16(req.RequiredAcks),
+		Timeout:         c.timeoutMs(ctx, defaultProduceTimeout),
+		Topics: []produceAPI.RequestTopic{{
+			Topic: req.Topic,
+			Partitions: []produceAPI.RequestPartition{{
+				Partition: int32(req.Partition),
+				RecordSet: protocol.RecordSet{
+					Attributes: attributes,
+					Records:    req.Records,
+				},
+			}},
+		}},
+	})
+
+	switch {
+	case err == nil:
+	case errors.Is(err, protocol.ErrNoRecord):
+		return new(ProduceResponse), nil
+	default:
+		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", err)
+	}
+
+	if req.RequiredAcks == RequireNone {
+		return nil, nil
+	}
+
+	res := m.(*produceAPI.Response)
+	if len(res.Topics) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoTopic)
+	}
+	topic := &res.Topics[0]
+	if len(topic.Partitions) == 0 {
+		return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoPartition)
+	}
+	partition := &topic.Partitions[0]
+
+	ret := &ProduceResponse{
+		Throttle:       makeDuration(res.ThrottleTimeMs),
+		Error:          makeError(partition.ErrorCode, partition.ErrorMessage),
+		BaseOffset:     partition.BaseOffset,
+		LogAppendTime:  makeTime(partition.LogAppendTime),
+		LogStartOffset: partition.LogStartOffset,
+	}
+
+	if len(partition.RecordErrors) != 0 {
+		ret.RecordErrors = make(map[int]error, len(partition.RecordErrors))
+
+		for _, recErr := range partition.RecordErrors {
+			ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage)
+		}
+	}
+
+	return ret, nil
+}
+
+type produceRequestV2 struct {
+	RequiredAcks int16
+	Timeout      int32
+	Topics       []produceRequestTopicV2
+}
+
+func (r produceRequestV2) size() int32 {
+	return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
+}
+
+func (r produceRequestV2) writeTo(wb *writeBuffer) {
+	wb.writeInt16(r.RequiredAcks)
+	wb.writeInt32(r.Timeout)
+	wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
+}
+
+type produceRequestTopicV2 struct {
+	TopicName  string
+	Partitions []produceRequestPartitionV2
+}
+
+func (t produceRequestTopicV2) size() int32 {
+	return sizeofString(t.TopicName) +
+		sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
+}
+
+func (t produceRequestTopicV2) writeTo(wb *writeBuffer) {
+	wb.writeString(t.TopicName)
+	wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
+}
+
+type produceRequestPartitionV2 struct {
+	Partition      int32
+	MessageSetSize int32
+	MessageSet     messageSet
+}
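
A usage sketch for `Client.Produce` (illustrative, not part of the diff). It assumes a hypothetical local broker and topic, and the record helpers `NewRecordReader` and `NewBytes` re-exported by the root package:

```go
package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")} // hypothetical broker

	res, err := client.Produce(context.Background(), &kafka.ProduceRequest{
		Topic:        "events", // hypothetical topic
		Partition:    0,
		RequiredAcks: kafka.RequireAll,
		Records: kafka.NewRecordReader(kafka.Record{
			Key:   kafka.NewBytes([]byte("k")),
			Value: kafka.NewBytes([]byte("v")),
		}),
	})
	if err != nil {
		log.Fatal(err)
	}
	// With RequiredAcks=RequireNone, both res and err would be nil on success.
	log.Printf("first offset: %d, error: %v", res.BaseOffset, res.Error)
}
```

+func (p 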
produceRequestPartitionV2) size() int32 { + return 4 + 4 + p.MessageSet.size() +} + +func (p produceRequestPartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt32(p.MessageSetSize) + p.MessageSet.writeTo(wb) +} + +type produceResponsePartitionV2 struct { + Partition int32 + ErrorCode int16 + Offset int64 + Timestamp int64 +} + +func (p produceResponsePartitionV2) size() int32 { + return 4 + 2 + 8 + 8 +} + +func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Offset) + wb.writeInt64(p.Timestamp) +} + +func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + return +} + +type produceResponsePartitionV7 struct { + Partition int32 + ErrorCode int16 + Offset int64 + Timestamp int64 + StartOffset int64 +} + +func (p produceResponsePartitionV7) size() int32 { + return 4 + 2 + 8 + 8 + 8 +} + +func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Offset) + wb.writeInt64(p.Timestamp) + wb.writeInt64(p.StartOffset) +} + +func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.StartOffset); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol.go b/vendor/github.com/segmentio/kafka-go/protocol.go new file mode 100644 index 000000000..37208abf1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol.go @@ -0,0 +1,214 @@ +package kafka + +import ( + "encoding/binary" + "fmt" + "strconv" +) + +type ApiVersion struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (v ApiVersion) Format(w fmt.State, r rune) { + switch r { + case 's': + fmt.Fprint(w, apiKey(v.ApiKey)) + case 'd': + switch { + case w.Flag('-'): + fmt.Fprint(w, v.MinVersion) + case w.Flag('+'): + fmt.Fprint(w, v.MaxVersion) + default: + fmt.Fprint(w, v.ApiKey) + } + case 'v': + switch { + case w.Flag('-'): + fmt.Fprintf(w, "v%d", v.MinVersion) + case w.Flag('+'): + fmt.Fprintf(w, "v%d", v.MaxVersion) + case w.Flag('#'): + fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion) + default: + fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion) + } + } +} + +type apiKey int16 + +const ( + produce apiKey = 0 + fetch apiKey = 1 + listOffsets apiKey = 2 + metadata apiKey = 3 + leaderAndIsr apiKey = 4 + stopReplica apiKey = 5 + updateMetadata apiKey = 6 + controlledShutdown apiKey = 7 + offsetCommit apiKey = 8 + offsetFetch apiKey = 9 + findCoordinator apiKey = 10 + joinGroup apiKey = 11 + heartbeat apiKey = 12 + leaveGroup apiKey = 13 + syncGroup apiKey = 14 + describeGroups apiKey = 15 + listGroups apiKey = 16 + saslHandshake apiKey = 17 + apiVersions apiKey 
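= 18

A quick illustration (not part of the vendored file) of the `ApiVersion.Format` verbs defined above; the version numbers are hypothetical:

```go
package main

import (
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	v := kafka.ApiVersion{ApiKey: 0, MinVersion: 3, MaxVersion: 8} // Produce, hypothetical range

	fmt.Printf("%s\n", v)  // Produce
	fmt.Printf("%v\n", v)  // Produce[v3:v8]
	fmt.Printf("%-v\n", v) // v3  (minimum supported version)
	fmt.Printf("%+v\n", v) // v8  (maximum supported version)
}
```
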
+	createTopics                apiKey = 19
+	deleteTopics                apiKey = 20
+	deleteRecords               apiKey = 21
+	initProducerId              apiKey = 22
+	offsetForLeaderEpoch        apiKey = 23
+	addPartitionsToTxn          apiKey = 24
+	addOffsetsToTxn             apiKey = 25
+	endTxn                      apiKey = 26
+	writeTxnMarkers             apiKey = 27
+	txnOffsetCommit             apiKey = 28
+	describeAcls                apiKey = 29
+	createAcls                  apiKey = 30
+	deleteAcls                  apiKey = 31
+	describeConfigs             apiKey = 32
+	alterConfigs                apiKey = 33
+	alterReplicaLogDirs         apiKey = 34
+	describeLogDirs             apiKey = 35
+	saslAuthenticate            apiKey = 36
+	createPartitions            apiKey = 37
+	createDelegationToken       apiKey = 38
+	renewDelegationToken        apiKey = 39
+	expireDelegationToken       apiKey = 40
+	describeDelegationToken     apiKey = 41
+	deleteGroups                apiKey = 42
+	electLeaders                apiKey = 43
+	incrementalAlterConfigs     apiKey = 44
+	alterPartitionReassignments apiKey = 45
+	listPartitionReassignments  apiKey = 46
+	offsetDelete                apiKey = 47
+)
+
+func (k apiKey) String() string {
+	if i := int(k); i >= 0 && i < len(apiKeyStrings) {
+		return apiKeyStrings[i]
+	}
+	return strconv.Itoa(int(k))
+}
+
+type apiVersion int16
+
+const (
+	v0  = 0
+	v1  = 1
+	v2  = 2
+	v3  = 3
+	v5  = 5
+	v6  = 6
+	v7  = 7
+	v10 = 10
+
+	// Unused protocol versions: v4, v8, v9.
+)
+
+var apiKeyStrings = [...]string{
+	produce:                     "Produce",
+	fetch:                       "Fetch",
+	listOffsets:                 "ListOffsets",
+	metadata:                    "Metadata",
+	leaderAndIsr:                "LeaderAndIsr",
+	stopReplica:                 "StopReplica",
+	updateMetadata:              "UpdateMetadata",
+	controlledShutdown:          "ControlledShutdown",
+	offsetCommit:                "OffsetCommit",
+	offsetFetch:                 "OffsetFetch",
+	findCoordinator:             "FindCoordinator",
+	joinGroup:                   "JoinGroup",
+	heartbeat:                   "Heartbeat",
+	leaveGroup:                  "LeaveGroup",
+	syncGroup:                   "SyncGroup",
+	describeGroups:              "DescribeGroups",
+	listGroups:                  "ListGroups",
+	saslHandshake:               "SaslHandshake",
+	apiVersions:                 "ApiVersions",
+	createTopics:                "CreateTopics",
+	deleteTopics:                "DeleteTopics",
+	deleteRecords:               "DeleteRecords",
+	initProducerId:              "InitProducerId",
+	offsetForLeaderEpoch:        "OffsetForLeaderEpoch",
+	addPartitionsToTxn:          "AddPartitionsToTxn",
+	addOffsetsToTxn:             "AddOffsetsToTxn",
+	endTxn:                      "EndTxn",
+	writeTxnMarkers:             "WriteTxnMarkers",
+	txnOffsetCommit:             "TxnOffsetCommit",
+	describeAcls:                "DescribeAcls",
+	createAcls:                  "CreateAcls",
+	deleteAcls:                  "DeleteAcls",
+	describeConfigs:             "DescribeConfigs",
+	alterConfigs:                "AlterConfigs",
+	alterReplicaLogDirs:         "AlterReplicaLogDirs",
+	describeLogDirs:             "DescribeLogDirs",
+	saslAuthenticate:            "SaslAuthenticate",
+	createPartitions:            "CreatePartitions",
+	createDelegationToken:       "CreateDelegationToken",
+	renewDelegationToken:        "RenewDelegationToken",
+	expireDelegationToken:       "ExpireDelegationToken",
+	describeDelegationToken:     "DescribeDelegationToken",
+	deleteGroups:                "DeleteGroups",
+	electLeaders:                "ElectLeaders",
+	incrementalAlterConfigs:     "IncrementalAlterConfigs",
+	alterPartitionReassignments: "AlterPartitionReassignments",
+	listPartitionReassignments:  "ListPartitionReassignments",
+	offsetDelete:                "OffsetDelete",
+}
+
+type requestHeader struct {
+	Size          int32
+	ApiKey        int16
+	ApiVersion    int16
+	CorrelationID int32
+	ClientID      string
+}
+
+func (h requestHeader) size() int32 {
+	return 4 + 2 + 2 + 4 + sizeofString(h.ClientID)
+}
+
+func (h requestHeader) writeTo(wb *writeBuffer) {
+	wb.writeInt32(h.Size)
+	wb.writeInt16(h.ApiKey)
+	wb.writeInt16(h.ApiVersion)
+	wb.writeInt32(h.CorrelationID)
+	wb.writeString(h.ClientID)
+}
+
+type request interface {
+	size() int32
+	writable
+}
+
+func makeInt8(b []byte) int8 {
+	return int8(b[0])
+}
+
+func makeInt16(b []byte) int16 {
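
As a worked example of the fixed-width header arithmetic above (illustrative only; the local `sizeofString` here mirrors the 2-byte length prefix used by the writer, and the client id is hypothetical):

```go
package main

import "fmt"

// sizeofString mirrors the kafka wire format: a 2-byte length prefix
// followed by the string bytes.
func sizeofString(s string) int32 { return 2 + int32(len(s)) }

func main() {
	clientID := "example-client" // hypothetical, 14 bytes
	// Size(4) + ApiKey(2) + ApiVersion(2) + CorrelationID(4) + ClientID(2+14)
	size := int32(4+2+2+4) + sizeofString(clientID)
	fmt.Println(size) // 12 + 16 = 28
}
```

+	return 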
int16(binary.BigEndian.Uint16(b)) +} + +func makeInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func makeInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func expectZeroSize(sz int, err error) error { + if err == nil && sz != 0 { + err = fmt.Errorf("reading a response left %d unread bytes", sz) + } + return err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go b/vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go new file mode 100644 index 000000000..390e0db43 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go @@ -0,0 +1,35 @@ +package addoffsetstotxn + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + GroupID string `kafka:"min=v0,max=v3|min=v3,max=v3,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go b/vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go new file mode 100644 index 000000000..b204da350 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go @@ -0,0 +1,62 @@ +package addpartitionstotxn + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + Topics []RequestTopic `kafka:"min=v0,max=v3"` +} + +type RequestTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Partitions []int32 `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Results []ResponseResult `kafka:"min=v0,max=v3"` +} + +type ResponseResult struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
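
For readers new to this protocol package, here is a hypothetical type (illustrative only) spelling out how the `kafka:"..."` struct tags above read; each pipe-separated clause is a version range plus encoding options for that range:

```go
package example

// ExampleRequest is a hypothetical message type, not part of the vendored code.
type ExampleRequest struct {
	// A zero-width marker field: its sole purpose is the `tag` option, which
	// indicates that from v3 on the message is "flexible" and ends with a
	// tagged field buffer (KIP-482).
	_ struct{} `kafka:"min=v3,max=v3,tag"`

	// Encoded in every version v0..v3: as a regular length-prefixed string
	// up to v2, and as a compact (varint length) string in v3.
	TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"`

	// Present in all versions; `nullable` allows a null value on the wire.
	GroupID string `kafka:"min=v0,max=v3,nullable"`
}
```
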
+ _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Results []ResponsePartition `kafka:"min=v0,max=v3"` +} + +type ResponsePartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + PartitionIndex int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alterclientquotas/alterclientquotas.go b/vendor/github.com/segmentio/kafka-go/protocol/alterclientquotas/alterclientquotas.go new file mode 100644 index 000000000..c657d92ac --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alterclientquotas/alterclientquotas.go @@ -0,0 +1,68 @@ +package alterclientquotas + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterClientQuotas +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Entries []Entry `kafka:"min=v0,max=v1"` + ValidateOnly bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Entry struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Entities []Entity `kafka:"min=v0,max=v1"` + Ops []Ops `kafka:"min=v0,max=v1"` +} + +type Entity struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + EntityType string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + EntityName string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"` +} + +type Ops struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Key string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + Value float64 `kafka:"min=v0,max=v1"` + Remove bool `kafka:"min=v0,max=v1"` +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + Results []ResponseQuotas `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas } + +type ResponseQuotas struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v1,max=v1,tag"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` + Entities []Entity `kafka:"min=v0,max=v1"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go b/vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go new file mode 100644 index 000000000..6c7d0d5db --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go @@ -0,0 +1,48 @@ +package alterconfigs + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterConfigs +type Request struct { + Resources []RequestResources `kafka:"min=v0,max=v1"` + ValidateOnly bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterConfigs } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestResources struct { + ResourceType int8 `kafka:"min=v0,max=v1"` + ResourceName string `kafka:"min=v0,max=v1"` + Configs []RequestConfig `kafka:"min=v0,max=v1"` +} + +type RequestConfig struct { + Name string `kafka:"min=v0,max=v1"` + Value string `kafka:"min=v0,max=v1,nullable"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + Responses []ResponseResponses `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterConfigs } + +type ResponseResponses struct { + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` + ResourceType int8 `kafka:"min=v0,max=v1"` + ResourceName string `kafka:"min=v0,max=v1"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go b/vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go new file mode 100644 index 000000000..7f8d2ed2f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go @@ -0,0 +1,61 @@ +package alterpartitionreassignments + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterPartitionReassignments +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + TimeoutMs int32 `kafka:"min=v0,max=v0"` + Topics []RequestTopic `kafka:"min=v0,max=v0"` +} + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []RequestPartition `kafka:"min=v0,max=v0"` +} + +type RequestPartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` + Replicas []int32 `kafka:"min=v0,max=v0,nullable"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.AlterPartitionReassignments +} + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
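
A construction sketch for the reassignment request type above (illustrative only; the topic name and broker ids are hypothetical, and per KIP-455 a null replica list cancels a pending reassignment instead of starting one):

```go
package main

import (
	"fmt"

	"github.com/segmentio/kafka-go/protocol/alterpartitionreassignments"
)

func main() {
	// Move partition 0 of a hypothetical topic onto brokers 1, 2, and 3.
	req := &alterpartitionreassignments.Request{
		TimeoutMs: 30000,
		Topics: []alterpartitionreassignments.RequestTopic{{
			Name: "events", // hypothetical
			Partitions: []alterpartitionreassignments.RequestPartition{{
				PartitionIndex: 0,
				Replicas:       []int32{1, 2, 3},
			}},
		}},
	}
	fmt.Println(req.ApiKey()) // AlterPartitionReassignments
}
```
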
+ _ struct{} `kafka:"min=v0,max=v0,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + Results []ResponseResult `kafka:"min=v0,max=v0"` +} + +type ResponseResult struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []ResponsePartition `kafka:"min=v0,max=v0"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` +} + +func (r *Response) ApiKey() protocol.ApiKey { + return protocol.AlterPartitionReassignments +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alteruserscramcredentials/alteruserscramcredentials.go b/vendor/github.com/segmentio/kafka-go/protocol/alteruserscramcredentials/alteruserscramcredentials.go new file mode 100644 index 000000000..b5369be20 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alteruserscramcredentials/alteruserscramcredentials.go @@ -0,0 +1,66 @@ +package alteruserscramcredentials + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Deletions []RequestUserScramCredentialsDeletion `kafka:"min=v0,max=v0"` + Upsertions []RequestUserScramCredentialsUpsertion `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestUserScramCredentialsDeletion struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Name string `kafka:"min=v0,max=v0,compact"` + Mechanism int8 `kafka:"min=v0,max=v0"` +} + +type RequestUserScramCredentialsUpsertion struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Name string `kafka:"min=v0,max=v0,compact"` + Mechanism int8 `kafka:"min=v0,max=v0"` + Iterations int32 `kafka:"min=v0,max=v0"` + Salt []byte `kafka:"min=v0,max=v0,compact"` + SaltedPassword []byte `kafka:"min=v0,max=v0,compact"` +} + +type Response struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + Results []ResponseUserScramCredentials `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterUserScramCredentials } + +type ResponseUserScramCredentials struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. 
+	_ struct{} `kafka:"min=v0,max=v0,tag"`
+
+	User         string `kafka:"min=v0,max=v0,compact"`
+	ErrorCode    int16  `kafka:"min=v0,max=v0"`
+	ErrorMessage string `kafka:"min=v0,max=v0,nullable"`
+}
+
+var _ protocol.BrokerMessage = (*Request)(nil)
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go b/vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go
new file mode 100644
index 000000000..1c5745582
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go
@@ -0,0 +1,27 @@
+package apiversions
+
+import "github.com/segmentio/kafka-go/protocol"
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	_ struct{} `kafka:"min=v0,max=v2"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.ApiVersions }
+
+type Response struct {
+	ErrorCode      int16            `kafka:"min=v0,max=v2"`
+	ApiKeys        []ApiKeyResponse `kafka:"min=v0,max=v2"`
+	ThrottleTimeMs int32            `kafka:"min=v1,max=v2"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ApiVersions }
+
+type ApiKeyResponse struct {
+	ApiKey     int16 `kafka:"min=v0,max=v2"`
+	MinVersion int16 `kafka:"min=v0,max=v2"`
+	MaxVersion int16 `kafka:"min=v0,max=v2"`
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/buffer.go b/vendor/github.com/segmentio/kafka-go/protocol/buffer.go
new file mode 100644
index 000000000..d45a91dbd
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/buffer.go
@@ -0,0 +1,634 @@
+package protocol
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// Bytes is an interface implemented by types that represent immutable
+// sequences of bytes.
+//
+// Bytes values are used to abstract the location where record keys and
+// values are read from (e.g. in-memory buffers, network sockets, files).
+//
+// The Close method should be called to release resources held by the object
+// when the program is done with it.
+//
+// Bytes values are generally not safe to use concurrently from multiple
+// goroutines.
+type Bytes interface {
+	io.ReadCloser
+	// Returns the number of bytes remaining to be read from the payload.
+	Len() int
+}
+
+// NewBytes constructs a Bytes value from b.
+//
+// The returned value references b, it does not make a copy of the backing
+// array.
+//
+// If b is nil, nil is returned to represent a null BYTES value in the kafka
+// protocol.
+func NewBytes(b []byte) Bytes {
+	if b == nil {
+		return nil
+	}
+	r := new(bytesReader)
+	r.Reset(b)
+	return r
+}
+
+// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the
+// length of b to minimize the memory footprint.
+//
+// The function returns a nil slice if b is nil.
+func ReadAll(b Bytes) ([]byte, error) {
+	if b == nil {
+		return nil, nil
+	}
+	s := make([]byte, b.Len())
+	_, err := io.ReadFull(b, s)
+	return s, err
+}
+
+type bytesReader struct{ bytes.Reader }
+
+func (*bytesReader) Close() error { return nil }
+
+type refCount uintptr
+
+func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }
+
+func (rc *refCount) unref(onZero func()) {
+	if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
+		onZero()
+	}
+}
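
A small usage sketch for the `Bytes` helpers above (illustrative only): `NewBytes` wraps the slice without copying, a nil slice models a null BYTES value, and `ReadAll` sizes its result from `Len` so no growth reallocations occur:

```go
package main

import (
	"fmt"

	"github.com/segmentio/kafka-go/protocol"
)

func main() {
	b := protocol.NewBytes([]byte("hello"))
	defer b.Close()

	fmt.Println(b.Len()) // 5

	data, err := protocol.ReadAll(b)
	fmt.Println(string(data), err) // hello <nil>

	null, err := protocol.ReadAll(nil) // null BYTES maps back to a nil slice
	fmt.Println(null == nil, err)      // true <nil>
}
```

+
+const (
+	// Size of the memory buffer for a single page. We use a fairly
+	// large size here (64 KiB) because batches exchanged with kafka
+	// tend to be multiple kilobytes in size, sometimes hundreds.
+	// Using large pages amortizes the overhead of the page metadata
+	// and algorithms to manage the pages.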
+ pageSize = 65536 +) + +type page struct { + refc refCount + offset int64 + length int + buffer *[pageSize]byte +} + +func newPage(offset int64) *page { + p, _ := pagePool.Get().(*page) + if p != nil { + p.offset = offset + p.length = 0 + p.ref() + } else { + p = &page{ + refc: 1, + offset: offset, + buffer: &[pageSize]byte{}, + } + } + return p +} + +func (p *page) ref() { p.refc.ref() } + +func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) } + +func (p *page) slice(begin, end int64) []byte { + i, j := begin-p.offset, end-p.offset + + if i < 0 { + i = 0 + } else if i > pageSize { + i = pageSize + } + + if j < 0 { + j = 0 + } else if j > pageSize { + j = pageSize + } + + if i < j { + return p.buffer[i:j] + } + + return nil +} + +func (p *page) Cap() int { return pageSize } + +func (p *page) Len() int { return p.length } + +func (p *page) Size() int64 { return int64(p.length) } + +func (p *page) Truncate(n int) { + if n < p.length { + p.length = n + } +} + +func (p *page) ReadAt(b []byte, off int64) (int, error) { + if off -= p.offset; off < 0 || off > pageSize { + panic("offset out of range") + } + if off > int64(p.length) { + return 0, nil + } + return copy(b, p.buffer[off:p.length]), nil +} + +func (p *page) ReadFrom(r io.Reader) (int64, error) { + n, err := io.ReadFull(r, p.buffer[p.length:]) + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + err = nil + } + p.length += n + return int64(n), err +} + +func (p *page) WriteAt(b []byte, off int64) (int, error) { + if off -= p.offset; off < 0 || off > pageSize { + panic("offset out of range") + } + n := copy(p.buffer[off:], b) + if end := int(off) + n; end > p.length { + p.length = end + } + return n, nil +} + +func (p *page) Write(b []byte) (int, error) { + return p.WriteAt(b, p.offset+int64(p.length)) +} + +var ( + _ io.ReaderAt = (*page)(nil) + _ io.ReaderFrom = (*page)(nil) + _ io.Writer = (*page)(nil) + _ io.WriterAt = (*page)(nil) +) + +type pageBuffer struct { + refc refCount + pages contiguousPages + length int + cursor int +} + +func newPageBuffer() *pageBuffer { + b, _ := pageBufferPool.Get().(*pageBuffer) + if b != nil { + b.cursor = 0 + b.refc.ref() + } else { + b = &pageBuffer{ + refc: 1, + pages: make(contiguousPages, 0, 16), + } + } + return b +} + +func (pb *pageBuffer) refTo(ref *pageRef, begin, end int64) { + length := end - begin + + if length > math.MaxUint32 { + panic("reference to contiguous buffer pages exceeds the maximum size of 4 GB") + } + + ref.pages = append(ref.buffer[:0], pb.pages.slice(begin, end)...) 
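
One subtlety worth calling out in the `refCount.unref` helper defined earlier in this file: `atomic.AddUintptr(p, ^uintptr(0))` is the standard way to atomically subtract 1 from an unsigned integer, since all bits set is the two's-complement representation of -1. A standalone sketch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uintptr = 2
	// Adding ^uintptr(0) (all bits set, i.e. -1) decrements atomically;
	// this mirrors the refCount.unref pattern in buffer.go.
	atomic.AddUintptr(&n, ^uintptr(0))
	fmt.Println(n) // 1
	atomic.AddUintptr(&n, ^uintptr(0))
	fmt.Println(n) // 0 -> a refCount would invoke its onZero callback here
}
```
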
+ ref.pages.ref() + ref.offset = begin + ref.length = uint32(length) +} + +func (pb *pageBuffer) ref(begin, end int64) *pageRef { + ref := new(pageRef) + pb.refTo(ref, begin, end) + return ref +} + +func (pb *pageBuffer) unref() { + pb.refc.unref(func() { + pb.pages.unref() + pb.pages.clear() + pb.pages = pb.pages[:0] + pb.length = 0 + pageBufferPool.Put(pb) + }) +} + +func (pb *pageBuffer) newPage() *page { + return newPage(int64(pb.length)) +} + +func (pb *pageBuffer) Close() error { + return nil +} + +func (pb *pageBuffer) Len() int { + return pb.length - pb.cursor +} + +func (pb *pageBuffer) Size() int64 { + return int64(pb.length) +} + +func (pb *pageBuffer) Discard(n int) (int, error) { + remain := pb.length - pb.cursor + if remain < n { + n = remain + } + pb.cursor += n + return n, nil +} + +func (pb *pageBuffer) Truncate(n int) { + if n < pb.length { + pb.length = n + + if n < pb.cursor { + pb.cursor = n + } + + for i := range pb.pages { + if p := pb.pages[i]; p.length <= n { + n -= p.length + } else { + if n > 0 { + pb.pages[i].Truncate(n) + i++ + } + pb.pages[i:].unref() + pb.pages[i:].clear() + pb.pages = pb.pages[:i] + break + } + } + } +} + +func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) { + c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence) + if err != nil { + return -1, err + } + pb.cursor = int(c) + return c, nil +} + +func (pb *pageBuffer) ReadByte() (byte, error) { + b := [1]byte{} + _, err := pb.Read(b[:]) + return b[0], err +} + +func (pb *pageBuffer) Read(b []byte) (int, error) { + if pb.cursor >= pb.length { + return 0, io.EOF + } + n, err := pb.ReadAt(b, int64(pb.cursor)) + pb.cursor += n + return n, err +} + +func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) { + return pb.pages.ReadAt(b, off) +} + +func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) { + if len(pb.pages) == 0 { + pb.pages = append(pb.pages, pb.newPage()) + } + + rn := int64(0) + + for { + tail := pb.pages[len(pb.pages)-1] + free := tail.Cap() - tail.Len() + + if free == 0 { + tail = pb.newPage() + free = pageSize + pb.pages = append(pb.pages, tail) + } + + n, err := tail.ReadFrom(r) + pb.length += int(n) + rn += n + if n < int64(free) { + return rn, err + } + } +} + +func (pb *pageBuffer) WriteString(s string) (int, error) { + return pb.Write([]byte(s)) +} + +func (pb *pageBuffer) Write(b []byte) (int, error) { + wn := len(b) + if wn == 0 { + return 0, nil + } + + if len(pb.pages) == 0 { + pb.pages = append(pb.pages, pb.newPage()) + } + + for len(b) != 0 { + tail := pb.pages[len(pb.pages)-1] + free := tail.Cap() - tail.Len() + + if len(b) <= free { + tail.Write(b) + pb.length += len(b) + break + } + + tail.Write(b[:free]) + b = b[free:] + + pb.length += free + pb.pages = append(pb.pages, pb.newPage()) + } + + return wn, nil +} + +func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) { + n, err := pb.pages.WriteAt(b, off) + if err != nil { + return n, err + } + if n < len(b) { + pb.Write(b[n:]) + } + return len(b), nil +} + +func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) { + var wn int + var err error + pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool { + var n int + n, err = w.Write(b) + wn += n + return err == nil + }) + pb.cursor += wn + return int64(wn), err +} + +var ( + _ io.ReaderAt = (*pageBuffer)(nil) + _ io.ReaderFrom = (*pageBuffer)(nil) + _ io.StringWriter = (*pageBuffer)(nil) + _ io.Writer = (*pageBuffer)(nil) + _ io.WriterAt = (*pageBuffer)(nil) + _ io.WriterTo = (*pageBuffer)(nil) + + 
pagePool sync.Pool + pageBufferPool sync.Pool +) + +type contiguousPages []*page + +func (pages contiguousPages) ref() { + for _, p := range pages { + p.ref() + } +} + +func (pages contiguousPages) unref() { + for _, p := range pages { + p.unref() + } +} + +func (pages contiguousPages) clear() { + for i := range pages { + pages[i] = nil + } +} + +func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) { + rn := 0 + + for _, p := range pages.slice(off, off+int64(len(b))) { + n, _ := p.ReadAt(b, off) + b = b[n:] + rn += n + off += int64(n) + } + + return rn, nil +} + +func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) { + wn := 0 + + for _, p := range pages.slice(off, off+int64(len(b))) { + n, _ := p.WriteAt(b, off) + b = b[n:] + wn += n + off += int64(n) + } + + return wn, nil +} + +func (pages contiguousPages) slice(begin, end int64) contiguousPages { + i := pages.indexOf(begin) + j := pages.indexOf(end) + if j < len(pages) { + j++ + } + return pages[i:j] +} + +func (pages contiguousPages) indexOf(offset int64) int { + if len(pages) == 0 { + return 0 + } + return int((offset - pages[0].offset) / pageSize) +} + +func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) { + for _, p := range pages.slice(begin, end) { + if !f(p.slice(begin, end)) { + break + } + } +} + +var ( + _ io.ReaderAt = contiguousPages{} + _ io.WriterAt = contiguousPages{} +) + +type pageRef struct { + buffer [2]*page + pages contiguousPages + offset int64 + cursor int64 + length uint32 + once uint32 +} + +func (ref *pageRef) unref() { + if atomic.CompareAndSwapUint32(&ref.once, 0, 1) { + ref.pages.unref() + ref.pages.clear() + ref.pages = nil + ref.offset = 0 + ref.cursor = 0 + ref.length = 0 + } +} + +func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) } + +func (ref *pageRef) Size() int64 { return int64(ref.length) } + +func (ref *pageRef) Close() error { ref.unref(); return nil } + +func (ref *pageRef) String() string { + return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length) +} + +func (ref *pageRef) Seek(offset int64, whence int) (int64, error) { + c, err := seek(ref.cursor, int64(ref.length), offset, whence) + if err != nil { + return -1, err + } + ref.cursor = c + return c, nil +} + +func (ref *pageRef) ReadByte() (byte, error) { + var c byte + var ok bool + ref.scan(ref.cursor, func(b []byte) bool { + c, ok = b[0], true + return false + }) + if ok { + ref.cursor++ + } else { + return 0, io.EOF + } + return c, nil +} + +func (ref *pageRef) Read(b []byte) (int, error) { + if ref.cursor >= int64(ref.length) { + return 0, io.EOF + } + n, err := ref.ReadAt(b, ref.cursor) + ref.cursor += int64(n) + return n, err +} + +func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) { + limit := ref.offset + int64(ref.length) + off += ref.offset + + if off >= limit { + return 0, io.EOF + } + + if off+int64(len(b)) > limit { + b = b[:limit-off] + } + + if len(b) == 0 { + return 0, nil + } + + n, err := ref.pages.ReadAt(b, off) + if n == 0 && err == nil { + err = io.EOF + } + return n, err +} + +func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) { + ref.scan(ref.cursor, func(b []byte) bool { + var n int + n, err = w.Write(b) + wn += int64(n) + return err == nil + }) + ref.cursor += wn + return +} + +func (ref *pageRef) scan(off int64, f func([]byte) bool) { + begin := ref.offset + off + end := ref.offset + int64(ref.length) + ref.pages.scan(begin, end, f) +} + +var ( + _ io.Closer = (*pageRef)(nil) + _ io.Seeker 
= (*pageRef)(nil) + _ io.Reader = (*pageRef)(nil) + _ io.ReaderAt = (*pageRef)(nil) + _ io.WriterTo = (*pageRef)(nil) +) + +type pageRefAllocator struct { + refs []pageRef + head int + size int +} + +func (a *pageRefAllocator) newPageRef() *pageRef { + if a.head == len(a.refs) { + a.refs = make([]pageRef, a.size) + a.head = 0 + } + ref := &a.refs[a.head] + a.head++ + return ref +} + +func seek(cursor, limit, offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + // absolute offset + case io.SeekCurrent: + offset = cursor + offset + case io.SeekEnd: + offset = limit - offset + default: + return -1, fmt.Errorf("seek: invalid whence value: %d", whence) + } + if offset < 0 { + offset = 0 + } + if offset > limit { + offset = limit + } + return offset, nil +} + +func closeBytes(b Bytes) { + if b != nil { + b.Close() + } +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/cluster.go b/vendor/github.com/segmentio/kafka-go/protocol/cluster.go new file mode 100644 index 000000000..5dd3455ad --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/cluster.go @@ -0,0 +1,143 @@ +package protocol + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" +) + +type Cluster struct { + ClusterID string + Controller int32 + Brokers map[int32]Broker + Topics map[string]Topic +} + +func (c Cluster) BrokerIDs() []int32 { + brokerIDs := make([]int32, 0, len(c.Brokers)) + for id := range c.Brokers { + brokerIDs = append(brokerIDs, id) + } + sort.Slice(brokerIDs, func(i, j int) bool { + return brokerIDs[i] < brokerIDs[j] + }) + return brokerIDs +} + +func (c Cluster) TopicNames() []string { + topicNames := make([]string, 0, len(c.Topics)) + for name := range c.Topics { + topicNames = append(topicNames, name) + } + sort.Strings(topicNames) + return topicNames +} + +func (c Cluster) IsZero() bool { + return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0 +} + +func (c Cluster) Format(w fmt.State, _ rune) { + tw := new(tabwriter.Writer) + fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n") + + for _, id := range c.BrokerIDs() { + broker := c.Brokers[id] + fmt.Fprintf(tw, " %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller) + } + + tw.Flush() + fmt.Fprintln(w) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " TOPIC\tPARTITIONS\tBROKERS\n") + topicNames := c.TopicNames() + brokers := make(map[int32]struct{}, len(c.Brokers)) + brokerIDs := make([]int32, 0, len(c.Brokers)) + + for _, name := range topicNames { + topic := c.Topics[name] + + for _, p := range topic.Partitions { + for _, id := range p.Replicas { + brokers[id] = struct{}{} + } + } + + for id := range brokers { + brokerIDs = append(brokerIDs, id) + } + + fmt.Fprintf(tw, " %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1)) + + for id := range brokers { + delete(brokers, id) + } + + brokerIDs = brokerIDs[:0] + } + + tw.Flush() + fmt.Fprintln(w) + + if w.Flag('+') { + for _, name := range topicNames { + fmt.Fprintf(w, " TOPIC: %q\n\n", name) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " PARTITION\tREPLICAS\tISR\tOFFLINE\n") + + for _, p := range c.Topics[name].Partitions { + fmt.Fprintf(tw, " %d\t%s\t%s\t%s\n", p.ID, + formatBrokerIDs(p.Replicas, -1), + formatBrokerIDs(p.ISR, p.Leader), + formatBrokerIDs(p.Offline, -1), + ) + } + + tw.Flush() + fmt.Fprintln(w) + } + } +} + +func formatBrokerIDs(brokerIDs 
[]int32, leader int32) string { + if len(brokerIDs) == 0 { + return "" + } + + if len(brokerIDs) == 1 { + return itoa(brokerIDs[0]) + } + + sort.Slice(brokerIDs, func(i, j int) bool { + id1 := brokerIDs[i] + id2 := brokerIDs[j] + + if id1 == leader { + return true + } + + if id2 == leader { + return false + } + + return id1 < id2 + }) + + brokerNames := make([]string, len(brokerIDs)) + + for i, id := range brokerIDs { + brokerNames[i] = itoa(id) + } + + return strings.Join(brokerNames, ",") +} + +var ( + _ fmt.Formatter = Cluster{} +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/conn.go b/vendor/github.com/segmentio/kafka-go/protocol/conn.go new file mode 100644 index 000000000..d08a577f6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/conn.go @@ -0,0 +1,100 @@ +package protocol + +import ( + "bufio" + "fmt" + "net" + "sync/atomic" + "time" +) + +type Conn struct { + buffer *bufio.Reader + conn net.Conn + clientID string + idgen int32 + versions atomic.Value // map[ApiKey]int16 +} + +func NewConn(conn net.Conn, clientID string) *Conn { + return &Conn{ + buffer: bufio.NewReader(conn), + conn: conn, + clientID: clientID, + } +} + +func (c *Conn) String() string { + return fmt.Sprintf("kafka://%s@%s->%s", c.clientID, c.LocalAddr(), c.RemoteAddr()) +} + +func (c *Conn) Close() error { + return c.conn.Close() +} + +func (c *Conn) Discard(n int) (int, error) { + return c.buffer.Discard(n) +} + +func (c *Conn) Peek(n int) ([]byte, error) { + return c.buffer.Peek(n) +} + +func (c *Conn) Read(b []byte) (int, error) { + return c.buffer.Read(b) +} + +func (c *Conn) Write(b []byte) (int, error) { + return c.conn.Write(b) +} + +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *Conn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *Conn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +func (c *Conn) SetVersions(versions map[ApiKey]int16) { + connVersions := make(map[ApiKey]int16, len(versions)) + + for k, v := range versions { + connVersions[k] = v + } + + c.versions.Store(connVersions) +} + +func (c *Conn) RoundTrip(msg Message) (Message, error) { + correlationID := atomic.AddInt32(&c.idgen, +1) + versions, _ := c.versions.Load().(map[ApiKey]int16) + apiVersion := versions[msg.ApiKey()] + + if p, _ := msg.(PreparedMessage); p != nil { + p.Prepare(apiVersion) + } + + if raw, ok := msg.(RawExchanger); ok && raw.Required(versions) { + return raw.RawExchange(c) + } + + return RoundTrip(c, apiVersion, correlationID, c.clientID, msg) +} + +var ( + _ net.Conn = (*Conn)(nil) + _ bufferedReader = (*Conn)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go b/vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go new file mode 100644 index 000000000..ab643105d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go @@ -0,0 +1,21 @@ +package consumer + +const MaxVersionSupported = 1 + +type Subscription struct { + Version int16 `kafka:"min=v0,max=v1"` + Topics []string `kafka:"min=v0,max=v1"` + UserData []byte `kafka:"min=v0,max=v1,nullable"` + OwnedPartitions []TopicPartition `kafka:"min=v1,max=v1"` +} + +type Assignment struct { + Version int16 `kafka:"min=v0,max=v1"` + AssignedPartitions []TopicPartition `kafka:"min=v0,max=v1"` + UserData []byte 
`kafka:"min=v0,max=v1,nullable"` +} + +type TopicPartition struct { + Topic string `kafka:"min=v0,max=v1"` + Partitions []int32 `kafka:"min=v0,max=v1"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go b/vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go new file mode 100644 index 000000000..aad0cc07c --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go @@ -0,0 +1,57 @@ +package createacls + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + Creations []RequestACLs `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateAcls } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestACLs struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + ResourcePatternType int8 `kafka:"min=v1,max=v3"` + Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + Operation int8 `kafka:"min=v0,max=v3"` + PermissionType int8 `kafka:"min=v0,max=v3"` +} + +type Response struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Results []ResponseACLs `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateAcls } + +type ResponseACLs struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v3"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go b/vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go new file mode 100644 index 000000000..4b86b4408 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go @@ -0,0 +1,46 @@ +package createpartitions + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_CreatePartitions. +// TODO: Support version 2. 
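
A brief sketch of how the low-level `protocol.Conn` shown above can be exercised directly (illustrative; the broker address and client id are hypothetical, and `apiversions` is the protocol package vendored earlier in this diff):

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/segmentio/kafka-go/protocol"
	"github.com/segmentio/kafka-go/protocol/apiversions"
)

func main() {
	nc, err := net.Dial("tcp", "localhost:9092") // hypothetical broker
	if err != nil {
		log.Fatal(err)
	}
	conn := protocol.NewConn(nc, "example-client")
	defer conn.Close()

	// With no version map set via SetVersions, RoundTrip uses version 0.
	res, err := conn.RoundTrip(&apiversions.Request{})
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range res.(*apiversions.Response).ApiKeys {
		fmt.Printf("api %d: v%d..v%d\n", k.ApiKey, k.MinVersion, k.MaxVersion)
	}
}
```
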
+type Request struct { + Topics []RequestTopic `kafka:"min=v0,max=v1"` + TimeoutMs int32 `kafka:"min=v0,max=v1"` + ValidateOnly bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreatePartitions } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v1"` + Count int32 `kafka:"min=v0,max=v1"` + Assignments []RequestAssignment `kafka:"min=v0,max=v1,nullable"` +} + +type RequestAssignment struct { + BrokerIDs []int32 `kafka:"min=v0,max=v1"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + Results []ResponseResult `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreatePartitions } + +type ResponseResult struct { + Name string `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go b/vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go new file mode 100644 index 000000000..62c597fb1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go @@ -0,0 +1,74 @@ +package createtopics + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v5+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v5,max=v5,tag"` + + Topics []RequestTopic `kafka:"min=v0,max=v5"` + TimeoutMs int32 `kafka:"min=v0,max=v5"` + ValidateOnly bool `kafka:"min=v1,max=v5"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateTopics } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v5"` + NumPartitions int32 `kafka:"min=v0,max=v5"` + ReplicationFactor int16 `kafka:"min=v0,max=v5"` + Assignments []RequestAssignment `kafka:"min=v0,max=v5"` + Configs []RequestConfig `kafka:"min=v0,max=v5"` +} + +type RequestAssignment struct { + PartitionIndex int32 `kafka:"min=v0,max=v5"` + BrokerIDs []int32 `kafka:"min=v0,max=v5"` +} + +type RequestConfig struct { + Name string `kafka:"min=v0,max=v5"` + Value string `kafka:"min=v0,max=v5,nullable"` +} + +type Response struct { + // We need at least one tagged field to indicate that v5+ uses "flexible" + // messages. 
+ _ struct{} `kafka:"min=v5,max=v5,tag"` + + ThrottleTimeMs int32 `kafka:"min=v2,max=v5"` + Topics []ResponseTopic `kafka:"min=v0,max=v5"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateTopics } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v5"` + ErrorCode int16 `kafka:"min=v0,max=v5"` + ErrorMessage string `kafka:"min=v1,max=v5,nullable"` + NumPartitions int32 `kafka:"min=v5,max=v5"` + ReplicationFactor int16 `kafka:"min=v5,max=v5"` + + Configs []ResponseTopicConfig `kafka:"min=v5,max=v5"` +} + +type ResponseTopicConfig struct { + Name string `kafka:"min=v5,max=v5"` + Value string `kafka:"min=v5,max=v5,nullable"` + ReadOnly bool `kafka:"min=v5,max=v5"` + ConfigSource int8 `kafka:"min=v5,max=v5"` + IsSensitive bool `kafka:"min=v5,max=v5"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/decode.go b/vendor/github.com/segmentio/kafka-go/protocol/decode.go new file mode 100644 index 000000000..5bf61ffa4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/decode.go @@ -0,0 +1,537 @@ +package protocol + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "math" + "reflect" + "sync" + "sync/atomic" +) + +type discarder interface { + Discard(int) (int, error) +} + +type decoder struct { + reader io.Reader + remain int + buffer [8]byte + err error + table *crc32.Table + crc32 uint32 +} + +func (d *decoder) Reset(r io.Reader, n int) { + d.reader = r + d.remain = n + d.buffer = [8]byte{} + d.err = nil + d.table = nil + d.crc32 = 0 +} + +func (d *decoder) Read(b []byte) (int, error) { + if d.err != nil { + return 0, d.err + } + if d.remain == 0 { + return 0, io.EOF + } + if len(b) > d.remain { + b = b[:d.remain] + } + n, err := d.reader.Read(b) + if n > 0 && d.table != nil { + d.crc32 = crc32.Update(d.crc32, d.table, b[:n]) + } + d.remain -= n + return n, err +} + +func (d *decoder) ReadByte() (byte, error) { + c := d.readByte() + return c, d.err +} + +func (d *decoder) done() bool { + return d.remain == 0 || d.err != nil +} + +func (d *decoder) setCRC(table *crc32.Table) { + d.table, d.crc32 = table, 0 +} + +func (d *decoder) decodeBool(v value) { + v.setBool(d.readBool()) +} + +func (d *decoder) decodeInt8(v value) { + v.setInt8(d.readInt8()) +} + +func (d *decoder) decodeInt16(v value) { + v.setInt16(d.readInt16()) +} + +func (d *decoder) decodeInt32(v value) { + v.setInt32(d.readInt32()) +} + +func (d *decoder) decodeInt64(v value) { + v.setInt64(d.readInt64()) +} + +func (d *decoder) decodeFloat64(v value) { + v.setFloat64(d.readFloat64()) +} + +func (d *decoder) decodeString(v value) { + v.setString(d.readString()) +} + +func (d *decoder) decodeCompactString(v value) { + v.setString(d.readCompactString()) +} + +func (d *decoder) decodeBytes(v value) { + v.setBytes(d.readBytes()) +} + +func (d *decoder) decodeCompactBytes(v value) { + v.setBytes(d.readCompactBytes()) +} + +func (d *decoder) decodeArray(v value, elemType reflect.Type, decodeElem decodeFunc) { + if n := d.readInt32(); n < 0 { + v.setArray(array{}) + } else { + a := makeArray(elemType, int(n)) + for i := 0; i < int(n) && d.remain > 0; i++ { + decodeElem(d, a.index(i)) + } + v.setArray(a) + } +} + +func (d *decoder) decodeCompactArray(v value, elemType reflect.Type, decodeElem decodeFunc) { + if n := d.readUnsignedVarInt(); n < 1 { + v.setArray(array{}) + } else { + a := makeArray(elemType, int(n-1)) + for i := 0; i < int(n-1) && d.remain > 0; i++ { + decodeElem(d, 
a.index(i)) + } + v.setArray(a) + } +} + +func (d *decoder) discardAll() { + d.discard(d.remain) +} + +func (d *decoder) discard(n int) { + if n > d.remain { + n = d.remain + } + var err error + if r, _ := d.reader.(discarder); r != nil { + n, err = r.Discard(n) + d.remain -= n + } else { + _, err = io.Copy(ioutil.Discard, d) + } + d.setError(err) +} + +func (d *decoder) read(n int) []byte { + b := make([]byte, n) + n, err := io.ReadFull(d, b) + b = b[:n] + d.setError(err) + return b +} + +func (d *decoder) writeTo(w io.Writer, n int) { + limit := d.remain + if n < limit { + d.remain = n + } + c, err := io.Copy(w, d) + if int(c) < n && err == nil { + err = io.ErrUnexpectedEOF + } + d.remain = limit - int(c) + d.setError(err) +} + +func (d *decoder) setError(err error) { + if d.err == nil && err != nil { + d.err = err + d.discardAll() + } +} + +func (d *decoder) readFull(b []byte) bool { + n, err := io.ReadFull(d, b) + d.setError(err) + return n == len(b) +} + +func (d *decoder) readByte() byte { + if d.readFull(d.buffer[:1]) { + return d.buffer[0] + } + return 0 +} + +func (d *decoder) readBool() bool { + return d.readByte() != 0 +} + +func (d *decoder) readInt8() int8 { + if d.readFull(d.buffer[:1]) { + return readInt8(d.buffer[:1]) + } + return 0 +} + +func (d *decoder) readInt16() int16 { + if d.readFull(d.buffer[:2]) { + return readInt16(d.buffer[:2]) + } + return 0 +} + +func (d *decoder) readInt32() int32 { + if d.readFull(d.buffer[:4]) { + return readInt32(d.buffer[:4]) + } + return 0 +} + +func (d *decoder) readInt64() int64 { + if d.readFull(d.buffer[:8]) { + return readInt64(d.buffer[:8]) + } + return 0 +} + +func (d *decoder) readFloat64() float64 { + if d.readFull(d.buffer[:8]) { + return readFloat64(d.buffer[:8]) + } + return 0 +} + +func (d *decoder) readString() string { + if n := d.readInt16(); n < 0 { + return "" + } else { + return bytesToString(d.read(int(n))) + } +} + +func (d *decoder) readVarString() string { + if n := d.readVarInt(); n < 0 { + return "" + } else { + return bytesToString(d.read(int(n))) + } +} + +func (d *decoder) readCompactString() string { + if n := d.readUnsignedVarInt(); n < 1 { + return "" + } else { + return bytesToString(d.read(int(n - 1))) + } +} + +func (d *decoder) readBytes() []byte { + if n := d.readInt32(); n < 0 { + return nil + } else { + return d.read(int(n)) + } +} + +func (d *decoder) readVarBytes() []byte { + if n := d.readVarInt(); n < 0 { + return nil + } else { + return d.read(int(n)) + } +} + +func (d *decoder) readCompactBytes() []byte { + if n := d.readUnsignedVarInt(); n < 1 { + return nil + } else { + return d.read(int(n - 1)) + } +} + +func (d *decoder) readVarInt() int64 { + n := 11 // varints are at most 11 bytes + + if n > d.remain { + n = d.remain + } + + x := uint64(0) + s := uint(0) + + for n > 0 { + b := d.readByte() + + if (b & 0x80) == 0 { + x |= uint64(b) << s + return int64(x>>1) ^ -(int64(x) & 1) + } + + x |= uint64(b&0x7f) << s + s += 7 + n-- + } + + d.setError(fmt.Errorf("cannot decode varint from input stream")) + return 0 +} + +func (d *decoder) readUnsignedVarInt() uint64 { + n := 11 // varints are at most 11 bytes + + if n > d.remain { + n = d.remain + } + + x := uint64(0) + s := uint(0) + + for n > 0 { + b := d.readByte() + + if (b & 0x80) == 0 { + x |= uint64(b) << s + return x + } + + x |= uint64(b&0x7f) << s + s += 7 + n-- + } + + d.setError(fmt.Errorf("cannot decode unsigned varint from input stream")) + return 0 +} + +type decodeFunc func(*decoder, value) + +var ( + _ io.Reader = (*decoder)(nil) + _ 
io.ByteReader = (*decoder)(nil) + + readerFrom = reflect.TypeOf((*io.ReaderFrom)(nil)).Elem() +) + +func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc { + if reflect.PtrTo(typ).Implements(readerFrom) { + return readerDecodeFuncOf(typ) + } + switch typ.Kind() { + case reflect.Bool: + return (*decoder).decodeBool + case reflect.Int8: + return (*decoder).decodeInt8 + case reflect.Int16: + return (*decoder).decodeInt16 + case reflect.Int32: + return (*decoder).decodeInt32 + case reflect.Int64: + return (*decoder).decodeInt64 + case reflect.Float64: + return (*decoder).decodeFloat64 + case reflect.String: + return stringDecodeFuncOf(flexible, tag) + case reflect.Struct: + return structDecodeFuncOf(typ, version, flexible) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return bytesDecodeFuncOf(flexible, tag) + } + return arrayDecodeFuncOf(typ, version, flexible, tag) + default: + panic("unsupported type: " + typ.String()) + } +} + +func stringDecodeFuncOf(flexible bool, tag structTag) decodeFunc { + if flexible { + // In flexible messages, all strings are compact + return (*decoder).decodeCompactString + } + return (*decoder).decodeString +} + +func bytesDecodeFuncOf(flexible bool, tag structTag) decodeFunc { + if flexible { + // In flexible messages, all arrays are compact + return (*decoder).decodeCompactBytes + } + return (*decoder).decodeBytes +} + +func structDecodeFuncOf(typ reflect.Type, version int16, flexible bool) decodeFunc { + type field struct { + decode decodeFunc + index index + tagID int + } + + var fields []field + taggedFields := map[int]*field{} + + forEachStructField(typ, func(typ reflect.Type, index index, tag string) { + forEachStructTag(tag, func(tag structTag) bool { + if tag.MinVersion <= version && version <= tag.MaxVersion { + f := field{ + decode: decodeFuncOf(typ, version, flexible, tag), + index: index, + tagID: tag.TagID, + } + + if tag.TagID < -1 { + // Normal required field + fields = append(fields, f) + } else { + // Optional tagged field (flexible messages only) + taggedFields[tag.TagID] = &f + } + return false + } + return true + }) + }) + + return func(d *decoder, v value) { + for i := range fields { + f := &fields[i] + f.decode(d, v.fieldByIndex(f.index)) + } + + if flexible { + // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details of tag buffers in "flexible" messages. 
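+			// A sketch of the tag-buffer layout parsed by the loop below: an unsigned + // varint gives the number of tagged fields; then each field is encoded as + // an unsigned varint tag ID, an unsigned varint byte size, and the encoded + // field value. Unknown tag IDs are skipped by discarding exactly size + // bytes, which is what the d.read(size) branch below does.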
+ n := int(d.readUnsignedVarInt()) + + for i := 0; i < n; i++ { + tagID := int(d.readUnsignedVarInt()) + size := int(d.readUnsignedVarInt()) + + f, ok := taggedFields[tagID] + if ok { + f.decode(d, v.fieldByIndex(f.index)) + } else { + d.read(size) + } + } + } + } +} + +func arrayDecodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc { + elemType := typ.Elem() + elemFunc := decodeFuncOf(elemType, version, flexible, tag) + if flexible { + // In flexible messages, all arrays are compact + return func(d *decoder, v value) { d.decodeCompactArray(v, elemType, elemFunc) } + } + + return func(d *decoder, v value) { d.decodeArray(v, elemType, elemFunc) } +} + +func readerDecodeFuncOf(typ reflect.Type) decodeFunc { + typ = reflect.PtrTo(typ) + return func(d *decoder, v value) { + if d.err == nil { + _, err := v.iface(typ).(io.ReaderFrom).ReadFrom(d) + if err != nil { + d.setError(err) + } + } + } +} + +func readInt8(b []byte) int8 { + return int8(b[0]) +} + +func readInt16(b []byte) int16 { + return int16(binary.BigEndian.Uint16(b)) +} + +func readInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func readInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func readFloat64(b []byte) float64 { + return math.Float64frombits(binary.BigEndian.Uint64(b)) +} + +func Unmarshal(data []byte, version int16, value interface{}) error { + typ := elemTypeOf(value) + cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc) + key := versionedType{typ: typ, version: version} + decode := cache[key] + + if decode == nil { + decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{ + MinVersion: -1, + MaxVersion: -1, + TagID: -2, + Compact: true, + Nullable: true, + }) + + newCache := make(map[versionedType]decodeFunc, len(cache)+1) + newCache[key] = decode + + for typ, fun := range cache { + newCache[typ] = fun + } + + unmarshalers.Store(newCache) + } + + d, _ := decoders.Get().(*decoder) + if d == nil { + d = &decoder{reader: bytes.NewReader(nil)} + } + + d.remain = len(data) + r, _ := d.reader.(*bytes.Reader) + r.Reset(data) + + defer func() { + r.Reset(nil) + d.Reset(r, 0) + decoders.Put(d) + }() + + decode(d, valueOf(value)) + return dontExpectEOF(d.err) +} + +var ( + decoders sync.Pool // *decoder + unmarshalers atomic.Value // map[versionedType]decodeFunc +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/deleteacls/deleteacls.go b/vendor/github.com/segmentio/kafka-go/protocol/deleteacls/deleteacls.go new file mode 100644 index 000000000..7f0f002f3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/deleteacls/deleteacls.go @@ -0,0 +1,74 @@ +package deleteacls + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + Filters []RequestFilter `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteAcls } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestFilter struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. 
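+	// + // The blank field below is zero-sized, so the encoder never writes a value + // for it (structEncodeFuncOf skips fields where typ.Size() == 0); its + // struct tag alone tells the codec which message versions carry a KIP-482 + // tag buffer.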
+ _ struct{} `kafka:"min=v2,max=v3,tag"` + + ResourceTypeFilter int8 `kafka:"min=v0,max=v3"` + ResourceNameFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + ResourcePatternTypeFilter int8 `kafka:"min=v1,max=v3"` + PrincipalFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + HostFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + Operation int8 `kafka:"min=v0,max=v3"` + PermissionType int8 `kafka:"min=v0,max=v3"` +} + +type Response struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + FilterResults []FilterResult `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteAcls } + +type FilterResult struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v3"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + MatchingACLs []MatchingACL `kafka:"min=v0,max=v3"` +} + +type MatchingACL struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v3"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + ResourcePatternType int8 `kafka:"min=v1,max=v3"` + Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + Operation int8 `kafka:"min=v0,max=v3"` + PermissionType int8 `kafka:"min=v0,max=v3"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/deletegroups/deletegroups.go b/vendor/github.com/segmentio/kafka-go/protocol/deletegroups/deletegroups.go new file mode 100644 index 000000000..759dfc2fe --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/deletegroups/deletegroups.go @@ -0,0 +1,45 @@ +package deletegroups + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v2,max=v2,tag"` + + GroupIDs []string `kafka:"min=v0,max=v2"` +} + +func (r *Request) Group() string { + // use first group to determine group coordinator + if len(r.GroupIDs) > 0 { + return r.GroupIDs[0] + } + return "" +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteGroups } + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
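+	// + // Reading the kafka struct tags used throughout these vendored files: min + // and max bound the protocol versions a field applies to, | separates + // alternative encodings for different version ranges, compact selects the + // KIP-482 compact representation, nullable permits null values, and tag + // places the field in the tagged-field buffer.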
+ _ struct{} `kafka:"min=v2,max=v2,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v2"` + Responses []ResponseGroup `kafka:"min=v0,max=v2"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteGroups } + +type ResponseGroup struct { + GroupID string `kafka:"min=v0,max=v2"` + ErrorCode int16 `kafka:"min=v0,max=v2"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go b/vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go new file mode 100644 index 000000000..3af5a0014 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go @@ -0,0 +1,34 @@ +package deletetopics + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + TopicNames []string `kafka:"min=v0,max=v3"` + TimeoutMs int32 `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteTopics } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v3"` + Responses []ResponseTopic `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteTopics } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describeacls/describeacls.go b/vendor/github.com/segmentio/kafka-go/protocol/describeacls/describeacls.go new file mode 100644 index 000000000..93a7d2ed7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describeacls/describeacls.go @@ -0,0 +1,72 @@ +package describeacls + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + Filter ACLFilter `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeAcls } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type ACLFilter struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ResourceTypeFilter int8 `kafka:"min=v0,max=v3"` + ResourceNameFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + ResourcePatternTypeFilter int8 `kafka:"min=v1,max=v3"` + PrincipalFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + HostFilter string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + Operation int8 `kafka:"min=v0,max=v3"` + PermissionType int8 `kafka:"min=v0,max=v3"` +} + +type Response struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. 
+ _ struct{} `kafka:"min=v2,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable|min=v2,max=v3,nullable,compact"` + Resources []Resource `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeAcls } + +type Resource struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + PatternType int8 `kafka:"min=v1,max=v3"` + ACLs []ResponseACL `kafka:"min=v0,max=v3"` +} + +type ResponseACL struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v3,tag"` + + Principal string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + Host string `kafka:"min=v0,max=v1|min=v2,max=v3,compact"` + Operation int8 `kafka:"min=v0,max=v3"` + PermissionType int8 `kafka:"min=v0,max=v3"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describeclientquotas/describeclientquotas.go b/vendor/github.com/segmentio/kafka-go/protocol/describeclientquotas/describeclientquotas.go new file mode 100644 index 000000000..e137776bf --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describeclientquotas/describeclientquotas.go @@ -0,0 +1,68 @@ +package describeclientquotas + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Components []Component `kafka:"min=v0,max=v1"` + Strict bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Component struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + EntityType string `kafka:"min=v0,max=v1"` + MatchType int8 `kafka:"min=v0,max=v1"` + Match string `kafka:"min=v0,max=v1,nullable"` +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"` + Entries []ResponseQuotas `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas } + +type Entity struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + EntityType string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + EntityName string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"` +} + +type Value struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v1,max=v1,tag"` + Key string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + Value float64 `kafka:"min=v0,max=v1"` +} + +type ResponseQuotas struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Entities []Entity `kafka:"min=v0,max=v1"` + Values []Value `kafka:"min=v0,max=v1"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go b/vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go new file mode 100644 index 000000000..09c91841f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go @@ -0,0 +1,129 @@ +package describeconfigs + +import ( + "strconv" + + "github.com/segmentio/kafka-go/protocol" +) + +const ( + resourceTypeBroker int8 = 4 +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeConfigs +type Request struct { + Resources []RequestResource `kafka:"min=v0,max=v3"` + IncludeSynonyms bool `kafka:"min=v1,max=v3"` + IncludeDocumentation bool `kafka:"min=v3,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + // Broker metadata requests must be sent to the associated broker + for _, resource := range r.Resources { + if resource.ResourceType == resourceTypeBroker { + brokerID, err := strconv.Atoi(resource.ResourceName) + if err != nil { + return protocol.Broker{}, err + } + + return cluster.Brokers[int32(brokerID)], nil + } + } + + return cluster.Brokers[cluster.Controller], nil +} + +func (r *Request) Split(cluster protocol.Cluster) ( + []protocol.Message, + protocol.Merger, + error, +) { + messages := []protocol.Message{} + topicsMessage := Request{} + + for _, resource := range r.Resources { + // Split out broker requests to separate brokers + if resource.ResourceType == resourceTypeBroker { + messages = append(messages, &Request{ + Resources: []RequestResource{resource}, + }) + } else { + topicsMessage.Resources = append( + topicsMessage.Resources, resource, + ) + } + } + + if len(topicsMessage.Resources) > 0 { + messages = append(messages, &topicsMessage) + } + + return messages, new(Response), nil +} + +type RequestResource struct { + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v3"` + ConfigNames []string `kafka:"min=v0,max=v3,nullable"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Resources []ResponseResource `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs } + +func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( + protocol.Message, + error, +) { + response := &Response{} + + for _, result := range results { + m, err := protocol.Result(result) + if err != nil { + return nil, err + } + response.Resources = append( + response.Resources, + m.(*Response).Resources..., + ) + } + + return response, nil +} + +type ResponseResource struct { + ErrorCode int16 `kafka:"min=v0,max=v3"` + ErrorMessage string `kafka:"min=v0,max=v3,nullable"` + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v3"` + ConfigEntries []ResponseConfigEntry `kafka:"min=v0,max=v3"` +} + +type ResponseConfigEntry struct { + 
ConfigName string `kafka:"min=v0,max=v3"` + ConfigValue string `kafka:"min=v0,max=v3,nullable"` + ReadOnly bool `kafka:"min=v0,max=v3"` + IsDefault bool `kafka:"min=v0,max=v0"` + ConfigSource int8 `kafka:"min=v1,max=v3"` + IsSensitive bool `kafka:"min=v0,max=v3"` + ConfigSynonyms []ResponseConfigSynonym `kafka:"min=v1,max=v3"` + ConfigType int8 `kafka:"min=v3,max=v3"` + ConfigDocumentation string `kafka:"min=v3,max=v3,nullable"` +} + +type ResponseConfigSynonym struct { + ConfigName string `kafka:"min=v1,max=v3"` + ConfigValue string `kafka:"min=v1,max=v3,nullable"` + ConfigSource int8 `kafka:"min=v1,max=v3"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go b/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go new file mode 100644 index 000000000..a4d12048a --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go @@ -0,0 +1,85 @@ +package describegroups + +import ( + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeGroups +type Request struct { + Groups []string `kafka:"min=v0,max=v4"` + IncludeAuthorizedOperations bool `kafka:"min=v3,max=v4"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeGroups } + +func (r *Request) Group() string { + return r.Groups[0] +} + +func (r *Request) Split(cluster protocol.Cluster) ( + []protocol.Message, + protocol.Merger, + error, +) { + messages := []protocol.Message{} + + // Split requests by group since they'll need to go to different coordinators. + for _, group := range r.Groups { + messages = append( + messages, + &Request{ + Groups: []string{group}, + IncludeAuthorizedOperations: r.IncludeAuthorizedOperations, + }, + ) + } + + return messages, new(Response), nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v4"` + Groups []ResponseGroup `kafka:"min=v0,max=v4"` +} + +type ResponseGroup struct { + ErrorCode int16 `kafka:"min=v0,max=v4"` + GroupID string `kafka:"min=v0,max=v4"` + GroupState string `kafka:"min=v0,max=v4"` + ProtocolType string `kafka:"min=v0,max=v4"` + ProtocolData string `kafka:"min=v0,max=v4"` + Members []ResponseGroupMember `kafka:"min=v0,max=v4"` + AuthorizedOperations int32 `kafka:"min=v3,max=v4"` +} + +type ResponseGroupMember struct { + MemberID string `kafka:"min=v0,max=v4"` + GroupInstanceID string `kafka:"min=v4,max=v4,nullable"` + ClientID string `kafka:"min=v0,max=v4"` + ClientHost string `kafka:"min=v0,max=v4"` + MemberMetadata []byte `kafka:"min=v0,max=v4"` + MemberAssignment []byte `kafka:"min=v0,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeGroups } + +func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( + protocol.Message, + error, +) { + response := &Response{} + + for _, result := range results { + m, err := protocol.Result(result) + if err != nil { + return nil, err + } + response.Groups = append(response.Groups, m.(*Response).Groups...) 
+ } + + return response, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describeuserscramcredentials/describeuserscramcredentials.go b/vendor/github.com/segmentio/kafka-go/protocol/describeuserscramcredentials/describeuserscramcredentials.go new file mode 100644 index 000000000..e923b9a11 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describeuserscramcredentials/describeuserscramcredentials.go @@ -0,0 +1,64 @@ +package describeuserscramcredentials + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Users []RequestUser `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestUser struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Name string `kafka:"min=v0,max=v0,compact"` +} + +type Response struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + Results []ResponseResult `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeUserScramCredentials } + +type ResponseResult struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + User string `kafka:"min=v0,max=v0,compact"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + CredentialInfos []CredentialInfo `kafka:"min=v0,max=v0"` +} + +type CredentialInfo struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. 
+ _ struct{} `kafka:"min=v0,max=v0,tag"` + + Mechanism int8 `kafka:"min=v0,max=v0"` + Iterations int32 `kafka:"min=v0,max=v0"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go b/vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go new file mode 100644 index 000000000..cd36ff5d7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go @@ -0,0 +1,44 @@ +package electleaders + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ElectLeaders +type Request struct { + ElectionType int8 `kafka:"min=v1,max=v1"` + TopicPartitions []RequestTopicPartitions `kafka:"min=v0,max=v1"` + TimeoutMs int32 `kafka:"min=v0,max=v1"` +} + +type RequestTopicPartitions struct { + Topic string `kafka:"min=v0,max=v1"` + PartitionIDs []int32 `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.ElectLeaders } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + ThrottleTime int32 `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v1,max=v1"` + ReplicaElectionResults []ResponseReplicaElectionResult `kafka:"min=v0,max=v1"` +} + +type ResponseReplicaElectionResult struct { + Topic string `kafka:"min=v0,max=v1"` + PartitionResults []ResponsePartitionResult `kafka:"min=v0,max=v1"` +} + +type ResponsePartitionResult struct { + PartitionID int32 `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.ElectLeaders } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/encode.go b/vendor/github.com/segmentio/kafka-go/protocol/encode.go new file mode 100644 index 000000000..bd1633671 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/encode.go @@ -0,0 +1,606 @@ +package protocol + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "math" + "reflect" + "sync" + "sync/atomic" +) + +type encoder struct { + writer io.Writer + err error + table *crc32.Table + crc32 uint32 + buffer [32]byte +} + +type encoderChecksum struct { + reader io.Reader + encoder *encoder +} + +func (e *encoderChecksum) Read(b []byte) (int, error) { + n, err := e.reader.Read(b) + if n > 0 { + e.encoder.update(b[:n]) + } + return n, err +} + +func (e *encoder) Reset(w io.Writer) { + e.writer = w + e.err = nil + e.table = nil + e.crc32 = 0 + e.buffer = [32]byte{} +} + +func (e *encoder) ReadFrom(r io.Reader) (int64, error) { + if e.table != nil { + r = &encoderChecksum{ + reader: r, + encoder: e, + } + } + return io.Copy(e.writer, r) +} + +func (e *encoder) Write(b []byte) (int, error) { + if e.err != nil { + return 0, e.err + } + n, err := e.writer.Write(b) + if n > 0 { + e.update(b[:n]) + } + if err != nil { + e.err = err + } + return n, err +} + +func (e *encoder) WriteByte(b byte) error { + e.buffer[0] = b + _, err := e.Write(e.buffer[:1]) + return err +} + +func (e *encoder) WriteString(s string) (int, error) { + // This implementation is an optimization to avoid the heap allocation that + // would occur when converting the string to a []byte to call crc32.Update. 
+ // + // Strings are rarely long in the kafka protocol, so the use of a 32 byte + // buffer is a good compromise between keeping the encoder value small and + // limiting the number of calls to Write. + // + // We introduced this optimization because memory profiles on the benchmarks + // showed that most heap allocations were caused by this code path. + n := 0 + + for len(s) != 0 { + c := copy(e.buffer[:], s) + w, err := e.Write(e.buffer[:c]) + n += w + if err != nil { + return n, err + } + s = s[c:] + } + + return n, nil +} + +func (e *encoder) setCRC(table *crc32.Table) { + e.table, e.crc32 = table, 0 +} + +func (e *encoder) update(b []byte) { + if e.table != nil { + e.crc32 = crc32.Update(e.crc32, e.table, b) + } +} + +func (e *encoder) encodeBool(v value) { + b := int8(0) + if v.bool() { + b = 1 + } + e.writeInt8(b) +} + +func (e *encoder) encodeInt8(v value) { + e.writeInt8(v.int8()) +} + +func (e *encoder) encodeInt16(v value) { + e.writeInt16(v.int16()) +} + +func (e *encoder) encodeInt32(v value) { + e.writeInt32(v.int32()) +} + +func (e *encoder) encodeInt64(v value) { + e.writeInt64(v.int64()) +} + +func (e *encoder) encodeFloat64(v value) { + e.writeFloat64(v.float64()) +} + +func (e *encoder) encodeString(v value) { + e.writeString(v.string()) +} + +func (e *encoder) encodeCompactString(v value) { + e.writeCompactString(v.string()) +} + +func (e *encoder) encodeNullString(v value) { + e.writeNullString(v.string()) +} + +func (e *encoder) encodeCompactNullString(v value) { + e.writeCompactNullString(v.string()) +} + +func (e *encoder) encodeBytes(v value) { + e.writeBytes(v.bytes()) +} + +func (e *encoder) encodeCompactBytes(v value) { + e.writeCompactBytes(v.bytes()) +} + +func (e *encoder) encodeNullBytes(v value) { + e.writeNullBytes(v.bytes()) +} + +func (e *encoder) encodeCompactNullBytes(v value) { + e.writeCompactNullBytes(v.bytes()) +} + +func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + n := a.length() + e.writeInt32(int32(n)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + n := a.length() + e.writeUnsignedVarInt(uint64(n + 1)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + if a.isNil() { + e.writeInt32(-1) + return + } + + n := a.length() + e.writeInt32(int32(n)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + if a.isNil() { + e.writeUnsignedVarInt(0) + return + } + + n := a.length() + e.writeUnsignedVarInt(uint64(n + 1)) + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) writeInt8(i int8) { + writeInt8(e.buffer[:1], i) + e.Write(e.buffer[:1]) +} + +func (e *encoder) writeInt16(i int16) { + writeInt16(e.buffer[:2], i) + e.Write(e.buffer[:2]) +} + +func (e *encoder) writeInt32(i int32) { + writeInt32(e.buffer[:4], i) + e.Write(e.buffer[:4]) +} + +func (e *encoder) writeInt64(i int64) { + writeInt64(e.buffer[:8], i) + e.Write(e.buffer[:8]) +} + +func (e *encoder) writeFloat64(f float64) { + writeFloat64(e.buffer[:8], f) + e.Write(e.buffer[:8]) +} + +func (e *encoder) writeString(s string) { + e.writeInt16(int16(len(s))) + e.WriteString(s) +} + +func (e *encoder)
writeVarString(s string) { + e.writeVarInt(int64(len(s))) + e.WriteString(s) +} + +func (e *encoder) writeCompactString(s string) { + e.writeUnsignedVarInt(uint64(len(s)) + 1) + e.WriteString(s) +} + +func (e *encoder) writeNullString(s string) { + if s == "" { + e.writeInt16(-1) + } else { + e.writeInt16(int16(len(s))) + e.WriteString(s) + } +} + +func (e *encoder) writeCompactNullString(s string) { + if s == "" { + e.writeUnsignedVarInt(0) + } else { + e.writeUnsignedVarInt(uint64(len(s)) + 1) + e.WriteString(s) + } +} + +func (e *encoder) writeBytes(b []byte) { + e.writeInt32(int32(len(b))) + e.Write(b) +} + +func (e *encoder) writeCompactBytes(b []byte) { + e.writeUnsignedVarInt(uint64(len(b)) + 1) + e.Write(b) +} + +func (e *encoder) writeNullBytes(b []byte) { + if b == nil { + e.writeInt32(-1) + } else { + e.writeInt32(int32(len(b))) + e.Write(b) + } +} + +func (e *encoder) writeVarNullBytes(b []byte) { + if b == nil { + e.writeVarInt(-1) + } else { + e.writeVarInt(int64(len(b))) + e.Write(b) + } +} + +func (e *encoder) writeCompactNullBytes(b []byte) { + if b == nil { + e.writeUnsignedVarInt(0) + } else { + e.writeUnsignedVarInt(uint64(len(b)) + 1) + e.Write(b) + } +} + +func (e *encoder) writeNullBytesFrom(b Bytes) error { + if b == nil { + e.writeInt32(-1) + return nil + } else { + size := int64(b.Len()) + e.writeInt32(int32(size)) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeVarNullBytesFrom(b Bytes) error { + if b == nil { + e.writeVarInt(-1) + return nil + } else { + size := int64(b.Len()) + e.writeVarInt(size) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeVarInt(i int64) { + e.writeUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) +} + +func (e *encoder) writeUnsignedVarInt(i uint64) { + b := e.buffer[:] + n := 0 + + for i >= 0x80 && n < len(b) { + b[n] = byte(i) | 0x80 + i >>= 7 + n++ + } + + if n < len(b) { + b[n] = byte(i) + n++ + } + + e.Write(b[:n]) +} + +type encodeFunc func(*encoder, value) + +var ( + _ io.ReaderFrom = (*encoder)(nil) + _ io.Writer = (*encoder)(nil) + _ io.ByteWriter = (*encoder)(nil) + _ io.StringWriter = (*encoder)(nil) + + writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem() +) + +func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc { + if reflect.PtrTo(typ).Implements(writerTo) { + return writerEncodeFuncOf(typ) + } + switch typ.Kind() { + case reflect.Bool: + return (*encoder).encodeBool + case reflect.Int8: + return (*encoder).encodeInt8 + case reflect.Int16: + return (*encoder).encodeInt16 + case reflect.Int32: + return (*encoder).encodeInt32 + case reflect.Int64: + return (*encoder).encodeInt64 + case reflect.Float64: + return (*encoder).encodeFloat64 + case reflect.String: + return stringEncodeFuncOf(flexible, tag) + case reflect.Struct: + return structEncodeFuncOf(typ, version, flexible) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return bytesEncodeFuncOf(flexible, tag) + } + return arrayEncodeFuncOf(typ, version, flexible, tag) + default: + panic("unsupported type: " + typ.String()) + } +} + +func stringEncodeFuncOf(flexible bool, tag structTag) 
encodeFunc { + switch { + case flexible && tag.Nullable: + // In flexible messages, all strings are compact + return (*encoder).encodeCompactNullString + case flexible: + // In flexible messages, all strings are compact + return (*encoder).encodeCompactString + case tag.Nullable: + return (*encoder).encodeNullString + default: + return (*encoder).encodeString + } +} + +func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc { + switch { + case flexible && tag.Nullable: + // In flexible messages, all arrays are compact + return (*encoder).encodeCompactNullBytes + case flexible: + // In flexible messages, all arrays are compact + return (*encoder).encodeCompactBytes + case tag.Nullable: + return (*encoder).encodeNullBytes + default: + return (*encoder).encodeBytes + } +} + +func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc { + type field struct { + encode encodeFunc + index index + tagID int + } + + var fields []field + var taggedFields []field + + forEachStructField(typ, func(typ reflect.Type, index index, tag string) { + if typ.Size() != 0 { // skip struct{} + forEachStructTag(tag, func(tag structTag) bool { + if tag.MinVersion <= version && version <= tag.MaxVersion { + f := field{ + encode: encodeFuncOf(typ, version, flexible, tag), + index: index, + tagID: tag.TagID, + } + + if tag.TagID < -1 { + // Normal required field + fields = append(fields, f) + } else { + // Optional tagged field (flexible messages only) + taggedFields = append(taggedFields, f) + } + return false + } + return true + }) + } + }) + + return func(e *encoder, v value) { + for i := range fields { + f := &fields[i] + f.encode(e, v.fieldByIndex(f.index)) + } + + if flexible { + // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details of tag buffers in "flexible" messages. + e.writeUnsignedVarInt(uint64(len(taggedFields))) + + for i := range taggedFields { + f := &taggedFields[i] + e.writeUnsignedVarInt(uint64(f.tagID)) + + buf := &bytes.Buffer{} + se := &encoder{writer: buf} + f.encode(se, v.fieldByIndex(f.index)) + e.writeUnsignedVarInt(uint64(buf.Len())) + e.Write(buf.Bytes()) + } + } + } +} + +func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc { + elemType := typ.Elem() + elemFunc := encodeFuncOf(elemType, version, flexible, tag) + switch { + case flexible && tag.Nullable: + // In flexible messages, all arrays are compact + return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) } + case flexible: + // In flexible messages, all arrays are compact + return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) } + case tag.Nullable: + return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) } + default: + return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) } + } +} + +func writerEncodeFuncOf(typ reflect.Type) encodeFunc { + typ = reflect.PtrTo(typ) + return func(e *encoder, v value) { + // Optimization to write directly into the buffer when the encoder + // does not need to compute a crc32 checksum.
+ w := io.Writer(e) + if e.table == nil { + w = e.writer + } + _, err := v.iface(typ).(io.WriterTo).WriteTo(w) + if err != nil { + e.err = err + } + } +} + +func writeInt8(b []byte, i int8) { + b[0] = byte(i) +} + +func writeInt16(b []byte, i int16) { + binary.BigEndian.PutUint16(b, uint16(i)) +} + +func writeInt32(b []byte, i int32) { + binary.BigEndian.PutUint32(b, uint32(i)) +} + +func writeInt64(b []byte, i int64) { + binary.BigEndian.PutUint64(b, uint64(i)) +} + +func writeFloat64(b []byte, f float64) { + binary.BigEndian.PutUint64(b, math.Float64bits(f)) +} + +func Marshal(version int16, value interface{}) ([]byte, error) { + typ := typeOf(value) + cache, _ := marshalers.Load().(map[versionedType]encodeFunc) + key := versionedType{typ: typ, version: version} + encode := cache[key] + + if encode == nil { + encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{ + MinVersion: -1, + MaxVersion: -1, + TagID: -2, + Compact: true, + Nullable: true, + }) + + newCache := make(map[versionedType]encodeFunc, len(cache)+1) + newCache[key] = encode + + for typ, fun := range cache { + newCache[typ] = fun + } + + marshalers.Store(newCache) + } + + e, _ := encoders.Get().(*encoder) + if e == nil { + e = &encoder{writer: new(bytes.Buffer)} + } + + b, _ := e.writer.(*bytes.Buffer) + defer func() { + b.Reset() + e.Reset(b) + encoders.Put(e) + }() + + encode(e, nonAddressableValueOf(value)) + + if e.err != nil { + return nil, e.err + } + + buf := b.Bytes() + out := make([]byte, len(buf)) + copy(out, buf) + return out, nil +} + +type versionedType struct { + typ _type + version int16 +} + +var ( + encoders sync.Pool // *encoder + marshalers atomic.Value // map[versionedType]encodeFunc +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go b/vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go new file mode 100644 index 000000000..c3799cb88 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go @@ -0,0 +1,35 @@ +package endtxn + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + Committed bool `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.EndTxn } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.EndTxn } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/error.go b/vendor/github.com/segmentio/kafka-go/protocol/error.go new file mode 100644 index 000000000..52c5d0833 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/error.go @@ -0,0 +1,91 @@ +package protocol + +import ( + "fmt" +) + +// Error represents client-side protocol errors. 
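+// +// Because Error is a plain string type, the sentinel values declared in this +// file compare by value: errors.Is(err, ErrNoTopic) matches any error that +// wraps the constant, such as the TopicError and TopicPartitionError wrappers +// defined below.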
+type Error string + +func (e Error) Error() string { return string(e) } + +func Errorf(msg string, args ...interface{}) Error { + return Error(fmt.Sprintf(msg, args...)) +} + +const ( + // ErrNoTopic is returned when a request needs to be sent to a specific + // topic, but the client did not find it in the cluster metadata. + ErrNoTopic Error = "topic not found" + + // ErrNoPartition is returned when a request needs to be sent to a specific + // partition, but the client did not find it in the cluster metadata. + ErrNoPartition Error = "topic partition not found" + + // ErrNoLeader is returned when a request needs to be sent to a partition + // leader, but the client could not determine what the leader was at this + // time. + ErrNoLeader Error = "topic partition has no leader" + + // ErrNoRecord is returned when attempting to write a message containing an + // empty record set (which kafka forbids). + // + // We handle this case client-side because kafka will close the connection + // that it received an empty produce request on, causing all concurrent + // requests to be aborted. + ErrNoRecord Error = "record set contains no records" + + // ErrNoReset is returned by ResetRecordReader when the record reader does + // not support being reset. + ErrNoReset Error = "record sequence does not support reset" +) + +type TopicError struct { + Topic string + Err error +} + +func NewTopicError(topic string, err error) *TopicError { + return &TopicError{Topic: topic, Err: err} +} + +func NewErrNoTopic(topic string) *TopicError { + return NewTopicError(topic, ErrNoTopic) +} + +func (e *TopicError) Error() string { + return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic) +} + +func (e *TopicError) Unwrap() error { + return e.Err +} + +type TopicPartitionError struct { + Topic string + Partition int32 + Err error +} + +func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError { + return &TopicPartitionError{ + Topic: topic, + Partition: partition, + Err: err, + } +} + +func NewErrNoPartition(topic string, partition int32) *TopicPartitionError { + return NewTopicPartitionError(topic, partition, ErrNoPartition) +} + +func NewErrNoLeader(topic string, partition int32) *TopicPartitionError { + return NewTopicPartitionError(topic, partition, ErrNoLeader) +} + +func (e *TopicPartitionError) Error() string { + return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition) +} + +func (e *TopicPartitionError) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go b/vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go new file mode 100644 index 000000000..6ce7bae1b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go @@ -0,0 +1,126 @@ +package fetch + +import ( + "fmt" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + ReplicaID int32 `kafka:"min=v0,max=v11"` + MaxWaitTime int32 `kafka:"min=v0,max=v11"` + MinBytes int32 `kafka:"min=v0,max=v11"` + MaxBytes int32 `kafka:"min=v3,max=v11"` + IsolationLevel int8 `kafka:"min=v4,max=v11"` + SessionID int32 `kafka:"min=v7,max=v11"` + SessionEpoch int32 `kafka:"min=v7,max=v11"` + Topics []RequestTopic `kafka:"min=v0,max=v11"` + ForgottenTopics []RequestForgottenTopic `kafka:"min=v7,max=v11"` + RackID string `kafka:"min=v11,max=v11"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Fetch } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + broker := 
protocol.Broker{ID: -1} + + for i := range r.Topics { + t := &r.Topics[i] + + topic, ok := cluster.Topics[t.Topic] + if !ok { + return broker, NewError(protocol.NewErrNoTopic(t.Topic)) + } + + for j := range t.Partitions { + p := &t.Partitions[j] + + partition, ok := topic.Partitions[p.Partition] + if !ok { + return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition)) + } + + if b, ok := cluster.Brokers[partition.Leader]; !ok { + return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition)) + } else if broker.ID < 0 { + broker = b + } else if b.ID != broker.ID { + return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID)) + } + } + } + + return broker, nil +} + +type RequestTopic struct { + Topic string `kafka:"min=v0,max=v11"` + Partitions []RequestPartition `kafka:"min=v0,max=v11"` +} + +type RequestPartition struct { + Partition int32 `kafka:"min=v0,max=v11"` + CurrentLeaderEpoch int32 `kafka:"min=v9,max=v11"` + FetchOffset int64 `kafka:"min=v0,max=v11"` + LogStartOffset int64 `kafka:"min=v5,max=v11"` + PartitionMaxBytes int32 `kafka:"min=v0,max=v11"` +} + +type RequestForgottenTopic struct { + Topic string `kafka:"min=v7,max=v11"` + Partitions []int32 `kafka:"min=v7,max=v11"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v11"` + ErrorCode int16 `kafka:"min=v7,max=v11"` + SessionID int32 `kafka:"min=v7,max=v11"` + Topics []ResponseTopic `kafka:"min=v0,max=v11"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.Fetch } + +type ResponseTopic struct { + Topic string `kafka:"min=v0,max=v11"` + Partitions []ResponsePartition `kafka:"min=v0,max=v11"` +} + +type ResponsePartition struct { + Partition int32 `kafka:"min=v0,max=v11"` + ErrorCode int16 `kafka:"min=v0,max=v11"` + HighWatermark int64 `kafka:"min=v0,max=v11"` + LastStableOffset int64 `kafka:"min=v4,max=v11"` + LogStartOffset int64 `kafka:"min=v5,max=v11"` + AbortedTransactions []ResponseTransaction `kafka:"min=v4,max=v11"` + PreferredReadReplica int32 `kafka:"min=v11,max=v11"` + RecordSet protocol.RecordSet `kafka:"min=v0,max=v11"` +} + +type ResponseTransaction struct { + ProducerID int64 `kafka:"min=v4,max=v11"` + FirstOffset int64 `kafka:"min=v4,max=v11"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) + +type Error struct { + Err error +} + +func NewError(err error) *Error { + return &Error{Err: err} +} + +func (e *Error) Error() string { + return fmt.Sprintf("fetch request error: %v", e.Err) +} + +func (e *Error) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go b/vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go new file mode 100644 index 000000000..0306e206d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go @@ -0,0 +1,25 @@ +package findcoordinator + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + Key string `kafka:"min=v0,max=v2"` + KeyType int8 `kafka:"min=v1,max=v2"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.FindCoordinator } + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` + ErrorCode int16 `kafka:"min=v0,max=v2"` + ErrorMessage string `kafka:"min=v1,max=v2,nullable"` + NodeID int32 `kafka:"min=v0,max=v2"` + Host string `kafka:"min=v0,max=v2"` + Port int32 `kafka:"min=v0,max=v2"` +} + +func (r *Response) ApiKey() 
protocol.ApiKey { return protocol.FindCoordinator } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go b/vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go new file mode 100644 index 000000000..cf4c11185 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go @@ -0,0 +1,36 @@ +package heartbeat + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_Heartbeat +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + GroupID string `kafka:"min=v0,max=v4"` + GenerationID int32 `kafka:"min=v0,max=v4"` + MemberID string `kafka:"min=v0,max=v4"` + GroupInstanceID string `kafka:"min=v3,max=v4,nullable"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.Heartbeat +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v4"` + ThrottleTimeMs int32 `kafka:"min=v1,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { + return protocol.Heartbeat +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go b/vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go new file mode 100644 index 000000000..f4328efc1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go @@ -0,0 +1,79 @@ +package incrementalalterconfigs + +import ( + "errors" + "strconv" + + "github.com/segmentio/kafka-go/protocol" +) + +const ( + resourceTypeBroker int8 = 4 +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_IncrementalAlterConfigs +type Request struct { + Resources []RequestResource `kafka:"min=v0,max=v0"` + ValidateOnly bool `kafka:"min=v0,max=v0"` +} + +type RequestResource struct { + ResourceType int8 `kafka:"min=v0,max=v0"` + ResourceName string `kafka:"min=v0,max=v0"` + Configs []RequestConfig `kafka:"min=v0,max=v0"` +} + +type RequestConfig struct { + Name string `kafka:"min=v0,max=v0"` + ConfigOperation int8 `kafka:"min=v0,max=v0"` + Value string `kafka:"min=v0,max=v0,nullable"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + // Check that at most only one broker is being updated. + // + // TODO: Support updating multiple brokers in a single request. 
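+	// + // Routing rule implemented below: a broker resource must be sent to the + // broker it names (the broker ID is parsed from ResourceName), while every + // other resource type is sent to the cluster controller.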
+ brokers := map[string]struct{}{} + for _, resource := range r.Resources { + if resource.ResourceType == resourceTypeBroker { + brokers[resource.ResourceName] = struct{}{} + } + } + if len(brokers) > 1 { + return protocol.Broker{}, + errors.New("Updating more than one broker in a single request is not supported yet") + } + + for _, resource := range r.Resources { + if resource.ResourceType == resourceTypeBroker { + brokerID, err := strconv.Atoi(resource.ResourceName) + if err != nil { + return protocol.Broker{}, err + } + + return cluster.Brokers[int32(brokerID)], nil + } + } + + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + Responses []ResponseAlterResponse `kafka:"min=v0,max=v0"` +} + +type ResponseAlterResponse struct { + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + ResourceType int8 `kafka:"min=v0,max=v0"` + ResourceName string `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go b/vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go new file mode 100644 index 000000000..17f6ef7ea --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go @@ -0,0 +1,37 @@ +package initproducerid + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v2,max=v4,tag"` + + TransactionalID string `kafka:"min=v0,max=v4,nullable"` + TransactionTimeoutMs int32 `kafka:"min=v0,max=v4"` + ProducerID int64 `kafka:"min=v3,max=v4"` + ProducerEpoch int16 `kafka:"min=v3,max=v4"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.InitProducerId } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v2,max=v4,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v4"` + ErrorCode int16 `kafka:"min=v0,max=v4"` + ProducerID int64 `kafka:"min=v0,max=v4"` + ProducerEpoch int16 `kafka:"min=v0,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.InitProducerId } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go b/vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go new file mode 100644 index 000000000..a0738eed0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go @@ -0,0 +1,67 @@ +package joingroup + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v6,max=v7,tag"` + + GroupID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + SessionTimeoutMS int32 `kafka:"min=v0,max=v7"` + RebalanceTimeoutMS int32 `kafka:"min=v1,max=v7"` + MemberID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + GroupInstanceID string `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,compact,nullable"` + ProtocolType string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + Protocols []RequestProtocol `kafka:"min=v0,max=v7"` +} + +type RequestProtocol struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v6,max=v7,tag"` + + Name string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + Metadata []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.JoinGroup +} + +func (r *Request) Group() string { return r.GroupID } + +var _ protocol.GroupMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v6,max=v7,tag"` + + ThrottleTimeMS int32 `kafka:"min=v2,max=v7"` + ErrorCode int16 `kafka:"min=v0,max=v7"` + GenerationID int32 `kafka:"min=v0,max=v7"` + ProtocolType string `kafka:"min=v7,max=v7,compact,nullable"` + ProtocolName string `kafka:"min=v0,max=v5|min=v6,max=v6,compact|min=v7,max=v7,compact,nullable"` + LeaderID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + MemberID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + Members []ResponseMember `kafka:"min=v0,max=v7"` +} + +type ResponseMember struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v6,max=v7,tag"` + + MemberID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + GroupInstanceID string `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,nullable,compact"` + Metadata []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` +} + +type ResponseMemberMetadata struct{} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.JoinGroup } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go b/vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go new file mode 100644 index 000000000..4dd8773e4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go @@ -0,0 +1,65 @@ +package leavegroup + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + GroupID string `kafka:"min=v0,max=v2|min=v3,max=v4,compact"` + MemberID string `kafka:"min=v0,max=v2"` + Members []RequestMember `kafka:"min=v3,max=v4"` +} + +func (r *Request) Prepare(apiVersion int16) { + if apiVersion < 3 { + if len(r.Members) > 0 { + r.MemberID = r.Members[0].MemberID + } + } +} + +type RequestMember struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v4,max=v4,tag"` + + MemberID string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.LeaveGroup } + +func (r *Request) Group() string { return r.GroupID } + +var ( + _ protocol.GroupMessage = (*Request)(nil) + _ protocol.PreparedMessage = (*Request)(nil) +) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v4"` + ThrottleTimeMS int32 `kafka:"min=v1,max=v4"` + Members []ResponseMember `kafka:"min=v3,max=v4"` +} + +type ResponseMember struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + MemberID string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"` + ErrorCode int16 `kafka:"min=v3,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.LeaveGroup } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go b/vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go new file mode 100644 index 000000000..136458a25 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go @@ -0,0 +1,82 @@ +package listgroups + +import ( + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListGroups +type Request struct { + _ struct{} `kafka:"min=v0,max=v2"` + brokerID int32 +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListGroups } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[r.brokerID], nil +} + +func (r *Request) Split(cluster protocol.Cluster) ( + []protocol.Message, + protocol.Merger, + error, +) { + messages := []protocol.Message{} + + for _, broker := range cluster.Brokers { + messages = append(messages, &Request{brokerID: broker.ID}) + } + + return messages, new(Response), nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` + ErrorCode int16 `kafka:"min=v0,max=v2"` + Groups []ResponseGroup `kafka:"min=v0,max=v2"` +} + +type ResponseGroup struct { + GroupID string `kafka:"min=v0,max=v2"` + ProtocolType string `kafka:"min=v0,max=v2"` + + // Use this to store which broker returned the response + BrokerID int32 `kafka:"-"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListGroups } + +func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( + protocol.Message, + error, +) { + response := &Response{} + + for r, result := range results { + m, err := protocol.Result(result) + if err != nil { + return nil, err + } + brokerResp := m.(*Response) + respGroups := []ResponseGroup{} + + for _, brokerResp := range brokerResp.Groups { + respGroups = append( + respGroups, + ResponseGroup{ + GroupID: brokerResp.GroupID, + ProtocolType: brokerResp.ProtocolType, + BrokerID: requests[r].(*Request).brokerID, + }, + ) + } + + response.Groups = append(response.Groups, respGroups...) 
+ } + + return response, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go b/vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go new file mode 100644 index 000000000..059662d9a --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go @@ -0,0 +1,230 @@ +package listoffsets + +import ( + "sort" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + ReplicaID int32 `kafka:"min=v1,max=v5"` + IsolationLevel int8 `kafka:"min=v2,max=v5"` + Topics []RequestTopic `kafka:"min=v1,max=v5"` +} + +type RequestTopic struct { + Topic string `kafka:"min=v1,max=v5"` + Partitions []RequestPartition `kafka:"min=v1,max=v5"` +} + +type RequestPartition struct { + Partition int32 `kafka:"min=v1,max=v5"` + CurrentLeaderEpoch int32 `kafka:"min=v4,max=v5"` + Timestamp int64 `kafka:"min=v1,max=v5"` + // v0 of the API predates kafka 0.10, and doesn't make much sense to + // use so we chose not to support it. It had this extra field to limit + // the number of offsets returned, which has been removed in v1. + // + // MaxNumOffsets int32 `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListOffsets } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + // Expects r to be a request that was returned by Map, will likely panic + // or produce the wrong result if that's not the case. + partition := r.Topics[0].Partitions[0].Partition + topic := r.Topics[0].Topic + + for _, p := range cluster.Topics[topic].Partitions { + if p.ID == partition { + return cluster.Brokers[p.Leader], nil + } + } + + return protocol.Broker{ID: -1}, nil +} + +func (r *Request) Split(cluster protocol.Cluster) ([]protocol.Message, protocol.Merger, error) { + // Because kafka refuses to answer ListOffsets requests containing multiple + // entries of unique topic/partition pairs, we submit multiple requests on + // the wire and merge their results back. + // + // ListOffsets requests also need to be sent to partition leaders, to keep + // the logic simple we simply split each offset request into a single + // message. This may cause a bit more requests to be sent on the wire but + // it keeps the code sane, we can still optimize the aggregation mechanism + // later if it becomes a problem. + // + // Really the idea here is to shield applications from having to deal with + // the limitation of the kafka server, so they can request any combinations + // of topic/partition/offsets. 
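+	//
+	// For example (illustrative, with a made-up topic name), a request
+	// covering partitions 0 and 1 of one topic is split into two
+	// single-partition requests, and the returned Merger reassembles the
+	// two responses:
+	//
+	//	req := &Request{Topics: []RequestTopic{{
+	//		Topic:      "events",
+	//		Partitions: []RequestPartition{{Partition: 0, Timestamp: -1}, {Partition: 1, Timestamp: -1}},
+	//	}}}
+	//	msgs, merger, _ := req.Split(cluster) // len(msgs) == 2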
+	requests := make([]Request, 0, 2*len(r.Topics))
+
+	for _, t := range r.Topics {
+		for _, p := range t.Partitions {
+			requests = append(requests, Request{
+				ReplicaID:      r.ReplicaID,
+				IsolationLevel: r.IsolationLevel,
+				Topics: []RequestTopic{{
+					Topic: t.Topic,
+					Partitions: []RequestPartition{{
+						Partition:          p.Partition,
+						CurrentLeaderEpoch: p.CurrentLeaderEpoch,
+						Timestamp:          p.Timestamp,
+					}},
+				}},
+			})
+		}
+	}
+
+	messages := make([]protocol.Message, len(requests))
+
+	for i := range requests {
+		messages[i] = &requests[i]
+	}
+
+	return messages, new(Response), nil
+}
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v2,max=v5"`
+	Topics         []ResponseTopic `kafka:"min=v1,max=v5"`
+}
+
+type ResponseTopic struct {
+	Topic      string              `kafka:"min=v1,max=v5"`
+	Partitions []ResponsePartition `kafka:"min=v1,max=v5"`
+}
+
+type ResponsePartition struct {
+	Partition   int32 `kafka:"min=v1,max=v5"`
+	ErrorCode   int16 `kafka:"min=v1,max=v5"`
+	Timestamp   int64 `kafka:"min=v1,max=v5"`
+	Offset      int64 `kafka:"min=v1,max=v5"`
+	LeaderEpoch int32 `kafka:"min=v4,max=v5"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListOffsets }
+
+func (r *Response) Merge(requests []protocol.Message, results []interface{}) (protocol.Message, error) {
+	type topicPartition struct {
+		topic     string
+		partition int32
+	}
+
+	// Kafka doesn't always return the timestamp in the response, for example
+	// when the request sends -2 (for the first offset) it always returns -1,
+	// probably to indicate that the timestamp is unknown. This means that we
+	// can't correlate the requests and responses based on their timestamps,
+	// the primary key is the topic/partition pair.
+	//
+	// To make the API a bit friendlier, we reconstruct an index of topic
+	// partitions to the timestamps that were requested, and override the
+	// timestamp value in the response.
+	timestamps := make([]map[topicPartition]int64, len(requests))
+
+	for i, m := range requests {
+		req := m.(*Request)
+		ts := make(map[topicPartition]int64, len(req.Topics))
+
+		for _, t := range req.Topics {
+			for _, p := range t.Partitions {
+				ts[topicPartition{
+					topic:     t.Topic,
+					partition: p.Partition,
+				}] = p.Timestamp
+			}
+		}
+
+		timestamps[i] = ts
+	}
+
+	topics := make(map[string][]ResponsePartition)
+	errors := 0
+
+	for i, res := range results {
+		m, err := protocol.Result(res)
+		if err != nil {
+			for _, t := range requests[i].(*Request).Topics {
+				partitions := topics[t.Topic]
+
+				for _, p := range t.Partitions {
+					partitions = append(partitions, ResponsePartition{
+						Partition: p.Partition,
+						ErrorCode: -1, // UNKNOWN, can we do better?
+ Timestamp: -1, + Offset: -1, + LeaderEpoch: -1, + }) + } + + topics[t.Topic] = partitions + } + errors++ + continue + } + + response := m.(*Response) + + if r.ThrottleTimeMs < response.ThrottleTimeMs { + r.ThrottleTimeMs = response.ThrottleTimeMs + } + + for _, t := range response.Topics { + for _, p := range t.Partitions { + if timestamp, ok := timestamps[i][topicPartition{ + topic: t.Topic, + partition: p.Partition, + }]; ok { + p.Timestamp = timestamp + } + topics[t.Topic] = append(topics[t.Topic], p) + } + } + + } + + if errors > 0 && errors == len(results) { + _, err := protocol.Result(results[0]) + return nil, err + } + + r.Topics = make([]ResponseTopic, 0, len(topics)) + + for topicName, partitions := range topics { + r.Topics = append(r.Topics, ResponseTopic{ + Topic: topicName, + Partitions: partitions, + }) + } + + sort.Slice(r.Topics, func(i, j int) bool { + return r.Topics[i].Topic < r.Topics[j].Topic + }) + + for _, t := range r.Topics { + sort.Slice(t.Partitions, func(i, j int) bool { + p1 := &t.Partitions[i] + p2 := &t.Partitions[j] + + if p1.Partition != p2.Partition { + return p1.Partition < p2.Partition + } + + return p1.Offset < p2.Offset + }) + } + + return r, nil +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) + _ protocol.Splitter = (*Request)(nil) + _ protocol.Merger = (*Response)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/listpartitionreassignments/listpartitionreassignments.go b/vendor/github.com/segmentio/kafka-go/protocol/listpartitionreassignments/listpartitionreassignments.go new file mode 100644 index 000000000..d26a64101 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/listpartitionreassignments/listpartitionreassignments.go @@ -0,0 +1,70 @@ +package listpartitionreassignments + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListPartitionReassignments. + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + TimeoutMs int32 `kafka:"min=v0,max=v0"` + Topics []RequestTopic `kafka:"min=v0,max=v0,nullable"` +} + +type RequestTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Name string `kafka:"min=v0,max=v0"` + PartitionIndexes []int32 `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.ListPartitionReassignments +} + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + Topics []ResponseTopic `kafka:"min=v0,max=v0"` +} + +type ResponseTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + Name string `kafka:"min=v0,max=v0"` + Partitions []ResponsePartition `kafka:"min=v0,max=v0"` +} + +type ResponsePartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v0,max=v0,tag"` + + PartitionIndex int32 `kafka:"min=v0,max=v0"` + Replicas []int32 `kafka:"min=v0,max=v0"` + AddingReplicas []int32 `kafka:"min=v0,max=v0"` + RemovingReplicas []int32 `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { + return protocol.ListPartitionReassignments +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go b/vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go new file mode 100644 index 000000000..ac2031bda --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go @@ -0,0 +1,52 @@ +package metadata + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + TopicNames []string `kafka:"min=v0,max=v8,nullable"` + AllowAutoTopicCreation bool `kafka:"min=v4,max=v8"` + IncludeClusterAuthorizedOperations bool `kafka:"min=v8,max=v8"` + IncludeTopicAuthorizedOperations bool `kafka:"min=v8,max=v8"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Metadata } + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v3,max=v8"` + Brokers []ResponseBroker `kafka:"min=v0,max=v8"` + ClusterID string `kafka:"min=v2,max=v8,nullable"` + ControllerID int32 `kafka:"min=v1,max=v8"` + Topics []ResponseTopic `kafka:"min=v0,max=v8"` + ClusterAuthorizedOperations int32 `kafka:"min=v8,max=v8"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.Metadata } + +type ResponseBroker struct { + NodeID int32 `kafka:"min=v0,max=v8"` + Host string `kafka:"min=v0,max=v8"` + Port int32 `kafka:"min=v0,max=v8"` + Rack string `kafka:"min=v1,max=v8,nullable"` +} + +type ResponseTopic struct { + ErrorCode int16 `kafka:"min=v0,max=v8"` + Name string `kafka:"min=v0,max=v8"` + IsInternal bool `kafka:"min=v1,max=v8"` + Partitions []ResponsePartition `kafka:"min=v0,max=v8"` + TopicAuthorizedOperations int32 `kafka:"min=v8,max=v8"` +} + +type ResponsePartition struct { + ErrorCode int16 `kafka:"min=v0,max=v8"` + PartitionIndex int32 `kafka:"min=v0,max=v8"` + LeaderID int32 `kafka:"min=v0,max=v8"` + LeaderEpoch int32 `kafka:"min=v7,max=v8"` + ReplicaNodes []int32 `kafka:"min=v0,max=v8"` + IsrNodes []int32 `kafka:"min=v0,max=v8"` + OfflineReplicas []int32 `kafka:"min=v5,max=v8"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go b/vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go new file mode 100644 index 000000000..5844928a6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go @@ -0,0 +1,54 @@ +package offsetcommit + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + GroupID string `kafka:"min=v0,max=v7"` + GenerationID int32 `kafka:"min=v1,max=v7"` + MemberID string `kafka:"min=v1,max=v7"` + RetentionTimeMs int64 `kafka:"min=v2,max=v4"` + GroupInstanceID string `kafka:"min=v7,max=v7,nullable"` + Topics []RequestTopic `kafka:"min=v0,max=v7"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetCommit } + +func (r *Request) Group() string { return r.GroupID } + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v7"` + Partitions []RequestPartition `kafka:"min=v0,max=v7"` +} + +type RequestPartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v7"` + CommittedOffset int64 `kafka:"min=v0,max=v7"` + CommitTimestamp int64 `kafka:"min=v1,max=v1"` + 
CommittedLeaderEpoch int32 `kafka:"min=v5,max=v7"` + CommittedMetadata string `kafka:"min=v0,max=v7,nullable"` +} + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v3,max=v7"` + Topics []ResponseTopic `kafka:"min=v0,max=v7"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetCommit } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v7"` + Partitions []ResponsePartition `kafka:"min=v0,max=v7"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v7"` + ErrorCode int16 `kafka:"min=v0,max=v7"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go b/vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go new file mode 100644 index 000000000..bda619f3c --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go @@ -0,0 +1,47 @@ +package offsetdelete + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + GroupID string `kafka:"min=v0,max=v0"` + Topics []RequestTopic `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetDelete } + +func (r *Request) Group() string { return r.GroupID } + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []RequestPartition `kafka:"min=v0,max=v0"` +} + +type RequestPartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` +} + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v0"` + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + Topics []ResponseTopic `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetDelete } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []ResponsePartition `kafka:"min=v0,max=v0"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go new file mode 100644 index 000000000..8f1096f5d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go @@ -0,0 +1,46 @@ +package offsetfetch + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + GroupID string `kafka:"min=v0,max=v5"` + Topics []RequestTopic `kafka:"min=v0,max=v5,nullable"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetFetch } + +func (r *Request) Group() string { return r.GroupID } + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v5"` + PartitionIndexes []int32 `kafka:"min=v0,max=v5"` +} + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v3,max=v5"` + Topics []ResponseTopic `kafka:"min=v0,max=v5"` + ErrorCode int16 `kafka:"min=v2,max=v5"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetFetch } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v5"` + Partitions []ResponsePartition `kafka:"min=v0,max=v5"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v5"` + CommittedOffset int64 `kafka:"min=v0,max=v5"` + ComittedLeaderEpoch int32 
`kafka:"min=v5,max=v5"` + Metadata string `kafka:"min=v0,max=v5,nullable"` + ErrorCode int16 `kafka:"min=v0,max=v5"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go b/vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go new file mode 100644 index 000000000..6d337c3cf --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go @@ -0,0 +1,147 @@ +package produce + +import ( + "fmt" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + TransactionalID string `kafka:"min=v3,max=v8,nullable"` + Acks int16 `kafka:"min=v0,max=v8"` + Timeout int32 `kafka:"min=v0,max=v8"` + Topics []RequestTopic `kafka:"min=v0,max=v8"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + broker := protocol.Broker{ID: -1} + + for i := range r.Topics { + t := &r.Topics[i] + + topic, ok := cluster.Topics[t.Topic] + if !ok { + return broker, NewError(protocol.NewErrNoTopic(t.Topic)) + } + + for j := range t.Partitions { + p := &t.Partitions[j] + + partition, ok := topic.Partitions[p.Partition] + if !ok { + return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition)) + } + + if b, ok := cluster.Brokers[partition.Leader]; !ok { + return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition)) + } else if broker.ID < 0 { + broker = b + } else if b.ID != broker.ID { + return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID)) + } + } + } + + return broker, nil +} + +func (r *Request) Prepare(apiVersion int16) { + // Determine which version of the message should be used, based on which + // version of the Produce API is supported by the server. + // + // In version 0.11, kafka gives this error: + // + // org.apache.kafka.common.record.InvalidRecordException + // Produce requests with version 3 are only allowed to contain record batches with magic version. + // + // In version 2.x, kafka refuses the message claiming that the CRC32 + // checksum is invalid. + var recordVersion int8 + + if apiVersion < 3 { + recordVersion = 1 + } else { + recordVersion = 2 + } + + for i := range r.Topics { + t := &r.Topics[i] + + for j := range t.Partitions { + p := &t.Partitions[j] + + // Allow the program to overload the version if really needed. 
+ if p.RecordSet.Version == 0 { + p.RecordSet.Version = recordVersion + } + } + } +} + +func (r *Request) HasResponse() bool { + return r.Acks != 0 +} + +type RequestTopic struct { + Topic string `kafka:"min=v0,max=v8"` + Partitions []RequestPartition `kafka:"min=v0,max=v8"` +} + +type RequestPartition struct { + Partition int32 `kafka:"min=v0,max=v8"` + RecordSet protocol.RecordSet `kafka:"min=v0,max=v8"` +} + +type Response struct { + Topics []ResponseTopic `kafka:"min=v0,max=v8"` + ThrottleTimeMs int32 `kafka:"min=v1,max=v8"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.Produce } + +type ResponseTopic struct { + Topic string `kafka:"min=v0,max=v8"` + Partitions []ResponsePartition `kafka:"min=v0,max=v8"` +} + +type ResponsePartition struct { + Partition int32 `kafka:"min=v0,max=v8"` + ErrorCode int16 `kafka:"min=v0,max=v8"` + BaseOffset int64 `kafka:"min=v0,max=v8"` + LogAppendTime int64 `kafka:"min=v2,max=v8"` + LogStartOffset int64 `kafka:"min=v5,max=v8"` + RecordErrors []ResponseError `kafka:"min=v8,max=v8"` + ErrorMessage string `kafka:"min=v8,max=v8,nullable"` +} + +type ResponseError struct { + BatchIndex int32 `kafka:"min=v8,max=v8"` + BatchIndexErrorMessage string `kafka:"min=v8,max=v8,nullable"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) + _ protocol.PreparedMessage = (*Request)(nil) +) + +type Error struct { + Err error +} + +func NewError(err error) *Error { + return &Error{Err: err} +} + +func (e *Error) Error() string { + return fmt.Sprintf("fetch request error: %v", e.Err) +} + +func (e *Error) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/protocol.go b/vendor/github.com/segmentio/kafka-go/protocol/protocol.go new file mode 100644 index 000000000..3d0a7b8dd --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/protocol.go @@ -0,0 +1,541 @@ +package protocol + +import ( + "errors" + "fmt" + "io" + "net" + "reflect" + "strconv" + "strings" +) + +// Message is an interface implemented by all request and response types of the +// kafka protocol. +// +// This interface is used mostly as a safe-guard to provide a compile-time check +// for values passed to functions dealing kafka message types. 
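+//
+// As a sketch, a minimal implementation only needs to report its API key
+// (pingRequest is a hypothetical example type, not part of this package):
+//
+//	type pingRequest struct{}
+//
+//	func (*pingRequest) ApiKey() ApiKey { return ApiVersions }
+//
+// Real request/response pairs additionally register themselves by calling
+// Register from their package's init function.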
+type Message interface { + ApiKey() ApiKey +} + +type ApiKey int16 + +func (k ApiKey) String() string { + if i := int(k); i >= 0 && i < len(apiNames) { + return apiNames[i] + } + return strconv.Itoa(int(k)) +} + +func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() } + +func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() } + +func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 { + min := k.MinVersion() + max := k.MaxVersion() + switch { + case min > maxVersion: + return min + case max < maxVersion: + return max + default: + return maxVersion + } +} + +func (k ApiKey) apiType() apiType { + if i := int(k); i >= 0 && i < len(apiTypes) { + return apiTypes[i] + } + return apiType{} +} + +const ( + Produce ApiKey = 0 + Fetch ApiKey = 1 + ListOffsets ApiKey = 2 + Metadata ApiKey = 3 + LeaderAndIsr ApiKey = 4 + StopReplica ApiKey = 5 + UpdateMetadata ApiKey = 6 + ControlledShutdown ApiKey = 7 + OffsetCommit ApiKey = 8 + OffsetFetch ApiKey = 9 + FindCoordinator ApiKey = 10 + JoinGroup ApiKey = 11 + Heartbeat ApiKey = 12 + LeaveGroup ApiKey = 13 + SyncGroup ApiKey = 14 + DescribeGroups ApiKey = 15 + ListGroups ApiKey = 16 + SaslHandshake ApiKey = 17 + ApiVersions ApiKey = 18 + CreateTopics ApiKey = 19 + DeleteTopics ApiKey = 20 + DeleteRecords ApiKey = 21 + InitProducerId ApiKey = 22 + OffsetForLeaderEpoch ApiKey = 23 + AddPartitionsToTxn ApiKey = 24 + AddOffsetsToTxn ApiKey = 25 + EndTxn ApiKey = 26 + WriteTxnMarkers ApiKey = 27 + TxnOffsetCommit ApiKey = 28 + DescribeAcls ApiKey = 29 + CreateAcls ApiKey = 30 + DeleteAcls ApiKey = 31 + DescribeConfigs ApiKey = 32 + AlterConfigs ApiKey = 33 + AlterReplicaLogDirs ApiKey = 34 + DescribeLogDirs ApiKey = 35 + SaslAuthenticate ApiKey = 36 + CreatePartitions ApiKey = 37 + CreateDelegationToken ApiKey = 38 + RenewDelegationToken ApiKey = 39 + ExpireDelegationToken ApiKey = 40 + DescribeDelegationToken ApiKey = 41 + DeleteGroups ApiKey = 42 + ElectLeaders ApiKey = 43 + IncrementalAlterConfigs ApiKey = 44 + AlterPartitionReassignments ApiKey = 45 + ListPartitionReassignments ApiKey = 46 + OffsetDelete ApiKey = 47 + DescribeClientQuotas ApiKey = 48 + AlterClientQuotas ApiKey = 49 + DescribeUserScramCredentials ApiKey = 50 + AlterUserScramCredentials ApiKey = 51 + + numApis = 52 +) + +var apiNames = [numApis]string{ + Produce: "Produce", + Fetch: "Fetch", + ListOffsets: "ListOffsets", + Metadata: "Metadata", + LeaderAndIsr: "LeaderAndIsr", + StopReplica: "StopReplica", + UpdateMetadata: "UpdateMetadata", + ControlledShutdown: "ControlledShutdown", + OffsetCommit: "OffsetCommit", + OffsetFetch: "OffsetFetch", + FindCoordinator: "FindCoordinator", + JoinGroup: "JoinGroup", + Heartbeat: "Heartbeat", + LeaveGroup: "LeaveGroup", + SyncGroup: "SyncGroup", + DescribeGroups: "DescribeGroups", + ListGroups: "ListGroups", + SaslHandshake: "SaslHandshake", + ApiVersions: "ApiVersions", + CreateTopics: "CreateTopics", + DeleteTopics: "DeleteTopics", + DeleteRecords: "DeleteRecords", + InitProducerId: "InitProducerId", + OffsetForLeaderEpoch: "OffsetForLeaderEpoch", + AddPartitionsToTxn: "AddPartitionsToTxn", + AddOffsetsToTxn: "AddOffsetsToTxn", + EndTxn: "EndTxn", + WriteTxnMarkers: "WriteTxnMarkers", + TxnOffsetCommit: "TxnOffsetCommit", + DescribeAcls: "DescribeAcls", + CreateAcls: "CreateAcls", + DeleteAcls: "DeleteAcls", + DescribeConfigs: "DescribeConfigs", + AlterConfigs: "AlterConfigs", + AlterReplicaLogDirs: "AlterReplicaLogDirs", + DescribeLogDirs: "DescribeLogDirs", + SaslAuthenticate: "SaslAuthenticate", + 
CreatePartitions:             "CreatePartitions",
+	CreateDelegationToken:        "CreateDelegationToken",
+	RenewDelegationToken:         "RenewDelegationToken",
+	ExpireDelegationToken:        "ExpireDelegationToken",
+	DescribeDelegationToken:      "DescribeDelegationToken",
+	DeleteGroups:                 "DeleteGroups",
+	ElectLeaders:                 "ElectLeaders",
+	IncrementalAlterConfigs:      "IncrementalAlterConfigs",
+	AlterPartitionReassignments:  "AlterPartitionReassignments",
+	ListPartitionReassignments:   "ListPartitionReassignments",
+	OffsetDelete:                 "OffsetDelete",
+	DescribeClientQuotas:         "DescribeClientQuotas",
+	AlterClientQuotas:            "AlterClientQuotas",
+	DescribeUserScramCredentials: "DescribeUserScramCredentials",
+	AlterUserScramCredentials:    "AlterUserScramCredentials",
+}
+
+type messageType struct {
+	version  int16
+	flexible bool
+	gotype   reflect.Type
+	decode   decodeFunc
+	encode   encodeFunc
+}
+
+func (t *messageType) new() Message {
+	return reflect.New(t.gotype).Interface().(Message)
+}
+
+type apiType struct {
+	requests  []messageType
+	responses []messageType
+}
+
+func (t apiType) minVersion() int16 {
+	if len(t.requests) == 0 {
+		return 0
+	}
+	return t.requests[0].version
+}
+
+func (t apiType) maxVersion() int16 {
+	if len(t.requests) == 0 {
+		return 0
+	}
+	return t.requests[len(t.requests)-1].version
+}
+
+var apiTypes [numApis]apiType
+
+// Register is automatically called when sub-packages are imported to install a
+// new pair of request/response message types.
+func Register(req, res Message) {
+	k1 := req.ApiKey()
+	k2 := res.ApiKey()
+
+	if k1 != k2 {
+		panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
+	}
+
+	apiTypes[k1] = apiType{
+		requests:  typesOf(req),
+		responses: typesOf(res),
+	}
+}
+
+// OverrideTypeMessage is an interface implemented by messages that want to override the standard
+// request/response types for a given API.
+type OverrideTypeMessage interface {
+	TypeKey() OverrideTypeKey
+}
+
+type OverrideTypeKey int16
+
+const (
+	RawProduceOverride OverrideTypeKey = 0
+)
+
+var overrideApiTypes [numApis]map[OverrideTypeKey]apiType
+
+func RegisterOverride(req, res Message, key OverrideTypeKey) {
+	k1 := req.ApiKey()
+	k2 := res.ApiKey()
+
+	if k1 != k2 {
+		panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
+	}
+
+	if overrideApiTypes[k1] == nil {
+		overrideApiTypes[k1] = make(map[OverrideTypeKey]apiType)
+	}
+	overrideApiTypes[k1][key] = apiType{
+		requests:  typesOf(req),
+		responses: typesOf(res),
+	}
+}
+
+func typesOf(v interface{}) []messageType {
+	return makeTypes(reflect.TypeOf(v).Elem())
+}
+
+func makeTypes(t reflect.Type) []messageType {
+	minVersion := int16(-1)
+	maxVersion := int16(-1)
+
+	// All future versions will be flexible (according to spec), so don't need to
+	// worry about maxes here.
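+	//
+	// As a worked example, scanning a struct whose fields are tagged
+	//
+	//	`kafka:"min=v0,max=v5|min=v6,max=v7,compact"`
+	//	`kafka:"min=v6,max=v7,tag"`
+	//
+	// yields minVersion=0, maxVersion=7, and minFlexibleVersion=6, so one
+	// messageType is built per version v0..v7 with flexible=true for v6+.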
+ minFlexibleVersion := int16(-1) + + forEachStructField(t, func(_ reflect.Type, _ index, tag string) { + forEachStructTag(tag, func(tag structTag) bool { + if minVersion < 0 || tag.MinVersion < minVersion { + minVersion = tag.MinVersion + } + if maxVersion < 0 || tag.MaxVersion > maxVersion { + maxVersion = tag.MaxVersion + } + if tag.TagID > -2 && (minFlexibleVersion < 0 || tag.MinVersion < minFlexibleVersion) { + minFlexibleVersion = tag.MinVersion + } + return true + }) + }) + + types := make([]messageType, 0, (maxVersion-minVersion)+1) + + for v := minVersion; v <= maxVersion; v++ { + flexible := minFlexibleVersion >= 0 && v >= minFlexibleVersion + + types = append(types, messageType{ + version: v, + gotype: t, + flexible: flexible, + decode: decodeFuncOf(t, v, flexible, structTag{}), + encode: encodeFuncOf(t, v, flexible, structTag{}), + }) + } + + return types +} + +type structTag struct { + MinVersion int16 + MaxVersion int16 + Compact bool + Nullable bool + TagID int +} + +func forEachStructTag(tag string, do func(structTag) bool) { + if tag == "-" { + return // special case to ignore the field + } + + forEach(tag, '|', func(s string) bool { + tag := structTag{ + MinVersion: -1, + MaxVersion: -1, + + // Legitimate tag IDs can start at 0. We use -1 as a placeholder to indicate + // that the message type is flexible, so that leaves -2 as the default for + // indicating that there is no tag ID and the message is not flexible. + TagID: -2, + } + + var err error + forEach(s, ',', func(s string) bool { + switch { + case strings.HasPrefix(s, "min="): + tag.MinVersion, err = parseVersion(s[4:]) + case strings.HasPrefix(s, "max="): + tag.MaxVersion, err = parseVersion(s[4:]) + case s == "tag": + tag.TagID = -1 + case strings.HasPrefix(s, "tag="): + tag.TagID, err = strconv.Atoi(s[4:]) + case s == "compact": + tag.Compact = true + case s == "nullable": + tag.Nullable = true + default: + err = fmt.Errorf("unrecognized option: %q", s) + } + return err == nil + }) + + if err != nil { + panic(fmt.Errorf("malformed struct tag: %w", err)) + } + + if tag.MinVersion < 0 && tag.MaxVersion >= 0 { + panic(fmt.Errorf("missing minimum version in struct tag: %q", s)) + } + + if tag.MaxVersion < 0 && tag.MinVersion >= 0 { + panic(fmt.Errorf("missing maximum version in struct tag: %q", s)) + } + + if tag.MinVersion > tag.MaxVersion { + panic(fmt.Errorf("invalid version range in struct tag: %q", s)) + } + + return do(tag) + }) +} + +func forEach(s string, sep byte, do func(string) bool) bool { + for len(s) != 0 { + p := "" + i := strings.IndexByte(s, sep) + if i < 0 { + p, s = s, "" + } else { + p, s = s[:i], s[i+1:] + } + if !do(p) { + return false + } + } + return true +} + +func forEachStructField(t reflect.Type, do func(reflect.Type, index, string)) { + for i, n := 0, t.NumField(); i < n; i++ { + f := t.Field(i) + + if f.PkgPath != "" && f.Name != "_" { + continue + } + + kafkaTag, ok := f.Tag.Lookup("kafka") + if !ok { + kafkaTag = "|" + } + + do(f.Type, indexOf(f), kafkaTag) + } +} + +func parseVersion(s string) (int16, error) { + if !strings.HasPrefix(s, "v") { + return 0, fmt.Errorf("invalid version number: %q", s) + } + i, err := strconv.ParseInt(s[1:], 10, 16) + if err != nil { + return 0, fmt.Errorf("invalid version number: %q: %w", s, err) + } + if i < 0 { + return 0, fmt.Errorf("invalid negative version number: %q", s) + } + return int16(i), nil +} + +func dontExpectEOF(err error) error { + if err != nil { + if errors.Is(err, io.EOF) { + return io.ErrUnexpectedEOF + } + + return err + } + + 
return nil
+}
+
+type Broker struct {
+	Rack string
+	Host string
+	Port int32
+	ID   int32
+}
+
+func (b Broker) String() string {
+	return net.JoinHostPort(b.Host, itoa(b.Port))
+}
+
+func (b Broker) Format(w fmt.State, v rune) {
+	switch v {
+	case 'd':
+		io.WriteString(w, itoa(b.ID))
+	case 's':
+		io.WriteString(w, b.String())
+	case 'v':
+		io.WriteString(w, itoa(b.ID))
+		io.WriteString(w, " ")
+		io.WriteString(w, b.String())
+		if b.Rack != "" {
+			io.WriteString(w, " ")
+			io.WriteString(w, b.Rack)
+		}
+	}
+}
+
+func itoa(i int32) string {
+	return strconv.Itoa(int(i))
+}
+
+type Topic struct {
+	Name       string
+	Error      int16
+	Partitions map[int32]Partition
+}
+
+type Partition struct {
+	ID       int32
+	Error    int16
+	Leader   int32
+	Replicas []int32
+	ISR      []int32
+	Offline  []int32
+}
+
+// RawExchanger is an extension to the Message interface to allow messages
+// to control the request response cycle for the message. This is currently
+// only used to facilitate v0 SASL Authenticate requests being written in
+// a non-standard fashion when the SASL Handshake was done at v0 but not
+// when done at v1.
+type RawExchanger interface {
+	// Required should return true when a RawExchange is needed.
+	// The passed in versions are the negotiated versions for the connection
+	// performing the request.
+	Required(versions map[ApiKey]int16) bool
+	// RawExchange is given the raw connection to the broker and the Message
+	// is responsible for writing itself to the connection as well as reading
+	// the response.
+	RawExchange(rw io.ReadWriter) (Message, error)
+}
+
+// BrokerMessage is an extension of the Message interface implemented by some
+// request types to customize the broker assignment logic.
+type BrokerMessage interface {
+	// Given a representation of the kafka cluster state as argument, returns
+	// the broker that the message should be routed to.
+	Broker(Cluster) (Broker, error)
+}
+
+// GroupMessage is an extension of the Message interface implemented by some
+// request types to inform the program that they should be routed to a group
+// coordinator.
+type GroupMessage interface {
+	// Returns the group configured on the message.
+	Group() string
+}
+
+// TransactionalMessage is an extension of the Message interface implemented by some
+// request types to inform the program that they should be routed to a transaction
+// coordinator.
+type TransactionalMessage interface {
+	// Returns the transactional id configured on the message.
+	Transaction() string
+}
+
+// PreparedMessage is an extension of the Message interface implemented by some
+// request types which may need to run some pre-processing on their state before
+// being sent.
+type PreparedMessage interface {
+	// Prepares the message before being sent to a kafka broker using the API
+	// version passed as argument.
+	Prepare(apiVersion int16)
+}
+
+// Splitter is an interface implemented by messages that can be split into
+// multiple requests and have their results merged back by a Merger.
+type Splitter interface {
+	// For a given cluster layout, returns the list of messages constructed
+	// from the receiver for each request that should be sent to the cluster.
+	// The second return value is a Merger which can be used to merge back the
+	// results of each request into a single message (or an error).
+	Split(Cluster) ([]Message, Merger, error)
+}
+
+// Merger is an interface implemented by messages which can merge multiple
+// results into one response.
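+//
+// Split and Merge are used together, for example (send is a hypothetical
+// helper that performs the per-broker round trips):
+//
+//	msgs, merger, _ := req.Split(cluster)
+//	results := send(msgs) // one Message or error per request
+//	res, err := merger.Merge(msgs, results)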
+type Merger interface { + // Given a list of message and associated results, merge them back into a + // response (or an error). The results must be either Message or error + // values, other types should trigger a panic. + Merge(messages []Message, results []interface{}) (Message, error) +} + +// Result converts r to a Message or an error, or panics if r could not be +// converted to these types. +func Result(r interface{}) (Message, error) { + switch v := r.(type) { + case Message: + return v, nil + case error: + return nil, v + default: + panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v)) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/rawproduce/rawproduce.go b/vendor/github.com/segmentio/kafka-go/protocol/rawproduce/rawproduce.go new file mode 100644 index 000000000..bad83138d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/rawproduce/rawproduce.go @@ -0,0 +1,91 @@ +package rawproduce + +import ( + "fmt" + + "github.com/segmentio/kafka-go/protocol" + "github.com/segmentio/kafka-go/protocol/produce" +) + +func init() { + // Register a type override so that raw produce requests will be encoded with the correct type. + req := &Request{} + protocol.RegisterOverride(req, &produce.Response{}, req.TypeKey()) +} + +type Request struct { + TransactionalID string `kafka:"min=v3,max=v8,nullable"` + Acks int16 `kafka:"min=v0,max=v8"` + Timeout int32 `kafka:"min=v0,max=v8"` + Topics []RequestTopic `kafka:"min=v0,max=v8"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce } + +func (r *Request) TypeKey() protocol.OverrideTypeKey { return protocol.RawProduceOverride } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + broker := protocol.Broker{ID: -1} + + for i := range r.Topics { + t := &r.Topics[i] + + topic, ok := cluster.Topics[t.Topic] + if !ok { + return broker, NewError(protocol.NewErrNoTopic(t.Topic)) + } + + for j := range t.Partitions { + p := &t.Partitions[j] + + partition, ok := topic.Partitions[p.Partition] + if !ok { + return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition)) + } + + if b, ok := cluster.Brokers[partition.Leader]; !ok { + return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition)) + } else if broker.ID < 0 { + broker = b + } else if b.ID != broker.ID { + return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID)) + } + } + } + + return broker, nil +} + +func (r *Request) HasResponse() bool { + return r.Acks != 0 +} + +type RequestTopic struct { + Topic string `kafka:"min=v0,max=v8"` + Partitions []RequestPartition `kafka:"min=v0,max=v8"` +} + +type RequestPartition struct { + Partition int32 `kafka:"min=v0,max=v8"` + RecordSet protocol.RawRecordSet `kafka:"min=v0,max=v8"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) + +type Error struct { + Err error +} + +func NewError(err error) *Error { + return &Error{Err: err} +} + +func (e *Error) Error() string { + return fmt.Sprintf("fetch request error: %v", e.Err) +} + +func (e *Error) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record.go b/vendor/github.com/segmentio/kafka-go/protocol/record.go new file mode 100644 index 000000000..e11af4dcc --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record.go @@ -0,0 +1,354 @@ +package protocol + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/segmentio/kafka-go/compress" +) + +// Attributes is a bitset 
representing special attributes set on records. +type Attributes int16 + +const ( + Gzip Attributes = Attributes(compress.Gzip) // 1 + Snappy Attributes = Attributes(compress.Snappy) // 2 + Lz4 Attributes = Attributes(compress.Lz4) // 3 + Zstd Attributes = Attributes(compress.Zstd) // 4 + Transactional Attributes = 1 << 4 + Control Attributes = 1 << 5 +) + +func (a Attributes) Compression() compress.Compression { + return compress.Compression(a & 7) +} + +func (a Attributes) Transactional() bool { + return (a & Transactional) != 0 +} + +func (a Attributes) Control() bool { + return (a & Control) != 0 +} + +func (a Attributes) String() string { + s := a.Compression().String() + if a.Transactional() { + s += "+transactional" + } + if a.Control() { + s += "+control" + } + return s +} + +// Header represents a single entry in a list of record headers. +type Header struct { + Key string + Value []byte +} + +// Record is an interface representing a single kafka record. +// +// Record values are not safe to use concurrently from multiple goroutines. +type Record struct { + // The offset at which the record exists in a topic partition. This value + // is ignored in produce requests. + Offset int64 + + // Returns the time of the record. This value may be omitted in produce + // requests to let kafka set the time when it saves the record. + Time time.Time + + // Returns a byte sequence containing the key of this record. The returned + // sequence may be nil to indicate that the record has no key. If the record + // is part of a RecordSet, the content of the key must remain valid at least + // until the record set is closed (or until the key is closed). + Key Bytes + + // Returns a byte sequence containing the value of this record. The returned + // sequence may be nil to indicate that the record has no value. If the + // record is part of a RecordSet, the content of the value must remain valid + // at least until the record set is closed (or until the value is closed). + Value Bytes + + // Returns the list of headers associated with this record. The returned + // slice may be reused across calls, the program should use it as an + // immutable value. + Headers []Header +} + +// RecordSet represents a sequence of records in Produce requests and Fetch +// responses. All v0, v1, and v2 formats are supported. +type RecordSet struct { + // The message version that this record set will be represented as, valid + // values are 1, or 2. + // + // When reading, this is the value of the highest version used in the + // batches that compose the record set. + // + // When writing, this value dictates the format that the records will be + // encoded in. + Version int8 + + // Attributes set on the record set. + // + // When reading, the attributes are the combination of all attributes in + // the batches that compose the record set. + // + // When writing, the attributes apply to the whole sequence of records in + // the set. + Attributes Attributes + + // A reader exposing the sequence of records. + // + // When reading a RecordSet from an io.Reader, the Records field will be a + // *RecordStream. If the program needs to access the details of each batch + // that compose the stream, it may use type assertions to access the + // underlying types of each batch. + Records RecordReader +} + +// bufferedReader is an interface implemented by types like bufio.Reader, which +// we use to optimize prefix reads by accessing the internal buffer directly +// through calls to Peek. 
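+//
+// For example, ReadFrom below peeks at the magic byte without consuming
+// input whenever the underlying reader is a *bufio.Reader, which satisfies
+// this interface:
+//
+//	b, err := r.Peek(magicByteOffset + 1)
+//	if err == nil {
+//		version := b[magicByteOffset]
+//		// decode according to version, without any extra copy
+//	}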
+type bufferedReader interface {
+	Discard(int) (int, error)
+	Peek(int) ([]byte, error)
+}
+
+// bytesBuffer is an interface implemented by types like bytes.Buffer, which we
+// use to optimize prefix reads by accessing the internal buffer directly
+// through calls to Bytes.
+type bytesBuffer interface {
+	Bytes() []byte
+}
+
+// magicByteOffset is the position of the magic byte in all versions of record
+// sets in the kafka protocol.
+const magicByteOffset = 16
+
+// ReadFrom reads the representation of a record set from r into rs, returning
+// the number of bytes consumed from r, and a non-nil error if the record set
+// could not be read.
+func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) {
+	d, _ := r.(*decoder)
+	if d == nil {
+		d = &decoder{
+			reader: r,
+			remain: 4,
+		}
+	}
+
+	*rs = RecordSet{}
+	limit := d.remain
+	size := d.readInt32()
+
+	if d.err != nil {
+		return int64(limit - d.remain), d.err
+	}
+
+	if size <= 0 {
+		return 4, nil
+	}
+
+	stream := &RecordStream{
+		Records: make([]RecordReader, 0, 4),
+	}
+
+	var err error
+	d.remain = int(size)
+
+	for d.remain > 0 && err == nil {
+		var version byte
+
+		if d.remain < (magicByteOffset + 1) {
+			if len(stream.Records) != 0 {
+				break
+			}
+			return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1)
+		}
+
+		switch r := d.reader.(type) {
+		case bufferedReader:
+			b, err := r.Peek(magicByteOffset + 1)
+			if err != nil {
+				n, _ := r.Discard(len(b))
+				return 4 + int64(n), dontExpectEOF(err)
+			}
+			version = b[magicByteOffset]
+		case bytesBuffer:
+			version = r.Bytes()[magicByteOffset]
+		default:
+			b := make([]byte, magicByteOffset+1)
+			if n, err := io.ReadFull(d.reader, b); err != nil {
+				return 4 + int64(n), dontExpectEOF(err)
+			}
+			version = b[magicByteOffset]
+			// Reconstruct the prefix that we had to read to determine the version
+			// of the record set from the magic byte.
+			//
+			// Technically this may recursively stack readers when consuming all
+			// items of the batch, which could hurt performance. In practice this
+			// path should not be taken though, since the decoder would read from a
+			// *bufio.Reader which implements the bufferedReader interface.
+			d.reader = io.MultiReader(bytes.NewReader(b), d.reader)
+		}
+
+		var tmp RecordSet
+		switch version {
+		case 0, 1:
+			err = tmp.readFromVersion1(d)
+		case 2:
+			err = tmp.readFromVersion2(d)
+		default:
+			err = fmt.Errorf("unsupported message version %d for message of size %d", version, size)
+		}
+
+		if tmp.Version > rs.Version {
+			rs.Version = tmp.Version
+		}
+
+		rs.Attributes |= tmp.Attributes
+
+		if tmp.Records != nil {
+			stream.Records = append(stream.Records, tmp.Records)
+		}
+	}
+
+	if len(stream.Records) != 0 {
+		rs.Records = stream
+		// Ignore errors if we've successfully read records, so the
+		// program can keep making progress.
+		err = nil
+	}
+
+	d.discardAll()
+	rn := 4 + (int(size) - d.remain)
+	d.remain = limit - rn
+	return int64(rn), err
+}
+
+// WriteTo writes the representation of rs into w. The value of rs.Version
+// dictates which format the record set will be represented as.
+//
+// The error will be ErrNoRecord if rs contained no records.
+//
+// Note: since this package is only compatible with kafka 0.10 and above, the
+// method never produces messages in version 0. If rs.Version is zero, the
+// method defaults to producing messages in version 1.
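+//
+// A round trip through memory looks roughly like this (error handling
+// elided):
+//
+//	rs := RecordSet{Version: 2, Records: NewRecordReader(records...)}
+//	buf := new(bytes.Buffer)
+//	rs.WriteTo(buf)  // encode as a v2 record batch
+//	rs.ReadFrom(buf) // decode it back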
+func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) { + if rs.Records == nil { + return 0, ErrNoRecord + } + + // This optimization avoids rendering the record set in an intermediary + // buffer when the writer is already a pageBuffer, which is a common case + // due to the way WriteRequest and WriteResponse are implemented. + buffer, _ := w.(*pageBuffer) + bufferOffset := int64(0) + + if buffer != nil { + bufferOffset = buffer.Size() + } else { + buffer = newPageBuffer() + defer buffer.unref() + } + + size := packUint32(0) + buffer.Write(size[:]) // size placeholder + + var err error + switch rs.Version { + case 0, 1: + err = rs.writeToVersion1(buffer, bufferOffset+4) + case 2: + err = rs.writeToVersion2(buffer, bufferOffset+4) + default: + err = fmt.Errorf("unsupported record set version %d", rs.Version) + } + if err != nil { + return 0, err + } + + n := buffer.Size() - bufferOffset + if n == 0 { + size = packUint32(^uint32(0)) + } else { + size = packUint32(uint32(n) - 4) + } + buffer.WriteAt(size[:], bufferOffset) + + // This condition indicates that the output writer received by `WriteTo` was + // not a *pageBuffer, in which case we need to flush the buffered records + // data into it. + if buffer != w { + return buffer.WriteTo(w) + } + + return n, nil +} + +// RawRecordSet represents a record set for a RawProduce request. The record set is +// represented as a raw sequence of pre-encoded record set bytes. +type RawRecordSet struct { + // Reader exposes the raw sequence of record set bytes. + Reader io.Reader +} + +// ReadFrom reads the representation of a record set from r into rrs. It re-uses the +// existing RecordSet.ReadFrom implementation to first read/decode data into a RecordSet, +// then writes/encodes the RecordSet to a buffer referenced by the RawRecordSet. +// +// Note: re-using the RecordSet.ReadFrom implementation makes this suboptimal from a +// performance standpoint as it require an extra copy of the record bytes. Holding off +// on optimizing, as this code path is only invoked in tests. +func (rrs *RawRecordSet) ReadFrom(r io.Reader) (int64, error) { + rs := &RecordSet{} + n, err := rs.ReadFrom(r) + if err != nil { + return 0, err + } + + buf := &bytes.Buffer{} + rs.WriteTo(buf) + *rrs = RawRecordSet{ + Reader: buf, + } + + return n, nil +} + +// WriteTo writes the RawRecordSet to an io.Writer. Since this is a raw record set representation, all that is +// done here is copying bytes from the underlying reader to the specified writer. +func (rrs *RawRecordSet) WriteTo(w io.Writer) (int64, error) { + if rrs.Reader == nil { + return 0, ErrNoRecord + } + + return io.Copy(w, rrs.Reader) +} + +func makeTime(t int64) time.Time { + return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)) +} + +func timestamp(t time.Time) int64 { + if t.IsZero() { + return 0 + } + return t.UnixNano() / int64(time.Millisecond) +} + +func packUint32(u uint32) (b [4]byte) { + binary.BigEndian.PutUint32(b[:], u) + return +} + +func packUint64(u uint64) (b [8]byte) { + binary.BigEndian.PutUint64(b[:], u) + return +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record_batch.go b/vendor/github.com/segmentio/kafka-go/protocol/record_batch.go new file mode 100644 index 000000000..eca539933 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record_batch.go @@ -0,0 +1,358 @@ +package protocol + +import ( + "errors" + "io" + "time" +) + +// RecordReader is an interface representing a sequence of records. 
Record sets +// are used in both produce and fetch requests to represent the sequence of +// records that are sent to or receive from kafka brokers. +// +// RecordSet values are not safe to use concurrently from multiple goroutines. +type RecordReader interface { + // Returns the next record in the set, or io.EOF if the end of the sequence + // has been reached. + // + // The returned Record is guaranteed to be valid until the next call to + // ReadRecord. If the program needs to retain the Record value it must make + // a copy. + ReadRecord() (*Record, error) +} + +// NewRecordReader constructs a reader exposing the records passed as arguments. +func NewRecordReader(records ...Record) RecordReader { + switch len(records) { + case 0: + return emptyRecordReader{} + default: + r := &recordReader{records: make([]Record, len(records))} + copy(r.records, records) + return r + } +} + +// MultiRecordReader merges multiple record batches into one. +func MultiRecordReader(batches ...RecordReader) RecordReader { + switch len(batches) { + case 0: + return emptyRecordReader{} + case 1: + return batches[0] + default: + m := &multiRecordReader{batches: make([]RecordReader, len(batches))} + copy(m.batches, batches) + return m + } +} + +func forEachRecord(r RecordReader, f func(int, *Record) error) error { + for i := 0; ; i++ { + rec, err := r.ReadRecord() + + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return err + } + + if err := handleRecord(i, rec, f); err != nil { + return err + } + } +} + +func handleRecord(i int, r *Record, f func(int, *Record) error) error { + if r.Key != nil { + defer r.Key.Close() + } + if r.Value != nil { + defer r.Value.Close() + } + return f(i, r) +} + +type recordReader struct { + records []Record + index int +} + +func (r *recordReader) ReadRecord() (*Record, error) { + if i := r.index; i >= 0 && i < len(r.records) { + r.index++ + return &r.records[i], nil + } + return nil, io.EOF +} + +type multiRecordReader struct { + batches []RecordReader + index int +} + +func (m *multiRecordReader) ReadRecord() (*Record, error) { + for { + if m.index == len(m.batches) { + return nil, io.EOF + } + r, err := m.batches[m.index].ReadRecord() + if err == nil { + return r, nil + } + if !errors.Is(err, io.EOF) { + return nil, err + } + m.index++ + } +} + +// optimizedRecordReader is an implementation of a RecordReader which exposes a +// sequence. +type optimizedRecordReader struct { + records []optimizedRecord + index int + buffer Record + headers [][]Header +} + +func (r *optimizedRecordReader) ReadRecord() (*Record, error) { + if i := r.index; i >= 0 && i < len(r.records) { + rec := &r.records[i] + r.index++ + r.buffer = Record{ + Offset: rec.offset, + Time: rec.time(), + Key: rec.key(), + Value: rec.value(), + } + if i < len(r.headers) { + r.buffer.Headers = r.headers[i] + } + return &r.buffer, nil + } + return nil, io.EOF +} + +type optimizedRecord struct { + offset int64 + timestamp int64 + keyRef *pageRef + valueRef *pageRef +} + +func (r *optimizedRecord) time() time.Time { + return makeTime(r.timestamp) +} + +func (r *optimizedRecord) key() Bytes { + return makeBytes(r.keyRef) +} + +func (r *optimizedRecord) value() Bytes { + return makeBytes(r.valueRef) +} + +func makeBytes(ref *pageRef) Bytes { + if ref == nil { + return nil + } + return ref +} + +type emptyRecordReader struct{} + +func (emptyRecordReader) ReadRecord() (*Record, error) { return nil, io.EOF } + +// ControlRecord represents a record read from a control batch. 
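+//
+// The 4-byte key of a control record holds two big-endian int16 values, a
+// version and a type (0 for transaction abort markers, 1 for commit
+// markers); ReadControlRecord below decodes exactly that layout:
+//
+//	cr, err := ReadControlRecord(rec)
+//	if err == nil && cr.Type == 1 {
+//		// commit marker
+//	}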
+type ControlRecord struct { + Offset int64 + Time time.Time + Version int16 + Type int16 + Data []byte + Headers []Header +} + +func ReadControlRecord(r *Record) (*ControlRecord, error) { + if r.Key != nil { + defer r.Key.Close() + } + if r.Value != nil { + defer r.Value.Close() + } + + k, err := ReadAll(r.Key) + if err != nil { + return nil, err + } + if k == nil { + return nil, Error("invalid control record with nil key") + } + if len(k) != 4 { + return nil, Errorf("invalid control record with key of size %d", len(k)) + } + + v, err := ReadAll(r.Value) + if err != nil { + return nil, err + } + + c := &ControlRecord{ + Offset: r.Offset, + Time: r.Time, + Version: readInt16(k[:2]), + Type: readInt16(k[2:]), + Data: v, + Headers: r.Headers, + } + + return c, nil +} + +func (cr *ControlRecord) Key() Bytes { + k := make([]byte, 4) + writeInt16(k[:2], cr.Version) + writeInt16(k[2:], cr.Type) + return NewBytes(k) +} + +func (cr *ControlRecord) Value() Bytes { + return NewBytes(cr.Data) +} + +func (cr *ControlRecord) Record() Record { + return Record{ + Offset: cr.Offset, + Time: cr.Time, + Key: cr.Key(), + Value: cr.Value(), + Headers: cr.Headers, + } +} + +// ControlBatch is an implementation of the RecordReader interface representing +// control batches returned by kafka brokers. +type ControlBatch struct { + Attributes Attributes + PartitionLeaderEpoch int32 + BaseOffset int64 + ProducerID int64 + ProducerEpoch int16 + BaseSequence int32 + Records RecordReader +} + +// NewControlBatch constructs a control batch from the list of records passed as +// arguments. +func NewControlBatch(records ...ControlRecord) *ControlBatch { + rawRecords := make([]Record, len(records)) + for i, cr := range records { + rawRecords[i] = cr.Record() + } + return &ControlBatch{ + Records: NewRecordReader(rawRecords...), + } +} + +func (c *ControlBatch) ReadRecord() (*Record, error) { + return c.Records.ReadRecord() +} + +func (c *ControlBatch) ReadControlRecord() (*ControlRecord, error) { + r, err := c.ReadRecord() + if err != nil { + return nil, err + } + if r.Key != nil { + defer r.Key.Close() + } + if r.Value != nil { + defer r.Value.Close() + } + return ReadControlRecord(r) +} + +func (c *ControlBatch) Offset() int64 { + return c.BaseOffset +} + +func (c *ControlBatch) Version() int { + return 2 +} + +// RecordBatch is an implementation of the RecordReader interface representing +// regular record batches (v2). +type RecordBatch struct { + Attributes Attributes + PartitionLeaderEpoch int32 + BaseOffset int64 + ProducerID int64 + ProducerEpoch int16 + BaseSequence int32 + Records RecordReader +} + +func (r *RecordBatch) ReadRecord() (*Record, error) { + return r.Records.ReadRecord() +} + +func (r *RecordBatch) Offset() int64 { + return r.BaseOffset +} + +func (r *RecordBatch) Version() int { + return 2 +} + +// MessageSet is an implementation of the RecordReader interface representing +// regular message sets (v1). +type MessageSet struct { + Attributes Attributes + BaseOffset int64 + Records RecordReader +} + +func (m *MessageSet) ReadRecord() (*Record, error) { + return m.Records.ReadRecord() +} + +func (m *MessageSet) Offset() int64 { + return m.BaseOffset +} + +func (m *MessageSet) Version() int { + return 1 +} + +// RecordStream is an implementation of the RecordReader interface which +// combines multiple underlying RecordReader and only expose records that +// are not from control batches. 
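+//
+// For example, draining a stream whose Records contain a mix of data and
+// control batches yields only the data records:
+//
+//	for {
+//		rec, err := stream.ReadRecord()
+//		if err != nil {
+//			break // io.EOF once every non-control batch is exhausted
+//		}
+//		process(rec) // process is a placeholder for application code
+//	}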
+type RecordStream struct { + Records []RecordReader + index int +} + +func (s *RecordStream) ReadRecord() (*Record, error) { + for { + if s.index < 0 || s.index >= len(s.Records) { + return nil, io.EOF + } + + if _, isControl := s.Records[s.index].(*ControlBatch); isControl { + s.index++ + continue + } + + r, err := s.Records[s.index].ReadRecord() + if err != nil { + if errors.Is(err, io.EOF) { + s.index++ + continue + } + } + + return r, err + } +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record_v1.go b/vendor/github.com/segmentio/kafka-go/protocol/record_v1.go new file mode 100644 index 000000000..5757e1146 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record_v1.go @@ -0,0 +1,243 @@ +package protocol + +import ( + "errors" + "hash/crc32" + "io" + "math" + "time" +) + +func readMessage(b *pageBuffer, d *decoder) (attributes int8, baseOffset, timestamp int64, key, value Bytes, err error) { + md := decoder{ + reader: d, + remain: 12, + } + + baseOffset = md.readInt64() + md.remain = int(md.readInt32()) + + crc := uint32(md.readInt32()) + md.setCRC(crc32.IEEETable) + magicByte := md.readInt8() + attributes = md.readInt8() + timestamp = int64(0) + + if magicByte != 0 { + timestamp = md.readInt64() + } + + keyOffset := b.Size() + keyLength := int(md.readInt32()) + hasKey := keyLength >= 0 + if hasKey { + md.writeTo(b, keyLength) + key = b.ref(keyOffset, b.Size()) + } + + valueOffset := b.Size() + valueLength := int(md.readInt32()) + hasValue := valueLength >= 0 + if hasValue { + md.writeTo(b, valueLength) + value = b.ref(valueOffset, b.Size()) + } + + if md.crc32 != crc { + err = Errorf("crc32 checksum mismatch (computed=%d found=%d)", md.crc32, crc) + } else { + err = dontExpectEOF(md.err) + } + + return +} + +func (rs *RecordSet) readFromVersion1(d *decoder) error { + var records RecordReader + + b := newPageBuffer() + defer b.unref() + + attributes, baseOffset, timestamp, key, value, err := readMessage(b, d) + if err != nil { + return err + } + + if compression := Attributes(attributes).Compression(); compression == 0 { + records = &message{ + Record: Record{ + Offset: baseOffset, + Time: makeTime(timestamp), + Key: key, + Value: value, + }, + } + } else { + // Can we have a non-nil key when reading a compressed message? + if key != nil { + key.Close() + } + if value == nil { + records = emptyRecordReader{} + } else { + defer value.Close() + + codec := compression.Codec() + if codec == nil { + return Errorf("unsupported compression codec: %d", compression) + } + decompressor := codec.NewReader(value) + defer decompressor.Close() + + b := newPageBuffer() + defer b.unref() + + d := &decoder{ + reader: decompressor, + remain: math.MaxInt32, + } + + r := &recordReader{ + records: make([]Record, 0, 32), + } + + for !d.done() { + _, offset, timestamp, key, value, err := readMessage(b, d) + if err != nil { + if errors.Is(err, io.ErrUnexpectedEOF) { + break + } + for _, rec := range r.records { + closeBytes(rec.Key) + closeBytes(rec.Value) + } + return err + } + r.records = append(r.records, Record{ + Offset: offset, + Time: makeTime(timestamp), + Key: key, + Value: value, + }) + } + + if baseOffset != 0 { + // https://kafka.apache.org/documentation/#messageset + // + // In version 1, to avoid server side re-compression, only the + // wrapper message will be assigned an offset. The inner messages + // will have relative offsets. 
The absolute offset can be computed + // using the offset from the outer message, which corresponds to the + // offset assigned to the last inner message. + lastRelativeOffset := int64(len(r.records)) - 1 + + for i := range r.records { + r.records[i].Offset = baseOffset - (lastRelativeOffset - r.records[i].Offset) + } + } + + records = r + } + } + + *rs = RecordSet{ + Version: 1, + Attributes: Attributes(attributes), + Records: records, + } + + return nil +} + +func (rs *RecordSet) writeToVersion1(buffer *pageBuffer, bufferOffset int64) error { + attributes := rs.Attributes + records := rs.Records + + if compression := attributes.Compression(); compression != 0 { + if codec := compression.Codec(); codec != nil { + // In the message format version 1, compression is achieved by + // compressing the value of a message which recursively contains + // the representation of the compressed message set. + subset := *rs + subset.Attributes &= ^7 // erase compression + + if err := subset.writeToVersion1(buffer, bufferOffset); err != nil { + return err + } + + compressed := newPageBuffer() + defer compressed.unref() + + compressor := codec.NewWriter(compressed) + defer compressor.Close() + + var err error + buffer.pages.scan(bufferOffset, buffer.Size(), func(b []byte) bool { + _, err = compressor.Write(b) + return err == nil + }) + if err != nil { + return err + } + if err := compressor.Close(); err != nil { + return err + } + + buffer.Truncate(int(bufferOffset)) + + records = &message{ + Record: Record{ + Value: compressed, + }, + } + } + } + + e := encoder{writer: buffer} + currentTimestamp := timestamp(time.Now()) + + return forEachRecord(records, func(i int, r *Record) error { + t := timestamp(r.Time) + if t == 0 { + t = currentTimestamp + } + + messageOffset := buffer.Size() + e.writeInt64(int64(i)) + e.writeInt32(0) // message size placeholder + e.writeInt32(0) // crc32 placeholder + e.setCRC(crc32.IEEETable) + e.writeInt8(1) // magic byte: version 1 + e.writeInt8(int8(attributes)) + e.writeInt64(t) + + if err := e.writeNullBytesFrom(r.Key); err != nil { + return err + } + + if err := e.writeNullBytesFrom(r.Value); err != nil { + return err + } + + b0 := packUint32(uint32(buffer.Size() - (messageOffset + 12))) + b1 := packUint32(e.crc32) + + buffer.WriteAt(b0[:], messageOffset+8) + buffer.WriteAt(b1[:], messageOffset+12) + e.setCRC(nil) + return nil + }) +} + +type message struct { + Record Record + read bool +} + +func (m *message) ReadRecord() (*Record, error) { + if m.read { + return nil, io.EOF + } + m.read = true + return &m.Record, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record_v2.go b/vendor/github.com/segmentio/kafka-go/protocol/record_v2.go new file mode 100644 index 000000000..366ec4bff --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record_v2.go @@ -0,0 +1,315 @@ +package protocol + +import ( + "fmt" + "hash/crc32" + "io" + "time" +) + +func (rs *RecordSet) readFromVersion2(d *decoder) error { + baseOffset := d.readInt64() + batchLength := d.readInt32() + + if int(batchLength) > d.remain || d.err != nil { + d.discardAll() + return nil + } + + dec := &decoder{ + reader: d, + remain: int(batchLength), + } + + partitionLeaderEpoch := dec.readInt32() + magicByte := dec.readInt8() + crc := dec.readInt32() + + dec.setCRC(crc32.MakeTable(crc32.Castagnoli)) + + attributes := dec.readInt16() + lastOffsetDelta := dec.readInt32() + firstTimestamp := dec.readInt64() + maxTimestamp := dec.readInt64() + producerID := dec.readInt64() + producerEpoch := 
dec.readInt16()
+    baseSequence := dec.readInt32()
+    numRecords := dec.readInt32()
+    reader := io.Reader(dec)
+
+    // unused
+    _ = lastOffsetDelta
+    _ = maxTimestamp
+
+    if compression := Attributes(attributes).Compression(); compression != 0 {
+        codec := compression.Codec()
+        if codec == nil {
+            return fmt.Errorf("unsupported compression codec (%d)", compression)
+        }
+        decompressor := codec.NewReader(reader)
+        defer decompressor.Close()
+        reader = decompressor
+    }
+
+    buffer := newPageBuffer()
+    defer buffer.unref()
+
+    _, err := buffer.ReadFrom(reader)
+    if err != nil {
+        return err
+    }
+    if dec.crc32 != uint32(crc) {
+        return fmt.Errorf("crc32 checksum mismatch (computed=%d found=%d)", dec.crc32, uint32(crc))
+    }
+
+    recordsLength := buffer.Len()
+    dec.reader = buffer
+    dec.remain = recordsLength
+
+    records := make([]optimizedRecord, numRecords)
+    // These are two lazy allocators that will be used to optimize allocation of
+    // page references for keys and values.
+    //
+    // By default, no memory is allocated and on first use, numRecords page refs
+    // are allocated in a contiguous memory space, and the allocators return
+    // pointers into those arrays for each page ref that get requested.
+    //
+    // The reasoning is that kafka partitions typically have records of a single
+    // form, which either have no keys, no values, or both keys and values.
+    // Using lazy allocators adapts nicely to these patterns to only allocate
+    // the memory that is needed by the program, while still reducing the number
+    // of malloc calls made by the program.
+    //
+    // Using a single allocator for both keys and values keeps related values
+    // close by in memory, making access to the records more friendly to CPU
+    // caches.
+    alloc := pageRefAllocator{size: int(numRecords)}
+    // Following the same reasoning that kafka partitions will typically have
+    // records with repeating formats, we expect to either find records with
+    // no headers, or records which always contain headers.
+    //
+    // To reduce the memory footprint when records have no headers, the Header
+    // slices are lazily allocated in a separate array.
+    headers := ([][]Header)(nil)
+
+    for i := range records {
+        r := &records[i]
+        _ = dec.readVarInt() // record length (unused)
+        _ = dec.readInt8()   // record attributes (unused)
+        timestampDelta := dec.readVarInt()
+        offsetDelta := dec.readVarInt()
+
+        r.offset = baseOffset + offsetDelta
+        r.timestamp = firstTimestamp + timestampDelta
+
+        keyLength := dec.readVarInt()
+        keyOffset := int64(recordsLength - dec.remain)
+        if keyLength > 0 {
+            dec.discard(int(keyLength))
+        }
+
+        valueLength := dec.readVarInt()
+        valueOffset := int64(recordsLength - dec.remain)
+        if valueLength > 0 {
+            dec.discard(int(valueLength))
+        }
+
+        if numHeaders := dec.readVarInt(); numHeaders > 0 {
+            if headers == nil {
+                headers = make([][]Header, numRecords)
+            }
+
+            h := make([]Header, numHeaders)
+
+            for i := range h {
+                h[i] = Header{
+                    Key:   dec.readVarString(),
+                    Value: dec.readVarBytes(),
+                }
+            }
+
+            headers[i] = h
+        }
+
+        if dec.err != nil {
+            records = records[:i]
+            break
+        }
+
+        if keyLength >= 0 {
+            r.keyRef = alloc.newPageRef()
+            buffer.refTo(r.keyRef, keyOffset, keyOffset+keyLength)
+        }
+
+        if valueLength >= 0 {
+            r.valueRef = alloc.newPageRef()
+            buffer.refTo(r.valueRef, valueOffset, valueOffset+valueLength)
+        }
+    }
+
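+    // Wire layout of each record decoded by the loop above (all varints are
+    // zig-zag encoded, as implemented by readVarInt):
+    //
+    //    length         varint
+    //    attributes     int8 (unused)
+    //    timestampDelta varint, added to firstTimestamp
+    //    offsetDelta    varint, added to baseOffset
+    //    keyLength      varint, -1 for a null key
+    //    key            keyLength bytes
+    //    valueLength    varint, -1 for a null value
+    //    value          valueLength bytes
+    //    numHeaders     varint, then (varstring key, varbytes value) pairs
+    //
+    // For example (hypothetical values), offsetDelta 2 in a batch with
+    // baseOffset 100 yields offset 102, and keyLength -1 leaves keyRef nil so
+    // the Record exposes a nil Key.
+
+    // Note: it's unclear whether kafka 0.11+ still truncates the responses,
+    // all attempts I made at constructing a test to trigger a truncation have
+    // failed.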
+    // I kept this code here as a safeguard but it may never execute.
+    if dec.err != nil && len(records) == 0 {
+        return dec.err
+    }
+
+    *rs = RecordSet{
+        Version:    magicByte,
+        Attributes: Attributes(attributes),
+        Records: &optimizedRecordReader{
+            records: records,
+            headers: headers,
+        },
+    }
+
+    if rs.Attributes.Control() {
+        rs.Records = &ControlBatch{
+            Attributes:           rs.Attributes,
+            PartitionLeaderEpoch: partitionLeaderEpoch,
+            BaseOffset:           baseOffset,
+            ProducerID:           producerID,
+            ProducerEpoch:        producerEpoch,
+            BaseSequence:         baseSequence,
+            Records:              rs.Records,
+        }
+    } else {
+        rs.Records = &RecordBatch{
+            Attributes:           rs.Attributes,
+            PartitionLeaderEpoch: partitionLeaderEpoch,
+            BaseOffset:           baseOffset,
+            ProducerID:           producerID,
+            ProducerEpoch:        producerEpoch,
+            BaseSequence:         baseSequence,
+            Records:              rs.Records,
+        }
+    }
+
+    return nil
+}
+
+func (rs *RecordSet) writeToVersion2(buffer *pageBuffer, bufferOffset int64) error {
+    records := rs.Records
+    numRecords := int32(0)
+
+    e := &encoder{writer: buffer}
+    e.writeInt64(0)                    // base offset                         |  0 +8
+    e.writeInt32(0)                    // placeholder for record batch length |  8 +4
+    e.writeInt32(-1)                   // partition leader epoch              | 12 +4
+    e.writeInt8(2)                     // magic byte                          | 16 +1
+    e.writeInt32(0)                    // placeholder for crc32 checksum      | 17 +4
+    e.writeInt16(int16(rs.Attributes)) // attributes                          | 21 +2
+    e.writeInt32(0)                    // placeholder for lastOffsetDelta     | 23 +4
+    e.writeInt64(0)                    // placeholder for firstTimestamp      | 27 +8
+    e.writeInt64(0)                    // placeholder for maxTimestamp        | 35 +8
+    e.writeInt64(-1)                   // producer id                         | 43 +8
+    e.writeInt16(-1)                   // producer epoch                      | 51 +2
+    e.writeInt32(-1)                   // base sequence                       | 53 +4
+    e.writeInt32(0)                    // placeholder for numRecords          | 57 +4
+
+    var compressor io.WriteCloser
+    if compression := rs.Attributes.Compression(); compression != 0 {
+        if codec := compression.Codec(); codec != nil {
+            compressor = codec.NewWriter(buffer)
+            e.writer = compressor
+        }
+    }
+
+    currentTimestamp := timestamp(time.Now())
+    lastOffsetDelta := int32(0)
+    firstTimestamp := int64(0)
+    maxTimestamp := int64(0)
+
+    err := forEachRecord(records, func(i int, r *Record) error {
+        t := timestamp(r.Time)
+        if t == 0 {
+            t = currentTimestamp
+        }
+        if i == 0 {
+            firstTimestamp = t
+        }
+        if t > maxTimestamp {
+            maxTimestamp = t
+        }
+
+        timestampDelta := t - firstTimestamp
+        offsetDelta := int64(i)
+        lastOffsetDelta = int32(offsetDelta)
+
+        length := 1 + // attributes
+            sizeOfVarInt(timestampDelta) +
+            sizeOfVarInt(offsetDelta) +
+            sizeOfVarNullBytesIface(r.Key) +
+            sizeOfVarNullBytesIface(r.Value) +
+            sizeOfVarInt(int64(len(r.Headers)))
+
+        for _, h := range r.Headers {
+            length += sizeOfVarString(h.Key) + sizeOfVarNullBytes(h.Value)
+        }
+
+        e.writeVarInt(int64(length))
+        e.writeInt8(0) // record attributes (unused)
+        e.writeVarInt(timestampDelta)
+        e.writeVarInt(offsetDelta)
+
+        if err := e.writeVarNullBytesFrom(r.Key); err != nil {
+            return err
+        }
+
+        if err := e.writeVarNullBytesFrom(r.Value); err != nil {
+            return err
+        }
+
+        e.writeVarInt(int64(len(r.Headers)))
+
+        for _, h := range r.Headers {
+            e.writeVarString(h.Key)
+            e.writeVarNullBytes(h.Value)
+        }
+
+        numRecords++
+        return nil
+    })
+
+    if err != nil {
+        return err
+    }
+
+    if compressor != nil {
+        if err := compressor.Close(); err != nil {
+            return err
+        }
+    }
+
+    if numRecords == 0 {
+        return ErrNoRecord
+    }
+
+    b2 := packUint32(uint32(lastOffsetDelta))
+    b3 := packUint64(uint64(firstTimestamp))
+    b4 := packUint64(uint64(maxTimestamp))
+    b5 := packUint32(uint32(numRecords))
+
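+    // Back-patch the placeholders written in the batch header, using the
+    // offsets from the layout comments above: lastOffsetDelta at +23,
+    // firstTimestamp at +27, maxTimestamp at +35, and numRecords at +57.
+    // The batch length (+8) and crc32 (+17) are filled in below once the
+    // total size is known.
+    buffer.WriteAt(b2[:], bufferOffset+23)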
buffer.WriteAt(b3[:], bufferOffset+27) + buffer.WriteAt(b4[:], bufferOffset+35) + buffer.WriteAt(b5[:], bufferOffset+57) + + totalLength := buffer.Size() - bufferOffset + batchLength := totalLength - 12 + + checksum := uint32(0) + crcTable := crc32.MakeTable(crc32.Castagnoli) + + buffer.pages.scan(bufferOffset+21, bufferOffset+totalLength, func(chunk []byte) bool { + checksum = crc32.Update(checksum, crcTable, chunk) + return true + }) + + b0 := packUint32(uint32(batchLength)) + b1 := packUint32(checksum) + + buffer.WriteAt(b0[:], bufferOffset+8) + buffer.WriteAt(b1[:], bufferOffset+17) + return nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/reflect.go b/vendor/github.com/segmentio/kafka-go/protocol/reflect.go new file mode 100644 index 000000000..4d664b26b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/reflect.go @@ -0,0 +1,102 @@ +//go:build !unsafe +// +build !unsafe + +package protocol + +import ( + "reflect" +) + +type index []int + +type _type struct{ typ reflect.Type } + +func typeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x)) +} + +func elemTypeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x).Elem()) +} + +func makeType(t reflect.Type) _type { + return _type{typ: t} +} + +type value struct { + val reflect.Value +} + +func nonAddressableValueOf(x interface{}) value { + return value{val: reflect.ValueOf(x)} +} + +func valueOf(x interface{}) value { + return value{val: reflect.ValueOf(x).Elem()} +} + +func (v value) bool() bool { return v.val.Bool() } + +func (v value) int8() int8 { return int8(v.int64()) } + +func (v value) int16() int16 { return int16(v.int64()) } + +func (v value) int32() int32 { return int32(v.int64()) } + +func (v value) int64() int64 { return v.val.Int() } + +func (v value) float64() float64 { return v.val.Float() } + +func (v value) string() string { return v.val.String() } + +func (v value) bytes() []byte { return v.val.Bytes() } + +func (v value) iface(t reflect.Type) interface{} { return v.val.Addr().Interface() } + +func (v value) array(t reflect.Type) array { return array(v) } + +func (v value) setBool(b bool) { v.val.SetBool(b) } + +func (v value) setInt8(i int8) { v.setInt64(int64(i)) } + +func (v value) setInt16(i int16) { v.setInt64(int64(i)) } + +func (v value) setInt32(i int32) { v.setInt64(int64(i)) } + +func (v value) setInt64(i int64) { v.val.SetInt(i) } + +func (v value) setFloat64(f float64) { v.val.SetFloat(f) } + +func (v value) setString(s string) { v.val.SetString(s) } + +func (v value) setBytes(b []byte) { v.val.SetBytes(b) } + +func (v value) setArray(a array) { + if a.val.IsValid() { + v.val.Set(a.val) + } else { + v.val.Set(reflect.Zero(v.val.Type())) + } +} + +func (v value) fieldByIndex(i index) value { + return value{val: v.val.FieldByIndex(i)} +} + +type array struct { + val reflect.Value +} + +func makeArray(t reflect.Type, n int) array { + return array{val: reflect.MakeSlice(reflect.SliceOf(t), n, n)} +} + +func (a array) index(i int) value { return value{val: a.val.Index(i)} } + +func (a array) length() int { return a.val.Len() } + +func (a array) isNil() bool { return a.val.IsNil() } + +func indexOf(s reflect.StructField) index { return index(s.Index) } + +func bytesToString(b []byte) string { return string(b) } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/reflect_unsafe.go b/vendor/github.com/segmentio/kafka-go/protocol/reflect_unsafe.go new file mode 100644 index 000000000..9eca5060f --- /dev/null +++ 
b/vendor/github.com/segmentio/kafka-go/protocol/reflect_unsafe.go @@ -0,0 +1,143 @@ +//go:build unsafe +// +build unsafe + +package protocol + +import ( + "reflect" + "unsafe" +) + +type iface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +type slice struct { + ptr unsafe.Pointer + len int + cap int +} + +type index uintptr + +type _type struct { + ptr unsafe.Pointer +} + +func typeOf(x interface{}) _type { + return _type{ptr: ((*iface)(unsafe.Pointer(&x))).typ} +} + +func elemTypeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x).Elem()) +} + +func makeType(t reflect.Type) _type { + return _type{ptr: ((*iface)(unsafe.Pointer(&t))).ptr} +} + +type value struct { + ptr unsafe.Pointer +} + +func nonAddressableValueOf(x interface{}) value { + return valueOf(x) +} + +func valueOf(x interface{}) value { + return value{ptr: ((*iface)(unsafe.Pointer(&x))).ptr} +} + +func makeValue(t reflect.Type) value { + return value{ptr: unsafe.Pointer(reflect.New(t).Pointer())} +} + +func (v value) bool() bool { return *(*bool)(v.ptr) } + +func (v value) int8() int8 { return *(*int8)(v.ptr) } + +func (v value) int16() int16 { return *(*int16)(v.ptr) } + +func (v value) int32() int32 { return *(*int32)(v.ptr) } + +func (v value) int64() int64 { return *(*int64)(v.ptr) } + +func (v value) float64() float64 { return *(*float64)(v.ptr) } + +func (v value) string() string { return *(*string)(v.ptr) } + +func (v value) bytes() []byte { return *(*[]byte)(v.ptr) } + +func (v value) iface(t reflect.Type) interface{} { + return *(*interface{})(unsafe.Pointer(&iface{ + typ: ((*iface)(unsafe.Pointer(&t))).ptr, + ptr: v.ptr, + })) +} + +func (v value) array(t reflect.Type) array { + return array{ + size: uintptr(t.Size()), + elem: ((*slice)(v.ptr)).ptr, + len: ((*slice)(v.ptr)).len, + } +} + +func (v value) setBool(b bool) { *(*bool)(v.ptr) = b } + +func (v value) setInt8(i int8) { *(*int8)(v.ptr) = i } + +func (v value) setInt16(i int16) { *(*int16)(v.ptr) = i } + +func (v value) setInt32(i int32) { *(*int32)(v.ptr) = i } + +func (v value) setInt64(i int64) { *(*int64)(v.ptr) = i } + +func (v value) setFloat64(f float64) { *(*float64)(v.ptr) = f } + +func (v value) setString(s string) { *(*string)(v.ptr) = s } + +func (v value) setBytes(b []byte) { *(*[]byte)(v.ptr) = b } + +func (v value) setArray(a array) { *(*slice)(v.ptr) = slice{ptr: a.elem, len: a.len, cap: a.len} } + +func (v value) fieldByIndex(i index) value { + return value{ptr: unsafe.Pointer(uintptr(v.ptr) + uintptr(i))} +} + +type array struct { + elem unsafe.Pointer + size uintptr + len int +} + +var ( + emptyArray struct{} +) + +func makeArray(t reflect.Type, n int) array { + var elem unsafe.Pointer + var size = uintptr(t.Size()) + if n == 0 { + elem = unsafe.Pointer(&emptyArray) + } else { + elem = unsafe_NewArray(((*iface)(unsafe.Pointer(&t))).ptr, n) + } + return array{elem: elem, size: size, len: n} +} + +func (a array) index(i int) value { + return value{ptr: unsafe.Pointer(uintptr(a.elem) + (uintptr(i) * a.size))} +} + +func (a array) length() int { return a.len } + +func (a array) isNil() bool { return a.elem == nil } + +func indexOf(s reflect.StructField) index { return index(s.Offset) } + +func bytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer diff --git a/vendor/github.com/segmentio/kafka-go/protocol/request.go b/vendor/github.com/segmentio/kafka-go/protocol/request.go new file 
mode 100644 index 000000000..135b938bb --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/request.go @@ -0,0 +1,134 @@ +package protocol + +import ( + "fmt" + "io" +) + +func ReadRequest(r io.Reader) (apiVersion int16, correlationID int32, clientID string, msg Message, err error) { + d := &decoder{reader: r, remain: 4} + size := d.readInt32() + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return + } + + d.remain = int(size) + apiKey := ApiKey(d.readInt16()) + apiVersion = d.readInt16() + correlationID = d.readInt32() + clientID = d.readString() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + err = fmt.Errorf("unsupported api key: %d", i) + return + } + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return + } + + t := &apiTypes[apiKey] + if t == nil { + err = fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + return + } + + minVersion := t.minVersion() + maxVersion := t.maxVersion() + + if apiVersion < minVersion || apiVersion > maxVersion { + err = fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion) + return + } + + req := &t.requests[apiVersion-minVersion] + + if req.flexible { + // In the flexible case, there's a tag buffer at the end of the request header + taggedCount := int(d.readUnsignedVarInt()) + for i := 0; i < taggedCount; i++ { + d.readUnsignedVarInt() // tagID + size := d.readUnsignedVarInt() + + // Just throw away the values for now + d.read(int(size)) + } + } + + msg = req.new() + req.decode(d, valueOf(msg)) + d.discardAll() + + if err = d.err; err != nil { + err = dontExpectEOF(err) + } + + return +} + +func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID string, msg Message) error { + apiKey := msg.ApiKey() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + return fmt.Errorf("unsupported api key: %d", i) + } + + t := &apiTypes[apiKey] + if t == nil { + return fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + } + + if typedMessage, ok := msg.(OverrideTypeMessage); ok { + typeKey := typedMessage.TypeKey() + overrideType := overrideApiTypes[apiKey][typeKey] + t = &overrideType + } + + minVersion := t.minVersion() + maxVersion := t.maxVersion() + + if apiVersion < minVersion || apiVersion > maxVersion { + return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion) + } + + r := &t.requests[apiVersion-minVersion] + v := valueOf(msg) + b := newPageBuffer() + defer b.unref() + + e := &encoder{writer: b} + e.writeInt32(0) // placeholder for the request size + e.writeInt16(int16(apiKey)) + e.writeInt16(apiVersion) + e.writeInt32(correlationID) + + if r.flexible { + // Flexible messages use a nullable string for the client ID, then extra space for a + // tag buffer, which begins with a size value. Since we're not writing any fields into the + // latter, we can just write zero for now. + // + // See + // https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details. + e.writeNullString(clientID) + e.writeUnsignedVarInt(0) + } else { + // Technically, recent versions of kafka interpret this field as a nullable + // string, however kafka 0.10 expected a non-nullable string and fails with + // a NullPointerException when it receives a null client id. 
+        e.writeString(clientID)
+    }
+    r.encode(e, v)
+    err := e.err
+
+    if err == nil {
+        size := packUint32(uint32(b.Size()) - 4)
+        b.WriteAt(size[:], 0)
+        _, err = b.WriteTo(w)
+    }
+
+    return err
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/response.go b/vendor/github.com/segmentio/kafka-go/protocol/response.go
new file mode 100644
index 000000000..a43bd0237
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/response.go
@@ -0,0 +1,157 @@
+package protocol
+
+import (
+    "crypto/tls"
+    "encoding/binary"
+    "errors"
+    "fmt"
+    "io"
+)
+
+func ReadResponse(r io.Reader, apiKey ApiKey, apiVersion int16) (correlationID int32, msg Message, err error) {
+    if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+        err = fmt.Errorf("unsupported api key: %d", i)
+        return
+    }
+
+    t := &apiTypes[apiKey]
+    if t == nil {
+        err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+        return
+    }
+
+    minVersion := t.minVersion()
+    maxVersion := t.maxVersion()
+
+    if apiVersion < minVersion || apiVersion > maxVersion {
+        err = fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+        return
+    }
+
+    d := &decoder{reader: r, remain: 4}
+    size := d.readInt32()
+
+    if err = d.err; err != nil {
+        err = dontExpectEOF(err)
+        return
+    }
+
+    d.remain = int(size)
+    correlationID = d.readInt32()
+    if err = d.err; err != nil {
+        if errors.Is(err, io.ErrUnexpectedEOF) {
+            // If a Writer/Reader is configured without TLS and connects
+            // to a broker expecting TLS the only message we return to the
+            // caller is io.ErrUnexpectedEOF, which is opaque. This section
+            // tries to determine if that's what has happened.
+            // We first deconstruct the initial 4 bytes of the message
+            // from the size which was read earlier.
+            // Next, we examine those bytes to see if they look like a TLS
+            // error message. If they do, we wrap the io.ErrUnexpectedEOF
+            // with some context.
+            if looksLikeUnexpectedTLS(size) {
+                err = fmt.Errorf("%w: broker appears to be expecting TLS", io.ErrUnexpectedEOF)
+            }
+            return
+        }
+        err = dontExpectEOF(err)
+        return
+    }
+
+    res := &t.responses[apiVersion-minVersion]
+
+    if res.flexible {
+        // In the flexible case, there's a tag buffer at the end of the response header
+        taggedCount := int(d.readUnsignedVarInt())
+        for i := 0; i < taggedCount; i++ {
+            d.readUnsignedVarInt() // tagID
+            size := d.readUnsignedVarInt()
+
+            // Just throw away the values for now
+            d.read(int(size))
+        }
+    }
+
+    msg = res.new()
+    res.decode(d, valueOf(msg))
+    d.discardAll()
+
+    if err = d.err; err != nil {
+        err = dontExpectEOF(err)
+    }
+
+    return
+}
+
+func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Message) error {
+    apiKey := msg.ApiKey()
+
+    if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+        return fmt.Errorf("unsupported api key: %d", i)
+    }
+
+    t := &apiTypes[apiKey]
+    if t == nil {
+        return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+    }
+
+    if typedMessage, ok := msg.(OverrideTypeMessage); ok {
+        typeKey := typedMessage.TypeKey()
+        overrideType := overrideApiTypes[apiKey][typeKey]
+        t = &overrideType
+    }
+
+    minVersion := t.minVersion()
+    maxVersion := t.maxVersion()
+
+    if apiVersion < minVersion || apiVersion > maxVersion {
+        return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+    }
+
+    r := &t.responses[apiVersion-minVersion]
+    v := valueOf(msg)
+    b := newPageBuffer()
+    defer b.unref()
+
+    e := &encoder{writer: b}
+    e.writeInt32(0) // placeholder for the response size
+    e.writeInt32(correlationID)
+    if r.flexible {
+        // Flexible messages use extra space for a tag buffer,
+        // which begins with a size value. Since we're not writing any fields into the
+        // latter, we can just write zero for now.
+        //
+        // See
+        // https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
+        // for details.
+        e.writeUnsignedVarInt(0)
+    }
+    r.encode(e, v)
+    err := e.err
+
+    if err == nil {
+        size := packUint32(uint32(b.Size()) - 4)
+        b.WriteAt(size[:], 0)
+        _, err = b.WriteTo(w)
+    }
+
+    return err
+}
+
+const (
+    tlsAlertByte byte = 0x15
+)
+
+// looksLikeUnexpectedTLS returns true if the size passed in resembles
+// the TLS alert message that is returned to a client which sends
+// an invalid ClientHello message.
+func looksLikeUnexpectedTLS(size int32) bool {
+    var sizeBytes [4]byte
+    binary.BigEndian.PutUint32(sizeBytes[:], uint32(size))
+
+    if sizeBytes[0] != tlsAlertByte {
+        return false
+    }
+    version := int(sizeBytes[1])<<8 | int(sizeBytes[2])
+    return version <= tls.VersionTLS13 && version >= tls.VersionTLS10
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/roundtrip.go b/vendor/github.com/segmentio/kafka-go/protocol/roundtrip.go
new file mode 100644
index 000000000..c23532ca7
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/roundtrip.go
@@ -0,0 +1,28 @@
+package protocol
+
+import (
+    "io"
+)
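+
+// Example (hypothetical, not part of the vendored file): RoundTrip pairs
+// WriteRequest with ReadResponse over any io.ReadWriter, typically a net.Conn.
+// A sketch, assuming the apiversions sibling package and a placeholder broker
+// address:
+//
+//    conn, err := net.Dial("tcp", "localhost:9092")
+//    if err != nil {
+//        return err
+//    }
+//    defer conn.Close()
+//    res, err := protocol.RoundTrip(conn, 0, 1, "example-client", &apiversions.Request{})
+//    if err != nil {
+//        return err
+//    }
+//    _ = res // concrete response type on success, nil for ack-less requests
+
+// RoundTrip sends a request to a kafka broker and returns the response.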
+func RoundTrip(rw io.ReadWriter, apiVersion int16, correlationID int32, clientID string, req Message) (Message, error) { + if err := WriteRequest(rw, apiVersion, correlationID, clientID, req); err != nil { + return nil, err + } + if !hasResponse(req) { + return nil, nil + } + id, res, err := ReadResponse(rw, req.ApiKey(), apiVersion) + if err != nil { + return nil, err + } + if id != correlationID { + return nil, Errorf("correlation id mismatch (expected=%d, found=%d)", correlationID, id) + } + return res, nil +} + +func hasResponse(msg Message) bool { + x, _ := msg.(interface{ HasResponse() bool }) + return x == nil || x.HasResponse() +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/saslauthenticate/saslauthenticate.go b/vendor/github.com/segmentio/kafka-go/protocol/saslauthenticate/saslauthenticate.go new file mode 100644 index 000000000..fdd7bfbcd --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/saslauthenticate/saslauthenticate.go @@ -0,0 +1,66 @@ +package saslauthenticate + +import ( + "encoding/binary" + "io" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + AuthBytes []byte `kafka:"min=v0,max=v1"` +} + +func (r *Request) RawExchange(rw io.ReadWriter) (protocol.Message, error) { + if err := r.writeTo(rw); err != nil { + return nil, err + } + return r.readResp(rw) +} + +func (*Request) Required(versions map[protocol.ApiKey]int16) bool { + const v0 = 0 + return versions[protocol.SaslHandshake] == v0 +} + +func (r *Request) writeTo(w io.Writer) error { + size := len(r.AuthBytes) + 4 + buf := make([]byte, size) + binary.BigEndian.PutUint32(buf[:4], uint32(len(r.AuthBytes))) + copy(buf[4:], r.AuthBytes) + _, err := w.Write(buf) + return err +} + +func (r *Request) readResp(read io.Reader) (protocol.Message, error) { + var lenBuf [4]byte + if _, err := io.ReadFull(read, lenBuf[:]); err != nil { + return nil, err + } + respLen := int32(binary.BigEndian.Uint32(lenBuf[:])) + data := make([]byte, respLen) + + if _, err := io.ReadFull(read, data[:]); err != nil { + return nil, err + } + return &Response{ + AuthBytes: data, + }, nil +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate } + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` + AuthBytes []byte `kafka:"min=v0,max=v1"` + SessionLifetimeMs int64 `kafka:"min=v1,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate } + +var _ protocol.RawExchanger = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/saslhandshake/saslhandshake.go b/vendor/github.com/segmentio/kafka-go/protocol/saslhandshake/saslhandshake.go new file mode 100644 index 000000000..aa72e8309 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/saslhandshake/saslhandshake.go @@ -0,0 +1,20 @@ +package saslhandshake + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + Mechanism string `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslHandshake } + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v1"` + Mechanisms []string `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslHandshake } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/size.go 
b/vendor/github.com/segmentio/kafka-go/protocol/size.go new file mode 100644 index 000000000..f487dfc5d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/size.go @@ -0,0 +1,33 @@ +package protocol + +import ( + "math/bits" +) + +func sizeOfVarString(s string) int { + return sizeOfVarInt(int64(len(s))) + len(s) +} + +func sizeOfVarNullBytes(b []byte) int { + if b == nil { + return sizeOfVarInt(-1) + } + n := len(b) + return sizeOfVarInt(int64(n)) + n +} + +func sizeOfVarNullBytesIface(b Bytes) int { + if b == nil { + return sizeOfVarInt(-1) + } + n := b.Len() + return sizeOfVarInt(int64(n)) + n +} + +func sizeOfVarInt(i int64) int { + return sizeOfUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) // zig-zag encoding +} + +func sizeOfUnsignedVarInt(i uint64) int { + return (bits.Len64(i|1) + 6) / 7 +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/syncgroup/syncgroup.go b/vendor/github.com/segmentio/kafka-go/protocol/syncgroup/syncgroup.go new file mode 100644 index 000000000..e1ced0615 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/syncgroup/syncgroup.go @@ -0,0 +1,50 @@ +package syncgroup + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v5,tag"` + + GroupID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` + GenerationID int32 `kafka:"min=v0,max=v5|min=v4,max=v5,compact"` + MemberID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v5,nullable,compact"` + ProtocolType string `kafka:"min=v5,max=v5"` + ProtocolName string `kafka:"min=v5,max=v5"` + Assignments []RequestAssignment `kafka:"min=v0,max=v5"` +} + +type RequestAssignment struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v5,tag"` + + MemberID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` + Assignment []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.SyncGroup } + +func (r *Request) Group() string { return r.GroupID } + +var _ protocol.GroupMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v5,tag"` + + ThrottleTimeMS int32 `kafka:"min=v1,max=v5"` + ErrorCode int16 `kafka:"min=v0,max=v5"` + ProtocolType string `kafka:"min=v5,max=v5"` + ProtocolName string `kafka:"min=v5,max=v5"` + Assignments []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.SyncGroup } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/txnoffsetcommit/txnoffsetcommit.go b/vendor/github.com/segmentio/kafka-go/protocol/txnoffsetcommit/txnoffsetcommit.go new file mode 100644 index 000000000..85f3f05e3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/txnoffsetcommit/txnoffsetcommit.go @@ -0,0 +1,77 @@ +package txnoffsetcommit + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + GroupID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + GenerationID int32 `kafka:"min=v3,max=v3"` + MemberID string `kafka:"min=v3,max=v3,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,compact,nullable"` + Topics []RequestTopic `kafka:"min=v0,max=v3"` +} + +type RequestTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Partitions []RequestPartition `kafka:"min=v0,max=v3"` +} + +type RequestPartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Partition int32 `kafka:"min=v0,max=v3"` + CommittedOffset int64 `kafka:"min=v0,max=v3"` + CommittedLeaderEpoch int32 `kafka:"min=v2,max=v3"` + CommittedMetadata string `kafka:"min=v0,max=v2|min=v3,max=v3,nullable,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.TxnOffsetCommit } + +func (r *Request) Group() string { return r.GroupID } + +var _ protocol.GroupMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Topics []ResponseTopic `kafka:"min=v0,max=v3"` +} + +type ResponseTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Partitions []ResponsePartition `kafka:"min=v0,max=v3"` +} + +type ResponsePartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Partition int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.TxnOffsetCommit } diff --git a/vendor/github.com/segmentio/kafka-go/rawproduce.go b/vendor/github.com/segmentio/kafka-go/rawproduce.go new file mode 100644 index 000000000..5928cb2f8 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/rawproduce.go @@ -0,0 +1,103 @@ +package kafka + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/segmentio/kafka-go/protocol" + produceAPI "github.com/segmentio/kafka-go/protocol/produce" + "github.com/segmentio/kafka-go/protocol/rawproduce" +) + +// RawProduceRequest represents a request sent to a kafka broker to produce records +// to a topic partition. The request contains a pre-encoded/raw record set. +type RawProduceRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The topic to produce the records to. + Topic string + + // The partition to produce the records to. + Partition int + + // The level of required acknowledgements to ask the kafka broker for. + RequiredAcks RequiredAcks + + // The message format version used when encoding the records. + // + // By default, the client automatically determine which version should be + // used based on the version of the Produce API supported by the server. 
+ MessageVersion int + + // An optional transaction id when producing to the kafka broker is part of + // a transaction. + TransactionalID string + + // The sequence of records to produce to the topic partition. + RawRecords protocol.RawRecordSet +} + +// RawProduce sends a raw produce request to a kafka broker and returns the response. +// +// If the request contained no records, an error wrapping protocol.ErrNoRecord +// is returned. +// +// When the request is configured with RequiredAcks=none, both the response and +// the error will be nil on success. +func (c *Client) RawProduce(ctx context.Context, req *RawProduceRequest) (*ProduceResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &rawproduce.Request{ + TransactionalID: req.TransactionalID, + Acks: int16(req.RequiredAcks), + Timeout: c.timeoutMs(ctx, defaultProduceTimeout), + Topics: []rawproduce.RequestTopic{{ + Topic: req.Topic, + Partitions: []rawproduce.RequestPartition{{ + Partition: int32(req.Partition), + RecordSet: req.RawRecords, + }}, + }}, + }) + + switch { + case err == nil: + case errors.Is(err, protocol.ErrNoRecord): + return new(ProduceResponse), nil + default: + return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", err) + } + + if req.RequiredAcks == RequireNone { + return nil, nil + } + + res := m.(*produceAPI.Response) + if len(res.Topics) == 0 { + return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", protocol.ErrNoTopic) + } + topic := &res.Topics[0] + if len(topic.Partitions) == 0 { + return nil, fmt.Errorf("kafka.(*Client).RawProduce: %w", protocol.ErrNoPartition) + } + partition := &topic.Partitions[0] + + ret := &ProduceResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(partition.ErrorCode, partition.ErrorMessage), + BaseOffset: partition.BaseOffset, + LogAppendTime: makeTime(partition.LogAppendTime), + LogStartOffset: partition.LogStartOffset, + } + + if len(partition.RecordErrors) != 0 { + ret.RecordErrors = make(map[int]error, len(partition.RecordErrors)) + + for _, recErr := range partition.RecordErrors { + ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage) + } + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/read.go b/vendor/github.com/segmentio/kafka-go/read.go new file mode 100644 index 000000000..ec2b38527 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/read.go @@ -0,0 +1,562 @@ +package kafka + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" +) + +var errShortRead = errors.New("not enough bytes available to load the response") + +func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) { + if n > sz { + return sz, errShortRead + } + b, err := r.Peek(n) + if err != nil { + return sz, err + } + f(b) + return discardN(r, sz, n) +} + +func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) }) +} + +func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) { + return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) }) +} + +func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) { + return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) }) +} + +func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) { + return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) }) +} + +func readVarInt(r *bufio.Reader, sz int, v *int64) (remain int, err error) { + // Optimistically assume that most of the time, there will be data buffered + // in the reader. 
If this is not the case, the buffer will be refilled after + // consuming zero bytes from the input. + input, _ := r.Peek(r.Buffered()) + x := uint64(0) + s := uint(0) + + for { + if len(input) > sz { + input = input[:sz] + } + + for i, b := range input { + if b < 0x80 { + x |= uint64(b) << s + *v = int64(x>>1) ^ -(int64(x) & 1) + n, err := r.Discard(i + 1) + return sz - n, err + } + + x |= uint64(b&0x7f) << s + s += 7 + } + + // Make room in the input buffer to load more data from the underlying + // stream. The x and s variables are left untouched, ensuring that the + // varint decoding can continue on the next loop iteration. + n, _ := r.Discard(len(input)) + sz -= n + if sz == 0 { + return 0, errShortRead + } + + // Fill the buffer: ask for one more byte, but in practice the reader + // will load way more from the underlying stream. + if _, err := r.Peek(1); err != nil { + if errors.Is(err, io.EOF) { + err = errShortRead + } + return sz, err + } + + // Grab as many bytes as possible from the buffer, then go on to the + // next loop iteration which is going to consume it. + input, _ = r.Peek(r.Buffered()) + } +} + +func readBool(r *bufio.Reader, sz int, v *bool) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = b[0] != 0 }) +} + +func readString(r *bufio.Reader, sz int, v *string) (int, error) { + return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { + *v, remain, err = readNewString(r, sz, n) + return + }) +} + +func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { + var err error + var len int16 + + if sz, err = readInt16(r, sz, &len); err != nil { + return sz, err + } + + n := int(len) + if n > sz { + return sz, errShortRead + } + + return cb(r, sz, n) +} + +func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) { + b, sz, err := readNewBytes(r, sz, n) + return string(b), sz, err +} + +func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) { + return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { + *v, remain, err = readNewBytes(r, sz, n) + return + }) +} + +func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { + var err error + var n int + + if sz, err = readArrayLen(r, sz, &n); err != nil { + return sz, err + } + + if n > sz { + return sz, errShortRead + } + + return cb(r, sz, n) +} + +func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) { + var err error + var b []byte + var shortRead bool + + if n > 0 { + if sz < n { + n = sz + shortRead = true + } + + b = make([]byte, n) + n, err = io.ReadFull(r, b) + b = b[:n] + sz -= n + + if err == nil && shortRead { + err = errShortRead + } + } + + return b, sz, err +} + +func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) { + var err error + var len int32 + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + *n = int(len) + return sz, nil +} + +func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) { + var err error + var len int32 + + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + + for n := int(len); n > 0; n-- { + if sz, err = cb(r, sz); err != nil { + break + } + } + + return sz, err +} + +func readStringArray(r *bufio.Reader, sz int, v *[]string) (remain int, err error) { + var content []string + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value string + if fnRemain, 
fnErr = readString(r, size, &value); fnErr != nil { + return + } + content = append(content, value) + return + } + if remain, err = readArrayWith(r, sz, fn); err != nil { + return + } + + *v = content + return +} + +func readMapStringInt32(r *bufio.Reader, sz int, v *map[string][]int32) (remain int, err error) { + var len int32 + if remain, err = readInt32(r, sz, &len); err != nil { + return + } + + content := make(map[string][]int32, len) + for i := 0; i < int(len); i++ { + var key string + var values []int32 + + if remain, err = readString(r, remain, &key); err != nil { + return + } + + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value int32 + if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil { + return + } + values = append(values, value) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + content[key] = values + } + *v = content + + return +} + +func read(r *bufio.Reader, sz int, a interface{}) (int, error) { + switch v := a.(type) { + case *int8: + return readInt8(r, sz, v) + case *int16: + return readInt16(r, sz, v) + case *int32: + return readInt32(r, sz, v) + case *int64: + return readInt64(r, sz, v) + case *bool: + return readBool(r, sz, v) + case *string: + return readString(r, sz, v) + case *[]byte: + return readBytes(r, sz, v) + } + switch v := reflect.ValueOf(a).Elem(); v.Kind() { + case reflect.Struct: + return readStruct(r, sz, v) + case reflect.Slice: + return readSlice(r, sz, v) + default: + panic(fmt.Sprintf("unsupported type: %T", a)) + } +} + +func readStruct(r *bufio.Reader, sz int, v reflect.Value) (int, error) { + var err error + for i, n := 0, v.NumField(); i != n; i++ { + if sz, err = read(r, sz, v.Field(i).Addr().Interface()); err != nil { + return sz, err + } + } + return sz, nil +} + +func readSlice(r *bufio.Reader, sz int, v reflect.Value) (int, error) { + var err error + var len int32 + + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + + if n := int(len); n < 0 { + v.Set(reflect.Zero(v.Type())) + } else { + v.Set(reflect.MakeSlice(v.Type(), n, n)) + + for i := 0; i != n; i++ { + if sz, err = read(r, sz, v.Index(i).Addr().Interface()); err != nil { + return sz, err + } + } + } + + return sz, nil +} + +func readFetchResponseHeaderV2(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + MessageSetSize int32 + } + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requests messages for a single + // topic, unless there's a bug in the kafka server we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. 
+ if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if remain != int(p.MessageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", p.MessageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return +} + +func readFetchResponseHeaderV5(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + type AbortedTransaction struct { + ProducerId int64 + FirstOffset int64 + } + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + } + var messageSetSize int32 + var abortedTransactions []AbortedTransaction + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requests messages for a single + // topic, unless there's a bug in the kafka server we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + var abortedTransactionLen int + if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil { + return + } + + if abortedTransactionLen == -1 { + abortedTransactions = nil + } else { + abortedTransactions = make([]AbortedTransaction, abortedTransactionLen) + for i := 0; i < abortedTransactionLen; i++ { + if remain, err = read(r, remain, &abortedTransactions[i]); err != nil { + return + } + } + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + remain, err = readInt32(r, remain, &messageSetSize) + if err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. 
+ if remain != int(messageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return + +} + +func readFetchResponseHeaderV10(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + var errorCode int16 + type AbortedTransaction struct { + ProducerId int64 + FirstOffset int64 + } + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + } + var messageSetSize int32 + var abortedTransactions []AbortedTransaction + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt16(r, remain, &errorCode); err != nil { + return + } + if errorCode != 0 { + err = Error(errorCode) + return + } + + if remain, err = discardInt32(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requests messages for a single + // topic, unless there's a bug in the kafka server we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + var abortedTransactionLen int + if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil { + return + } + + if abortedTransactionLen == -1 { + abortedTransactions = nil + } else { + abortedTransactions = make([]AbortedTransaction, abortedTransactionLen) + for i := 0; i < abortedTransactionLen; i++ { + if remain, err = read(r, remain, &abortedTransactions[i]); err != nil { + return + } + } + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + remain, err = readInt32(r, remain, &messageSetSize) + if err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if remain != int(messageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return + +} diff --git a/vendor/github.com/segmentio/kafka-go/reader.go b/vendor/github.com/segmentio/kafka-go/reader.go new file mode 100644 index 000000000..cfc7cb8f5 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/reader.go @@ -0,0 +1,1619 @@ +package kafka + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" +) + +const ( + LastOffset int64 = -1 // The most recent offset available for a partition. + FirstOffset int64 = -2 // The least recent offset available for a partition. 
+)
+
+const (
+    // defaultCommitRetries holds the number of commit attempts to make
+    // before giving up.
+    defaultCommitRetries = 3
+)
+
+const (
+    // defaultFetchMinBytes of 1 byte means that fetch requests are answered as
+    // soon as a single byte of data is available or the fetch request times out
+    // waiting for data to arrive.
+    defaultFetchMinBytes = 1
+)
+
+var (
+    errOnlyAvailableWithGroup = errors.New("unavailable when GroupID is not set")
+    errNotAvailableWithGroup  = errors.New("unavailable when GroupID is set")
+)
+
+const (
+    // defaultReadBackoffMax/Min set the boundaries for how long the reader waits before
+    // polling for new messages.
+    defaultReadBackoffMin = 100 * time.Millisecond
+    defaultReadBackoffMax = 1 * time.Second
+)
+
+// Reader provides a high-level API for consuming messages from kafka.
+//
+// A Reader automatically manages reconnections to a kafka server, and
+// blocking methods have context support for asynchronous cancellations.
+//
+// Note that it is important to call `Close()` on a `Reader` when a process exits.
+// The kafka server needs a graceful disconnect to stop it from continuing to
+// attempt to send messages to the connected clients. The given example will not
+// call `Close()` if the process is terminated with SIGINT (ctrl-c at the shell) or
+// SIGTERM (as docker stop or a kubernetes restart does). This can result in a
+// delay when a new reader on the same topic connects (e.g. new process started
+// or new container running). Use a `signal.Notify` handler to close the reader on
+// process shutdown.
+type Reader struct {
+    // immutable fields of the reader
+    config ReaderConfig
+
+    // communication channels between the parent reader and its subreaders
+    msgs chan readerMessage
+
+    // mutable fields of the reader (synchronized on the mutex)
+    mutex   sync.Mutex
+    join    sync.WaitGroup
+    cancel  context.CancelFunc
+    stop    context.CancelFunc
+    done    chan struct{}
+    commits chan commitRequest
+    version int64 // version holds the generation of the spawned readers
+    offset  int64
+    lag     int64
+    closed  bool
+
+    // Without a group subscription (when Reader.config.GroupID == ""),
+    // when errors occur, the Reader gets a synthetic readerMessage with
+    // a non-nil err set. With group subscriptions however, when an error
+    // occurs in Reader.run, there's no reader running (sic, cf. reader vs.
+    // Reader) and there's no way to let the high-level methods like
+    // FetchMessage know that an error indeed occurred. If an error in run
+    // occurs, it will be non-block-sent to this unbuffered channel, where
+    // the high-level methods can select{} on it and notify the caller.
+    runError chan error
+
+    once  uint32
+    stctx context.Context
+    // reader stats are all made of atomic values, no need for synchronization.
+    // Use a pointer to ensure 64-bit alignment of the values.
+    stats *readerStats
+}
+
+// useConsumerGroup indicates whether the Reader is part of a consumer group.
+func (r *Reader) useConsumerGroup() bool { return r.config.GroupID != "" }
+
+func (r *Reader) getTopics() []string {
+    if len(r.config.GroupTopics) > 0 {
+        return r.config.GroupTopics[:]
+    }
+
+    return []string{r.config.Topic}
+}
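+
+// Example (hypothetical, not part of the vendored file): a minimal
+// consumer-group reader; broker, group, and topic names are placeholders.
+// Leaving CommitInterval at zero makes commits synchronous (see
+// useSyncCommits below); a non-zero interval switches to periodic async
+// commits.
+//
+//    r := kafka.NewReader(kafka.ReaderConfig{
+//        Brokers: []string{"localhost:9092"},
+//        GroupID: "example-group",
+//        Topic:   "example-topic",
+//    })
+//    defer r.Close()
+//    for {
+//        m, err := r.ReadMessage(context.Background())
+//        if err != nil {
+//            break
+//        }
+//        fmt.Printf("offset %d: %s\n", m.Offset, m.Value)
+//    }
+
+// useSyncCommits indicates whether the Reader is configured to perform sync or
+// async commits.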
+func (r *Reader) useSyncCommits() bool { return r.config.CommitInterval == 0 } + +func (r *Reader) unsubscribe() { + r.cancel() + r.join.Wait() + // it would be interesting to drain the r.msgs channel at this point since + // it will contain buffered messages for partitions that may not be + // re-assigned to this reader in the next consumer group generation. + // however, draining the channel could race with the client calling + // ReadMessage, which could result in messages delivered and/or committed + // with gaps in the offset. for now, we will err on the side of caution and + // potentially have those messages be reprocessed in the next generation by + // another consumer to avoid such a race. +} + +func (r *Reader) subscribe(allAssignments map[string][]PartitionAssignment) { + offsets := make(map[topicPartition]int64) + for topic, assignments := range allAssignments { + for _, assignment := range assignments { + key := topicPartition{ + topic: topic, + partition: int32(assignment.ID), + } + offsets[key] = assignment.Offset + } + } + + r.mutex.Lock() + r.start(offsets) + r.mutex.Unlock() + + r.withLogger(func(l Logger) { + l.Printf("subscribed to topics and partitions: %+v", offsets) + }) +} + +// commitOffsetsWithRetry attempts to commit the specified offsets and retries +// up to the specified number of times. +func (r *Reader) commitOffsetsWithRetry(gen *Generation, offsetStash offsetStash, retries int) (err error) { + const ( + backoffDelayMin = 100 * time.Millisecond + backoffDelayMax = 5 * time.Second + ) + + for attempt := 0; attempt < retries; attempt++ { + if attempt != 0 { + if !sleep(r.stctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { + return + } + } + + if err = gen.CommitOffsets(offsetStash); err == nil { + return + } + } + + return // err will not be nil +} + +// offsetStash holds offsets by topic => partition => offset. +type offsetStash map[string]map[int]int64 + +// merge updates the offsetStash with the offsets from the provided messages. +func (o offsetStash) merge(commits []commit) { + for _, c := range commits { + offsetsByPartition, ok := o[c.topic] + if !ok { + offsetsByPartition = map[int]int64{} + o[c.topic] = offsetsByPartition + } + + if offset, ok := offsetsByPartition[c.partition]; !ok || c.offset > offset { + offsetsByPartition[c.partition] = c.offset + } + } +} + +// reset clears the contents of the offsetStash. +func (o offsetStash) reset() { + for key := range o { + delete(o, key) + } +} + +// commitLoopImmediate handles each commit synchronously. +func (r *Reader) commitLoopImmediate(ctx context.Context, gen *Generation) { + offsets := offsetStash{} + + for { + select { + case <-ctx.Done(): + // drain the commit channel and prepare a single, final commit. + // the commit will combine any outstanding requests and the result + // will be sent back to all the callers of CommitMessages so that + // they can return. + var errchs []chan<- error + for hasCommits := true; hasCommits; { + select { + case req := <-r.commits: + offsets.merge(req.commits) + errchs = append(errchs, req.errch) + default: + hasCommits = false + } + } + err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries) + for _, errch := range errchs { + // NOTE : this will be a buffered channel and will not block. 
+ errch <- err
+ }
+ return
+
+ case req := <-r.commits:
+ offsets.merge(req.commits)
+ req.errch <- r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries)
+ offsets.reset()
+ }
+ }
+}
+
+// commitLoopInterval handles each commit asynchronously with a period defined
+// by ReaderConfig.CommitInterval.
+func (r *Reader) commitLoopInterval(ctx context.Context, gen *Generation) {
+ ticker := time.NewTicker(r.config.CommitInterval)
+ defer ticker.Stop()
+
+ // the offset stash should not survive rebalances b/c the consumer may
+ // receive new assignments.
+ offsets := offsetStash{}
+
+ commit := func() {
+ if err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries); err != nil {
+ r.withErrorLogger(func(l Logger) { l.Printf("%v", err) })
+ } else {
+ offsets.reset()
+ }
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ // drain the commit channel in order to prepare the final commit.
+ for hasCommits := true; hasCommits; {
+ select {
+ case req := <-r.commits:
+ offsets.merge(req.commits)
+ default:
+ hasCommits = false
+ }
+ }
+ commit()
+ return
+
+ case <-ticker.C:
+ commit()
+
+ case req := <-r.commits:
+ offsets.merge(req.commits)
+ }
+ }
+}
+
+// commitLoop processes commits off the commit chan.
+func (r *Reader) commitLoop(ctx context.Context, gen *Generation) {
+ r.withLogger(func(l Logger) {
+ l.Printf("started commit for group %s\n", r.config.GroupID)
+ })
+ defer r.withLogger(func(l Logger) {
+ l.Printf("stopped commit for group %s\n", r.config.GroupID)
+ })
+
+ if r.useSyncCommits() {
+ r.commitLoopImmediate(ctx, gen)
+ } else {
+ r.commitLoopInterval(ctx, gen)
+ }
+}
+
+// run provides the main consumer group management loop. Each iteration performs the
+// handshake to join the Reader to the consumer group.
+//
+// This function is responsible for closing the consumer group upon exit.
+func (r *Reader) run(cg *ConsumerGroup) {
+ defer close(r.done)
+ defer cg.Close()
+
+ r.withLogger(func(l Logger) {
+ l.Printf("entering loop for consumer group, %v\n", r.config.GroupID)
+ })
+
+ for {
+ // Limit the number of attempts at waiting for the next
+ // consumer generation.
+ var err error
+ var gen *Generation
+ for attempt := 1; attempt <= r.config.MaxAttempts; attempt++ {
+ gen, err = cg.Next(r.stctx)
+ if err == nil {
+ break
+ }
+ if errors.Is(err, r.stctx.Err()) {
+ return
+ }
+ r.stats.errors.observe(1)
+ r.withErrorLogger(func(l Logger) {
+ l.Printf("%v", err)
+ })
+ // Continue with next attempt...
+ }
+ if err != nil {
+ // All attempts have failed.
+ select {
+ case r.runError <- err:
+ // If somebody's receiving on the runError, let
+ // them know the error occurred.
+ default:
+ // Otherwise, don't block to allow healing.
+ }
+ continue
+ }
+
+ r.stats.rebalances.observe(1)
+
+ r.subscribe(gen.Assignments)
+
+ gen.Start(func(ctx context.Context) {
+ r.commitLoop(ctx, gen)
+ })
+ gen.Start(func(ctx context.Context) {
+ // wait for the generation to end and then unsubscribe.
+ select {
+ case <-ctx.Done():
+ // continue to next generation
+ case <-r.stctx.Done():
+ // this will be the last loop because the reader is closed.
+ }
+ r.unsubscribe()
+ })
+ }
+}
+
+// ReaderConfig is a configuration object used to create new instances of
+// Reader.
+type ReaderConfig struct {
+ // The list of broker addresses used to connect to the kafka cluster.
+ Brokers []string
+
+ // GroupID holds the optional consumer group id. If GroupID is specified, then
+ // Partition should NOT be specified, and should be left at its zero value.
+ GroupID string
+
+ // GroupTopics allows specifying multiple topics, but can only be used in
+ // combination with GroupID, as it is a consumer-group feature. As such, if
+ // GroupID is set, then either Topic or GroupTopics must be defined.
+ GroupTopics []string
+
+ // The topic to read messages from.
+ Topic string
+
+ // Partition to read messages from. Either Partition or GroupID may
+ // be assigned, but not both.
+ Partition int
+
+ // A dialer used to open connections to the kafka server. This field is
+ // optional; if nil, the default dialer is used instead.
+ Dialer *Dialer
+
+ // The capacity of the internal message queue, defaults to 100 if none is
+ // set.
+ QueueCapacity int
+
+ // MinBytes indicates to the broker the minimum batch size that the consumer
+ // will accept. Setting a high minimum when consuming from a low-volume topic
+ // may result in delayed delivery when the broker does not have enough data to
+ // satisfy the defined minimum.
+ //
+ // Default: 1
+ MinBytes int
+
+ // MaxBytes indicates to the broker the maximum batch size that the consumer
+ // will accept. The broker will truncate a message to satisfy this maximum, so
+ // choose a value that is high enough for your largest message size.
+ //
+ // Default: 1MB
+ MaxBytes int
+
+ // Maximum amount of time to wait for new data to come when fetching batches
+ // of messages from kafka.
+ //
+ // Default: 10s
+ MaxWait time.Duration
+
+ // ReadBatchTimeout is the amount of time to wait for a message to be read
+ // from a fetched batch of messages.
+ //
+ // Default: 10s
+ ReadBatchTimeout time.Duration
+
+ // ReadLagInterval sets the frequency at which the reader lag is updated.
+ // Setting this field to a negative value disables lag reporting.
+ ReadLagInterval time.Duration
+
+ // GroupBalancers is the priority-ordered list of client-side consumer group
+ // balancing strategies that will be offered to the coordinator. The first
+ // strategy that all group members support will be chosen by the leader.
+ //
+ // Default: [Range, RoundRobin]
+ //
+ // Only used when GroupID is set
+ GroupBalancers []GroupBalancer
+
+ // HeartbeatInterval sets the optional frequency at which the reader sends the consumer
+ // group heartbeat update.
+ //
+ // Default: 3s
+ //
+ // Only used when GroupID is set
+ HeartbeatInterval time.Duration
+
+ // CommitInterval indicates the interval at which offsets are committed to
+ // the broker. If 0, commits will be handled synchronously.
+ //
+ // Default: 0
+ //
+ // Only used when GroupID is set
+ CommitInterval time.Duration
+
+ // PartitionWatchInterval indicates how often a reader checks for partition changes.
+ // If a reader sees a partition change (such as a partition add), it will rebalance
+ // the group, picking up new partitions.
+ //
+ // Default: 5s
+ //
+ // Only used when GroupID is set and WatchPartitionChanges is set.
+ PartitionWatchInterval time.Duration
+
+ // WatchPartitionChanges is used to inform kafka-go that a consumer group should be
+ // polling the brokers and rebalancing if any partition changes happen to the topic.
+ WatchPartitionChanges bool
+
+ // SessionTimeout optionally sets the length of time that may pass without a heartbeat
+ // before the coordinator considers the consumer dead and initiates a rebalance.
+ //
+ // Default: 30s
+ //
+ // Only used when GroupID is set
+ SessionTimeout time.Duration
+
+ // RebalanceTimeout optionally sets the length of time the coordinator will wait
+ // for members to join as part of a rebalance.
+ // For kafka servers under higher load, it may be useful to set this value higher.
+ //
+ // Default: 30s
+ //
+ // Only used when GroupID is set
+ RebalanceTimeout time.Duration
+
+ // JoinGroupBackoff optionally sets the length of time to wait between re-joining
+ // the consumer group after an error.
+ //
+ // Default: 5s
+ JoinGroupBackoff time.Duration
+
+ // RetentionTime optionally sets the length of time the consumer group will be
+ // retained by the broker.
+ //
+ // Default: 24h
+ //
+ // Only used when GroupID is set
+ RetentionTime time.Duration
+
+ // StartOffset determines from where the consumer group should begin
+ // consuming when it finds a partition without a committed offset. If
+ // non-zero, it must be set to one of FirstOffset or LastOffset.
+ //
+ // Default: FirstOffset
+ //
+ // Only used when GroupID is set
+ StartOffset int64
+
+ // ReadBackoffMin optionally sets the smallest amount of time the reader will wait before
+ // polling for new messages.
+ //
+ // Default: 100ms
+ ReadBackoffMin time.Duration
+
+ // ReadBackoffMax optionally sets the maximum amount of time the reader will wait before
+ // polling for new messages.
+ //
+ // Default: 1s
+ ReadBackoffMax time.Duration
+
+ // If not nil, specifies a logger used to report internal changes within the
+ // reader.
+ Logger Logger
+
+ // ErrorLogger is the logger used to report errors. If nil, the reader falls
+ // back to using Logger instead.
+ ErrorLogger Logger
+
+ // IsolationLevel controls the visibility of transactional records.
+ // ReadUncommitted makes all records visible. With ReadCommitted only
+ // non-transactional and committed records are visible.
+ IsolationLevel IsolationLevel
+
+ // Limit of how many attempts to connect will be made before returning the error.
+ //
+ // The default is to try 3 times.
+ MaxAttempts int
+
+ // OffsetOutOfRangeError indicates that the reader should return an error in
+ // the event of an OffsetOutOfRange error, rather than retrying indefinitely.
+ // This flag is being added to retain backwards-compatibility, so it will be
+ // removed in a future version of kafka-go.
+ OffsetOutOfRangeError bool
+}
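+
+// Illustrative sketch (not from upstream): a minimal consumer-group
+// configuration using the fields documented above. Broker addresses, group id
+// and topic are placeholder assumptions for the example only.
+//
+//	r := kafka.NewReader(kafka.ReaderConfig{
+//		Brokers:        []string{"broker-1:9092", "broker-2:9092"},
+//		GroupID:        "my-consumer-group", // enables consumer-group mode; Partition must stay 0
+//		Topic:          "my-topic",
+//		MinBytes:       1,
+//		MaxBytes:       1e6,
+//		CommitInterval: time.Second, // asynchronous commits; 0 would mean synchronous
+//	})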
+
+// Validate method validates ReaderConfig properties.
+func (config *ReaderConfig) Validate() error {
+ if len(config.Brokers) == 0 {
+ return errors.New("cannot create a new kafka reader with an empty list of broker addresses")
+ }
+
+ if config.Partition < 0 || config.Partition >= math.MaxInt32 {
+ return fmt.Errorf("partition number out of bounds: %d", config.Partition)
+ }
+
+ if config.MinBytes < 0 {
+ return fmt.Errorf("invalid negative minimum batch size (min = %d)", config.MinBytes)
+ }
+
+ if config.MaxBytes < 0 {
+ return fmt.Errorf("invalid negative maximum batch size (max = %d)", config.MaxBytes)
+ }
+
+ if config.GroupID != "" {
+ if config.Partition != 0 {
+ return errors.New("either Partition or GroupID may be specified, but not both")
+ }
+
+ if len(config.Topic) == 0 && len(config.GroupTopics) == 0 {
+ return errors.New("either Topic or GroupTopics must be specified with GroupID")
+ }
+ } else if len(config.Topic) == 0 {
+ return errors.New("cannot create a new kafka reader with an empty topic")
+ }
+
+ if config.MinBytes > config.MaxBytes {
+ return fmt.Errorf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes)
+ }
+
+ if config.ReadBackoffMax < 0 {
+ return fmt.Errorf("ReadBackoffMax out of bounds: %d", config.ReadBackoffMax)
+ }
+
+ if config.ReadBackoffMin < 0 {
+ return fmt.Errorf("ReadBackoffMin out of bounds: %d", config.ReadBackoffMin)
+ }
+
+ return nil
+}
+
+// ReaderStats is a data structure returned by a call to Reader.Stats that exposes
+// details about the behavior of the reader.
+type ReaderStats struct {
+ Dials int64 `metric:"kafka.reader.dial.count" type:"counter"`
+ Fetches int64 `metric:"kafka.reader.fetch.count" type:"counter"`
+ Messages int64 `metric:"kafka.reader.message.count" type:"counter"`
+ Bytes int64 `metric:"kafka.reader.message.bytes" type:"counter"`
+ Rebalances int64 `metric:"kafka.reader.rebalance.count" type:"counter"`
+ Timeouts int64 `metric:"kafka.reader.timeout.count" type:"counter"`
+ Errors int64 `metric:"kafka.reader.error.count" type:"counter"`
+
+ DialTime DurationStats `metric:"kafka.reader.dial.seconds"`
+ ReadTime DurationStats `metric:"kafka.reader.read.seconds"`
+ WaitTime DurationStats `metric:"kafka.reader.wait.seconds"`
+ FetchSize SummaryStats `metric:"kafka.reader.fetch.size"`
+ FetchBytes SummaryStats `metric:"kafka.reader.fetch.bytes"`
+
+ Offset int64 `metric:"kafka.reader.offset" type:"gauge"`
+ Lag int64 `metric:"kafka.reader.lag" type:"gauge"`
+ MinBytes int64 `metric:"kafka.reader.fetch_bytes.min" type:"gauge"`
+ MaxBytes int64 `metric:"kafka.reader.fetch_bytes.max" type:"gauge"`
+ MaxWait time.Duration `metric:"kafka.reader.fetch_wait.max" type:"gauge"`
+ QueueLength int64 `metric:"kafka.reader.queue.length" type:"gauge"`
+ QueueCapacity int64 `metric:"kafka.reader.queue.capacity" type:"gauge"`
+
+ ClientID string `tag:"client_id"`
+ Topic string `tag:"topic"`
+ Partition string `tag:"partition"`
+
+ // The original `Fetches` field had a typo where the metric name was called
+ // "kafak..." instead of "kafka...". In order to offer time to fix monitors
+ // that may be relying on this mistake, we are temporarily introducing this
+ // field.
+ DeprecatedFetchesWithTypo int64 `metric:"kafak.reader.fetch.count" type:"counter"`
+}
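+
+// Illustrative sketch (not from upstream): periodically exporting ReaderStats,
+// as the Reader.Stats documentation suggests. Given a *Reader r, the reporting
+// helper is a placeholder assumption.
+//
+//	go func() {
+//		for range time.Tick(10 * time.Second) {
+//			s := r.Stats()
+//			reportGauge("kafka.reader.lag", s.Lag) // hypothetical metrics helper
+//		}
+//	}()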
+
+// readerStats is a struct that contains statistics on a reader.
+type readerStats struct {
+ dials counter
+ fetches counter
+ messages counter
+ bytes counter
+ rebalances counter
+ timeouts counter
+ errors counter
+ dialTime summary
+ readTime summary
+ waitTime summary
+ fetchSize summary
+ fetchBytes summary
+ offset gauge
+ lag gauge
+ partition string
+}
+
+// NewReader creates and returns a new Reader configured with config.
+// The offset is initialized to FirstOffset.
+func NewReader(config ReaderConfig) *Reader {
+ if err := config.Validate(); err != nil {
+ panic(err)
+ }
+
+ if config.GroupID != "" {
+ if len(config.GroupBalancers) == 0 {
+ config.GroupBalancers = []GroupBalancer{
+ RangeGroupBalancer{},
+ RoundRobinGroupBalancer{},
+ }
+ }
+ }
+
+ if config.Dialer == nil {
+ config.Dialer = DefaultDialer
+ }
+
+ if config.MaxBytes == 0 {
+ config.MaxBytes = 1e6 // 1 MB
+ }
+
+ if config.MinBytes == 0 {
+ config.MinBytes = defaultFetchMinBytes
+ }
+
+ if config.MaxWait == 0 {
+ config.MaxWait = 10 * time.Second
+ }
+
+ if config.ReadBatchTimeout == 0 {
+ config.ReadBatchTimeout = 10 * time.Second
+ }
+
+ if config.ReadLagInterval == 0 {
+ config.ReadLagInterval = 1 * time.Minute
+ }
+
+ if config.ReadBackoffMin == 0 {
+ config.ReadBackoffMin = defaultReadBackoffMin
+ }
+
+ if config.ReadBackoffMax == 0 {
+ config.ReadBackoffMax = defaultReadBackoffMax
+ }
+
+ if config.ReadBackoffMax < config.ReadBackoffMin {
+ panic(fmt.Errorf("ReadBackoffMax %d smaller than ReadBackoffMin %d", config.ReadBackoffMax, config.ReadBackoffMin))
+ }
+
+ if config.QueueCapacity == 0 {
+ config.QueueCapacity = 100
+ }
+
+ if config.MaxAttempts == 0 {
+ config.MaxAttempts = 3
+ }
+
+ // when configured as a consumer group, stats should report a partition of -1
+ readerStatsPartition := config.Partition
+ if config.GroupID != "" {
+ readerStatsPartition = -1
+ }
+
+ // when configured as a consumer group, start version as 1 to ensure that only
+ // the rebalance function will start readers
+ version := int64(0)
+ if config.GroupID != "" {
+ version = 1
+ }
+
+ stctx, stop := context.WithCancel(context.Background())
+ r := &Reader{
+ config: config,
+ msgs: make(chan readerMessage, config.QueueCapacity),
+ cancel: func() {},
+ commits: make(chan commitRequest, config.QueueCapacity),
+ stop: stop,
+ offset: FirstOffset,
+ stctx: stctx,
+ stats: &readerStats{
+ dialTime: makeSummary(),
+ readTime: makeSummary(),
+ waitTime: makeSummary(),
+ fetchSize: makeSummary(),
+ fetchBytes: makeSummary(),
+ // Generate the string representation of the partition number only
+ // once when the reader is created.
+ partition: strconv.Itoa(readerStatsPartition),
+ },
+ version: version,
+ }
+ if r.useConsumerGroup() {
+ r.done = make(chan struct{})
+ r.runError = make(chan error)
+ cg, err := NewConsumerGroup(ConsumerGroupConfig{
+ ID: r.config.GroupID,
+ Brokers: r.config.Brokers,
+ Dialer: r.config.Dialer,
+ Topics: r.getTopics(),
+ GroupBalancers: r.config.GroupBalancers,
+ HeartbeatInterval: r.config.HeartbeatInterval,
+ PartitionWatchInterval: r.config.PartitionWatchInterval,
+ WatchPartitionChanges: r.config.WatchPartitionChanges,
+ SessionTimeout: r.config.SessionTimeout,
+ RebalanceTimeout: r.config.RebalanceTimeout,
+ JoinGroupBackoff: r.config.JoinGroupBackoff,
+ RetentionTime: r.config.RetentionTime,
+ StartOffset: r.config.StartOffset,
+ Logger: r.config.Logger,
+ ErrorLogger: r.config.ErrorLogger,
+ })
+ if err != nil {
+ panic(err)
+ }
+ go r.run(cg)
+ }
+
+ return r
+}
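+
+// Illustrative sketch (not from upstream): the fetch/commit loop and the
+// graceful shutdown that the Reader documentation above calls for, assuming r
+// was created with NewReader as in the earlier sketch. Everything except the
+// kafka-go API calls is a placeholder assumption.
+//
+//	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
+//	defer stop()
+//	defer r.Close() // required for a graceful disconnect from the brokers
+//	for {
+//		m, err := r.FetchMessage(ctx)
+//		if err != nil {
+//			break // context canceled or reader closed
+//		}
+//		process(m) // hypothetical handler
+//		if err := r.CommitMessages(ctx, m); err != nil {
+//			break
+//		}
+//	}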
+
+// Config returns the reader's configuration.
+func (r *Reader) Config() ReaderConfig {
+ return r.config
+}
+
+// Close closes the stream, preventing the program from reading any more
+// messages from it.
+func (r *Reader) Close() error {
+ atomic.StoreUint32(&r.once, 1)
+
+ r.mutex.Lock()
+ closed := r.closed
+ r.closed = true
+ r.mutex.Unlock()
+
+ r.cancel()
+ r.stop()
+ r.join.Wait()
+
+ if r.done != nil {
+ <-r.done
+ }
+
+ if !closed {
+ close(r.msgs)
+ }
+
+ return nil
+}
+
+// ReadMessage reads and returns the next message from r. The method call
+// blocks until a message becomes available, or an error occurs. The program
+// may also specify a context to asynchronously cancel the blocking operation.
+//
+// The method returns io.EOF to indicate that the reader has been closed.
+//
+// If consumer groups are used, ReadMessage will automatically commit the
+// offset when called. Note that this could result in an offset being committed
+// before the message is fully processed.
+//
+// If more fine-grained control of when offsets are committed is required, it
+// is recommended to use FetchMessage with CommitMessages instead.
+func (r *Reader) ReadMessage(ctx context.Context) (Message, error) {
+ m, err := r.FetchMessage(ctx)
+ if err != nil {
+ return Message{}, fmt.Errorf("fetching message: %w", err)
+ }
+
+ if r.useConsumerGroup() {
+ if err := r.CommitMessages(ctx, m); err != nil {
+ return Message{}, fmt.Errorf("committing message: %w", err)
+ }
+ }
+
+ return m, nil
+}
+
+// FetchMessage reads and returns the next message from r. The method call
+// blocks until a message becomes available, or an error occurs. The program
+// may also specify a context to asynchronously cancel the blocking operation.
+//
+// The method returns io.EOF to indicate that the reader has been closed.
+//
+// FetchMessage does not commit offsets automatically when using consumer groups.
+// Use CommitMessages to commit the offset.
+func (r *Reader) FetchMessage(ctx context.Context) (Message, error) {
+ r.activateReadLag()
+
+ for {
+ r.mutex.Lock()
+
+ if !r.closed && r.version == 0 {
+ r.start(r.getTopicPartitionOffset())
+ }
+
+ version := r.version
+ r.mutex.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return Message{}, ctx.Err()
+
+ case err := <-r.runError:
+ return Message{}, err
+
+ case m, ok := <-r.msgs:
+ if !ok {
+ return Message{}, io.EOF
+ }
+
+ if m.version >= version {
+ r.mutex.Lock()
+
+ switch {
+ case m.error != nil:
+ case version == r.version:
+ r.offset = m.message.Offset + 1
+ r.lag = m.watermark - r.offset
+ }
+
+ r.mutex.Unlock()
+
+ if errors.Is(m.error, io.EOF) {
+ // io.EOF is used as a marker to indicate that the stream
+ // has been closed; in case it was received from the inner
+ // reader, we don't want to confuse the program, so we
+ // replace the error with io.ErrUnexpectedEOF.
+ m.error = io.ErrUnexpectedEOF
+ }
+
+ return m.message, m.error
+ }
+ }
+ }
+}
+
+// CommitMessages commits the list of messages passed as argument. The program
+// may pass a context to asynchronously cancel the commit operation when it is
+// configured to be blocking.
+//
+// Because kafka consumer groups track a single offset per partition, the
+// highest message offset passed to CommitMessages will cause all previous
+// messages to be committed. Applications need to account for these Kafka
+// limitations when committing messages, and maintain message ordering if they
+// need strong delivery guarantees.
+// This property makes it valid to pass only the last message seen to
+// CommitMessages in order to move the offset of the topic/partition it
+// belonged to forward, effectively committing all previous messages in the
+// partition.
+func (r *Reader) CommitMessages(ctx context.Context, msgs ...Message) error {
+ if !r.useConsumerGroup() {
+ return errOnlyAvailableWithGroup
+ }
+
+ var errch <-chan error
+ creq := commitRequest{
+ commits: makeCommits(msgs...),
+ }
+
+ if r.useSyncCommits() {
+ ch := make(chan error, 1)
+ errch, creq.errch = ch, ch
+ }
+
+ select {
+ case r.commits <- creq:
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-r.stctx.Done():
+ // This context is used to ensure we don't allow commits after the
+ // reader was closed.
+ return io.ErrClosedPipe
+ }
+
+ if !r.useSyncCommits() {
+ return nil
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case err := <-errch:
+ return err
+ }
+}
+
+// ReadLag returns the current lag of the reader by fetching the last offset of
+// the topic and partition and computing the difference between that value and
+// the offset of the last message returned by ReadMessage.
+//
+// This method is intended to be used in cases where a program may be unable to
+// call ReadMessage to update the value returned by Lag, but still needs to get
+// an up-to-date estimate of how far behind the reader is. For example when
+// the consumer is not ready to process the next message.
+//
+// The function returns a lag of zero when the reader's current offset is
+// negative.
+func (r *Reader) ReadLag(ctx context.Context) (lag int64, err error) {
+ if r.useConsumerGroup() {
+ return 0, errNotAvailableWithGroup
+ }
+
+ type offsets struct {
+ first int64
+ last int64
+ }
+
+ offch := make(chan offsets, 1)
+ errch := make(chan error, 1)
+
+ go func() {
+ var off offsets
+ var err error
+
+ for _, broker := range r.config.Brokers {
+ var conn *Conn
+
+ if conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition); err != nil {
+ continue
+ }
+
+ deadline, _ := ctx.Deadline()
+ conn.SetDeadline(deadline)
+
+ off.first, off.last, err = conn.ReadOffsets()
+ conn.Close()
+
+ if err == nil {
+ break
+ }
+ }
+
+ if err != nil {
+ errch <- err
+ } else {
+ offch <- off
+ }
+ }()
+
+ select {
+ case off := <-offch:
+ switch cur := r.Offset(); {
+ case cur == FirstOffset:
+ lag = off.last - off.first
+
+ case cur == LastOffset:
+ lag = 0
+
+ default:
+ lag = off.last - cur
+ }
+ case err = <-errch:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+
+ return
+}
+
+// Offset returns the current absolute offset of the reader, or -1
+// if r is backed by a consumer group.
+func (r *Reader) Offset() int64 {
+ if r.useConsumerGroup() {
+ return -1
+ }
+
+ r.mutex.Lock()
+ offset := r.offset
+ r.mutex.Unlock()
+ r.withLogger(func(log Logger) {
+ log.Printf("looking up offset of kafka reader for partition %d of %s: %s", r.config.Partition, r.config.Topic, toHumanOffset(offset))
+ })
+ return offset
+}
+
+// Lag returns the lag of the last message returned by ReadMessage, or -1
+// if r is backed by a consumer group.
+func (r *Reader) Lag() int64 {
+ if r.useConsumerGroup() {
+ return -1
+ }
+
+ r.mutex.Lock()
+ lag := r.lag
+ r.mutex.Unlock()
+ return lag
+}
+
+// SetOffset changes the offset from which the next batch of messages will be
+// read. The method fails with io.ErrClosedPipe if the reader has already been closed.
+//
+// From version 0.2.0, FirstOffset and LastOffset can be used to indicate the first
+// or last available offset in the partition. Please note while -1 and -2 were accepted
+// to indicate the first or last offset in previous versions, the meanings of the numbers
+// were swapped in 0.2.0 to match the meanings in other libraries and the Kafka protocol
+// specification.
+func (r *Reader) SetOffset(offset int64) error {
+ if r.useConsumerGroup() {
+ return errNotAvailableWithGroup
+ }
+
+ var err error
+ r.mutex.Lock()
+
+ if r.closed {
+ err = io.ErrClosedPipe
+ } else if offset != r.offset {
+ r.withLogger(func(log Logger) {
+ log.Printf("setting the offset of the kafka reader for partition %d of %s from %s to %s",
+ r.config.Partition, r.config.Topic, toHumanOffset(r.offset), toHumanOffset(offset))
+ })
+ r.offset = offset
+
+ if r.version != 0 {
+ r.start(r.getTopicPartitionOffset())
+ }
+
+ r.activateReadLag()
+ }
+
+ r.mutex.Unlock()
+ return err
+}
+
+// SetOffsetAt changes the offset from which the next batch of messages will be
+// read given the timestamp t.
+//
+// The method fails if it is unable to connect to the partition leader, if it is
+// unable to read the offset for the given timestamp, or if the reader has been
+// closed.
+func (r *Reader) SetOffsetAt(ctx context.Context, t time.Time) error {
+ r.mutex.Lock()
+ if r.closed {
+ r.mutex.Unlock()
+ return io.ErrClosedPipe
+ }
+ r.mutex.Unlock()
+
+ if len(r.config.Brokers) < 1 {
+ return errors.New("no brokers in config")
+ }
+ var conn *Conn
+ var err error
+ for _, broker := range r.config.Brokers {
+ conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition)
+ if err != nil {
+ continue
+ }
+ deadline, _ := ctx.Deadline()
+ conn.SetDeadline(deadline)
+ offset, err := conn.ReadOffset(t)
+ conn.Close()
+ if err != nil {
+ return err
+ }
+
+ return r.SetOffset(offset)
+ }
+ return fmt.Errorf("error dialing all brokers, one of the errors: %w", err)
+}
+
+// Stats returns a snapshot of the reader stats since the last time the method
+// was called, or since the reader was created if it is called for the first
+// time.
+//
+// A typical use of this method is to spawn a goroutine that will periodically
+// call Stats on a kafka reader and report the metrics to a stats collection
+// system.
+func (r *Reader) Stats() ReaderStats {
+ stats := ReaderStats{
+ Dials: r.stats.dials.snapshot(),
+ Fetches: r.stats.fetches.snapshot(),
+ Messages: r.stats.messages.snapshot(),
+ Bytes: r.stats.bytes.snapshot(),
+ Rebalances: r.stats.rebalances.snapshot(),
+ Timeouts: r.stats.timeouts.snapshot(),
+ Errors: r.stats.errors.snapshot(),
+ DialTime: r.stats.dialTime.snapshotDuration(),
+ ReadTime: r.stats.readTime.snapshotDuration(),
+ WaitTime: r.stats.waitTime.snapshotDuration(),
+ FetchSize: r.stats.fetchSize.snapshot(),
+ FetchBytes: r.stats.fetchBytes.snapshot(),
+ Offset: r.stats.offset.snapshot(),
+ Lag: r.stats.lag.snapshot(),
+ MinBytes: int64(r.config.MinBytes),
+ MaxBytes: int64(r.config.MaxBytes),
+ MaxWait: r.config.MaxWait,
+ QueueLength: int64(len(r.msgs)),
+ QueueCapacity: int64(cap(r.msgs)),
+ ClientID: r.config.Dialer.ClientID,
+ Topic: r.config.Topic,
+ Partition: r.stats.partition,
+ }
+ // TODO: remove when we get rid of the deprecated field.
+ stats.DeprecatedFetchesWithTypo = stats.Fetches
+ return stats
+}
+
+func (r *Reader) getTopicPartitionOffset() map[topicPartition]int64 {
+ key := topicPartition{topic: r.config.Topic, partition: int32(r.config.Partition)}
+ return map[topicPartition]int64{key: r.offset}
+}
+
+func (r *Reader) withLogger(do func(Logger)) {
+ if r.config.Logger != nil {
+ do(r.config.Logger)
+ }
+}
+
+func (r *Reader) withErrorLogger(do func(Logger)) {
+ if r.config.ErrorLogger != nil {
+ do(r.config.ErrorLogger)
+ } else {
+ r.withLogger(do)
+ }
+}
+
+func (r *Reader) activateReadLag() {
+ if r.config.ReadLagInterval > 0 && atomic.CompareAndSwapUint32(&r.once, 0, 1) {
+ // read lag will only be calculated when not using consumer groups
+ // todo discuss how capturing read lag should interact with rebalancing
+ if !r.useConsumerGroup() {
+ go r.readLag(r.stctx)
+ }
+ }
+}
+
+func (r *Reader) readLag(ctx context.Context) {
+ ticker := time.NewTicker(r.config.ReadLagInterval)
+ defer ticker.Stop()
+
+ for {
+ timeout, cancel := context.WithTimeout(ctx, r.config.ReadLagInterval/2)
+ lag, err := r.ReadLag(timeout)
+ cancel()
+
+ if err != nil {
+ r.stats.errors.observe(1)
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("kafka reader failed to read lag of partition %d of %s: %s", r.config.Partition, r.config.Topic, err)
+ })
+ } else {
+ r.stats.lag.observe(lag)
+ }
+
+ select {
+ case <-ticker.C:
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (r *Reader) start(offsetsByPartition map[topicPartition]int64) {
+ if r.closed {
+ // don't start child reader if parent Reader is closed
+ return
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ r.cancel() // always cancel the previous reader
+ r.cancel = cancel
+ r.version++
+
+ r.join.Add(len(offsetsByPartition))
+ for key, offset := range offsetsByPartition {
+ go func(ctx context.Context, key topicPartition, offset int64, join *sync.WaitGroup) {
+ defer join.Done()
+
+ (&reader{
+ dialer: r.config.Dialer,
+ logger: r.config.Logger,
+ errorLogger: r.config.ErrorLogger,
+ brokers: r.config.Brokers,
+ topic: key.topic,
+ partition: int(key.partition),
+ minBytes: r.config.MinBytes,
+ maxBytes: r.config.MaxBytes,
+ maxWait: r.config.MaxWait,
+ readBatchTimeout: r.config.ReadBatchTimeout,
+ backoffDelayMin: r.config.ReadBackoffMin,
+ backoffDelayMax: r.config.ReadBackoffMax,
+ version: r.version,
+ msgs: r.msgs,
+ stats: r.stats,
+ isolationLevel: r.config.IsolationLevel,
+ maxAttempts: r.config.MaxAttempts,
+
+ // backwards-compatibility flags
+ offsetOutOfRangeError: r.config.OffsetOutOfRangeError,
+ }).run(ctx, offset)
+ }(ctx, key, offset, &r.join)
+ }
+}
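+
+// Illustrative sketch (not from upstream): seeking a non-group Reader with the
+// SetOffset/SetOffsetAt methods defined above, e.g. to replay the last hour of
+// a partition. The fallback behavior is an assumption for the example.
+//
+//	if err := r.SetOffsetAt(ctx, time.Now().Add(-time.Hour)); err != nil {
+//		// fall back to the oldest retained message
+//		_ = r.SetOffset(kafka.FirstOffset)
+//	}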
+
+// A reader reads messages from kafka and produces them on its channels. It is
+// used as a way to asynchronously fetch messages while the main program reads
+// them using the high-level reader API.
+type reader struct {
+ dialer *Dialer
+ logger Logger
+ errorLogger Logger
+ brokers []string
+ topic string
+ partition int
+ minBytes int
+ maxBytes int
+ maxWait time.Duration
+ readBatchTimeout time.Duration
+ backoffDelayMin time.Duration
+ backoffDelayMax time.Duration
+ version int64
+ msgs chan<- readerMessage
+ stats *readerStats
+ isolationLevel IsolationLevel
+ maxAttempts int
+
+ offsetOutOfRangeError bool
+}
+
+type readerMessage struct {
+ version int64
+ message Message
+ watermark int64
+ error error
+}
+
+func (r *reader) run(ctx context.Context, offset int64) {
+ // This is the reader's main loop; it only ends if the context is canceled
+ // and will keep attempting to read messages otherwise.
+ //
+ // Retrying indefinitely has the nice side effect of preventing Read calls
+ // on the parent reader from blocking if the connection to the kafka server
+ // fails; the reader keeps reporting errors on the error channel, which will
+ // then be surfaced to the program.
+ // If the reader wasn't retrying then the program would block indefinitely
+ // on a Read call after reading the first error.
+ for attempt := 0; true; attempt++ {
+ if attempt != 0 {
+ if !sleep(ctx, backoff(attempt, r.backoffDelayMin, r.backoffDelayMax)) {
+ return
+ }
+ }
+
+ r.withLogger(func(log Logger) {
+ log.Printf("initializing kafka reader for partition %d of %s starting at offset %d", r.partition, r.topic, toHumanOffset(offset))
+ })
+
+ conn, start, err := r.initialize(ctx, offset)
+ if err != nil {
+ if errors.Is(err, OffsetOutOfRange) {
+ if r.offsetOutOfRangeError {
+ r.sendError(ctx, err)
+ return
+ }
+
+ // This would happen if the requested offset is past the last
+ // offset on the partition leader. In that case we're just going
+ // to retry later hoping that enough data has been produced.
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
+ })
+
+ continue
+ }
+
+ // Perform a configured number of attempts before
+ // reporting first errors, this helps mitigate
+ // situations where the kafka server is temporarily
+ // unavailable.
+ if attempt >= r.maxAttempts {
+ r.sendError(ctx, err)
+ } else {
+ r.stats.errors.observe(1)
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
+ })
+ }
+ continue
+ }
+
+ // Resetting the attempt counter ensures that if a failure occurs after
+ // a successful initialization we don't keep increasing the backoff
+ // timeout.
+ attempt = 0
+
+ // Now we're sure to have an absolute offset number; should anything
+ // happen to the connection, we know we'll want to restart from this offset.
+ offset = start
+
+ errcount := 0
+ readLoop:
+ for {
+ if !sleep(ctx, backoff(errcount, r.backoffDelayMin, r.backoffDelayMax)) {
+ conn.Close()
+ return
+ }
+
+ offset, err = r.read(ctx, offset, conn)
+ switch {
+ case err == nil:
+ errcount = 0
+ continue
+
+ case errors.Is(err, io.EOF):
+ // done with this batch of messages...carry on. note that this
+ // block relies on the batch repackaging real io.EOF errors as
+ // io.UnexpectedEOF. otherwise, we would end up swallowing real
+ // errors here.
+ errcount = 0
+ continue
+
+ case errors.Is(err, io.ErrNoProgress):
+ // This error is returned by the Conn when it believes the connection
+ // has been corrupted, so we need to explicitly close it.
+ // Since we are explicitly handling it and a retry will pick up, we can
+ // suppress the error metrics and logs for this case.
+ conn.Close()
+ break readLoop
+
+ case errors.Is(err, UnknownTopicOrPartition):
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("failed to read from current broker %v for partition %d of %s at offset %d: %v", r.brokers, r.partition, r.topic, toHumanOffset(offset), err)
+ })
+
+ conn.Close()
+
+ // The next call to .initialize will re-establish a connection to the proper
+ // topic/partition broker combo.
+ r.stats.rebalances.observe(1)
+ break readLoop
+
+ case errors.Is(err, NotLeaderForPartition):
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("failed to read from current broker for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
+ })
+
+ conn.Close()
+
+ // The next call to .initialize will re-establish a connection to the proper
+ // partition leader.
+ r.stats.rebalances.observe(1)
+ break readLoop
+
+ case errors.Is(err, RequestTimedOut):
+ // Timeout on the kafka side; this can be safely retried.
+ errcount = 0
+ r.withLogger(func(log Logger) {
+ log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
+ })
+ r.stats.timeouts.observe(1)
+ continue
+
+ case errors.Is(err, OffsetOutOfRange):
+ first, last, err := r.readOffsets(conn)
+ if err != nil {
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("the kafka reader got an error while attempting to determine whether it was reading before the first offset or after the last offset of partition %d of %s: %s", r.partition, r.topic, err)
+ })
+ conn.Close()
+ break readLoop
+ }
+
+ switch {
+ case offset < first:
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("the kafka reader is reading before the first offset for partition %d of %s, skipping from offset %d to %d (%d messages)", r.partition, r.topic, toHumanOffset(offset), first, first-offset)
+ })
+ offset, errcount = first, 0
+ continue // retry immediately so we don't keep falling behind due to the backoff
+
+ case offset < last:
+ errcount = 0
+ continue // more messages have already become available, retry immediately
+
+ default:
+ // We may be reading past the last offset, will retry later.
+ r.withErrorLogger(func(log Logger) {
+ log.Printf("the kafka reader is reading past the last offset for partition %d of %s at offset %d", r.partition, r.topic, toHumanOffset(offset))
+ })
+ }
+
+ case errors.Is(err, context.Canceled):
+ // Another reader has taken over; we can safely quit.
+ conn.Close()
+ return
+
+ case errors.Is(err, errUnknownCodec):
+ // The compression codec is either unsupported or has not been
+ // imported. This is a fatal error b/c the reader cannot
+ // proceed.
+ r.sendError(ctx, err) + break readLoop + + default: + var kafkaError Error + if errors.As(err, &kafkaError) { + r.sendError(ctx, err) + } else { + r.withErrorLogger(func(log Logger) { + log.Printf("the kafka reader got an unknown error reading partition %d of %s at offset %d: %s", r.partition, r.topic, toHumanOffset(offset), err) + }) + r.stats.errors.observe(1) + conn.Close() + break readLoop + } + } + + errcount++ + } + } +} + +func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, start int64, err error) { + for i := 0; i != len(r.brokers) && conn == nil; i++ { + broker := r.brokers[i] + var first, last int64 + + t0 := time.Now() + conn, err = r.dialer.DialLeader(ctx, "tcp", broker, r.topic, r.partition) + t1 := time.Now() + r.stats.dials.observe(1) + r.stats.dialTime.observeDuration(t1.Sub(t0)) + + if err != nil { + continue + } + + if first, last, err = r.readOffsets(conn); err != nil { + conn.Close() + conn = nil + break + } + + switch { + case offset == FirstOffset: + offset = first + + case offset == LastOffset: + offset = last + + case offset < first: + offset = first + } + + r.withLogger(func(log Logger) { + log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, toHumanOffset(offset)) + }) + + if start, err = conn.Seek(offset, SeekAbsolute); err != nil { + conn.Close() + conn = nil + break + } + + conn.SetDeadline(time.Time{}) + } + + return +} + +func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, error) { + r.stats.fetches.observe(1) + r.stats.offset.observe(offset) + + t0 := time.Now() + conn.SetReadDeadline(t0.Add(r.maxWait)) + + batch := conn.ReadBatchWith(ReadBatchConfig{ + MinBytes: r.minBytes, + MaxBytes: r.maxBytes, + IsolationLevel: r.isolationLevel, + }) + highWaterMark := batch.HighWaterMark() + + t1 := time.Now() + r.stats.waitTime.observeDuration(t1.Sub(t0)) + + var msg Message + var err error + var size int64 + var bytes int64 + + for { + conn.SetReadDeadline(time.Now().Add(r.readBatchTimeout)) + + if msg, err = batch.ReadMessage(); err != nil { + batch.Close() + break + } + + n := int64(len(msg.Key) + len(msg.Value)) + r.stats.messages.observe(1) + r.stats.bytes.observe(n) + + if err = r.sendMessage(ctx, msg, highWaterMark); err != nil { + batch.Close() + break + } + + offset = msg.Offset + 1 + r.stats.offset.observe(offset) + r.stats.lag.observe(highWaterMark - offset) + + size++ + bytes += n + } + + conn.SetReadDeadline(time.Time{}) + + t2 := time.Now() + r.stats.readTime.observeDuration(t2.Sub(t1)) + r.stats.fetchSize.observe(size) + r.stats.fetchBytes.observe(bytes) + return offset, err +} + +func (r *reader) readOffsets(conn *Conn) (first, last int64, err error) { + conn.SetDeadline(time.Now().Add(10 * time.Second)) + return conn.ReadOffsets() +} + +func (r *reader) sendMessage(ctx context.Context, msg Message, watermark int64) error { + select { + case r.msgs <- readerMessage{version: r.version, message: msg, watermark: watermark}: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (r *reader) sendError(ctx context.Context, err error) error { + select { + case r.msgs <- readerMessage{version: r.version, error: err}: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (r *reader) withLogger(do func(Logger)) { + if r.logger != nil { + do(r.logger) + } +} + +func (r *reader) withErrorLogger(do func(Logger)) { + if r.errorLogger != nil { + do(r.errorLogger) + } else { + r.withLogger(do) + } +} + +// extractTopics returns the unique list 
of topics represented by the set of provided members.
+func extractTopics(members []GroupMember) []string {
+ visited := map[string]struct{}{}
+ var topics []string
+
+ for _, member := range members {
+ for _, topic := range member.Topics {
+ if _, seen := visited[topic]; seen {
+ continue
+ }
+
+ topics = append(topics, topic)
+ visited[topic] = struct{}{}
+ }
+ }
+
+ sort.Strings(topics)
+
+ return topics
+}
+
+type humanOffset int64
+
+func toHumanOffset(v int64) humanOffset {
+ return humanOffset(v)
+}
+
+func (offset humanOffset) Format(w fmt.State, _ rune) {
+ v := int64(offset)
+ switch v {
+ case FirstOffset:
+ fmt.Fprint(w, "first offset")
+ case LastOffset:
+ fmt.Fprint(w, "last offset")
+ default:
+ fmt.Fprint(w, strconv.FormatInt(v, 10))
+ }
+}
diff --git a/vendor/github.com/segmentio/kafka-go/record.go b/vendor/github.com/segmentio/kafka-go/record.go
new file mode 100644
index 000000000..1750889ac
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/record.go
@@ -0,0 +1,42 @@
+package kafka
+
+import (
+ "github.com/segmentio/kafka-go/protocol"
+)
+
+// Header is a key/value pair type representing headers set on records.
+type Header = protocol.Header
+
+// Bytes is an interface representing a sequence of bytes. This abstraction
+// makes it possible for programs to inject data into produce requests without
+// having to load it into an intermediary buffer, or read record keys and values
+// from a fetch response directly from internal buffers.
+//
+// Bytes are not safe to use concurrently from multiple goroutines.
+type Bytes = protocol.Bytes
+
+// NewBytes constructs a Bytes value from a byte slice.
+//
+// If b is nil, nil is returned.
+func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) }
+
+// ReadAll reads b into a byte slice.
+func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) }
+
+// Record is an interface representing a single kafka record.
+//
+// Record values are not safe to use concurrently from multiple goroutines.
+type Record = protocol.Record
+
+// RecordReader is an interface representing a sequence of records. Record sets
+// are used in both produce and fetch requests to represent the sequence of
+// records that are sent to or received from kafka brokers.
+//
+// RecordReader values are not safe to use concurrently from multiple goroutines.
+type RecordReader = protocol.RecordReader
+
+// NewRecordReader constructs a RecordReader which exposes the sequence of records
+// passed as arguments.
+func NewRecordReader(records ...Record) RecordReader {
+ return protocol.NewRecordReader(records...)
+} diff --git a/vendor/github.com/segmentio/kafka-go/recordbatch.go b/vendor/github.com/segmentio/kafka-go/recordbatch.go new file mode 100644 index 000000000..59ab4937b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/recordbatch.go @@ -0,0 +1,108 @@ +package kafka + +import ( + "bytes" + "time" +) + +const recordBatchHeaderSize int32 = 0 + + 8 + // base offset + 4 + // batch length + 4 + // partition leader epoch + 1 + // magic + 4 + // crc + 2 + // attributes + 4 + // last offset delta + 8 + // first timestamp + 8 + // max timestamp + 8 + // producer id + 2 + // producer epoch + 4 + // base sequence + 4 // msg count + +func recordBatchSize(msgs ...Message) (size int32) { + size = recordBatchHeaderSize + baseTime := msgs[0].Time + + for i := range msgs { + msg := &msgs[i] + msz := recordSize(msg, msg.Time.Sub(baseTime), int64(i)) + size += int32(msz + varIntLen(int64(msz))) + } + + return +} + +func compressRecordBatch(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int16, size int32, err error) { + compressed = acquireBuffer() + compressor := codec.NewWriter(compressed) + wb := &writeBuffer{w: compressor} + + for i, msg := range msgs { + wb.writeRecord(0, msgs[0].Time, int64(i), msg) + } + + if err = compressor.Close(); err != nil { + releaseBuffer(compressed) + return + } + + attributes = int16(codec.Code()) + size = recordBatchHeaderSize + int32(compressed.Len()) + return +} + +type recordBatch struct { + // required input parameters + codec CompressionCodec + attributes int16 + msgs []Message + + // parameters calculated during init + compressed *bytes.Buffer + size int32 +} + +func newRecordBatch(codec CompressionCodec, msgs ...Message) (r *recordBatch, err error) { + r = &recordBatch{ + codec: codec, + msgs: msgs, + } + if r.codec == nil { + r.size = recordBatchSize(r.msgs...) + } else { + r.compressed, r.attributes, r.size, err = compressRecordBatch(r.codec, r.msgs...) + } + return +} + +func (r *recordBatch) writeTo(wb *writeBuffer) { + wb.writeInt32(r.size) + + baseTime := r.msgs[0].Time + lastTime := r.msgs[len(r.msgs)-1].Time + if r.compressed != nil { + wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) { + wb.Write(r.compressed.Bytes()) + }) + releaseBuffer(r.compressed) + } else { + wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) { + for i, msg := range r.msgs { + wb.writeRecord(0, r.msgs[0].Time, int64(i), msg) + } + }) + } +} + +func recordSize(msg *Message, timestampDelta time.Duration, offsetDelta int64) int { + return 1 + // attributes + varIntLen(int64(milliseconds(timestampDelta))) + + varIntLen(offsetDelta) + + varBytesLen(msg.Key) + + varBytesLen(msg.Value) + + varArrayLen(len(msg.Headers), func(i int) int { + h := &msg.Headers[i] + return varStringLen(h.Key) + varBytesLen(h.Value) + }) +} diff --git a/vendor/github.com/segmentio/kafka-go/resolver.go b/vendor/github.com/segmentio/kafka-go/resolver.go new file mode 100644 index 000000000..fa5b97d70 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/resolver.go @@ -0,0 +1,57 @@ +package kafka + +import ( + "context" + "net" +) + +// The Resolver interface is used as an abstraction to provide service discovery +// of the hosts of a kafka cluster. +type Resolver interface { + // LookupHost looks up the given host using the local resolver. + // It returns a slice of that host's addresses. 
+ LookupHost(ctx context.Context, host string) (addrs []string, err error)
+}
+
+// BrokerResolver is an interface implemented by types that translate host
+// names into a network address.
+//
+// This resolver is not intended to be a general purpose interface. Instead,
+// it is tailored to the particular needs of the kafka protocol, with the goal
+// being to provide a flexible mechanism for extending broker name resolution
+// while retaining context that is specific to interacting with a kafka cluster.
+//
+// Resolvers must be safe to use from multiple goroutines.
+type BrokerResolver interface {
+ // Returns the IP addresses of the broker passed as argument.
+ LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error)
+}
+
+// NewBrokerResolver constructs a Resolver from r.
+//
+// If r is nil, net.DefaultResolver is used instead.
+func NewBrokerResolver(r *net.Resolver) BrokerResolver {
+ return brokerResolver{r}
+}
+
+type brokerResolver struct {
+ *net.Resolver
+}
+
+func (r brokerResolver) LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error) {
+ ipAddrs, err := r.LookupIPAddr(ctx, broker.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(ipAddrs) == 0 {
+ return nil, &net.DNSError{
+ Err: "no addresses were returned by the resolver",
+ Name: broker.Host,
+ IsTemporary: true,
+ IsNotFound: true,
+ }
+ }
+
+ return ipAddrs, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/resource.go b/vendor/github.com/segmentio/kafka-go/resource.go
new file mode 100644
index 000000000..b9be107c2
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/resource.go
@@ -0,0 +1,123 @@
+package kafka
+
+import (
+ "fmt"
+ "strings"
+)
+
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+type ResourceType int8
+
+const (
+ ResourceTypeUnknown ResourceType = 0
+ ResourceTypeAny ResourceType = 1
+ ResourceTypeTopic ResourceType = 2
+ ResourceTypeGroup ResourceType = 3
+ // See https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L36
+ ResourceTypeBroker ResourceType = 4
+ ResourceTypeCluster ResourceType = 4
+ ResourceTypeTransactionalID ResourceType = 5
+ ResourceTypeDelegationToken ResourceType = 6
+)
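+
+// Illustrative sketch (not from upstream): ResourceType implements
+// encoding.TextMarshaler/TextUnmarshaler (see below), so it round-trips
+// through text, with parsing being case-insensitive:
+//
+//	var rt kafka.ResourceType
+//	_ = rt.UnmarshalText([]byte("topic")) // rt == kafka.ResourceTypeTopic
+//	b, _ := rt.MarshalText()              // b == []byte("Topic")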
+
+func (rt ResourceType) String() string {
+ mapping := map[ResourceType]string{
+ ResourceTypeUnknown: "Unknown",
+ ResourceTypeAny: "Any",
+ ResourceTypeTopic: "Topic",
+ ResourceTypeGroup: "Group",
+ // Note that ResourceTypeBroker and ResourceTypeCluster have the same value.
+ // A map cannot have duplicate keys, so this single entry covers both.
+ ResourceTypeCluster: "Cluster",
+ ResourceTypeTransactionalID: "Transactionalid",
+ ResourceTypeDelegationToken: "Delegationtoken",
+ }
+ s, ok := mapping[rt]
+ if !ok {
+ s = mapping[ResourceTypeUnknown]
+ }
+ return s
+}
+
+func (rt ResourceType) MarshalText() ([]byte, error) {
+ return []byte(rt.String()), nil
+}
+
+func (rt *ResourceType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]ResourceType{
+ "unknown": ResourceTypeUnknown,
+ "any": ResourceTypeAny,
+ "topic": ResourceTypeTopic,
+ "group": ResourceTypeGroup,
+ "broker": ResourceTypeBroker,
+ "cluster": ResourceTypeCluster,
+ "transactionalid": ResourceTypeTransactionalID,
+ "delegationtoken": ResourceTypeDelegationToken,
+ }
+ parsed, ok := mapping[normalized]
+ if !ok {
+ *rt = ResourceTypeUnknown
+ return fmt.Errorf("cannot parse %s as a ResourceType", normalized)
+ }
+ *rt = parsed
+ return nil
+}
+
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+type PatternType int8
+
+const (
+ // PatternTypeUnknown represents any PatternType which this client cannot
+ // understand.
+ PatternTypeUnknown PatternType = 0
+ // PatternTypeAny matches any resource pattern type.
+ PatternTypeAny PatternType = 1
+ // PatternTypeMatch performs pattern matching.
+ PatternTypeMatch PatternType = 2
+ // PatternTypeLiteral represents a literal name.
+ // A literal name defines the full name of a resource, e.g. topic with name
+ // 'foo', or group with name 'bob'.
+ PatternTypeLiteral PatternType = 3
+ // PatternTypePrefixed represents a prefixed name.
+ // A prefixed name defines a prefix for a resource, e.g. topics with names
+ // that start with 'foo'.
+ PatternTypePrefixed PatternType = 4
+)
+
+func (pt PatternType) String() string {
+ mapping := map[PatternType]string{
+ PatternTypeUnknown: "Unknown",
+ PatternTypeAny: "Any",
+ PatternTypeMatch: "Match",
+ PatternTypeLiteral: "Literal",
+ PatternTypePrefixed: "Prefixed",
+ }
+ s, ok := mapping[pt]
+ if !ok {
+ s = mapping[PatternTypeUnknown]
+ }
+ return s
+}
+
+func (pt PatternType) MarshalText() ([]byte, error) {
+ return []byte(pt.String()), nil
+}
+
+func (pt *PatternType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]PatternType{
+ "unknown": PatternTypeUnknown,
+ "any": PatternTypeAny,
+ "match": PatternTypeMatch,
+ "literal": PatternTypeLiteral,
+ "prefixed": PatternTypePrefixed,
+ }
+ parsed, ok := mapping[normalized]
+ if !ok {
+ *pt = PatternTypeUnknown
+ return fmt.Errorf("cannot parse %s as a PatternType", normalized)
+ }
+ *pt = parsed
+ return nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/plain/plain.go b/vendor/github.com/segmentio/kafka-go/sasl/plain/plain.go
new file mode 100644
index 000000000..10c7632d2
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/plain/plain.go
@@ -0,0 +1,30 @@
+package plain
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/segmentio/kafka-go/sasl"
+)
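+
+// Illustrative sketch (not from upstream): wiring the PLAIN mechanism defined
+// below into a kafka.Dialer. The credentials are placeholder assumptions;
+// SCRAM (see the scram package) plugs into the same SASLMechanism field.
+//
+//	dialer := &kafka.Dialer{
+//		Timeout:       10 * time.Second,
+//		SASLMechanism: plain.Mechanism{Username: "user", Password: "secret"},
+//	}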
+
+// Mechanism implements the PLAIN mechanism and passes the credentials in clear
+// text.
+type Mechanism struct {
+ Username string
+ Password string
+}
+
+func (Mechanism) Name() string {
+ return "PLAIN"
+}
+
+func (m Mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) {
+ // Mechanism is stateless, so it can also implement sasl.StateMachine
+ return m, []byte(fmt.Sprintf("\x00%s\x00%s", m.Username, m.Password)), nil
+}
+
+func (m Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+ // kafka will return an error if it rejected the credentials, so we'd only
+ // arrive here on success.
+ return true, nil, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/sasl.go b/vendor/github.com/segmentio/kafka-go/sasl/sasl.go
new file mode 100644
index 000000000..4056d1f3c
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/sasl.go
@@ -0,0 +1,65 @@
+package sasl
+
+import "context"
+
+type ctxKey struct{}
+
+// Mechanism implements the SASL state machine for a particular mode of
+// authentication. It is used by the kafka.Dialer to perform the SASL
+// handshake.
+//
+// A Mechanism must be re-usable and safe for concurrent access by multiple
+// goroutines.
+type Mechanism interface {
+ // Name returns the identifier for this SASL mechanism. This string will be
+ // passed to the SASL handshake request and must match one of the mechanisms
+ // supported by Kafka.
+ Name() string
+
+ // Start begins SASL authentication. It returns an authentication state
+ // machine and "initial response" data (if required by the selected
+ // mechanism). A non-nil error causes the client to abort the authentication
+ // attempt.
+ //
+ // A nil ir value is different from a zero-length value. The nil value
+ // indicates that the selected mechanism does not use an initial response,
+ // while a zero-length value indicates an empty initial response, which must
+ // be sent to the server.
+ Start(ctx context.Context) (sess StateMachine, ir []byte, err error)
+}
+
+// StateMachine implements the SASL challenge/response flow for a single SASL
+// handshake. A StateMachine will be created by the Mechanism per connection,
+// so it does not need to be safe for concurrent access by multiple goroutines.
+//
+// Once the StateMachine is created by the Mechanism, the caller loops by
+// passing the server's response into Next and then sending Next's returned
+// bytes to the server. Eventually either Next will indicate that the
+// authentication has been successfully completed via the done return value, or
+// it will indicate that the authentication failed by returning a non-nil error.
+type StateMachine interface {
+ // Next continues challenge-response authentication. A non-nil error
+ // indicates that the client should abort the authentication attempt. If
+ // the client has been successfully authenticated, then the done return
+ // value will be true.
+ Next(ctx context.Context, challenge []byte) (done bool, response []byte, err error)
+}
+
+// Metadata contains additional data for performing SASL authentication.
+type Metadata struct {
+ // Host is the address of the broker the authentication will be
+ // performed on.
+ Host string
+ Port int
+}
+
+// WithMetadata returns a copy of the context with associated Metadata.
+func WithMetadata(ctx context.Context, m *Metadata) context.Context {
+ return context.WithValue(ctx, ctxKey{}, m)
+}
+
+// MetadataFromContext retrieves the Metadata from the context.
+func MetadataFromContext(ctx context.Context) *Metadata { + m, _ := ctx.Value(ctxKey{}).(*Metadata) + return m +} diff --git a/vendor/github.com/segmentio/kafka-go/sasl/scram/scram.go b/vendor/github.com/segmentio/kafka-go/sasl/scram/scram.go new file mode 100644 index 000000000..b29885f32 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/sasl/scram/scram.go @@ -0,0 +1,91 @@ +package scram + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "hash" + + "github.com/segmentio/kafka-go/sasl" + "github.com/xdg-go/scram" +) + +// Algorithm determines the hash function used by SCRAM to protect the user's +// credentials. +type Algorithm interface { + // Name returns the algorithm's name, e.g. "SCRAM-SHA-256" + Name() string + + // Hash returns a new hash.Hash. + Hash() hash.Hash +} + +type sha256Algo struct{} + +func (sha256Algo) Name() string { + return "SCRAM-SHA-256" +} + +func (sha256Algo) Hash() hash.Hash { + return sha256.New() +} + +type sha512Algo struct{} + +func (sha512Algo) Name() string { + return "SCRAM-SHA-512" +} + +func (sha512Algo) Hash() hash.Hash { + return sha512.New() +} + +var ( + SHA256 Algorithm = sha256Algo{} + SHA512 Algorithm = sha512Algo{} +) + +type mechanism struct { + algo Algorithm + client *scram.Client +} + +type session struct { + convo *scram.ClientConversation +} + +// Mechanism returns a new sasl.Mechanism that will use SCRAM with the provided +// Algorithm to securely transmit the provided credentials to Kafka. +// +// SCRAM-SHA-256 and SCRAM-SHA-512 were added to Kafka in 0.10.2.0. These +// mechanisms will not work with older versions. +func Mechanism(algo Algorithm, username, password string) (sasl.Mechanism, error) { + hashGen := scram.HashGeneratorFcn(algo.Hash) + client, err := hashGen.NewClient(username, password, "") + if err != nil { + return nil, err + } + + return &mechanism{ + algo: algo, + client: client, + }, nil +} + +func (m *mechanism) Name() string { + return m.algo.Name() +} + +func (m *mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) { + convo := m.client.NewConversation() + str, err := convo.Step("") + if err != nil { + return nil, nil, err + } + return &session{convo: convo}, []byte(str), nil +} + +func (s *session) Next(ctx context.Context, challenge []byte) (bool, []byte, error) { + str, err := s.convo.Step(string(challenge)) + return s.convo.Done(), []byte(str), err +} diff --git a/vendor/github.com/segmentio/kafka-go/saslauthenticate.go b/vendor/github.com/segmentio/kafka-go/saslauthenticate.go new file mode 100644 index 000000000..ad1292918 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/saslauthenticate.go @@ -0,0 +1,54 @@ +package kafka + +import ( + "bufio" +) + +type saslAuthenticateRequestV0 struct { + // Data holds the SASL payload + Data []byte +} + +func (t saslAuthenticateRequestV0) size() int32 { + return sizeofBytes(t.Data) +} + +func (t *saslAuthenticateRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + return readBytes(r, sz, &t.Data) +} + +func (t saslAuthenticateRequestV0) writeTo(wb *writeBuffer) { + wb.writeBytes(t.Data) +} + +type saslAuthenticateResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + ErrorMessage string + + Data []byte +} + +func (t saslAuthenticateResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + sizeofString(t.ErrorMessage) + sizeofBytes(t.Data) +} + +func (t saslAuthenticateResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeString(t.ErrorMessage) + 
wb.writeBytes(t.Data) +} + +func (t *saslAuthenticateResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + if remain, err = readString(r, remain, &t.ErrorMessage); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.Data); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/saslhandshake.go b/vendor/github.com/segmentio/kafka-go/saslhandshake.go new file mode 100644 index 000000000..3ffaee0f9 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/saslhandshake.go @@ -0,0 +1,53 @@ +package kafka + +import ( + "bufio" +) + +// saslHandshakeRequestV0 implements the format for V0 and V1 SASL +// requests (they are identical). +type saslHandshakeRequestV0 struct { + // Mechanism holds the SASL Mechanism chosen by the client. + Mechanism string +} + +func (t saslHandshakeRequestV0) size() int32 { + return sizeofString(t.Mechanism) +} + +func (t *saslHandshakeRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + return readString(r, sz, &t.Mechanism) +} + +func (t saslHandshakeRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.Mechanism) +} + +// saslHandshakeResponseV0 implements the format for V0 and V1 SASL +// responses (they are identical). +type saslHandshakeResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // Array of mechanisms enabled in the server + EnabledMechanisms []string +} + +func (t saslHandshakeResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + sizeofStringArray(t.EnabledMechanisms) +} + +func (t saslHandshakeResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeStringArray(t.EnabledMechanisms) +} + +func (t *saslHandshakeResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + if remain, err = readStringArray(r, remain, &t.EnabledMechanisms); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/sizeof.go b/vendor/github.com/segmentio/kafka-go/sizeof.go new file mode 100644 index 000000000..48ab469d7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/sizeof.go @@ -0,0 +1,72 @@ +package kafka + +import "fmt" + +type sizable interface { + size() int32 +} + +func sizeof(a interface{}) int32 { + switch v := a.(type) { + case int8: + return 1 + case int16: + return 2 + case int32: + return 4 + case int64: + return 8 + case string: + return sizeofString(v) + case bool: + return 1 + case []byte: + return sizeofBytes(v) + case sizable: + return v.size() + } + panic(fmt.Sprintf("unsupported type: %T", a)) +} + +func sizeofInt16(_ int16) int32 { + return 2 +} + +func sizeofInt32(_ int32) int32 { + return 4 +} + +func sizeofInt64(_ int64) int32 { + return 8 +} + +func sizeofString(s string) int32 { + return 2 + int32(len(s)) +} + +func sizeofNullableString(s *string) int32 { + if s == nil { + return 2 + } + return sizeofString(*s) +} + +func sizeofBytes(b []byte) int32 { + return 4 + int32(len(b)) +} + +func sizeofArray(n int, f func(int) int32) int32 { + s := int32(4) + for i := 0; i != n; i++ { + s += f(i) + } + return s +} + +func sizeofInt32Array(a []int32) int32 { + return 4 + (4 * int32(len(a))) +} + +func sizeofStringArray(a []string) int32 { + return sizeofArray(len(a), func(i int) int32 { return sizeofString(a[i]) }) +} diff --git a/vendor/github.com/segmentio/kafka-go/stats.go 
b/vendor/github.com/segmentio/kafka-go/stats.go
new file mode 100644
index 000000000..ef1e582cb
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/stats.go
@@ -0,0 +1,189 @@
+package kafka
+
+import (
+	"sync/atomic"
+	"time"
+)
+
+// SummaryStats is a data structure that carries a summary of observed values.
+type SummaryStats struct {
+	Avg   int64 `metric:"avg" type:"gauge"`
+	Min   int64 `metric:"min" type:"gauge"`
+	Max   int64 `metric:"max" type:"gauge"`
+	Count int64 `metric:"count" type:"counter"`
+	Sum   int64 `metric:"sum" type:"counter"`
+}
+
+// DurationStats is a data structure that carries a summary of observed duration values.
+type DurationStats struct {
+	Avg   time.Duration `metric:"avg" type:"gauge"`
+	Min   time.Duration `metric:"min" type:"gauge"`
+	Max   time.Duration `metric:"max" type:"gauge"`
+	Count int64         `metric:"count" type:"counter"`
+	Sum   time.Duration `metric:"sum" type:"counter"`
+}
+
+// counter is an atomic incrementing counter which gets reset on snapshot.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type counter int64
+
+func (c *counter) ptr() *int64 {
+	return (*int64)(c)
+}
+
+func (c *counter) observe(v int64) {
+	atomic.AddInt64(c.ptr(), v)
+}
+
+func (c *counter) snapshot() int64 {
+	return atomic.SwapInt64(c.ptr(), 0)
+}
+
+// gauge is an atomic integer that may be set to any arbitrary value; the value
+// does not change after a snapshot.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type gauge int64
+
+func (g *gauge) ptr() *int64 {
+	return (*int64)(g)
+}
+
+func (g *gauge) observe(v int64) {
+	atomic.StoreInt64(g.ptr(), v)
+}
+
+func (g *gauge) snapshot() int64 {
+	return atomic.LoadInt64(g.ptr())
+}
+
+// minimum is an atomic integral type that keeps track of the minimum of all
+// values that it observed between snapshots.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type minimum int64
+
+func (m *minimum) ptr() *int64 {
+	return (*int64)(m)
+}
+
+func (m *minimum) observe(v int64) {
+	for {
+		ptr := m.ptr()
+		min := atomic.LoadInt64(ptr)
+
+		if min >= 0 && min <= v {
+			break
+		}
+
+		if atomic.CompareAndSwapInt64(ptr, min, v) {
+			break
+		}
+	}
+}
+
+func (m *minimum) snapshot() int64 {
+	p := m.ptr()
+	v := atomic.LoadInt64(p)
+	atomic.CompareAndSwapInt64(p, v, -1)
+	if v < 0 {
+		v = 0
+	}
+	return v
+}
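minimum above and maximum just below update lock-free through a compare-and-swap loop: load the current value, bail out if it already dominates the observation, otherwise attempt the swap and retry on contention, with -1 doubling as the "no observation yet" marker. A standalone sketch of the same pattern; `runningMax` is an illustrative name, not part of the library:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// runningMax mirrors the CAS pattern of minimum/maximum: -1 means "nothing
// observed yet", and observe only swaps when the new value would actually
// change the statistic.
type runningMax int64

func (m *runningMax) observe(v int64) {
	for {
		cur := atomic.LoadInt64((*int64)(m))
		if cur >= 0 && cur >= v {
			return // the current maximum already covers v
		}
		if atomic.CompareAndSwapInt64((*int64)(m), cur, v) {
			return // won the race
		}
		// Lost a race with a concurrent observer; reload and retry.
	}
}

func main() {
	m := runningMax(-1)
	for _, v := range []int64{3, 9, 4} {
		m.observe(v)
	}
	fmt.Println(int64(m)) // 9
}
```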
+// maximum is an atomic integral type that keeps track of the maximum of all
+// values that it observed between snapshots.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type maximum int64
+
+func (m *maximum) ptr() *int64 {
+	return (*int64)(m)
+}
+
+func (m *maximum) observe(v int64) {
+	for {
+		ptr := m.ptr()
+		max := atomic.LoadInt64(ptr)
+
+		if max >= 0 && max >= v {
+			break
+		}
+
+		if atomic.CompareAndSwapInt64(ptr, max, v) {
+			break
+		}
+	}
+}
+
+func (m *maximum) snapshot() int64 {
+	p := m.ptr()
+	v := atomic.LoadInt64(p)
+	atomic.CompareAndSwapInt64(p, v, -1)
+	if v < 0 {
+		v = 0
+	}
+	return v
+}
+
+type summary struct {
+	min   minimum
+	max   maximum
+	sum   counter
+	count counter
+}
+
+func makeSummary() summary {
+	return summary{
+		min: -1,
+		max: -1,
+	}
+}
+
+func (s *summary) observe(v int64) {
+	s.min.observe(v)
+	s.max.observe(v)
+	s.sum.observe(v)
+	s.count.observe(1)
+}
+
+func (s *summary) observeDuration(v time.Duration) {
+	s.observe(int64(v))
+}
+
+func (s *summary) snapshot() SummaryStats {
+	avg := int64(0)
+	min := s.min.snapshot()
+	max := s.max.snapshot()
+	sum := s.sum.snapshot()
+	count := s.count.snapshot()
+
+	if count != 0 {
+		avg = int64(float64(sum) / float64(count))
+	}
+
+	return SummaryStats{
+		Avg:   avg,
+		Min:   min,
+		Max:   max,
+		Count: count,
+		Sum:   sum,
+	}
+}
+
+func (s *summary) snapshotDuration() DurationStats {
+	summary := s.snapshot()
+	return DurationStats{
+		Avg:   time.Duration(summary.Avg),
+		Min:   time.Duration(summary.Min),
+		Max:   time.Duration(summary.Max),
+		Count: summary.Count,
+		Sum:   time.Duration(summary.Sum),
+	}
+}
diff --git a/vendor/github.com/segmentio/kafka-go/syncgroup.go b/vendor/github.com/segmentio/kafka-go/syncgroup.go
new file mode 100644
index 000000000..ff37569e7
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/syncgroup.go
@@ -0,0 +1,288 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/consumer"
+	"github.com/segmentio/kafka-go/protocol/syncgroup"
+)
+
+// SyncGroupRequest is the request structure for the SyncGroup function.
+type SyncGroupRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupID of the group to sync.
+	GroupID string
+
+	// The generation of the group.
+	GenerationID int
+
+	// The member ID assigned by the group.
+	MemberID string
+
+	// The unique identifier for the consumer instance.
+	GroupInstanceID string
+
+	// The name for the class of protocols implemented by the group being joined.
+	ProtocolType string
+
+	// The group protocol name.
+	ProtocolName string
+
+	// The group member assignments.
+	Assignments []SyncGroupRequestAssignment
+}
+
+// SyncGroupRequestAssignment represents an assignment for a group member.
+type SyncGroupRequestAssignment struct {
+	// The ID of the member to assign.
+	MemberID string
+
+	// The member assignment.
+	Assignment GroupProtocolAssignment
+}
+
+// SyncGroupResponse is the response structure for the SyncGroup function.
+type SyncGroupResponse struct {
+	// An error that may have occurred when attempting to sync the group.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// The group protocol type.
+	ProtocolType string
+
+	// The group protocol name.
+	ProtocolName string
+
+	// The member assignment.
+	Assignment GroupProtocolAssignment
+}
+
+// GroupProtocolAssignment represents an assignment of topics and partitions for a group member.
+type GroupProtocolAssignment struct {
+	// The topics and partitions assigned to the group member.
+	AssignedPartitions map[string][]int
+
+	// UserData for the assignment.
+	UserData []byte
+}
+
+// SyncGroup sends a sync group request to the coordinator and returns the response.
+func (c *Client) SyncGroup(ctx context.Context, req *SyncGroupRequest) (*SyncGroupResponse, error) {
+	syncGroup := syncgroup.Request{
+		GroupID:         req.GroupID,
+		GenerationID:    int32(req.GenerationID),
+		MemberID:        req.MemberID,
+		GroupInstanceID: req.GroupInstanceID,
+		ProtocolType:    req.ProtocolType,
+		ProtocolName:    req.ProtocolName,
+		Assignments:     make([]syncgroup.RequestAssignment, 0, len(req.Assignments)),
+	}
+
+	for _, assignment := range req.Assignments {
+		assign := consumer.Assignment{
+			Version:            consumer.MaxVersionSupported,
+			AssignedPartitions: make([]consumer.TopicPartition, 0, len(assignment.Assignment.AssignedPartitions)),
+			UserData:           assignment.Assignment.UserData,
+		}
+
+		for topic, partitions := range assignment.Assignment.AssignedPartitions {
+			tp := consumer.TopicPartition{
+				Topic:      topic,
+				Partitions: make([]int32, 0, len(partitions)),
+			}
+			for _, partition := range partitions {
+				tp.Partitions = append(tp.Partitions, int32(partition))
+			}
+			assign.AssignedPartitions = append(assign.AssignedPartitions, tp)
+		}
+
+		assignBytes, err := protocol.Marshal(consumer.MaxVersionSupported, assign)
+		if err != nil {
+			return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+		}
+
+		syncGroup.Assignments = append(syncGroup.Assignments, syncgroup.RequestAssignment{
+			MemberID:   assignment.MemberID,
+			Assignment: assignBytes,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &syncGroup)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+	}
+
+	r := m.(*syncgroup.Response)
+
+	var assignment consumer.Assignment
+	err = protocol.Unmarshal(r.Assignments, consumer.MaxVersionSupported, &assignment)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+	}
+
+	res := &SyncGroupResponse{
+		Throttle:     makeDuration(r.ThrottleTimeMS),
+		Error:        makeError(r.ErrorCode, ""),
+		ProtocolType: r.ProtocolType,
+		ProtocolName: r.ProtocolName,
+		Assignment: GroupProtocolAssignment{
+			AssignedPartitions: make(map[string][]int, len(assignment.AssignedPartitions)),
+			UserData:           assignment.UserData,
+		},
+	}
+	partitions := map[string][]int{}
+	for _, topicPartition := range assignment.AssignedPartitions {
+		for _, partition := range topicPartition.Partitions {
+			partitions[topicPartition.Topic] = append(partitions[topicPartition.Topic], int(partition))
+		}
+	}
+	res.Assignment.AssignedPartitions = partitions
+
+	return res, nil
+}
+
+type groupAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (t groupAssignment) size() int32 {
+	sz := sizeofInt16(t.Version) + sizeofInt16(int16(len(t.Topics)))
+
+	for topic, partitions := range t.Topics {
+		sz += sizeofString(topic) + sizeofInt32Array(partitions)
+	}
+
+	return sz + sizeofBytes(t.UserData)
+}
+
+func (t groupAssignment) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.Version)
+	wb.writeInt32(int32(len(t.Topics)))
+
+	for topic, partitions := range t.Topics {
+		wb.writeString(topic)
+		wb.writeInt32Array(partitions)
+	}
+
+	wb.writeBytes(t.UserData)
+}
+
+func (t *groupAssignment) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+
// I came across this case when testing for compatibility with bsm/sarama-cluster. It + // appears in some cases, sarama-cluster can send a nil array entry. Admittedly, I + // didn't look too closely at it. + if size == 0 { + t.Topics = map[string][]int32{} + return 0, nil + } + + if remain, err = readInt16(r, size, &t.Version); err != nil { + return + } + if remain, err = readMapStringInt32(r, remain, &t.Topics); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.UserData); err != nil { + return + } + + return +} + +func (t groupAssignment) bytes() []byte { + buf := bytes.NewBuffer(nil) + t.writeTo(&writeBuffer{w: buf}) + return buf.Bytes() +} + +type syncGroupRequestGroupAssignmentV0 struct { + // MemberID assigned by the group coordinator + MemberID string + + // MemberAssignments holds client encoded assignments + // + // See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + MemberAssignments []byte +} + +func (t syncGroupRequestGroupAssignmentV0) size() int32 { + return sizeofString(t.MemberID) + + sizeofBytes(t.MemberAssignments) +} + +func (t syncGroupRequestGroupAssignmentV0) writeTo(wb *writeBuffer) { + wb.writeString(t.MemberID) + wb.writeBytes(t.MemberAssignments) +} + +type syncGroupRequestV0 struct { + // GroupID holds the unique group identifier + GroupID string + + // GenerationID holds the generation of the group. + GenerationID int32 + + // MemberID assigned by the group coordinator + MemberID string + + GroupAssignments []syncGroupRequestGroupAssignmentV0 +} + +func (t syncGroupRequestV0) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.GenerationID) + + sizeofString(t.MemberID) + + sizeofArray(len(t.GroupAssignments), func(i int) int32 { return t.GroupAssignments[i].size() }) +} + +func (t syncGroupRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) + wb.writeArray(len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(wb) }) +} + +type syncGroupResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // MemberAssignments holds client encoded assignments + // + // See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + MemberAssignments []byte +} + +func (t syncGroupResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + + sizeofBytes(t.MemberAssignments) +} + +func (t syncGroupResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeBytes(t.MemberAssignments) +} + +func (t *syncGroupResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.MemberAssignments); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/time.go b/vendor/github.com/segmentio/kafka-go/time.go new file mode 100644 index 000000000..544d84207 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/time.go @@ -0,0 +1,58 @@ +package kafka + +import ( + "math" + "time" +) + +const ( + maxTimeout = time.Duration(math.MaxInt32) * time.Millisecond + minTimeout = time.Duration(math.MinInt32) * time.Millisecond + defaultRTT = 1 * time.Second +) + +func makeTime(t int64) time.Time { + if t <= 0 { + return time.Time{} + } + return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)).UTC() +} + +func timestamp(t time.Time) int64 { + if t.IsZero() { + return 
0
+	}
+	return t.UnixNano() / int64(time.Millisecond)
+}
+
+func makeDuration(ms int32) time.Duration {
+	return time.Duration(ms) * time.Millisecond
+}
+
+func milliseconds(d time.Duration) int32 {
+	switch {
+	case d > maxTimeout:
+		d = maxTimeout
+	case d < minTimeout:
+		d = minTimeout
+	}
+	return int32(d / time.Millisecond)
+}
+
+func deadlineToTimeout(deadline time.Time, now time.Time) time.Duration {
+	if deadline.IsZero() {
+		return maxTimeout
+	}
+	return deadline.Sub(now)
+}
+
+func adjustDeadlineForRTT(deadline time.Time, now time.Time, rtt time.Duration) time.Time {
+	if !deadline.IsZero() {
+		timeout := deadline.Sub(now)
+		if timeout < rtt {
+			rtt = timeout / 4
+		}
+		deadline = deadline.Add(-rtt)
+	}
+	return deadline
+}
diff --git a/vendor/github.com/segmentio/kafka-go/transport.go b/vendor/github.com/segmentio/kafka-go/transport.go
new file mode 100644
index 000000000..685bdddb1
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/transport.go
@@ -0,0 +1,1363 @@
+package kafka
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"runtime/pprof"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/apiversions"
+	"github.com/segmentio/kafka-go/protocol/createtopics"
+	"github.com/segmentio/kafka-go/protocol/findcoordinator"
+	meta "github.com/segmentio/kafka-go/protocol/metadata"
+	"github.com/segmentio/kafka-go/protocol/saslauthenticate"
+	"github.com/segmentio/kafka-go/protocol/saslhandshake"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+// Request is an interface implemented by types that represent messages sent
+// from kafka clients to brokers.
+type Request = protocol.Message
+
+// Response is an interface implemented by types that represent messages sent
+// from kafka brokers in response to client requests.
+type Response = protocol.Message
+
+// RoundTripper is an interface implemented by types which support interacting
+// with kafka brokers.
+type RoundTripper interface {
+	// RoundTrip sends a request to a kafka broker and returns the response that
+	// was received, or a non-nil error.
+	//
+	// The context passed as first argument can be used to asynchronously abort
+	// the call if needed.
+	RoundTrip(context.Context, net.Addr, Request) (Response, error)
+}
+
+// Transport is an implementation of the RoundTripper interface.
+//
+// Transport values manage a pool of connections and automatically discover the
+// cluster layout to route requests to the appropriate brokers.
+//
+// Transport values are safe to use concurrently from multiple goroutines.
+//
+// Note: The intent is for the Transport to become the underlying layer of the
+// kafka.Reader and kafka.Writer types.
+type Transport struct {
+	// A function used to establish connections to the kafka cluster.
+	Dial func(context.Context, string, string) (net.Conn, error)
+
+	// Time limit set for establishing connections to the kafka cluster. This
+	// limit includes all round trips done to establish the connections (TLS
+	// handshake, SASL negotiation, etc...).
+	//
+	// Defaults to 5s.
+	DialTimeout time.Duration
+
+	// Maximum amount of time that connections will remain open and unused.
+	// The transport automatically closes connections that have been idle for
+	// too long, and re-opens them on demand when the transport is used again.
+	//
+	// Defaults to 30s.
+	IdleTimeout time.Duration
+
+	// TTL for the metadata cached by this transport. Note that the value
+	// configured here is an upper bound; the transport randomizes the TTLs to
+	// avoid getting into states where multiple clients end up synchronized and
+	// cause bursts of requests to the kafka broker.
+	//
+	// Defaults to 6s.
+	MetadataTTL time.Duration
+
+	// Topic names for the metadata cached by this transport. If this field is left blank,
+	// metadata information of all topics in the cluster will be retrieved.
+	MetadataTopics []string
+
+	// Unique identifier that the transport communicates to the brokers when it
+	// sends requests.
+	ClientID string
+
+	// An optional configuration for TLS connections established by this
+	// transport.
+	TLS *tls.Config
+
+	// SASL configures the Transport to use SASL authentication.
+	SASL sasl.Mechanism
+
+	// An optional resolver used to translate broker host names into network
+	// addresses.
+	//
+	// The resolver will be called for every request (not every connection),
+	// making it possible to implement ACL policies by validating that the
+	// program is allowed to connect to the kafka broker. This also means that
+	// the resolver should probably provide a caching layer to avoid storming
+	// the service discovery backend with requests.
+	//
+	// When set, the Dial function is not responsible for performing name
+	// resolution, and is always called with a pre-resolved address.
+	Resolver BrokerResolver
+
+	// The background context used to control goroutines started internally by
+	// the transport.
+	//
+	// If nil, context.Background() is used instead.
+	Context context.Context
+
+	mutex sync.RWMutex
+	pools map[networkAddress]*connPool
+}
+
+// DefaultTransport is the default transport used by kafka clients in this
+// package.
+var DefaultTransport RoundTripper = &Transport{
+	Dial: (&net.Dialer{
+		Timeout:   3 * time.Second,
+		DualStack: true,
+	}).DialContext,
+}
+
+// CloseIdleConnections closes all idle connections immediately, and marks all
+// connections that are in use to be closed when they become idle again.
+func (t *Transport) CloseIdleConnections() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	for _, pool := range t.pools {
+		pool.unref()
+	}
+
+	for k := range t.pools {
+		delete(t.pools, k)
+	}
+}
+
+// RoundTrip sends a request to a kafka cluster and returns the response, or an
+// error if no responses were received.
+//
+// Message types are available in sub-packages of the protocol package. Each
+// kafka API is implemented in a different sub-package. For example, the request
+// and response types for the Fetch API are available in the protocol/fetch
+// package.
+//
+// The type of the response message will match the type of the request. For
+// example, if RoundTrip was called with a *fetch.Request as argument, the value
+// returned will be of type *fetch.Response. It is safe for the program to do a
+// type assertion after checking that no error was returned.
+//
+// This example illustrates the way this method is expected to be used:
+//
+//	r, err := transport.RoundTrip(ctx, addr, &fetch.Request{ ... })
+//	if err != nil {
+//		...
+//	} else {
+//		res := r.(*fetch.Response)
+//		...
+//	}
+//
+// The transport automatically selects the highest version of the API that is
+// supported by both the kafka-go package and the kafka broker. The negotiation
+// happens transparently once when connections are established.
+//
+// This API was introduced in version 0.4 as a way to leverage the lower-level
+// features of the kafka protocol, but also to provide a more efficient way of
+// managing connections to kafka brokers.
+func (t *Transport) RoundTrip(ctx context.Context, addr net.Addr, req Request) (Response, error) {
+	p := t.grabPool(addr)
+	defer p.unref()
+	return p.roundTrip(ctx, req)
+}
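To make the configuration surface above concrete, here is a hypothetical end-to-end use of the Transport: SCRAM credentials, a TLS client config, and one metadata round trip. The broker address and credentials are placeholders, and the `kafka.TCP` address helper is assumed to exist elsewhere in the same package version; this is a sketch, not the library's documented example:

```go
package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"time"

	kafka "github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/protocol/metadata"
	"github.com/segmentio/kafka-go/sasl/scram"
)

func main() {
	mech, err := scram.Mechanism(scram.SHA512, "alice", "secret")
	if err != nil {
		panic(err)
	}

	transport := &kafka.Transport{
		DialTimeout: 5 * time.Second,
		MetadataTTL: 6 * time.Second,
		ClientID:    "metadata-probe",
		TLS:         &tls.Config{}, // default verification against system roots
		SASL:        mech,
	}
	defer transport.CloseIdleConnections()

	// An empty TopicNames list requests metadata for every topic.
	r, err := transport.RoundTrip(context.Background(), kafka.TCP("broker-1:9093"), &metadata.Request{})
	if err != nil {
		panic(err)
	}
	res := r.(*metadata.Response) // response type matches the request type
	for _, t := range res.Topics {
		fmt.Println(t.Name)
	}
}
```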
+
+func (t *Transport) dial() func(context.Context, string, string) (net.Conn, error) {
+	if t.Dial != nil {
+		return t.Dial
+	}
+	return defaultDialer.DialContext
+}
+
+func (t *Transport) dialTimeout() time.Duration {
+	if t.DialTimeout > 0 {
+		return t.DialTimeout
+	}
+	return 5 * time.Second
+}
+
+func (t *Transport) idleTimeout() time.Duration {
+	if t.IdleTimeout > 0 {
+		return t.IdleTimeout
+	}
+	return 30 * time.Second
+}
+
+func (t *Transport) metadataTTL() time.Duration {
+	if t.MetadataTTL > 0 {
+		return t.MetadataTTL
+	}
+	return 6 * time.Second
+}
+
+func (t *Transport) grabPool(addr net.Addr) *connPool {
+	k := networkAddress{
+		network: addr.Network(),
+		address: addr.String(),
+	}
+
+	t.mutex.RLock()
+	p := t.pools[k]
+	if p != nil {
+		p.ref()
+	}
+	t.mutex.RUnlock()
+
+	if p != nil {
+		return p
+	}
+
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	if p := t.pools[k]; p != nil {
+		p.ref()
+		return p
+	}
+
+	ctx, cancel := context.WithCancel(t.context())
+
+	p = &connPool{
+		refc: 2,
+
+		dial:           t.dial(),
+		dialTimeout:    t.dialTimeout(),
+		idleTimeout:    t.idleTimeout(),
+		metadataTTL:    t.metadataTTL(),
+		metadataTopics: t.MetadataTopics,
+		clientID:       t.ClientID,
+		tls:            t.TLS,
+		sasl:           t.SASL,
+		resolver:       t.Resolver,
+
+		ready:  make(event),
+		wake:   make(chan event),
+		conns:  make(map[int32]*connGroup),
+		cancel: cancel,
+	}
+
+	p.ctrl = p.newConnGroup(addr)
+	go p.discover(ctx, p.wake)
+
+	if t.pools == nil {
+		t.pools = make(map[networkAddress]*connPool)
+	}
+	t.pools[k] = p
+	return p
+}
+
+func (t *Transport) context() context.Context {
+	if t.Context != nil {
+		return t.Context
+	}
+	return context.Background()
+}
+
+type event chan struct{}
+
+func (e event) trigger() { close(e) }
+
+type connPool struct {
+	refc uintptr
+	// Immutable fields of the connection pool. Connections access these fields
+	// on their parent pool in a read-only fashion, so no synchronization is
+	// required.
+	dial           func(context.Context, string, string) (net.Conn, error)
+	dialTimeout    time.Duration
+	idleTimeout    time.Duration
+	metadataTTL    time.Duration
+	metadataTopics []string
+	clientID       string
+	tls            *tls.Config
+	sasl           sasl.Mechanism
+	resolver       BrokerResolver
+	// Signaling mechanisms to orchestrate communications between the pool and
+	// the rest of the program.
+	once   sync.Once  // ensure that `ready` is triggered only once
+	ready  event      // triggered after the first metadata update
+	wake   chan event // used to force metadata updates
+	cancel context.CancelFunc
+	// Mutable fields of the connection pool; access must be synchronized.
+	mutex sync.RWMutex
+	conns map[int32]*connGroup // data connections used for produce/fetch/etc...
+	ctrl  *connGroup   // control connections used for metadata requests
+	state atomic.Value // cached cluster state
+}
+
+type connPoolState struct {
+	metadata *meta.Response   // last metadata response seen by the pool
+	err      error            // last error from metadata requests
+	layout   protocol.Cluster // cluster layout built from metadata response
+}
+
+func (p *connPool) grabState() connPoolState {
+	state, _ := p.state.Load().(connPoolState)
+	return state
+}
+
+func (p *connPool) setState(state connPoolState) {
+	p.state.Store(state)
+}
+
+func (p *connPool) ref() {
+	atomic.AddUintptr(&p.refc, +1)
+}
+
+func (p *connPool) unref() {
+	if atomic.AddUintptr(&p.refc, ^uintptr(0)) == 0 {
+		p.mutex.Lock()
+		defer p.mutex.Unlock()
+
+		for _, conns := range p.conns {
+			conns.closeIdleConns()
+		}
+
+		p.ctrl.closeIdleConns()
+		p.cancel()
+	}
+}
+
+func (p *connPool) roundTrip(ctx context.Context, req Request) (Response, error) {
+	// This first select should never block after the first metadata response
+	// that would mark the pool as `ready`.
+	select {
+	case <-p.ready:
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+
+	state := p.grabState()
+	var response promise
+
+	switch m := req.(type) {
+	case *meta.Request:
+		// We serve metadata requests directly from the transport cache unless
+		// we would like to auto create a topic that isn't in our cache.
+		//
+		// This reduces the number of round trips to kafka brokers while keeping
+		// the logic simple when applying partitioning strategies.
+		if state.err != nil {
+			return nil, state.err
+		}
+
+		cachedMeta := filterMetadataResponse(m, state.metadata)
+		// requestNeeded indicates if we need to send this metadata request to the server.
+		// It's true when we want to auto-create topics and we don't have the topic in our
+		// cache.
+		var requestNeeded bool
+		if m.AllowAutoTopicCreation {
+			for _, topic := range cachedMeta.Topics {
+				if topic.ErrorCode == int16(UnknownTopicOrPartition) {
+					requestNeeded = true
+					break
+				}
+			}
+		}
+
+		if !requestNeeded {
+			return cachedMeta, nil
+		}
+
+	case protocol.Splitter:
+		// Messages that implement the Splitter interface trigger the creation of
+		// multiple requests that are all merged back into a single result by
+		// a merger.
+		messages, merger, err := m.Split(state.layout)
+		if err != nil {
+			return nil, err
+		}
+		promises := make([]promise, len(messages))
+		for i, m := range messages {
+			promises[i] = p.sendRequest(ctx, m, state)
+		}
+		response = join(promises, messages, merger)
+	}
+
+	if response == nil {
+		response = p.sendRequest(ctx, req, state)
+	}
+
+	r, err := response.await(ctx)
+	if err != nil {
+		return r, err
+	}
+
+	switch resp := r.(type) {
+	case *createtopics.Response:
+		// Force an update of the metadata when adding topics,
+		// otherwise the cached state would get out of sync.
+		topicsToRefresh := make([]string, 0, len(resp.Topics))
+		for _, topic := range resp.Topics {
+			// Fixes issue 672: don't refresh topics that failed to create,
+			// as doing so causes the library to hang indefinitely.
+			if topic.ErrorCode != 0 {
+				continue
+			}
+
+			topicsToRefresh = append(topicsToRefresh, topic.Name)
+		}
+
+		p.refreshMetadata(ctx, topicsToRefresh)
+	case *meta.Response:
+		m := req.(*meta.Request)
+		// If we get here with allow auto topic creation then
+		// we didn't have that topic in our cache, so we should update
+		// the cache.
+		if m.AllowAutoTopicCreation {
+			topicsToRefresh := make([]string, 0, len(resp.Topics))
+			for _, topic := range resp.Topics {
+				// Don't refresh topics that failed to create, since that may
+				// mean that automatic topic creation is not enabled. Refreshing
+				// them would cause the library to hang indefinitely, just like
+				// in the createtopics case above. Fixes issue 806.
+				if topic.ErrorCode != 0 {
+					continue
+				}
+
+				topicsToRefresh = append(topicsToRefresh, topic.Name)
+			}
+			p.refreshMetadata(ctx, topicsToRefresh)
+		}
+	}
+
+	return r, nil
+}
+
+// refreshMetadata forces an update of the cached cluster metadata, and waits
+// for the given list of topics to appear. This waiting mechanism is necessary
+// to account for the fact that topic creation is asynchronous in kafka, and
+// causes subsequent requests to fail while the cluster state is propagated to
+// all the brokers.
+func (p *connPool) refreshMetadata(ctx context.Context, expectTopics []string) {
+	minBackoff := 100 * time.Millisecond
+	maxBackoff := 2 * time.Second
+	cancel := ctx.Done()
+
+	for ctx.Err() == nil {
+		notify := make(event)
+		select {
+		case <-cancel:
+			return
+		case p.wake <- notify:
+			select {
+			case <-notify:
+			case <-cancel:
+				return
+			}
+		}
+
+		state := p.grabState()
+		found := 0
+
+		for _, topic := range expectTopics {
+			if _, ok := state.layout.Topics[topic]; ok {
+				found++
+			}
+		}
+
+		if found == len(expectTopics) {
+			return
+		}
+
+		if delay := time.Duration(rand.Int63n(int64(minBackoff))); delay > 0 {
+			timer := time.NewTimer(minBackoff)
+			select {
+			case <-cancel:
+			case <-timer.C:
+			}
+			timer.Stop()
+
+			if minBackoff *= 2; minBackoff > maxBackoff {
+				minBackoff = maxBackoff
+			}
+		}
+	}
+}
+
+func (p *connPool) setReady() {
+	p.once.Do(p.ready.trigger)
+}
+
+// update is called periodically by the goroutine running the discover method
+// to refresh the cluster layout information used by the transport to route
+// requests to brokers.
+func (p *connPool) update(ctx context.Context, metadata *meta.Response, err error) {
+	var layout protocol.Cluster
+
+	if metadata != nil {
+		metadata.ThrottleTimeMs = 0
+
+		// Normalize the lists so we can apply binary search on them.
+		sortMetadataBrokers(metadata.Brokers)
+		sortMetadataTopics(metadata.Topics)
+
+		for i := range metadata.Topics {
+			t := &metadata.Topics[i]
+			sortMetadataPartitions(t.Partitions)
+		}
+
+		layout = makeLayout(metadata)
+	}
+
+	state := p.grabState()
+	addBrokers := make(map[int32]struct{})
+	delBrokers := make(map[int32]struct{})
+
+	if err != nil {
+		// Only update the error on the transport if the cluster layout was
+		// unknown. This ensures that we prioritize a previously known state
+		// of the cluster to reduce the impact of transient failures.
+		if state.metadata != nil {
+			return
+		}
+		state.err = err
+	} else {
+		for id, b2 := range layout.Brokers {
+			if b1, ok := state.layout.Brokers[id]; !ok {
+				addBrokers[id] = struct{}{}
+			} else if b1 != b2 {
+				addBrokers[id] = struct{}{}
+				delBrokers[id] = struct{}{}
+			}
+		}
+
+		for id := range state.layout.Brokers {
+			if _, ok := layout.Brokers[id]; !ok {
+				delBrokers[id] = struct{}{}
+			}
+		}
+
+		state.metadata, state.layout = metadata, layout
+		state.err = nil
+	}
+
+	defer p.setReady()
+	defer p.setState(state)
+
+	if len(addBrokers) != 0 || len(delBrokers) != 0 {
+		// Only acquire the lock when there is a change of layout. This is an
+		// infrequent event so we don't risk introducing regular contention on
+		// the mutex if we were to lock it on every update.
+		p.mutex.Lock()
+		defer p.mutex.Unlock()
+
+		if ctx.Err() != nil {
+			return // the pool has been closed, no need to update
+		}
+
+		for id := range delBrokers {
+			if broker := p.conns[id]; broker != nil {
+				broker.closeIdleConns()
+				delete(p.conns, id)
+			}
+		}
+
+		for id := range addBrokers {
+			broker := layout.Brokers[id]
+			p.conns[id] = p.newBrokerConnGroup(Broker{
+				Rack: broker.Rack,
+				Host: broker.Host,
+				Port: int(broker.Port),
+				ID:   int(broker.ID),
+			})
+		}
+	}
+}
+
+// discover is the entry point of an internal goroutine for the transport which
+// periodically requests updates of the cluster metadata and refreshes the
+// transport cached cluster layout.
+func (p *connPool) discover(ctx context.Context, wake <-chan event) {
+	prng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	metadataTTL := func() time.Duration {
+		return time.Duration(prng.Int63n(int64(p.metadataTTL)))
+	}
+
+	timer := time.NewTimer(metadataTTL())
+	defer timer.Stop()
+
+	var notify event
+	done := ctx.Done()
+
+	req := &meta.Request{
+		TopicNames: p.metadataTopics,
+	}
+
+	for {
+		c, err := p.grabClusterConn(ctx)
+		if err != nil {
+			p.update(ctx, nil, err)
+		} else {
+			res := make(async, 1)
+			deadline, cancel := context.WithTimeout(ctx, p.metadataTTL)
+			c.reqs <- connRequest{
+				ctx: deadline,
+				req: req,
+				res: res,
+			}
+			r, err := res.await(deadline)
+			cancel()
+			if err != nil && errors.Is(err, ctx.Err()) {
+				return
+			}
+			ret, _ := r.(*meta.Response)
+			p.update(ctx, ret, err)
+		}
+
+		if notify != nil {
+			notify.trigger()
+			notify = nil
+		}
+
+		select {
+		case <-timer.C:
+			timer.Reset(metadataTTL())
+		case <-done:
+			return
+		case notify = <-wake:
+		}
+	}
+}
+
+// grabBrokerConn returns a connection to a specific broker represented by the
+// broker id passed as argument. If the broker id was not known, an error is
+// returned.
+func (p *connPool) grabBrokerConn(ctx context.Context, brokerID int32) (*conn, error) {
+	p.mutex.RLock()
+	g := p.conns[brokerID]
+	p.mutex.RUnlock()
+	if g == nil {
+		return nil, BrokerNotAvailable
+	}
+	return g.grabConnOrConnect(ctx)
+}
+
+// grabClusterConn returns the connection to the kafka cluster that the pool is
+// configured to connect to.
+//
+// The transport uses a shared `control` connection to the cluster for any
+// requests that aren't supposed to be sent to specific brokers (unlike Fetch
+// or Produce requests, for example). Requests intended to be routed to
+// specific brokers are dispatched on a separate pool of connections that the
+// transport maintains. This split helps avoid head-of-line blocking situations
+// where control requests like Metadata would be queued behind large responses
+// from Fetch requests for example.
+//
+// In either case, the requests are multiplexed so we can keep a minimal number
+// of connections open (N+1, where N is the number of brokers in the cluster).
+func (p *connPool) grabClusterConn(ctx context.Context) (*conn, error) {
+	return p.ctrl.grabConnOrConnect(ctx)
+}
+
+func (p *connPool) sendRequest(ctx context.Context, req Request, state connPoolState) promise {
+	brokerID := int32(-1)
+
+	switch m := req.(type) {
+	case protocol.BrokerMessage:
+		// Some requests are supposed to be sent to specific brokers (e.g. the
+		// partition leaders). They implement the BrokerMessage interface to
+		// delegate the routing decision to each message type.
+ broker, err := m.Broker(state.layout) + if err != nil { + return reject(err) + } + brokerID = broker.ID + + case protocol.GroupMessage: + // Some requests are supposed to be sent to a group coordinator, + // look up which broker is currently the coordinator for the group + // so we can get a connection to that broker. + // + // TODO: should we cache the coordinator info? + p := p.sendRequest(ctx, &findcoordinator.Request{Key: m.Group()}, state) + r, err := p.await(ctx) + if err != nil { + return reject(err) + } + brokerID = r.(*findcoordinator.Response).NodeID + case protocol.TransactionalMessage: + p := p.sendRequest(ctx, &findcoordinator.Request{ + Key: m.Transaction(), + KeyType: int8(CoordinatorKeyTypeTransaction), + }, state) + r, err := p.await(ctx) + if err != nil { + return reject(err) + } + brokerID = r.(*findcoordinator.Response).NodeID + } + + var c *conn + var err error + if brokerID >= 0 { + c, err = p.grabBrokerConn(ctx, brokerID) + } else { + c, err = p.grabClusterConn(ctx) + } + if err != nil { + return reject(err) + } + + res := make(async, 1) + + c.reqs <- connRequest{ + ctx: ctx, + req: req, + res: res, + } + + return res +} + +func filterMetadataResponse(req *meta.Request, res *meta.Response) *meta.Response { + ret := *res + + if req.TopicNames != nil { + ret.Topics = make([]meta.ResponseTopic, len(req.TopicNames)) + + for i, topicName := range req.TopicNames { + j, ok := findMetadataTopic(res.Topics, topicName) + if ok { + ret.Topics[i] = res.Topics[j] + } else { + ret.Topics[i] = meta.ResponseTopic{ + ErrorCode: int16(UnknownTopicOrPartition), + Name: topicName, + } + } + } + } + + return &ret +} + +func findMetadataTopic(topics []meta.ResponseTopic, topicName string) (int, bool) { + i := sort.Search(len(topics), func(i int) bool { + return topics[i].Name >= topicName + }) + return i, i >= 0 && i < len(topics) && topics[i].Name == topicName +} + +func sortMetadataBrokers(brokers []meta.ResponseBroker) { + sort.Slice(brokers, func(i, j int) bool { + return brokers[i].NodeID < brokers[j].NodeID + }) +} + +func sortMetadataTopics(topics []meta.ResponseTopic) { + sort.Slice(topics, func(i, j int) bool { + return topics[i].Name < topics[j].Name + }) +} + +func sortMetadataPartitions(partitions []meta.ResponsePartition) { + sort.Slice(partitions, func(i, j int) bool { + return partitions[i].PartitionIndex < partitions[j].PartitionIndex + }) +} + +func makeLayout(metadataResponse *meta.Response) protocol.Cluster { + layout := protocol.Cluster{ + Controller: metadataResponse.ControllerID, + Brokers: make(map[int32]protocol.Broker), + Topics: make(map[string]protocol.Topic), + } + + for _, broker := range metadataResponse.Brokers { + layout.Brokers[broker.NodeID] = protocol.Broker{ + Rack: broker.Rack, + Host: broker.Host, + Port: broker.Port, + ID: broker.NodeID, + } + } + + for _, topic := range metadataResponse.Topics { + if topic.IsInternal { + continue // TODO: do we need to expose those? 
+		}
+		layout.Topics[topic.Name] = protocol.Topic{
+			Name:       topic.Name,
+			Error:      topic.ErrorCode,
+			Partitions: makePartitions(topic.Partitions),
+		}
+	}
+
+	return layout
+}
+
+func makePartitions(metadataPartitions []meta.ResponsePartition) map[int32]protocol.Partition {
+	protocolPartitions := make(map[int32]protocol.Partition, len(metadataPartitions))
+	numBrokerIDs := 0
+
+	for _, p := range metadataPartitions {
+		numBrokerIDs += len(p.ReplicaNodes) + len(p.IsrNodes) + len(p.OfflineReplicas)
+	}
+
+	// Reduce the memory footprint a bit by allocating a single buffer to write
+	// all broker ids.
+	brokerIDs := make([]int32, 0, numBrokerIDs)
+
+	for _, p := range metadataPartitions {
+		var rep, isr, off []int32
+		brokerIDs, rep = appendBrokerIDs(brokerIDs, p.ReplicaNodes)
+		brokerIDs, isr = appendBrokerIDs(brokerIDs, p.IsrNodes)
+		brokerIDs, off = appendBrokerIDs(brokerIDs, p.OfflineReplicas)
+
+		protocolPartitions[p.PartitionIndex] = protocol.Partition{
+			ID:       p.PartitionIndex,
+			Error:    p.ErrorCode,
+			Leader:   p.LeaderID,
+			Replicas: rep,
+			ISR:      isr,
+			Offline:  off,
+		}
+	}
+
+	return protocolPartitions
+}
+
+func appendBrokerIDs(ids, brokers []int32) ([]int32, []int32) {
+	i := len(ids)
+	ids = append(ids, brokers...)
+	return ids, ids[i:len(ids):len(ids)]
+}
+
+func (p *connPool) newConnGroup(a net.Addr) *connGroup {
+	return &connGroup{
+		addr: a,
+		pool: p,
+		broker: Broker{
+			ID: -1,
+		},
+	}
+}
+
+func (p *connPool) newBrokerConnGroup(broker Broker) *connGroup {
+	return &connGroup{
+		addr: &networkAddress{
+			network: "tcp",
+			address: net.JoinHostPort(broker.Host, strconv.Itoa(broker.Port)),
+		},
+		pool:   p,
+		broker: broker,
+	}
+}
+
+type connRequest struct {
+	ctx context.Context
+	req Request
+	res async
+}
+
+// The promise interface is used as a message passing abstraction to coordinate
+// between goroutines that handle requests and responses.
+type promise interface {
+	// Waits until the promise is resolved, rejected, or the context canceled.
+	await(context.Context) (Response, error)
+}
+
+// async is an implementation of the promise interface which supports resolving
+// or rejecting the await call asynchronously.
+type async chan interface{}
+
+func (p async) await(ctx context.Context) (Response, error) {
+	select {
+	case x := <-p:
+		switch v := x.(type) {
+		case nil:
+			return nil, nil // A nil response is ok (e.g. when RequiredAcks is None)
+		case Response:
+			return v, nil
+		case error:
+			return nil, v
+		default:
+			panic(fmt.Errorf("BUG: promise resolved with impossible value of type %T", v))
+		}
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (p async) resolve(res Response) { p <- res }
+
+func (p async) reject(err error) { p <- err }
+
+// rejected is an implementation of the promise interface which always
+// returns an error. Values of this type are constructed using the reject
+// function.
+type rejected struct{ err error }
+
+func reject(err error) promise { return &rejected{err: err} }
+
+func (p *rejected) await(ctx context.Context) (Response, error) {
+	return nil, p.err
+}
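The async type above is a one-shot promise: a buffered channel that carries either a Response or an error, with await selecting against context cancellation. A self-contained sketch of the same idea, reduced to string results; the names and values here are illustrative, not part of the library:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// result is a one-shot promise in the style of the async type: the producer
// sends either a value or an error, and the consumer selects on the channel
// and on context cancellation.
type result chan interface{}

func (p result) await(ctx context.Context) (string, error) {
	select {
	case x := <-p:
		switch v := x.(type) {
		case string:
			return v, nil
		case error:
			return "", v
		default:
			panic(fmt.Errorf("BUG: promise resolved with impossible value of type %T", v))
		}
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	p := make(result, 1) // buffered so resolving never blocks the producer
	go func() {
		time.Sleep(10 * time.Millisecond)
		p <- "broker response" // resolve
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	v, err := p.await(ctx)
	fmt.Println(v, err) // "broker response <nil>"
}
```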
+// joined is an implementation of the promise interface which merges results
+// from multiple promises into one await call using a merger.
+type joined struct {
+	promises []promise
+	requests []Request
+	merger   protocol.Merger
+}
+
+func join(promises []promise, requests []Request, merger protocol.Merger) promise {
+	return &joined{
+		promises: promises,
+		requests: requests,
+		merger:   merger,
+	}
+}
+
+func (p *joined) await(ctx context.Context) (Response, error) {
+	results := make([]interface{}, len(p.promises))
+
+	for i, sub := range p.promises {
+		m, err := sub.await(ctx)
+		if err != nil {
+			results[i] = err
+		} else {
+			results[i] = m
+		}
+	}
+
+	return p.merger.Merge(p.requests, results)
+}
+
+// Default dialer used by the transport connections when no Dial function
+// was configured by the program.
+var defaultDialer = net.Dialer{
+	Timeout:   3 * time.Second,
+	DualStack: true,
+}
+
+// connGroup represents a logical connection group to a kafka broker. The
+// actual network connections are lazily opened before sending requests, and
+// closed if they are unused for longer than the idle timeout.
+type connGroup struct {
+	addr   net.Addr
+	broker Broker
+	// Immutable state of the connection.
+	pool *connPool
+	// Shared state of the connection; this is synchronized on the mutex through
+	// calls to the synchronized method. Both goroutines of the connection share
+	// the state maintained in these fields.
+	mutex     sync.Mutex
+	closed    bool
+	idleConns []*conn // stack of idle connections
+}
+
+func (g *connGroup) closeIdleConns() {
+	g.mutex.Lock()
+	conns := g.idleConns
+	g.idleConns = nil
+	g.closed = true
+	g.mutex.Unlock()
+
+	for _, c := range conns {
+		c.close()
+	}
+}
+
+func (g *connGroup) grabConnOrConnect(ctx context.Context) (*conn, error) {
+	rslv := g.pool.resolver
+	addr := g.addr
+	var c *conn
+
+	if rslv == nil {
+		c = g.grabConn()
+	} else {
+		var err error
+		broker := g.broker
+
+		if broker.ID < 0 {
+			host, port, err := splitHostPortNumber(addr.String())
+			if err != nil {
+				return nil, err
+			}
+			broker.Host = host
+			broker.Port = port
+		}
+
+		ipAddrs, err := rslv.LookupBrokerIPAddr(ctx, broker)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, ipAddr := range ipAddrs {
+			network := addr.Network()
+			address := net.JoinHostPort(ipAddr.String(), strconv.Itoa(broker.Port))
+
+			if c = g.grabConnTo(network, address); c != nil {
+				break
+			}
+		}
+	}
+
+	if c == nil {
+		connChan := make(chan *conn)
+		errChan := make(chan error)
+
+		go func() {
+			c, err := g.connect(ctx, addr)
+			if err != nil {
+				select {
+				case errChan <- err:
+				case <-ctx.Done():
+				}
+			} else {
+				select {
+				case connChan <- c:
+				case <-ctx.Done():
+					if !g.releaseConn(c) {
+						c.close()
+					}
+				}
+			}
+		}()
+
+		select {
+		case c = <-connChan:
+		case err := <-errChan:
+			return nil, err
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+
+	return c, nil
+}
+
+func (g *connGroup) grabConnTo(network, address string) *conn {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	for i := len(g.idleConns) - 1; i >= 0; i-- {
+		c := g.idleConns[i]
+
+		if c.network == network && c.address == address {
+			copy(g.idleConns[i:], g.idleConns[i+1:])
+			n := len(g.idleConns) - 1
+			g.idleConns[n] = nil
+			g.idleConns = g.idleConns[:n]
+
+			if c.timer != nil {
+				c.timer.Stop()
+			}
+
+			return c
+		}
+	}
+
+	return nil
+}
+
+func (g *connGroup) grabConn() *conn {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if len(g.idleConns) == 0 {
+		return nil
+	}
+
+	n := len(g.idleConns) - 1
+	c := g.idleConns[n]
+	g.idleConns[n] = nil
+	g.idleConns = g.idleConns[:n]
+
+	if c.timer != nil {
+		c.timer.Stop()
+	}
+
+	return c
+}
+
+func (g *connGroup) removeConn(c *conn) bool {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if c.timer != nil {
+		c.timer.Stop()
+	}
+
+	for i, x := range g.idleConns {
+		if x == c {
+			copy(g.idleConns[i:], g.idleConns[i+1:])
+			n := len(g.idleConns) - 1
+			g.idleConns[n] = nil
+			g.idleConns = g.idleConns[:n]
+			return true
+		}
+	}
+
+	return false
+}
+
+func (g *connGroup) releaseConn(c *conn) bool {
+	idleTimeout := g.pool.idleTimeout
+
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if g.closed {
+		return false
+	}
+
+	if c.timer != nil {
+		c.timer.Reset(idleTimeout)
+	} else {
+		c.timer = time.AfterFunc(idleTimeout, func() {
+			if g.removeConn(c) {
+				c.close()
+			}
+		})
+	}
+
+	g.idleConns = append(g.idleConns, c)
+	return true
+}
+
+func (g *connGroup) connect(ctx context.Context, addr net.Addr) (*conn, error) {
+	deadline := time.Now().Add(g.pool.dialTimeout)
+
+	ctx, cancel := context.WithDeadline(ctx, deadline)
+	defer cancel()
+
+	network := strings.Split(addr.Network(), ",")
+	address := strings.Split(addr.String(), ",")
+	var netConn net.Conn
+	var netAddr net.Addr
+	var err error
+
+	if len(address) > 1 {
+		// Shuffle the list of addresses to randomize the order in which
+		// connections are attempted. This prevents routing all connections
+		// to the first broker (which will usually succeed).
+		rand.Shuffle(len(address), func(i, j int) {
+			network[i], network[j] = network[j], network[i]
+			address[i], address[j] = address[j], address[i]
+		})
+	}
+
+	for i := range address {
+		netConn, err = g.pool.dial(ctx, network[i], address[i])
+		if err == nil {
+			netAddr = &networkAddress{
+				network: network[i],
+				address: address[i],
+			}
+			break
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if netConn != nil {
+			netConn.Close()
+		}
+	}()
+
+	if tlsConfig := g.pool.tls; tlsConfig != nil {
+		if tlsConfig.ServerName == "" {
+			host, _ := splitHostPort(netAddr.String())
+			tlsConfig = tlsConfig.Clone()
+			tlsConfig.ServerName = host
+		}
+		netConn = tls.Client(netConn, tlsConfig)
+	}
+
+	pc := protocol.NewConn(netConn, g.pool.clientID)
+	pc.SetDeadline(deadline)
+
+	r, err := pc.RoundTrip(new(apiversions.Request))
+	if err != nil {
+		return nil, err
+	}
+	res := r.(*apiversions.Response)
+	ver := make(map[protocol.ApiKey]int16, len(res.ApiKeys))
+
+	if res.ErrorCode != 0 {
+		return nil, fmt.Errorf("negotiating API versions with kafka broker at %s: %w", g.addr, Error(res.ErrorCode))
+	}
+
+	for _, r := range res.ApiKeys {
+		apiKey := protocol.ApiKey(r.ApiKey)
+		ver[apiKey] = apiKey.SelectVersion(r.MinVersion, r.MaxVersion)
+	}
+
+	pc.SetVersions(ver)
+	pc.SetDeadline(time.Time{})
+
+	if g.pool.sasl != nil {
+		host, port, err := splitHostPortNumber(netAddr.String())
+		if err != nil {
+			return nil, err
+		}
+		metadata := &sasl.Metadata{
+			Host: host,
+			Port: port,
+		}
+		if err := authenticateSASL(sasl.WithMetadata(ctx, metadata), pc, g.pool.sasl); err != nil {
+			return nil, err
+		}
+	}
+
+	reqs := make(chan connRequest)
+	c := &conn{
+		network: netAddr.Network(),
+		address: netAddr.String(),
+		reqs:    reqs,
+		group:   g,
+	}
+	go c.run(pc, reqs)
+
+	netConn = nil
+	return c, nil
+}
+
+type conn struct {
+	reqs    chan<- connRequest
+	network string
+	address string
+	once    sync.Once
+	group   *connGroup
+	timer   *time.Timer
+}
+
+func (c *conn) close() {
+	c.once.Do(func() { close(c.reqs) })
+}
+
+func (c *conn) run(pc *protocol.Conn, reqs <-chan connRequest) {
+	defer pc.Close()
+
+	for cr := range reqs {
+		r, err := c.roundTrip(cr.ctx, pc, cr.req)
+		if err != nil {
+			cr.res.reject(err)
+			if !errors.Is(err, protocol.ErrNoRecord) {
+				break
+
} + } else { + cr.res.resolve(r) + } + if !c.group.releaseConn(c) { + break + } + } +} + +func (c *conn) roundTrip(ctx context.Context, pc *protocol.Conn, req Request) (Response, error) { + pprof.SetGoroutineLabels(ctx) + defer pprof.SetGoroutineLabels(context.Background()) + + if deadline, hasDeadline := ctx.Deadline(); hasDeadline { + pc.SetDeadline(deadline) + defer pc.SetDeadline(time.Time{}) + } + + return pc.RoundTrip(req) +} + +// authenticateSASL performs all of the required requests to authenticate this +// connection. If any step fails, this function returns with an error. A nil +// error indicates successful authentication. +func authenticateSASL(ctx context.Context, pc *protocol.Conn, mechanism sasl.Mechanism) error { + if err := saslHandshakeRoundTrip(pc, mechanism.Name()); err != nil { + return err + } + + sess, state, err := mechanism.Start(ctx) + if err != nil { + return err + } + + for completed := false; !completed; { + challenge, err := saslAuthenticateRoundTrip(pc, state) + if err != nil { + if errors.Is(err, io.EOF) { + // the broker may communicate a failed exchange by closing the + // connection (esp. in the case where we're passing opaque sasl + // data over the wire since there's no protocol info). + return SASLAuthenticationFailed + } + + return err + } + + completed, state, err = sess.Next(ctx, challenge) + if err != nil { + return err + } + } + + return nil +} + +// saslHandshake sends the SASL handshake message. This will determine whether +// the Mechanism is supported by the cluster. If it's not, this function will +// error out with UnsupportedSASLMechanism. +// +// If the mechanism is unsupported, the handshake request will reply with the +// list of the cluster's configured mechanisms, which could potentially be used +// to facilitate negotiation. At the moment, we are not negotiating the +// mechanism as we believe that brokers are usually known to the client, and +// therefore the client should already know which mechanisms are supported. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake +func saslHandshakeRoundTrip(pc *protocol.Conn, mechanism string) error { + msg, err := pc.RoundTrip(&saslhandshake.Request{ + Mechanism: mechanism, + }) + if err != nil { + return err + } + res := msg.(*saslhandshake.Response) + if res.ErrorCode != 0 { + err = Error(res.ErrorCode) + } + return err +} + +// saslAuthenticate sends the SASL authenticate message. This function must +// be immediately preceded by a successful saslHandshake. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate +func saslAuthenticateRoundTrip(pc *protocol.Conn, data []byte) ([]byte, error) { + msg, err := pc.RoundTrip(&saslauthenticate.Request{ + AuthBytes: data, + }) + if err != nil { + return nil, err + } + res := msg.(*saslauthenticate.Response) + if res.ErrorCode != 0 { + err = makeError(res.ErrorCode, res.ErrorMessage) + } + return res.AuthBytes, err +} + +var _ RoundTripper = (*Transport)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/txnoffsetcommit.go b/vendor/github.com/segmentio/kafka-go/txnoffsetcommit.go new file mode 100644 index 000000000..9480fc3a7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/txnoffsetcommit.go @@ -0,0 +1,142 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/txnoffsetcommit" +) + +// TxnOffsetCommitRequest represents a request sent to a kafka broker to commit +// offsets for a partition within a transaction. 
+type TxnOffsetCommitRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // The transactional id key.
+ TransactionalID string
+
+ // ID of the consumer group to publish the offsets for.
+ GroupID string
+
+ // The Producer ID (PID) for the current producer session;
+ // received from an InitProducerID request.
+ ProducerID int
+
+ // The epoch associated with the current producer session for the given PID.
+ ProducerEpoch int
+
+ // GenerationID is the current generation for the group.
+ GenerationID int
+
+ // ID of the group member submitting the offsets.
+ MemberID string
+
+ // GroupInstanceID is a unique identifier for the consumer.
+ GroupInstanceID string
+
+ // Set of topic partitions to publish the offsets for.
+ //
+ // Note that offset commits need to be submitted to the broker acting as the
+ // group coordinator. This will be automatically resolved by the transport.
+ Topics map[string][]TxnOffsetCommit
+}
+
+// TxnOffsetCommit represents the commit of an offset to a partition within a transaction.
+//
+// The extra metadata is opaque to the kafka protocol, it is intended to hold
+// information like an identifier for the process that committed the offset,
+// or the time at which the commit was made.
+type TxnOffsetCommit struct {
+ Partition int
+ Offset int64
+ Metadata string
+}
+
+// TxnOffsetCommitResponse represents a response from a kafka broker to an offset
+// commit request within a transaction.
+type TxnOffsetCommitResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Set of topic partitions that the kafka broker has accepted offset commits
+ // for.
+ Topics map[string][]TxnOffsetCommitPartition
+}
+
+// TxnOffsetCommitPartition represents the state of a single partition in responses
+// to committing offsets within a transaction.
+type TxnOffsetCommitPartition struct {
+ // ID of the partition.
+ Partition int
+
+ // An error that may have occurred while attempting to publish consumer
+ // group offsets for this partition.
+ //
+ // The error contains both the kafka error code, and an error message
+ // returned by the kafka broker. Programs may use the standard errors.Is
+ // function to test the error against kafka error codes.
+ Error error
+}
+
+// TxnOffsetCommit sends a txn offset commit request to a kafka broker and returns the
+// response.
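+//
+// A minimal usage sketch; the transactional id, group, topic, and offset below
+// are illustrative placeholders, and the producer/group session values are
+// assumed to come from earlier InitProducerID and group membership exchanges:
+//
+// client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
+// res, err := client.TxnOffsetCommit(ctx, &kafka.TxnOffsetCommitRequest{
+// TransactionalID: "my-transaction",
+// GroupID: "my-group",
+// ProducerID: producerID,
+// ProducerEpoch: producerEpoch,
+// GenerationID: generationID,
+// MemberID: memberID,
+// Topics: map[string][]kafka.TxnOffsetCommit{
+// "topic-A": {{Partition: 0, Offset: 42}},
+// },
+// })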
+func (c *Client) TxnOffsetCommit( + ctx context.Context, + req *TxnOffsetCommitRequest, +) (*TxnOffsetCommitResponse, error) { + protoReq := &txnoffsetcommit.Request{ + TransactionalID: req.TransactionalID, + GroupID: req.GroupID, + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + GenerationID: int32(req.GenerationID), + MemberID: req.MemberID, + GroupInstanceID: req.GroupInstanceID, + Topics: make([]txnoffsetcommit.RequestTopic, 0, len(req.Topics)), + } + + for topic, partitions := range req.Topics { + parts := make([]txnoffsetcommit.RequestPartition, len(partitions)) + for i, partition := range partitions { + parts[i] = txnoffsetcommit.RequestPartition{ + Partition: int32(partition.Partition), + CommittedOffset: int64(partition.Offset), + CommittedMetadata: partition.Metadata, + } + } + t := txnoffsetcommit.RequestTopic{ + Name: topic, + Partitions: parts, + } + + protoReq.Topics = append(protoReq.Topics, t) + } + + m, err := c.roundTrip(ctx, req.Addr, protoReq) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).TxnOffsetCommit: %w", err) + } + + r := m.(*txnoffsetcommit.Response) + + res := &TxnOffsetCommitResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]TxnOffsetCommitPartition, len(r.Topics)), + } + + for _, topic := range r.Topics { + partitions := make([]TxnOffsetCommitPartition, 0, len(topic.Partitions)) + for _, partition := range topic.Partitions { + partitions = append(partitions, TxnOffsetCommitPartition{ + Partition: int(partition.Partition), + Error: makeError(partition.ErrorCode, ""), + }) + } + res.Topics[topic.Name] = partitions + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/write.go b/vendor/github.com/segmentio/kafka-go/write.go new file mode 100644 index 000000000..3b806509c --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/write.go @@ -0,0 +1,614 @@ +package kafka + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "time" +) + +type writeBuffer struct { + w io.Writer + b [16]byte +} + +func (wb *writeBuffer) writeInt8(i int8) { + wb.b[0] = byte(i) + wb.Write(wb.b[:1]) +} + +func (wb *writeBuffer) writeInt16(i int16) { + binary.BigEndian.PutUint16(wb.b[:2], uint16(i)) + wb.Write(wb.b[:2]) +} + +func (wb *writeBuffer) writeInt32(i int32) { + binary.BigEndian.PutUint32(wb.b[:4], uint32(i)) + wb.Write(wb.b[:4]) +} + +func (wb *writeBuffer) writeInt64(i int64) { + binary.BigEndian.PutUint64(wb.b[:8], uint64(i)) + wb.Write(wb.b[:8]) +} + +func (wb *writeBuffer) writeVarInt(i int64) { + u := uint64((i << 1) ^ (i >> 63)) + n := 0 + + for u >= 0x80 && n < len(wb.b) { + wb.b[n] = byte(u) | 0x80 + u >>= 7 + n++ + } + + if n < len(wb.b) { + wb.b[n] = byte(u) + n++ + } + + wb.Write(wb.b[:n]) +} + +func (wb *writeBuffer) writeString(s string) { + wb.writeInt16(int16(len(s))) + wb.WriteString(s) +} + +func (wb *writeBuffer) writeVarString(s string) { + wb.writeVarInt(int64(len(s))) + wb.WriteString(s) +} + +func (wb *writeBuffer) writeNullableString(s *string) { + if s == nil { + wb.writeInt16(-1) + } else { + wb.writeString(*s) + } +} + +func (wb *writeBuffer) writeBytes(b []byte) { + n := len(b) + if b == nil { + n = -1 + } + wb.writeInt32(int32(n)) + wb.Write(b) +} + +func (wb *writeBuffer) writeVarBytes(b []byte) { + if b != nil { + wb.writeVarInt(int64(len(b))) + wb.Write(b) + } else { + //-1 is used to indicate nil key + wb.writeVarInt(-1) + } +} + +func (wb *writeBuffer) writeBool(b bool) { + v := int8(0) + if b { + v = 1 + } + wb.writeInt8(v) +} 
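+
+// A quick worked example of the zig-zag encoding used by writeVarInt above
+// (and mirrored by varIntLen later in this file): the mapping
+// u = uint64((i << 1) ^ (i >> 63)) interleaves signed values so that small
+// magnitudes encode in few bytes:
+//
+// i = 0 -> u = 0 (1 byte)
+// i = -1 -> u = 1 (1 byte)
+// i = 1 -> u = 2 (1 byte)
+// i = 64 -> u = 128 (2 bytes: 0x80 0x01; the first byte carries the 0x80 continuation bit)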
+
+func (wb *writeBuffer) writeArrayLen(n int) {
+ wb.writeInt32(int32(n))
+}
+
+func (wb *writeBuffer) writeArray(n int, f func(int)) {
+ wb.writeArrayLen(n)
+ for i := 0; i < n; i++ {
+ f(i)
+ }
+}
+
+func (wb *writeBuffer) writeVarArray(n int, f func(int)) {
+ wb.writeVarInt(int64(n))
+ for i := 0; i < n; i++ {
+ f(i)
+ }
+}
+
+func (wb *writeBuffer) writeStringArray(a []string) {
+ wb.writeArray(len(a), func(i int) { wb.writeString(a[i]) })
+}
+
+func (wb *writeBuffer) writeInt32Array(a []int32) {
+ wb.writeArray(len(a), func(i int) { wb.writeInt32(a[i]) })
+}
+
+func (wb *writeBuffer) write(a interface{}) {
+ switch v := a.(type) {
+ case int8:
+ wb.writeInt8(v)
+ case int16:
+ wb.writeInt16(v)
+ case int32:
+ wb.writeInt32(v)
+ case int64:
+ wb.writeInt64(v)
+ case string:
+ wb.writeString(v)
+ case []byte:
+ wb.writeBytes(v)
+ case bool:
+ wb.writeBool(v)
+ case writable:
+ v.writeTo(wb)
+ default:
+ panic(fmt.Sprintf("unsupported type: %T", a))
+ }
+}
+
+func (wb *writeBuffer) Write(b []byte) (int, error) {
+ return wb.w.Write(b)
+}
+
+func (wb *writeBuffer) WriteString(s string) (int, error) {
+ return io.WriteString(wb.w, s)
+}
+
+func (wb *writeBuffer) Flush() error {
+ if x, ok := wb.w.(interface{ Flush() error }); ok {
+ return x.Flush()
+ }
+ return nil
+}
+
+type writable interface {
+ writeTo(*writeBuffer)
+}
+
+func (wb *writeBuffer) writeFetchRequestV2(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration) error {
+ h := requestHeader{
+ ApiKey: int16(fetch),
+ ApiVersion: int16(v2),
+ CorrelationID: correlationID,
+ ClientID: clientID,
+ }
+ h.Size = (h.size() - 4) +
+ 4 + // replica ID
+ 4 + // max wait time
+ 4 + // min bytes
+ 4 + // topic array length
+ sizeofString(topic) +
+ 4 + // partition array length
+ 4 + // partition
+ 8 + // offset
+ 4 // max bytes
+
+ h.writeTo(wb)
+ wb.writeInt32(-1) // replica ID
+ wb.writeInt32(milliseconds(maxWait))
+ wb.writeInt32(int32(minBytes))
+
+ // topic array
+ wb.writeArrayLen(1)
+ wb.writeString(topic)
+
+ // partition array
+ wb.writeArrayLen(1)
+ wb.writeInt32(partition)
+ wb.writeInt64(offset)
+ wb.writeInt32(int32(maxBytes))
+
+ return wb.Flush()
+}
+
+func (wb *writeBuffer) writeFetchRequestV5(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error {
+ h := requestHeader{
+ ApiKey: int16(fetch),
+ ApiVersion: int16(v5),
+ CorrelationID: correlationID,
+ ClientID: clientID,
+ }
+ h.Size = (h.size() - 4) +
+ 4 + // replica ID
+ 4 + // max wait time
+ 4 + // min bytes
+ 4 + // max bytes
+ 1 + // isolation level
+ 4 + // topic array length
+ sizeofString(topic) +
+ 4 + // partition array length
+ 4 + // partition
+ 8 + // offset
+ 8 + // log start offset
+ 4 // max bytes
+
+ h.writeTo(wb)
+ wb.writeInt32(-1) // replica ID
+ wb.writeInt32(milliseconds(maxWait))
+ wb.writeInt32(int32(minBytes))
+ wb.writeInt32(int32(maxBytes))
+ wb.writeInt8(isolationLevel) // isolation level 0 - read uncommitted
+
+ // topic array
+ wb.writeArrayLen(1)
+ wb.writeString(topic)
+
+ // partition array
+ wb.writeArrayLen(1)
+ wb.writeInt32(partition)
+ wb.writeInt64(offset)
+ wb.writeInt64(int64(0)) // log start offset, only used when the request is sent by a follower
+ wb.writeInt32(int32(maxBytes))
+
+ return wb.Flush()
+}
+
+func (wb *writeBuffer) writeFetchRequestV10(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error {
+ h := requestHeader{
+ ApiKey: int16(fetch),
+ ApiVersion: int16(v10),
+ CorrelationID: correlationID,
+ ClientID: clientID,
+ }
+ h.Size = (h.size() - 4) +
+ 4 + // replica ID
+ 4 + // max wait time
+ 4 + // min bytes
+ 4 + // max bytes
+ 1 + // isolation level
+ 4 + // session ID
+ 4 + // session epoch
+ 4 + // topic array length
+ sizeofString(topic) +
+ 4 + // partition array length
+ 4 + // partition
+ 4 + // current leader epoch
+ 8 + // fetch offset
+ 8 + // log start offset
+ 4 + // partition max bytes
+ 4 // forgotten topics data
+
+ h.writeTo(wb)
+ wb.writeInt32(-1) // replica ID
+ wb.writeInt32(milliseconds(maxWait))
+ wb.writeInt32(int32(minBytes))
+ wb.writeInt32(int32(maxBytes))
+ wb.writeInt8(isolationLevel) // isolation level 0 - read uncommitted
+ wb.writeInt32(0) //FIXME
+ wb.writeInt32(-1) //FIXME
+
+ // topic array
+ wb.writeArrayLen(1)
+ wb.writeString(topic)
+
+ // partition array
+ wb.writeArrayLen(1)
+ wb.writeInt32(partition)
+ wb.writeInt32(-1) //FIXME
+ wb.writeInt64(offset)
+ wb.writeInt64(int64(0)) // log start offset, only used when the request is sent by a follower
+ wb.writeInt32(int32(maxBytes))
+
+ // forgotten topics array
+ wb.writeArrayLen(0) // forgotten topics not supported yet
+
+ return wb.Flush()
+}
+
+func (wb *writeBuffer) writeListOffsetRequestV1(correlationID int32, clientID, topic string, partition int32, time int64) error {
+ h := requestHeader{
+ ApiKey: int16(listOffsets),
+ ApiVersion: int16(v1),
+ CorrelationID: correlationID,
+ ClientID: clientID,
+ }
+ h.Size = (h.size() - 4) +
+ 4 + // replica ID
+ 4 + // topic array length
+ sizeofString(topic) + // topic
+ 4 + // partition array length
+ 4 + // partition
+ 8 // time
+
+ h.writeTo(wb)
+ wb.writeInt32(-1) // replica ID
+
+ // topic array
+ wb.writeArrayLen(1)
+ wb.writeString(topic)
+
+ // partition array
+ wb.writeArrayLen(1)
+ wb.writeInt32(partition)
+ wb.writeInt64(time)
+
+ return wb.Flush()
+}
+
+func (wb *writeBuffer) writeProduceRequestV2(codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) (err error) {
+ var size int32
+ var attributes int8
+ var compressed *bytes.Buffer
+
+ if codec == nil {
+ size = messageSetSize(msgs...)
+ } else {
+ compressed, attributes, size, err = compressMessageSet(codec, msgs...)
+ if err != nil { + return + } + msgs = []Message{{Value: compressed.Bytes()}} + } + + h := requestHeader{ + ApiKey: int16(produce), + ApiVersion: int16(v2), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + size + + h.writeTo(wb) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + wb.writeInt32(size) + cw := &crc32Writer{table: crc32.IEEETable} + + for _, msg := range msgs { + wb.writeMessage(msg.Offset, attributes, msg.Time, msg.Key, msg.Value, cw) + } + + releaseBuffer(compressed) + return wb.Flush() +} + +func (wb *writeBuffer) writeProduceRequestV3(correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, recordBatch *recordBatch) (err error) { + + h := requestHeader{ + ApiKey: int16(produce), + ApiVersion: int16(v3), + CorrelationID: correlationID, + ClientID: clientID, + } + + h.Size = (h.size() - 4) + + sizeofNullableString(transactionalID) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + recordBatch.size + + h.writeTo(wb) + wb.writeNullableString(transactionalID) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + recordBatch.writeTo(wb) + + return wb.Flush() +} + +func (wb *writeBuffer) writeProduceRequestV7(correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, recordBatch *recordBatch) (err error) { + + h := requestHeader{ + ApiKey: int16(produce), + ApiVersion: int16(v7), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + sizeofNullableString(transactionalID) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + recordBatch.size + + h.writeTo(wb) + wb.writeNullableString(transactionalID) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + recordBatch.writeTo(wb) + + return wb.Flush() +} + +func (wb *writeBuffer) writeRecordBatch(attributes int16, size int32, count int, baseTime, lastTime time.Time, write func(*writeBuffer)) { + var ( + baseTimestamp = timestamp(baseTime) + lastTimestamp = timestamp(lastTime) + lastOffsetDelta = int32(count - 1) + producerID = int64(-1) // default producer id for now + producerEpoch = int16(-1) // default producer epoch for now + baseSequence = int32(-1) // default base sequence + recordCount = int32(count) // record count + writerBackup = wb.w + ) + + // dry run to compute the checksum + cw := &crc32Writer{table: crc32.MakeTable(crc32.Castagnoli)} + wb.w = cw + cw.writeInt16(attributes) // attributes, timestamp type 0 - create time, not part of a transaction, no control messages + 
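+ // (the crc32Writer is accumulating the checksum of the fields written in
+ // this dry run; the same fields are re-written for real once wb.w is
+ // restored to writerBackup below)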
+ cw.writeInt32(lastOffsetDelta)
+ cw.writeInt64(baseTimestamp)
+ cw.writeInt64(lastTimestamp)
+ cw.writeInt64(producerID)
+ cw.writeInt16(producerEpoch)
+ cw.writeInt32(baseSequence)
+ cw.writeInt32(recordCount)
+ write(wb)
+ wb.w = writerBackup
+
+ // actual write to the output buffer
+ wb.writeInt64(int64(0))
+ wb.writeInt32(int32(size - 12)) // 12 = batch length + base offset sizes
+ wb.writeInt32(-1) // partition leader epoch
+ wb.writeInt8(2) // magic byte
+ wb.writeInt32(int32(cw.crc32))
+
+ wb.writeInt16(attributes)
+ wb.writeInt32(lastOffsetDelta)
+ wb.writeInt64(baseTimestamp)
+ wb.writeInt64(lastTimestamp)
+ wb.writeInt64(producerID)
+ wb.writeInt16(producerEpoch)
+ wb.writeInt32(baseSequence)
+ wb.writeInt32(recordCount)
+ write(wb)
+}
+
+func compressMessageSet(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int8, size int32, err error) {
+ compressed = acquireBuffer()
+ compressor := codec.NewWriter(compressed)
+ wb := &writeBuffer{w: compressor}
+ cw := &crc32Writer{table: crc32.IEEETable}
+
+ for offset, msg := range msgs {
+ wb.writeMessage(int64(offset), 0, msg.Time, msg.Key, msg.Value, cw)
+ }
+
+ if err = compressor.Close(); err != nil {
+ releaseBuffer(compressed)
+ return
+ }
+
+ attributes = codec.Code()
+ size = messageSetSize(Message{Value: compressed.Bytes()})
+ return
+}
+
+func (wb *writeBuffer) writeMessage(offset int64, attributes int8, time time.Time, key, value []byte, cw *crc32Writer) {
+ const magicByte = 1 // compatible with kafka 0.10.0.0+
+
+ timestamp := timestamp(time)
+ size := messageSize(key, value)
+
+ // dry run to compute the checksum
+ cw.crc32 = 0
+ cw.writeInt8(magicByte)
+ cw.writeInt8(attributes)
+ cw.writeInt64(timestamp)
+ cw.writeBytes(key)
+ cw.writeBytes(value)
+
+ // actual write to the output buffer
+ wb.writeInt64(offset)
+ wb.writeInt32(size)
+ wb.writeInt32(int32(cw.crc32))
+ wb.writeInt8(magicByte)
+ wb.writeInt8(attributes)
+ wb.writeInt64(timestamp)
+ wb.writeBytes(key)
+ wb.writeBytes(value)
+}
+
+// Messages with magic >= 2 are called records. This method writes messages using message format 2.
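+// Each record is written as a varint length prefix, a single attributes byte,
+// varint timestamp and offset deltas relative to the batch base, and then the
+// length-prefixed key, value, and headers.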
+func (wb *writeBuffer) writeRecord(attributes int8, baseTime time.Time, offset int64, msg Message) {
+ timestampDelta := msg.Time.Sub(baseTime)
+ offsetDelta := int64(offset)
+
+ wb.writeVarInt(int64(recordSize(&msg, timestampDelta, offsetDelta)))
+ wb.writeInt8(attributes)
+ wb.writeVarInt(int64(milliseconds(timestampDelta)))
+ wb.writeVarInt(offsetDelta)
+
+ wb.writeVarBytes(msg.Key)
+ wb.writeVarBytes(msg.Value)
+ wb.writeVarArray(len(msg.Headers), func(i int) {
+ h := &msg.Headers[i]
+ wb.writeVarString(h.Key)
+ wb.writeVarBytes(h.Value)
+ })
+}
+
+func varIntLen(i int64) int {
+ u := uint64((i << 1) ^ (i >> 63)) // zig-zag encoding
+ n := 0
+
+ for u >= 0x80 {
+ u >>= 7
+ n++
+ }
+
+ return n + 1
+}
+
+func varBytesLen(b []byte) int {
+ return varIntLen(int64(len(b))) + len(b)
+}
+
+func varStringLen(s string) int {
+ return varIntLen(int64(len(s))) + len(s)
+}
+
+func varArrayLen(n int, f func(int) int) int {
+ size := varIntLen(int64(n))
+ for i := 0; i < n; i++ {
+ size += f(i)
+ }
+ return size
+}
+
+func messageSize(key, value []byte) int32 {
+ return 4 + // crc
+ 1 + // magic byte
+ 1 + // attributes
+ 8 + // timestamp
+ sizeofBytes(key) +
+ sizeofBytes(value)
+}
+
+func messageSetSize(msgs ...Message) (size int32) {
+ for _, msg := range msgs {
+ size += 8 + // offset
+ 4 + // message size
+ 4 + // crc
+ 1 + // magic byte
+ 1 + // attributes
+ 8 + // timestamp
+ sizeofBytes(msg.Key) +
+ sizeofBytes(msg.Value)
+ }
+ return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/writer.go b/vendor/github.com/segmentio/kafka-go/writer.go
new file mode 100644
index 000000000..3c7af907a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/writer.go
@@ -0,0 +1,1309 @@
+package kafka
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ metadataAPI "github.com/segmentio/kafka-go/protocol/metadata"
+)
+
+// The Writer type provides the implementation of a producer of kafka messages
+// that automatically distributes messages across partitions of a single topic
+// using a configurable balancing policy.
+//
+// Writers manage the dispatch of messages across partitions of the topic they
+// are configured to write to using a Balancer, and aggregate batches to
+// optimize the writes to kafka.
+//
+// Writers may be configured to be used synchronously or asynchronously. When
+// used synchronously, calls to WriteMessages block until the messages have been
+// written to kafka. In this mode, the program should inspect the error returned
+// by the function and test if it is an instance of kafka.WriteErrors in order to
+// identify which messages have succeeded or failed, for example:
+//
+// // Construct a synchronous writer (the default mode).
+// w := &kafka.Writer{
+// Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+// Topic: "topic-A",
+// RequiredAcks: kafka.RequireAll,
+// }
+//
+// ...
+//
+// // Passing a context can prevent the operation from blocking indefinitely.
+// switch err := w.WriteMessages(ctx, msgs...).(type) {
+// case nil:
+// case kafka.WriteErrors:
+// for i := range msgs {
+// if err[i] != nil {
+// // handle the error writing msgs[i]
+// ...
+// }
+// }
+// default:
+// // handle other errors
+// ...
+// }
+//
+// In asynchronous mode, the program may configure a completion handler on the
+// writer to receive notifications of messages being written to kafka:
+//
+// w := &kafka.Writer{
+// Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+// Topic: "topic-A",
+// RequiredAcks: kafka.RequireAll,
+// Async: true, // make the writer asynchronous
+// Completion: func(messages []kafka.Message, err error) {
+// ...
+// },
+// }
+//
+// ...
+//
+// // Because the writer is asynchronous, there is no need for the context to
+// // be cancelled; the call will never block.
+// if err := w.WriteMessages(context.Background(), msgs...); err != nil {
+// // Only validation errors would be reported in this case.
+// ...
+// }
+//
+// Methods of Writer are safe to use concurrently from multiple goroutines;
+// however, the writer configuration should not be modified after first use.
+type Writer struct {
+ // Address of the kafka cluster that this writer is configured to send
+ // messages to.
+ //
+ // This field is required; attempting to write messages to a writer with a
+ // nil address will error.
+ Addr net.Addr
+
+ // Topic is the name of the topic that the writer will produce messages to.
+ //
+ // This field and Message.Topic are mutually exclusive. If you set Topic
+ // here, you must not set Topic for any produced Message. Otherwise, if you do
+ // not set Topic, every Message must have Topic specified.
+ Topic string
+
+ // The balancer used to distribute messages across partitions.
+ //
+ // The default is to use a round-robin distribution.
+ Balancer Balancer
+
+ // Limit on how many attempts will be made to deliver a message.
+ //
+ // The default is to try at most 10 times.
+ MaxAttempts int
+
+ // WriteBackoffMin optionally sets the smallest amount of time the writer waits before
+ // it attempts to write a batch of messages.
+ //
+ // Default: 100ms
+ WriteBackoffMin time.Duration
+
+ // WriteBackoffMax optionally sets the maximum amount of time the writer waits before
+ // it attempts to write a batch of messages.
+ //
+ // Default: 1s
+ WriteBackoffMax time.Duration
+
+ // Limit on how many messages will be buffered before being sent to a
+ // partition.
+ //
+ // The default is to use a target batch size of 100 messages.
+ BatchSize int
+
+ // Limit the maximum size of a request in bytes before being sent to
+ // a partition.
+ //
+ // The default is to use a kafka default value of 1048576.
+ BatchBytes int64
+
+ // Time limit on how often incomplete message batches will be flushed to
+ // kafka.
+ //
+ // The default is to flush at least every second.
+ BatchTimeout time.Duration
+
+ // Timeout for read operations performed by the Writer.
+ //
+ // Defaults to 10 seconds.
+ ReadTimeout time.Duration
+
+ // Timeout for write operations performed by the Writer.
+ //
+ // Defaults to 10 seconds.
+ WriteTimeout time.Duration
+
+ // Number of acknowledgements from partition replicas required before receiving
+ // a response to a produce request. The following values are supported:
+ //
+ // RequireNone (0) fire-and-forget, do not wait for acknowledgements from the cluster
+ // RequireOne (1) wait for the leader to acknowledge the writes
+ // RequireAll (-1) wait for the full ISR to acknowledge the writes
+ //
+ // Defaults to RequireNone.
+ RequiredAcks RequiredAcks
+
+ // Setting this flag to true causes the WriteMessages method to never block.
+ // It also means that errors are ignored since the caller will not receive
+ // the returned value. Use this only if you don't care about guarantees of
+ // whether the messages were written to kafka.
+ //
+ // Defaults to false.
+ Async bool
+
+ // An optional function called when the writer succeeds or fails the
+ // delivery of messages to a kafka partition. When writing the messages
+ // fails, the `err` parameter will be non-nil.
+ //
+ // The messages that the Completion function is called with have their
+ // topic, partition, offset, and time set based on the Produce responses
+ // received from kafka. All messages passed to a call to the function have
+ // been written to the same partition. The keys and values of messages
+ // reference the original byte slices carried by messages in the calls to
+ // WriteMessages.
+ //
+ // The function is called from goroutines started by the writer. Calls to
+ // Close will block on the Completion function calls. When the Writer is
+ // not writing asynchronously, the WriteMessages call will also block on the
+ // Completion function, which is a useful guarantee if the byte slices
+ // for the message keys and values are intended to be reused after the
+ // WriteMessages call has returned.
+ //
+ // If a completion function panics, the program terminates because the
+ // panic is not recovered by the writer and bubbles up to the top of the
+ // goroutine's call stack.
+ Completion func(messages []Message, err error)
+
+ // Compression sets the compression codec used to compress messages.
+ Compression Compression
+
+ // If not nil, specifies a logger used to report internal changes within the
+ // writer.
+ Logger Logger
+
+ // ErrorLogger is the logger used to report errors. If nil, the writer falls
+ // back to using Logger instead.
+ ErrorLogger Logger
+
+ // A transport used to send messages to kafka clusters.
+ //
+ // If nil, DefaultTransport is used.
+ Transport RoundTripper
+
+ // AllowAutoTopicCreation notifies the writer to create the topic if it is missing.
+ AllowAutoTopicCreation bool
+
+ // Manages the current set of partition-topic writers.
+ group sync.WaitGroup
+ mutex sync.Mutex
+ closed bool
+ writers map[topicPartition]*partitionWriter
+
+ // writer stats are all made of atomic values, no need for synchronization.
+ // Use a pointer to ensure 64-bit alignment of the values. The once value is
+ // used to lazily create the value when first used, allowing programs to use
+ // the zero value of Writer.
+ once sync.Once
+ *writerStats
+
+ // If no balancer is configured, the writer uses this one. RoundRobin values
+ // are safe to use concurrently from multiple goroutines; there is no need
+ // for extra synchronization to access this field.
+ roundRobin RoundRobin
+
+ // non-nil when a transport was created by NewWriter, remove in 1.0.
+ transport *Transport
+}
+
+// WriterConfig is a configuration type used to create new instances of Writer.
+//
+// DEPRECATED: writer values should be configured directly by assigning their
+// exported fields. This type is kept for backward compatibility, and will be
+// removed in version 1.0.
+type WriterConfig struct {
+ // The list of brokers used to discover the partitions available on the
+ // kafka cluster.
+ //
+ // This field is required; attempting to create a writer with an empty list
+ // of brokers will panic.
+ Brokers []string
+
+ // The topic that the writer will produce messages to.
+ //
+ // If provided, this will be used to set the topic for all produced messages.
+ // If not provided, each Message must specify a topic for itself. The two
+ // options are mutually exclusive: setting both the Writer's Topic and a
+ // Message's Topic causes the Writer to return an error.
+ Topic string
+
+ // The dialer used by the writer to establish connections to the kafka
+ // cluster.
+ //
+ // If nil, the default dialer is used instead.
+ Dialer *Dialer
+
+ // The balancer used to distribute messages across partitions.
+ //
+ // The default is to use a round-robin distribution.
+ Balancer Balancer
+
+ // Limit on how many attempts will be made to deliver a message.
+ //
+ // The default is to try at most 10 times.
+ MaxAttempts int
+
+ // DEPRECATED: in versions prior to 0.4, the writer used channels internally
+ // to dispatch messages to partitions. This has been replaced by an in-memory
+ // aggregation of batches which uses shared state instead of message passing,
+ // making this option unnecessary.
+ QueueCapacity int
+
+ // Limit on how many messages will be buffered before being sent to a
+ // partition.
+ //
+ // The default is to use a target batch size of 100 messages.
+ BatchSize int
+
+ // Limit the maximum size of a request in bytes before being sent to
+ // a partition.
+ //
+ // The default is to use a kafka default value of 1048576.
+ BatchBytes int
+
+ // Time limit on how often incomplete message batches will be flushed to
+ // kafka.
+ //
+ // The default is to flush at least every second.
+ BatchTimeout time.Duration
+
+ // Timeout for read operations performed by the Writer.
+ //
+ // Defaults to 10 seconds.
+ ReadTimeout time.Duration
+
+ // Timeout for write operations performed by the Writer.
+ //
+ // Defaults to 10 seconds.
+ WriteTimeout time.Duration
+
+ // DEPRECATED: in versions prior to 0.4, the writer used to maintain a cache
+ // of the topic layout. With the change to use a transport to manage connections,
+ // the responsibility of syncing the cluster layout has been delegated to the
+ // transport.
+ RebalanceInterval time.Duration
+
+ // DEPRECATED: in versions prior to 0.4, the writer used to manage connections
+ // to the kafka cluster directly. With the change to use a transport to manage
+ // connections, the writer has no connections to manage directly anymore.
+ IdleConnTimeout time.Duration
+
+ // Number of acknowledgements from partition replicas required before receiving
+ // a response to a produce request. The default is -1, which means to wait for
+ // all replicas, and a value above 0 is required to indicate how many replicas
+ // should acknowledge a message to be considered successful.
+ RequiredAcks int
+
+ // Setting this flag to true causes the WriteMessages method to never block.
+ // It also means that errors are ignored since the caller will not receive
+ // the returned value. Use this only if you don't care about guarantees of
+ // whether the messages were written to kafka.
+ Async bool
+
+ // CompressionCodec sets the codec used to compress Kafka messages.
+ CompressionCodec
+
+ // If not nil, specifies a logger used to report internal changes within the
+ // writer.
+ Logger Logger
+
+ // ErrorLogger is the logger used to report errors. If nil, the writer falls
+ // back to using Logger instead.
+ ErrorLogger Logger
+}
+
+type topicPartition struct {
+ topic string
+ partition int32
+}
+
+// Validate validates the WriterConfig properties.
+func (config *WriterConfig) Validate() error {
+ if len(config.Brokers) == 0 {
+ return errors.New("cannot create a kafka writer with an empty list of brokers")
+ }
+ return nil
+}
+
+// WriterStats is a data structure returned by a call to Writer.Stats that
+// exposes details about the behavior of the writer.
+type WriterStats struct {
+ Writes int64 `metric:"kafka.writer.write.count" type:"counter"`
+ Messages int64 `metric:"kafka.writer.message.count" type:"counter"`
+ Bytes int64 `metric:"kafka.writer.message.bytes" type:"counter"`
+ Errors int64 `metric:"kafka.writer.error.count" type:"counter"`
+
+ BatchTime DurationStats `metric:"kafka.writer.batch.seconds"`
+ BatchQueueTime DurationStats `metric:"kafka.writer.batch.queue.seconds"`
+ WriteTime DurationStats `metric:"kafka.writer.write.seconds"`
+ WaitTime DurationStats `metric:"kafka.writer.wait.seconds"`
+ Retries int64 `metric:"kafka.writer.retries.count" type:"counter"`
+ BatchSize SummaryStats `metric:"kafka.writer.batch.size"`
+ BatchBytes SummaryStats `metric:"kafka.writer.batch.bytes"`
+
+ MaxAttempts int64 `metric:"kafka.writer.attempts.max" type:"gauge"`
+ WriteBackoffMin time.Duration `metric:"kafka.writer.backoff.min" type:"gauge"`
+ WriteBackoffMax time.Duration `metric:"kafka.writer.backoff.max" type:"gauge"`
+ MaxBatchSize int64 `metric:"kafka.writer.batch.max" type:"gauge"`
+ BatchTimeout time.Duration `metric:"kafka.writer.batch.timeout" type:"gauge"`
+ ReadTimeout time.Duration `metric:"kafka.writer.read.timeout" type:"gauge"`
+ WriteTimeout time.Duration `metric:"kafka.writer.write.timeout" type:"gauge"`
+ RequiredAcks int64 `metric:"kafka.writer.acks.required" type:"gauge"`
+ Async bool `metric:"kafka.writer.async" type:"gauge"`
+
+ Topic string `tag:"topic"`
+
+ // DEPRECATED: these fields will only be reported for backward compatibility
+ // if the Writer was constructed with NewWriter.
+ Dials int64 `metric:"kafka.writer.dial.count" type:"counter"`
+ DialTime DurationStats `metric:"kafka.writer.dial.seconds"`
+
+ // DEPRECATED: these fields were meaningful prior to kafka-go 0.4; changes
+ // to the internal implementation and the introduction of the transport type
+ // made them unnecessary.
+ //
+ // The values will be zero but are left for backward compatibility to avoid
+ // breaking programs that used these fields.
+ Rebalances int64
+ RebalanceInterval time.Duration
+ QueueLength int64
+ QueueCapacity int64
+ ClientID string
+}
+
+// writerStats is a struct that contains statistics on a writer.
+//
+// Since atomic is used to mutate the statistics, the values must be 64-bit aligned.
+// This is easily accomplished by always allocating this struct directly (i.e. using a pointer to the struct).
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type writerStats struct {
+ dials counter
+ writes counter
+ messages counter
+ bytes counter
+ errors counter
+ dialTime summary
+ batchTime summary
+ batchQueueTime summary
+ writeTime summary
+ waitTime summary
+ retries counter
+ batchSize summary
+ batchSizeBytes summary
+}
+
+// NewWriter creates and returns a new Writer configured with config.
+//
+// DEPRECATED: Writer values can be instantiated and configured directly;
+// this function is retained for backward compatibility and will be removed
+// in version 1.0.
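+//
+// For migration, a rough equivalence (illustrative only; the brokers and topic
+// variables are placeholders):
+//
+// // NewWriter(WriterConfig{Brokers: brokers, Topic: topic}) is close to:
+// w := &kafka.Writer{
+// Addr: kafka.TCP(brokers...),
+// Topic: topic,
+// }
+//
+// Note that NewWriter defaults RequiredAcks to RequireAll, while a zero-value
+// Writer defaults to RequireNone.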
+func NewWriter(config WriterConfig) *Writer {
+ if err := config.Validate(); err != nil {
+ panic(err)
+ }
+
+ if config.Dialer == nil {
+ config.Dialer = DefaultDialer
+ }
+
+ if config.Balancer == nil {
+ config.Balancer = &RoundRobin{}
+ }
+
+ // Converts the pre-0.4 Dialer API into a Transport.
+ kafkaDialer := DefaultDialer
+ if config.Dialer != nil {
+ kafkaDialer = config.Dialer
+ }
+
+ dialer := (&net.Dialer{
+ Timeout: kafkaDialer.Timeout,
+ Deadline: kafkaDialer.Deadline,
+ LocalAddr: kafkaDialer.LocalAddr,
+ DualStack: kafkaDialer.DualStack,
+ FallbackDelay: kafkaDialer.FallbackDelay,
+ KeepAlive: kafkaDialer.KeepAlive,
+ })
+
+ var resolver Resolver
+ if r, ok := kafkaDialer.Resolver.(*net.Resolver); ok {
+ dialer.Resolver = r
+ } else {
+ resolver = kafkaDialer.Resolver
+ }
+
+ stats := new(writerStats)
+ // For backward compatibility with the pre-0.4 APIs, support custom
+ // resolvers by wrapping the dial function.
+ dial := func(ctx context.Context, network, addr string) (net.Conn, error) {
+ start := time.Now()
+ defer func() {
+ stats.dials.observe(1)
+ stats.dialTime.observe(int64(time.Since(start)))
+ }()
+ address, err := lookupHost(ctx, addr, resolver)
+ if err != nil {
+ return nil, err
+ }
+ return dialer.DialContext(ctx, network, address)
+ }
+
+ idleTimeout := config.IdleConnTimeout
+ if idleTimeout == 0 {
+ // Historical default value of WriterConfig.IdleConnTimeout; 9 minutes
+ // seems way too long when there is no ping mechanism in the kafka
+ // protocol.
+ idleTimeout = 9 * time.Minute
+ }
+
+ metadataTTL := config.RebalanceInterval
+ if metadataTTL == 0 {
+ // Historical default value of WriterConfig.RebalanceInterval.
+ metadataTTL = 15 * time.Second
+ }
+
+ transport := &Transport{
+ Dial: dial,
+ SASL: kafkaDialer.SASLMechanism,
+ TLS: kafkaDialer.TLS,
+ ClientID: kafkaDialer.ClientID,
+ IdleTimeout: idleTimeout,
+ MetadataTTL: metadataTTL,
+ }
+
+ w := &Writer{
+ Addr: TCP(config.Brokers...),
+ Topic: config.Topic,
+ MaxAttempts: config.MaxAttempts,
+ BatchSize: config.BatchSize,
+ Balancer: config.Balancer,
+ BatchBytes: int64(config.BatchBytes),
+ BatchTimeout: config.BatchTimeout,
+ ReadTimeout: config.ReadTimeout,
+ WriteTimeout: config.WriteTimeout,
+ RequiredAcks: RequiredAcks(config.RequiredAcks),
+ Async: config.Async,
+ Logger: config.Logger,
+ ErrorLogger: config.ErrorLogger,
+ Transport: transport,
+ transport: transport,
+ writerStats: stats,
+ }
+
+ if config.RequiredAcks == 0 {
+ // Historically the writers created by NewWriter have used "all" as the
+ // default value when 0 was specified.
+ w.RequiredAcks = RequireAll
+ }
+
+ if config.CompressionCodec != nil {
+ w.Compression = Compression(config.CompressionCodec.Code())
+ }
+
+ return w
+}
+
+// enter is called by WriteMessages to indicate that a new inflight operation
+// has started, which helps synchronize with Close and ensure that the method
+// does not return until all inflight operations were completed.
+func (w *Writer) enter() bool {
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+ if w.closed {
+ return false
+ }
+ w.group.Add(1)
+ return true
+}
+
+// leave is called by WriteMessages to indicate that the inflight operation has
+// completed.
+func (w *Writer) leave() { w.group.Done() }
+
+// spawn starts a new asynchronous operation on the writer. This method is used
+// instead of starting goroutines inline to help manage the state of the
+// writer's wait group. The wait group is used to block Close calls until all
+// inflight operations have completed, therefore automatically including those
+// started with calls to spawn.
+func (w *Writer) spawn(f func()) {
+ w.group.Add(1)
+ go func() {
+ defer w.group.Done()
+ f()
+ }()
+}
+
+// Close flushes pending writes, and waits for all writes to complete before
+// returning. Calling Close also prevents new writes from being submitted to
+// the writer; further calls to WriteMessages and the like will fail with
+// io.ErrClosedPipe.
+func (w *Writer) Close() error {
+ w.mutex.Lock()
+ // Marking the writer as closed here causes future calls to WriteMessages to
+ // fail with io.ErrClosedPipe. Mutation of this field is synchronized on the
+ // writer's mutex to ensure that no more increments of the wait group are
+ // performed afterwards (which could otherwise race with the Wait below).
+ w.closed = true
+
+ // close all writers to trigger any pending batches
+ for _, writer := range w.writers {
+ writer.close()
+ }
+
+ for partition := range w.writers {
+ delete(w.writers, partition)
+ }
+
+ w.mutex.Unlock()
+ w.group.Wait()
+
+ if w.transport != nil {
+ w.transport.CloseIdleConnections()
+ }
+
+ return nil
+}
+
+// WriteMessages writes a batch of messages to the kafka topic configured on this
+// writer.
+//
+// Unless the writer was configured to write messages asynchronously, the method
+// blocks until all messages have been written, or until the maximum number of
+// attempts was reached.
+//
+// When sending synchronously and the writer's batch size is configured to be
+// greater than 1, this method blocks until either a full batch can be assembled
+// or the batch timeout is reached. The batch size and timeouts are evaluated
+// per partition, so the choice of Balancer can also influence the flushing
+// behavior. For example, the Hash balancer will require on average N * batch
+// size messages to trigger a flush where N is the number of partitions. The
+// best way to achieve good batching behavior is to share one Writer amongst
+// multiple goroutines.
+//
+// When the method returns an error, it may be of type kafka.WriteErrors to allow
+// the caller to determine the status of each message.
+//
+// The context passed as first argument may also be used to asynchronously
+// cancel the operation. Note that in this case there are no guarantees made on
+// whether messages were written to kafka; they might also still be written
+// after this method has already returned, therefore it is important to not
+// modify byte slices of passed messages if WriteMessages returned early due
+// to a canceled context.
+// The program should assume that the whole batch failed and re-write the
+// messages later (which could then cause duplicates).
+func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error {
+ if w.Addr == nil {
+ return errors.New("kafka.(*Writer).WriteMessages: cannot create a kafka writer with a nil address")
+ }
+
+ if !w.enter() {
+ return io.ErrClosedPipe
+ }
+ defer w.leave()
+
+ if len(msgs) == 0 {
+ return nil
+ }
+
+ balancer := w.balancer()
+ batchBytes := w.batchBytes()
+
+ for i := range msgs {
+ n := int64(msgs[i].totalSize())
+ if n > batchBytes {
+ // This error is left for backward compatibility with historical
+ // behavior, but it can yield O(N^2) behaviors. The expectation
+ // is that the program will check if WriteMessages returned a
+ // MessageTooLargeError, discard the message that was exceeding
+ // the maximum size, and try again.
+ return messageTooLarge(msgs, i)
+ }
+ }
+
+ // We use int32 here to halve the memory footprint (compared to using int
+ // on 64-bit architectures). We map lists of the message indexes instead
+ // of the message values for the same reason: an int32 is 4 bytes, vs a full
+ // Message value which is 100+ bytes, contains pointers, and contributes
+ // to increasing GC work.
+ assignments := make(map[topicPartition][]int32)
+
+ for i, msg := range msgs {
+ topic, err := w.chooseTopic(msg)
+ if err != nil {
+ return err
+ }
+
+ numPartitions, err := w.partitions(ctx, topic)
+ if err != nil {
+ return err
+ }
+
+ partition := balancer.Balance(msg, loadCachedPartitions(numPartitions)...)
+
+ key := topicPartition{
+ topic: topic,
+ partition: int32(partition),
+ }
+
+ assignments[key] = append(assignments[key], int32(i))
+ }
+
+ batches := w.batchMessages(msgs, assignments)
+ if w.Async {
+ return nil
+ }
+
+ done := ctx.Done()
+ hasErrors := false
+ for batch := range batches {
+ select {
+ case <-done:
+ return ctx.Err()
+ case <-batch.done:
+ if batch.err != nil {
+ hasErrors = true
+ }
+ }
+ }
+
+ if !hasErrors {
+ return nil
+ }
+
+ werr := make(WriteErrors, len(msgs))
+
+ for batch, indexes := range batches {
+ for _, i := range indexes {
+ werr[i] = batch.err
+ }
+ }
+ return werr
+}
+
+func (w *Writer) batchMessages(messages []Message, assignments map[topicPartition][]int32) map[*writeBatch][]int32 {
+ var batches map[*writeBatch][]int32
+ if !w.Async {
+ batches = make(map[*writeBatch][]int32, len(assignments))
+ }
+
+ w.mutex.Lock()
+ defer w.mutex.Unlock()
+
+ if w.writers == nil {
+ w.writers = map[topicPartition]*partitionWriter{}
+ }
+
+ for key, indexes := range assignments {
+ writer := w.writers[key]
+ if writer == nil {
+ writer = newPartitionWriter(w, key)
+ w.writers[key] = writer
+ }
+ wbatches := writer.writeMessages(messages, indexes)
+
+ for batch, idxs := range wbatches {
+ batches[batch] = idxs
+ }
+ }
+
+ return batches
+}
+
+func (w *Writer) produce(key topicPartition, batch *writeBatch) (*ProduceResponse, error) {
+ timeout := w.writeTimeout()
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ return w.client(timeout).Produce(ctx, &ProduceRequest{
+ Partition: int(key.partition),
+ Topic: key.topic,
+ RequiredAcks: w.RequiredAcks,
+ Compression: w.Compression,
+ Records: &writerRecords{
+ msgs: batch.msgs,
+ },
+ })
+}
+
+func (w *Writer) partitions(ctx context.Context, topic string) (int, error) {
+ client := w.client(w.readTimeout())
+ // Here we use the transport directly as an optimization to avoid the
+ // construction of temporary request and response objects made by the
+ // (*Client).Metadata API.
+ //
+ // It is expected that the transport will optimize this request by
+ // caching recent results (the kafka.Transport type does).
+ r, err := client.transport().RoundTrip(ctx, client.Addr, &metadataAPI.Request{
+ TopicNames: []string{topic},
+ AllowAutoTopicCreation: w.AllowAutoTopicCreation,
+ })
+ if err != nil {
+ return 0, err
+ }
+ for _, t := range r.(*metadataAPI.Response).Topics {
+ if t.Name == topic {
+ // This should always hit, unless kafka has a bug.
+ if t.ErrorCode != 0 {
+ return 0, Error(t.ErrorCode)
+ }
+ return len(t.Partitions), nil
+ }
+ }
+ return 0, UnknownTopicOrPartition
+}
+
+func (w *Writer) client(timeout time.Duration) *Client {
+ return &Client{
+ Addr: w.Addr,
+ Transport: w.Transport,
+ Timeout: timeout,
+ }
+}
+
+func (w *Writer) balancer() Balancer {
+ if w.Balancer != nil {
+ return w.Balancer
+ }
+ return &w.roundRobin
+}
+
+func (w *Writer) maxAttempts() int {
+ if w.MaxAttempts > 0 {
+ return w.MaxAttempts
+ }
+ // TODO: this is a very high default; if something has failed 9 times it
+ // seems unlikely it will succeed on the 10th attempt. It also carries the
+ // risk of greatly increasing the volume of requests sent to the kafka
+ // cluster. We should consider reducing this default (3?).
+ return 10
+}
+
+func (w *Writer) writeBackoffMin() time.Duration {
+ if w.WriteBackoffMin > 0 {
+ return w.WriteBackoffMin
+ }
+ return 100 * time.Millisecond
+}
+
+func (w *Writer) writeBackoffMax() time.Duration {
+ if w.WriteBackoffMax > 0 {
+ return w.WriteBackoffMax
+ }
+ return 1 * time.Second
+}
+
+func (w *Writer) batchSize() int {
+ if w.BatchSize > 0 {
+ return w.BatchSize
+ }
+ return 100
+}
+
+func (w *Writer) batchBytes() int64 {
+ if w.BatchBytes > 0 {
+ return w.BatchBytes
+ }
+ return 1048576
+}
+
+func (w *Writer) batchTimeout() time.Duration {
+ if w.BatchTimeout > 0 {
+ return w.BatchTimeout
+ }
+ return 1 * time.Second
+}
+
+func (w *Writer) readTimeout() time.Duration {
+ if w.ReadTimeout > 0 {
+ return w.ReadTimeout
+ }
+ return 10 * time.Second
+}
+
+func (w *Writer) writeTimeout() time.Duration {
+ if w.WriteTimeout > 0 {
+ return w.WriteTimeout
+ }
+ return 10 * time.Second
+}
+
+func (w *Writer) withLogger(do func(Logger)) {
+ if w.Logger != nil {
+ do(w.Logger)
+ }
+}
+
+func (w *Writer) withErrorLogger(do func(Logger)) {
+ if w.ErrorLogger != nil {
+ do(w.ErrorLogger)
+ } else {
+ w.withLogger(do)
+ }
+}
+
+func (w *Writer) stats() *writerStats {
+ w.once.Do(func() {
+ // This field is not nil when the writer was constructed with NewWriter
+ // to share the value with the dial function and count dials.
+ if w.writerStats == nil {
+ w.writerStats = new(writerStats)
+ }
+ })
+ return w.writerStats
+}
+
+// Stats returns a snapshot of the writer stats since the last time the method
+// was called, or since the writer was created if it is called for the first
+// time.
+//
+// A typical use of this method is to spawn a goroutine that will periodically
+// call Stats on a kafka writer and report the metrics to a stats collection
+// system.
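+//
+// For example (the ticker interval and the reporting sink are illustrative):
+//
+// go func() {
+// for range time.Tick(10 * time.Second) {
+// stats := w.Stats()
+// // report the snapshot to the metrics system of your choice
+// _ = stats
+// }
+// }()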
+func (w *Writer) Stats() WriterStats {
+ stats := w.stats()
+ return WriterStats{
+ Dials: stats.dials.snapshot(),
+ Writes: stats.writes.snapshot(),
+ Messages: stats.messages.snapshot(),
+ Bytes: stats.bytes.snapshot(),
+ Errors: stats.errors.snapshot(),
+ DialTime: stats.dialTime.snapshotDuration(),
+ BatchTime: stats.batchTime.snapshotDuration(),
+ BatchQueueTime: stats.batchQueueTime.snapshotDuration(),
+ WriteTime: stats.writeTime.snapshotDuration(),
+ WaitTime: stats.waitTime.snapshotDuration(),
+ Retries: stats.retries.snapshot(),
+ BatchSize: stats.batchSize.snapshot(),
+ BatchBytes: stats.batchSizeBytes.snapshot(),
+ MaxAttempts: int64(w.maxAttempts()),
+ WriteBackoffMin: w.writeBackoffMin(),
+ WriteBackoffMax: w.writeBackoffMax(),
+ MaxBatchSize: int64(w.batchSize()),
+ BatchTimeout: w.batchTimeout(),
+ ReadTimeout: w.readTimeout(),
+ WriteTimeout: w.writeTimeout(),
+ RequiredAcks: int64(w.RequiredAcks),
+ Async: w.Async,
+ Topic: w.Topic,
+ }
+}
+
+func (w *Writer) chooseTopic(msg Message) (string, error) {
+ // w.Topic and msg.Topic are mutually exclusive: exactly one of them must be
+ // set, otherwise we return an error.
+ if w.Topic != "" && msg.Topic != "" {
+ return "", errors.New("kafka.(*Writer): Topic must not be specified for both Writer and Message")
+ } else if w.Topic == "" && msg.Topic == "" {
+ return "", errors.New("kafka.(*Writer): Topic must be specified for Writer or Message")
+ }
+
+ // now we choose the topic, depending on which one is not empty
+ if msg.Topic != "" {
+ return msg.Topic, nil
+ }
+
+ return w.Topic, nil
+}
+
+type batchQueue struct {
+ queue []*writeBatch
+
+ // Pointers are used here to make `go vet` happy, and avoid copying mutexes.
+ // It may be better to revert these to non-pointers and avoid the copies in
+ // a different way.
+ mutex *sync.Mutex
+ cond *sync.Cond
+
+ closed bool
+}
+
+func (b *batchQueue) Put(batch *writeBatch) bool {
+ b.cond.L.Lock()
+ defer b.cond.L.Unlock()
+ defer b.cond.Broadcast()
+
+ if b.closed {
+ return false
+ }
+ b.queue = append(b.queue, batch)
+ return true
+}
+
+func (b *batchQueue) Get() *writeBatch {
+ b.cond.L.Lock()
+ defer b.cond.L.Unlock()
+
+ for len(b.queue) == 0 && !b.closed {
+ b.cond.Wait()
+ }
+
+ if len(b.queue) == 0 {
+ return nil
+ }
+
+ batch := b.queue[0]
+ b.queue[0] = nil
+ b.queue = b.queue[1:]
+
+ return batch
+}
+
+func (b *batchQueue) Close() {
+ b.cond.L.Lock()
+ defer b.cond.L.Unlock()
+ defer b.cond.Broadcast()
+
+ b.closed = true
+}
+
+func newBatchQueue(initialSize int) batchQueue {
+ bq := batchQueue{
+ queue: make([]*writeBatch, 0, initialSize),
+ mutex: &sync.Mutex{},
+ cond: &sync.Cond{},
+ }
+
+ bq.cond.L = bq.mutex
+
+ return bq
+}
+
+// partitionWriter is a writer for a topic-partition pair. It maintains message
+// ordering across batches of messages.
+type partitionWriter struct {
+ meta topicPartition
+ queue batchQueue
+
+ mutex sync.Mutex
+ currBatch *writeBatch
+
+ // reference to the writer that owns this partition writer. Used for the
+ // produce logic as well as stat tracking
+ w *Writer
+}
+
+func newPartitionWriter(w *Writer, key topicPartition) *partitionWriter {
+ writer := &partitionWriter{
+ meta: key,
+ queue: newBatchQueue(10),
+ w: w,
+ }
+ w.spawn(writer.writeBatches)
+ return writer
+}
+
+func (ptw *partitionWriter) writeBatches() {
+ for {
+ batch := ptw.queue.Get()
+
+ // The only time Get returns nil is when the queue is closed
+ // and empty. If the queue is closed, that means
+ // the Writer is closed, so once we're here it's time to exit.
+ if batch == nil {
+ return
+ }
+
+ ptw.writeBatch(batch)
+ }
+}
+
+func (ptw *partitionWriter) writeMessages(msgs []Message, indexes []int32) map[*writeBatch][]int32 {
+ ptw.mutex.Lock()
+ defer ptw.mutex.Unlock()
+
+ batchSize := ptw.w.batchSize()
+ batchBytes := ptw.w.batchBytes()
+
+ var batches map[*writeBatch][]int32
+ if !ptw.w.Async {
+ batches = make(map[*writeBatch][]int32, 1)
+ }
+
+ for _, i := range indexes {
+ assignMessage:
+ batch := ptw.currBatch
+ if batch == nil {
+ batch = ptw.newWriteBatch()
+ ptw.currBatch = batch
+ }
+ if !batch.add(msgs[i], batchSize, batchBytes) {
+ batch.trigger()
+ ptw.queue.Put(batch)
+ ptw.currBatch = nil
+ goto assignMessage
+ }
+
+ if batch.full(batchSize, batchBytes) {
+ batch.trigger()
+ ptw.queue.Put(batch)
+ ptw.currBatch = nil
+ }
+
+ if !ptw.w.Async {
+ batches[batch] = append(batches[batch], i)
+ }
+ }
+ return batches
+}
+
+// ptw.w can be accessed here because this is called with the lock ptw.mutex already held.
+func (ptw *partitionWriter) newWriteBatch() *writeBatch {
+ batch := newWriteBatch(time.Now(), ptw.w.batchTimeout())
+ ptw.w.spawn(func() { ptw.awaitBatch(batch) })
+ return batch
+}
+
+// awaitBatch waits for a batch to either fill up or time out.
+// If the batch is full it only stops the timer; if the timer
+// expires it will queue the batch for writing if needed.
+func (ptw *partitionWriter) awaitBatch(batch *writeBatch) {
+ select {
+ case <-batch.timer.C:
+ ptw.mutex.Lock()
+ // detach the batch from the writer if we're still attached
+ // and queue for writing.
+ // Only the current batch can expire; all previous batches were already written to the queue.
+ // If writeMessages locks ptw.mutex after the timer fires but before this goroutine
+ // can lock ptw.mutex, it will have filled the batch and enqueued it, which means
+ // ptw.currBatch != batch, so we just move on.
+ // Otherwise, we detach the batch from the partitionWriter and enqueue it for writing.
+ if ptw.currBatch == batch {
+ ptw.queue.Put(batch)
+ ptw.currBatch = nil
+ }
+ ptw.mutex.Unlock()
+ case <-batch.ready:
+ // The batch became full, it was removed from the partitionWriter and its
+ // ready channel was closed. We need to stop the timer to avoid
+ // having it leak until it expires.
+ batch.timer.Stop()
+ }
+ stats := ptw.w.stats()
+ stats.batchQueueTime.observe(int64(time.Since(batch.time)))
+}
+
+func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
+ stats := ptw.w.stats()
+ stats.batchTime.observe(int64(time.Since(batch.time)))
+ stats.batchSize.observe(int64(len(batch.msgs)))
+ stats.batchSizeBytes.observe(batch.bytes)
+
+ var res *ProduceResponse
+ var err error
+ key := ptw.meta
+ for attempt, maxAttempts := 0, ptw.w.maxAttempts(); attempt < maxAttempts; attempt++ {
+ if attempt != 0 {
+ stats.retries.observe(1)
+ // TODO: should there be a way to asynchronously cancel this
+ // operation?
+ //
+ // * If all goroutines that added messages to this batch have stopped
+ // waiting for it, should we abort?
+ //
+ // * If the writer has been closed? It reduces the durability
+ // guarantees to abort, but may be better to avoid long wait times
+ // on close.
+ //
+ delay := backoff(attempt, ptw.w.writeBackoffMin(), ptw.w.writeBackoffMax())
+ ptw.w.withLogger(func(log Logger) {
+ log.Printf("backing off %s writing %d messages to %s (partition: %d)", delay, len(batch.msgs), key.topic, key.partition)
+ })
+ time.Sleep(delay)
+ }
+
+ ptw.w.withLogger(func(log Logger) {
+ log.Printf("writing %d messages to %s (partition: %d)", len(batch.msgs), key.topic, key.partition)
+ })
+
+ start := time.Now()
+ res, err = ptw.w.produce(key, batch)
+
+ stats.writes.observe(1)
+ stats.messages.observe(int64(len(batch.msgs)))
+ stats.bytes.observe(batch.bytes)
+ // stats.writeTime used to report the duration of WriteMessages, but the
+ // implementation was broken and reporting values in the nanoseconds
+ // range. In kafka-go 0.4, we recycled this value to instead report the
+ // duration of produce requests, and changed the stats.waitTime value to
+ // report the time that kafka has throttled the requests for.
+ stats.writeTime.observe(int64(time.Since(start)))
+
+ if res != nil {
+ err = res.Error
+ stats.waitTime.observe(int64(res.Throttle))
+ }
+
+ if err == nil {
+ break
+ }
+
+ stats.errors.observe(1)
+
+ ptw.w.withErrorLogger(func(log Logger) {
+ log.Printf("error writing messages to %s (partition %d, attempt %d): %s", key.topic, key.partition, attempt, err)
+ })
+
+ if !isTemporary(err) && !isTransientNetworkError(err) {
+ break
+ }
+ }
+
+ if res != nil {
+ for i := range batch.msgs {
+ m := &batch.msgs[i]
+ m.Topic = key.topic
+ m.Partition = int(key.partition)
+ m.Offset = res.BaseOffset + int64(i)
+
+ if m.Time.IsZero() {
+ m.Time = res.LogAppendTime
+ }
+ }
+ }
+
+ if ptw.w.Completion != nil {
+ ptw.w.Completion(batch.msgs, err)
+ }
+
+ batch.complete(err)
+}
+
+func (ptw *partitionWriter) close() {
+ ptw.mutex.Lock()
+ defer ptw.mutex.Unlock()
+
+ if ptw.currBatch != nil {
+ batch := ptw.currBatch
+ ptw.queue.Put(batch)
+ ptw.currBatch = nil
+ batch.trigger()
+ }
+
+ ptw.queue.Close()
+}
+
+type writeBatch struct {
+ time time.Time
+ msgs []Message
+ size int
+ bytes int64
+ ready chan struct{}
+ done chan struct{}
+ timer *time.Timer
+ err error // result of the batch completion
+}
+
+func newWriteBatch(now time.Time, timeout time.Duration) *writeBatch {
+ return &writeBatch{
+ time: now,
+ ready: make(chan struct{}),
+ done: make(chan struct{}),
+ timer: time.NewTimer(timeout),
+ }
+}
+
+func (b *writeBatch) add(msg Message, maxSize int, maxBytes int64) bool {
+ bytes := int64(msg.totalSize())
+
+ if b.size > 0 && (b.bytes+bytes) > maxBytes {
+ return false
+ }
+
+ if cap(b.msgs) == 0 {
+ b.msgs = make([]Message, 0, maxSize)
+ }
+
+ b.msgs = append(b.msgs, msg)
+ b.size++
+ b.bytes += bytes
+ return true
+}
+
+func (b *writeBatch) full(maxSize int, maxBytes int64) bool {
+ return b.size >= maxSize || b.bytes >= maxBytes
+}
+
+func (b *writeBatch) trigger() {
+ close(b.ready)
+}
+
+func (b *writeBatch) complete(err error) {
+ b.err = err
+ close(b.done)
+}
+
+type writerRecords struct {
+ msgs []Message
+ index int
+ record Record
+ key bytesReadCloser
+ value bytesReadCloser
+}
+
+func (r *writerRecords) ReadRecord() (*Record, error) {
+ if r.index >= 0 && r.index < len(r.msgs) {
+ m := &r.msgs[r.index]
+ r.index++
+ r.record = Record{
+ Time: m.Time,
+ Headers: m.Headers,
+ }
+ if m.Key != nil {
+ r.key.Reset(m.Key)
+ r.record.Key = &r.key
+ }
+ if m.Value != nil {
+ r.value.Reset(m.Value)
+ r.record.Value = &r.value
+ }
+ return &r.record, nil
+ }
+ return nil, io.EOF
+}
+
+type bytesReadCloser struct{ bytes.Reader }
+
+func (*bytesReadCloser)
Close() error { return nil } + +// A cache of []int values passed to balancers of writers, used to amortize the +// heap allocation of the partition index lists. +// +// With hindsight, the use of `...int` to pass the partition list to Balancers +// was not the best design choice: kafka partition numbers are monotonically +// increasing, we could have simply passed the number of partitions instead. +// If we ever revisit this API, we can hopefully remove this cache. +var partitionsCache atomic.Value + +func loadCachedPartitions(numPartitions int) []int { + partitions, ok := partitionsCache.Load().([]int) + if ok && len(partitions) >= numPartitions { + return partitions[:numPartitions] + } + + const alignment = 128 + n := ((numPartitions / alignment) + 1) * alignment + + partitions = make([]int, n) + for i := range partitions { + partitions[i] = i + } + + partitionsCache.Store(partitions) + return partitions[:numPartitions] +} diff --git a/vendor/github.com/urfave/cli/v2/.flake8 b/vendor/github.com/urfave/cli/v2/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/v2/.gitignore b/vendor/github.com/urfave/cli/v2/.gitignore new file mode 100644 index 000000000..1ef91a60b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.gitignore @@ -0,0 +1,14 @@ +*.coverprofile +*.exe +*.orig +.*envrc +.envrc +.idea +# goimports is installed here if not available +/.local/ +/site/ +coverage.txt +internal/*/built-example +vendor +/cmd/urfave-cli-genflags/urfave-cli-genflags +*.exe diff --git a/vendor/github.com/urfave/cli/v2/.golangci.yaml b/vendor/github.com/urfave/cli/v2/.golangci.yaml new file mode 100644 index 000000000..89b6e8661 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/.golangci.yaml @@ -0,0 +1,4 @@ +# https://golangci-lint.run/usage/configuration/ +linters: + enable: + - misspell diff --git a/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9fee14807 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting urfave-governance@googlegroups.com, a members-only group +that is world-postable. All complaints will be reviewed and investigated and +will result in a response that is deemed necessary and appropriate to the +circumstances. The project team is obligated to maintain confidentiality with +regard to the reporter of an incident. Further details of specific enforcement +policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/urfave/cli/v2/LICENSE b/vendor/github.com/urfave/cli/v2/LICENSE new file mode 100644 index 000000000..2c84c78a1 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 urfave/cli maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/urfave/cli/v2/Makefile b/vendor/github.com/urfave/cli/v2/Makefile new file mode 100644 index 000000000..f0d41905e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/Makefile @@ -0,0 +1,26 @@ +# NOTE: this Makefile is meant to provide a simplified entry point for humans to +# run all of the critical steps to verify one's changes are harmonious in +# nature. Keeping target bodies to one line each and abstaining from make magic +# are very important so that maintainers and contributors can focus their +# attention on files that are primarily Go. + +GO_RUN_BUILD := go run internal/build/build.go + +.PHONY: all +all: generate vet test check-binary-size gfmrun yamlfmt v2diff + +# NOTE: this is a special catch-all rule to run any of the commands +# defined in internal/build/build.go with optional arguments passed +# via GFLAGS (global flags) and FLAGS (command-specific flags), e.g.: +# +# $ make test GFLAGS='--packages cli' +%: + $(GO_RUN_BUILD) $(GFLAGS) $* $(FLAGS) + +.PHONY: docs +docs: + mkdocs build + +.PHONY: serve-docs +serve-docs: + mkdocs serve diff --git a/vendor/github.com/urfave/cli/v2/README.md b/vendor/github.com/urfave/cli/v2/README.md new file mode 100644 index 000000000..9080aee41 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/README.md @@ -0,0 +1,19 @@ +# cli + +[![Run Tests](https://github.com/urfave/cli/actions/workflows/cli.yml/badge.svg?branch=v2-maint)](https://github.com/urfave/cli/actions/workflows/cli.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/urfave/cli/v2.svg)](https://pkg.go.dev/github.com/urfave/cli/v2) +[![Go Report Card](https://goreportcard.com/badge/github.com/urfave/cli/v2)](https://goreportcard.com/report/github.com/urfave/cli/v2) +[![codecov](https://codecov.io/gh/urfave/cli/branch/v2-maint/graph/badge.svg?token=t9YGWLh05g)](https://app.codecov.io/gh/urfave/cli/tree/v2-maint) + +cli is a simple, fast, and fun package for building command line apps in Go. 
The
+goal is to enable developers to write fast and distributable command line
+applications in an expressive way.
+
+## Documentation
+
+More documentation is available in [`./docs`](./docs) or the hosted
+documentation site at <https://cli.urfave.org>.
+
+## License
+
+See [`LICENSE`](./LICENSE)
diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go
new file mode 100644
index 000000000..af072e769
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/app.go
@@ -0,0 +1,536 @@
+package cli
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+)
+
+const suggestDidYouMeanTemplate = "Did you mean %q?"
+
+var (
+	changeLogURL            = "https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md"
+	appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
+	contactSysadmin         = "This is an error in the application. Please contact the distributor of this application if this is not you."
+	errInvalidActionType    = NewExitError("ERROR invalid Action type. "+
+		fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+
+		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
+	ignoreFlagPrefix = "test." // this is to ignore test flags when adding flags from other packages
+
+	SuggestFlag               SuggestFlagFunc    = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest
+	SuggestCommand            SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest
+	SuggestDidYouMeanTemplate string             = suggestDidYouMeanTemplate
+)
+
+// App is the main structure of a cli application. It is recommended that
+// an app be created with the cli.NewApp() function
+type App struct {
+	// The name of the program. Defaults to path.Base(os.Args[0])
+	Name string
+	// Full name of command for help, defaults to Name
+	HelpName string
+	// Description of the program.
+	Usage string
+	// Text to override the USAGE section of help
+	UsageText string
+	// Whether this command supports arguments
+	Args bool
+	// Description of the program argument format.
+	ArgsUsage string
+	// Version of the program
+	Version string
+	// Description of the program
+	Description string
+	// DefaultCommand is the (optional) name of a command
+	// to run if no command names are passed as CLI arguments.
+	DefaultCommand string
+	// List of commands to execute
+	Commands []*Command
+	// List of flags to parse
+	Flags []Flag
+	// Boolean to enable bash completion commands
+	EnableBashCompletion bool
+	// Boolean to hide built-in help command and help flag
+	HideHelp bool
+	// Boolean to hide built-in help command but keep help flag.
+	// Ignored if HideHelp is true.
+ HideHelpCommand bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + // categories contains the categorized commands and is populated on app startup + categories CommandCategories + // flagCategories contains the categorized flags and is populated on app startup + flagCategories FlagCategories + // An action to execute when the shell completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action ActionFunc + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if a usage error occurs + OnUsageError OnUsageErrorFunc + // Execute this function when an invalid flag is accessed from the context + InvalidFlagAccessHandler InvalidFlagAccessFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []*Author + // Copyright of the binary if any + Copyright string + // Reader reader to write input to (useful for tests) + Reader io.Reader + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // ExitErrHandler processes any error encountered while running an App before + // it is returned to the caller. If no function is provided, HandleExitCoder + // is used as the default behavior. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + // SliceFlagSeparator is used to customize the separator for SliceFlag, the default is "," + SliceFlagSeparator string + // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false + DisableSliceFlagSeparator bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + // Enable suggestions for commands and flags + Suggest bool + // Allows global flags set by libraries which use flag.XXXVar(...) directly + // to be parsed through this library + AllowExtFlags bool + // Treat all flags as normal arguments if true + SkipFlagParsing bool + + didSetup bool + separator separatorSpec + + rootCommand *Command +} + +type SuggestFlagFunc func(flags []Flag, provided string, hideHelp bool) string + +type SuggestCommandFunc func(commands []*Command, provided string) string + +// Tries to find out when this binary was compiled. +// Returns the current time if it fails to find it. +func compileTime() time.Time { + info, err := os.Stat(os.Args[0]) + if err != nil { + return time.Now() + } + return info.ModTime() +} + +// NewApp creates a new cli Application with some reasonable defaults for Name, +// Usage, Version and Action. 
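
For orientation, a minimal usage sketch (the `greet` name and the action body are illustrative only, not part of the vendored source):

	app := cli.NewApp()
	app.Name = "greet"
	app.Action = func(c *cli.Context) error {
		fmt.Println("Hello,", c.Args().First())
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}

A zero-value `&cli.App{}` works as well, since `Setup` backfills the same defaults before `Run` proceeds.
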
+func NewApp() *App { + return &App{ + Name: filepath.Base(os.Args[0]), + Usage: "A new cli application", + UsageText: "", + BashComplete: DefaultAppComplete, + Action: helpCommand.Action, + Compiled: compileTime(), + Reader: os.Stdin, + Writer: os.Stdout, + ErrWriter: os.Stderr, + } +} + +// Setup runs initialization code to ensure all data structures are ready for +// `Run` or inspection prior to `Run`. It is internally called by `Run`, but +// will return early if setup has already happened. +func (a *App) Setup() { + if a.didSetup { + return + } + + a.didSetup = true + + if a.Name == "" { + a.Name = filepath.Base(os.Args[0]) + } + + if a.HelpName == "" { + a.HelpName = a.Name + } + + if a.Usage == "" { + a.Usage = "A new cli application" + } + + if a.Version == "" { + a.HideVersion = true + } + + if a.BashComplete == nil { + a.BashComplete = DefaultAppComplete + } + + if a.Action == nil { + a.Action = helpCommand.Action + } + + if a.Compiled == (time.Time{}) { + a.Compiled = compileTime() + } + + if a.Reader == nil { + a.Reader = os.Stdin + } + + if a.Writer == nil { + a.Writer = os.Stdout + } + + if a.ErrWriter == nil { + a.ErrWriter = os.Stderr + } + + if a.AllowExtFlags { + // add global flags added by other packages + flag.VisitAll(func(f *flag.Flag) { + // skip test flags + if !strings.HasPrefix(f.Name, ignoreFlagPrefix) { + a.Flags = append(a.Flags, &extFlag{f}) + } + }) + } + + if len(a.SliceFlagSeparator) != 0 { + a.separator.customized = true + a.separator.sep = a.SliceFlagSeparator + } + + if a.DisableSliceFlagSeparator { + a.separator.customized = true + a.separator.disabled = true + } + + for _, c := range a.Commands { + cname := c.Name + if c.HelpName != "" { + cname = c.HelpName + } + c.separator = a.separator + c.HelpName = fmt.Sprintf("%s %s", a.HelpName, cname) + c.flagCategories = newFlagCategoriesFromFlags(c.Flags) + } + + if a.Command(helpCommand.Name) == nil && !a.HideHelp { + if !a.HideHelpCommand { + a.appendCommand(helpCommand) + } + + if HelpFlag != nil { + a.appendFlag(HelpFlag) + } + } + + if !a.HideVersion { + a.appendFlag(VersionFlag) + } + + a.categories = newCommandCategories() + for _, command := range a.Commands { + a.categories.AddCommand(command.Category, command) + } + sort.Sort(a.categories.(*commandCategories)) + + a.flagCategories = newFlagCategoriesFromFlags(a.Flags) + + if a.Metadata == nil { + a.Metadata = make(map[string]interface{}) + } +} + +func (a *App) newRootCommand() *Command { + return &Command{ + Name: a.Name, + Usage: a.Usage, + UsageText: a.UsageText, + Description: a.Description, + ArgsUsage: a.ArgsUsage, + BashComplete: a.BashComplete, + Before: a.Before, + After: a.After, + Action: a.Action, + OnUsageError: a.OnUsageError, + Subcommands: a.Commands, + Flags: a.Flags, + flagCategories: a.flagCategories, + HideHelp: a.HideHelp, + HideHelpCommand: a.HideHelpCommand, + UseShortOptionHandling: a.UseShortOptionHandling, + HelpName: a.HelpName, + CustomHelpTemplate: a.CustomAppHelpTemplate, + categories: a.categories, + SkipFlagParsing: a.SkipFlagParsing, + isRoot: true, + separator: a.separator, + } +} + +func (a *App) newFlagSet() (*flag.FlagSet, error) { + return flagSet(a.Name, a.Flags, a.separator) +} + +func (a *App) useShortOptionHandling() bool { + return a.UseShortOptionHandling +} + +// Run is the entry point to the cli app. 
Parses the arguments slice and routes +// to the proper flag/args combination +func (a *App) Run(arguments []string) (err error) { + return a.RunContext(context.Background(), arguments) +} + +// RunContext is like Run except it takes a Context that will be +// passed to its commands and sub-commands. Through this, you can +// propagate timeouts and cancellation requests +func (a *App) RunContext(ctx context.Context, arguments []string) (err error) { + a.Setup() + + // handle the completion flag separately from the flagset since + // completion could be attempted after a flag, but before its value was put + // on the command line. this causes the flagset to interpret the completion + // flag name as the value of the flag before it which is undesirable + // note that we can only do this because the shell autocomplete function + // always appends the completion flag at the end of the command + shellComplete, arguments := checkShellCompleteFlag(a, arguments) + + cCtx := NewContext(a, nil, &Context{Context: ctx}) + cCtx.shellComplete = shellComplete + + a.rootCommand = a.newRootCommand() + cCtx.Command = a.rootCommand + + if err := checkDuplicatedCmds(a.rootCommand); err != nil { + return err + } + return a.rootCommand.Run(cCtx, arguments...) +} + +// RunAsSubcommand is for legacy/compatibility purposes only. New code should only +// use App.RunContext. This function is slated to be removed in v3. +func (a *App) RunAsSubcommand(ctx *Context) (err error) { + a.Setup() + + cCtx := NewContext(a, nil, ctx) + cCtx.shellComplete = ctx.shellComplete + + a.rootCommand = a.newRootCommand() + cCtx.Command = a.rootCommand + + return a.rootCommand.Run(cCtx, ctx.Args().Slice()...) +} + +func (a *App) suggestFlagFromError(err error, command string) (string, error) { + flag, parseErr := flagFromError(err) + if parseErr != nil { + return "", err + } + + flags := a.Flags + hideHelp := a.HideHelp + if command != "" { + cmd := a.Command(command) + if cmd == nil { + return "", err + } + flags = cmd.Flags + hideHelp = hideHelp || cmd.HideHelp + } + + if SuggestFlag == nil { + return "", err + } + suggestion := SuggestFlag(flags, flag, hideHelp) + if len(suggestion) == 0 { + return "", err + } + + return fmt.Sprintf(SuggestDidYouMeanTemplate+"\n\n", suggestion), nil +} + +// RunAndExitOnError calls .Run() and exits non-zero if an error was returned +// +// Deprecated: instead you should return an error that fulfills cli.ExitCoder +// to cli.App.Run. This will cause the application to exit with the given error +// code in the cli.ExitCoder +func (a *App) RunAndExitOnError() { + if err := a.Run(os.Args); err != nil { + _, _ = fmt.Fprintln(a.ErrWriter, err) + OsExiter(1) + } +} + +// Command returns the named command on App. 
Returns nil if the command does not exist +func (a *App) Command(name string) *Command { + for _, c := range a.Commands { + if c.HasName(name) { + return c + } + } + + return nil +} + +// VisibleCategories returns a slice of categories and commands that are +// Hidden=false +func (a *App) VisibleCategories() []CommandCategory { + ret := []CommandCategory{} + for _, category := range a.categories.Categories() { + if visible := func() CommandCategory { + if len(category.VisibleCommands()) > 0 { + return category + } + return nil + }(); visible != nil { + ret = append(ret, visible) + } + } + return ret +} + +// VisibleCommands returns a slice of the Commands with Hidden=false +func (a *App) VisibleCommands() []*Command { + var ret []*Command + for _, command := range a.Commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// VisibleFlagCategories returns a slice containing all the categories with the flags they contain +func (a *App) VisibleFlagCategories() []VisibleFlagCategory { + if a.flagCategories == nil { + return []VisibleFlagCategory{} + } + return a.flagCategories.VisibleCategories() +} + +// VisibleFlags returns a slice of the Flags with Hidden=false +func (a *App) VisibleFlags() []Flag { + return visibleFlags(a.Flags) +} + +func (a *App) appendFlag(fl Flag) { + if !hasFlag(a.Flags, fl) { + a.Flags = append(a.Flags, fl) + } +} + +func (a *App) appendCommand(c *Command) { + if !hasCommand(a.Commands, c) { + a.Commands = append(a.Commands, c) + } +} + +func (a *App) handleExitCoder(cCtx *Context, err error) { + if a.ExitErrHandler != nil { + a.ExitErrHandler(cCtx, err) + } else { + HandleExitCoder(err) + } +} + +func (a *App) argsWithDefaultCommand(oldArgs Args) Args { + if a.DefaultCommand != "" { + rawArgs := append([]string{a.DefaultCommand}, oldArgs.Slice()...) + newArgs := args(rawArgs) + + return &newArgs + } + + return oldArgs +} + +func runFlagActions(c *Context, fs []Flag) error { + for _, f := range fs { + isSet := false + for _, name := range f.Names() { + if c.IsSet(name) { + isSet = true + break + } + } + if isSet { + if af, ok := f.(ActionableFlag); ok { + if err := af.RunAction(c); err != nil { + return err + } + } + } + } + return nil +} + +// Author represents someone who has contributed to a cli project. +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + +// String makes Author comply to the Stringer interface, to allow an easy print in the templating process +func (a *Author) String() string { + e := "" + if a.Email != "" { + e = " <" + a.Email + ">" + } + + return fmt.Sprintf("%v%v", a.Name, e) +} + +// HandleAction attempts to figure out which Action signature was used. If +// it's an ActionFunc or a func with the legacy signature for Action, the func +// is run! 
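
A sketch of the dispatch rules below (illustrative calls; from outside the package these would be qualified as `cli.HandleAction`):

	err := HandleAction(func(c *Context) error { return nil }, cCtx) // ActionFunc or current signature: runs, returns its error
	err = HandleAction(func(c *Context) {}, cCtx)                    // legacy signature: runs, returns nil
	err = HandleAction(42, cCtx)                                     // anything else: errInvalidActionType
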
+func HandleAction(action interface{}, cCtx *Context) (err error) { + switch a := action.(type) { + case ActionFunc: + return a(cCtx) + case func(*Context) error: + return a(cCtx) + case func(*Context): // deprecated function signature + a(cCtx) + return nil + } + + return errInvalidActionType +} + +func checkStringSliceIncludes(want string, sSlice []string) bool { + found := false + for _, s := range sSlice { + if want == s { + found = true + break + } + } + + return found +} diff --git a/vendor/github.com/urfave/cli/v2/args.go b/vendor/github.com/urfave/cli/v2/args.go new file mode 100644 index 000000000..bd65c17bd --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/args.go @@ -0,0 +1,54 @@ +package cli + +type Args interface { + // Get returns the nth argument, or else a blank string + Get(n int) string + // First returns the first argument, or else a blank string + First() string + // Tail returns the rest of the arguments (not the first one) + // or else an empty string slice + Tail() []string + // Len returns the length of the wrapped slice + Len() int + // Present checks if there are any arguments present + Present() bool + // Slice returns a copy of the internal slice + Slice() []string +} + +type args []string + +func (a *args) Get(n int) string { + if len(*a) > n { + return (*a)[n] + } + return "" +} + +func (a *args) First() string { + return a.Get(0) +} + +func (a *args) Tail() []string { + if a.Len() >= 2 { + tail := []string((*a)[1:]) + ret := make([]string, len(tail)) + copy(ret, tail) + return ret + } + return []string{} +} + +func (a *args) Len() int { + return len(*a) +} + +func (a *args) Present() bool { + return a.Len() != 0 +} + +func (a *args) Slice() []string { + ret := make([]string, len(*a)) + copy(ret, *a) + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/category.go b/vendor/github.com/urfave/cli/v2/category.go new file mode 100644 index 000000000..0986fffca --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/category.go @@ -0,0 +1,186 @@ +package cli + +import "sort" + +// CommandCategories interface allows for category manipulation +type CommandCategories interface { + // AddCommand adds a command to a category, creating a new category if necessary. + AddCommand(category string, command *Command) + // Categories returns a slice of categories sorted by name + Categories() []CommandCategory +} + +type commandCategories []*commandCategory + +func newCommandCategories() CommandCategories { + ret := commandCategories([]*commandCategory{}) + return &ret +} + +func (c *commandCategories) Less(i, j int) bool { + return lexicographicLess((*c)[i].Name(), (*c)[j].Name()) +} + +func (c *commandCategories) Len() int { + return len(*c) +} + +func (c *commandCategories) Swap(i, j int) { + (*c)[i], (*c)[j] = (*c)[j], (*c)[i] +} + +func (c *commandCategories) AddCommand(category string, command *Command) { + for _, commandCategory := range []*commandCategory(*c) { + if commandCategory.name == category { + commandCategory.commands = append(commandCategory.commands, command) + return + } + } + newVal := append(*c, + &commandCategory{name: category, commands: []*Command{command}}) + *c = newVal +} + +func (c *commandCategories) Categories() []CommandCategory { + ret := make([]CommandCategory, len(*c)) + for i, cat := range *c { + ret[i] = cat + } + return ret +} + +// CommandCategory is a category containing commands. 
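
In practice these buckets are driven by each command's `Category` field; a hedged sketch of the effect on help grouping (command names are illustrative):

	app.Commands = []*cli.Command{
		{Name: "migrate", Category: "database"},
		{Name: "seed", Category: "database"},
		{Name: "serve"}, // empty category
	}
	// Setup calls AddCommand for each command, and Categories() returns
	// the buckets sorted by name, so help output lists the "database"
	// commands together.
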
+type CommandCategory interface { + // Name returns the category name string + Name() string + // VisibleCommands returns a slice of the Commands with Hidden=false + VisibleCommands() []*Command +} + +type commandCategory struct { + name string + commands []*Command +} + +func (c *commandCategory) Name() string { + return c.name +} + +func (c *commandCategory) VisibleCommands() []*Command { + if c.commands == nil { + c.commands = []*Command{} + } + + var ret []*Command + for _, command := range c.commands { + if !command.Hidden { + ret = append(ret, command) + } + } + return ret +} + +// FlagCategories interface allows for category manipulation +type FlagCategories interface { + // AddFlags adds a flag to a category, creating a new category if necessary. + AddFlag(category string, fl Flag) + // VisibleCategories returns a slice of visible flag categories sorted by name + VisibleCategories() []VisibleFlagCategory +} + +type defaultFlagCategories struct { + m map[string]*defaultVisibleFlagCategory +} + +func newFlagCategories() FlagCategories { + return &defaultFlagCategories{ + m: map[string]*defaultVisibleFlagCategory{}, + } +} + +func newFlagCategoriesFromFlags(fs []Flag) FlagCategories { + fc := newFlagCategories() + + var categorized bool + for _, fl := range fs { + if cf, ok := fl.(CategorizableFlag); ok { + if cat := cf.GetCategory(); cat != "" && cf.IsVisible() { + fc.AddFlag(cat, cf) + categorized = true + } + } + } + + if categorized { + for _, fl := range fs { + if cf, ok := fl.(CategorizableFlag); ok { + if cf.GetCategory() == "" && cf.IsVisible() { + fc.AddFlag("", fl) + } + } + } + } + + return fc +} + +func (f *defaultFlagCategories) AddFlag(category string, fl Flag) { + if _, ok := f.m[category]; !ok { + f.m[category] = &defaultVisibleFlagCategory{name: category, m: map[string]Flag{}} + } + + f.m[category].m[fl.String()] = fl +} + +func (f *defaultFlagCategories) VisibleCategories() []VisibleFlagCategory { + catNames := []string{} + for name := range f.m { + catNames = append(catNames, name) + } + + sort.Strings(catNames) + + ret := make([]VisibleFlagCategory, len(catNames)) + for i, name := range catNames { + ret[i] = f.m[name] + } + + return ret +} + +// VisibleFlagCategory is a category containing flags. +type VisibleFlagCategory interface { + // Name returns the category name string + Name() string + // Flags returns a slice of VisibleFlag sorted by name + Flags() []VisibleFlag +} + +type defaultVisibleFlagCategory struct { + name string + m map[string]Flag +} + +func (fc *defaultVisibleFlagCategory) Name() string { + return fc.name +} + +func (fc *defaultVisibleFlagCategory) Flags() []VisibleFlag { + vfNames := []string{} + for flName, fl := range fc.m { + if vf, ok := fl.(VisibleFlag); ok { + if vf.IsVisible() { + vfNames = append(vfNames, flName) + } + } + } + + sort.Strings(vfNames) + + ret := make([]VisibleFlag, len(vfNames)) + for i, flName := range vfNames { + ret[i] = fc.m[flName].(VisibleFlag) + } + + return ret +} diff --git a/vendor/github.com/urfave/cli/v2/cli.go b/vendor/github.com/urfave/cli/v2/cli.go new file mode 100644 index 000000000..28ad0582b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/cli.go @@ -0,0 +1,25 @@ +// Package cli provides a minimal framework for creating and organizing command line +// Go applications. 
cli is designed to be easy to understand and write, the most simple +// cli application can be written as follows: +// +// func main() { +// (&cli.App{}).Run(os.Args) +// } +// +// Of course this application does not do much, so let's make this an actual application: +// +// func main() { +// app := &cli.App{ +// Name: "greet", +// Usage: "say a greeting", +// Action: func(c *cli.Context) error { +// fmt.Println("Greetings") +// return nil +// }, +// } +// +// app.Run(os.Args) +// } +package cli + +//go:generate make -C cmd/urfave-cli-genflags run diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go new file mode 100644 index 000000000..472c1ff44 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/command.go @@ -0,0 +1,421 @@ +package cli + +import ( + "flag" + "fmt" + "reflect" + "sort" + "strings" +) + +// Command is a subcommand for a cli.App. +type Command struct { + // The name of the command + Name string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // Whether this command supports arguments + Args bool + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action ActionFunc + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands []*Command + // List of flags to parse + Flags []Flag + flagCategories FlagCategories + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. + HelpName string + commandNamePath []string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. 
+ CustomHelpTemplate string + + // categories contains the categorized commands and is populated on app startup + categories CommandCategories + + // if this is a root "special" command + isRoot bool + + separator separatorSpec +} + +type Commands []*Command + +type CommandsByName []*Command + +func (c CommandsByName) Len() int { + return len(c) +} + +func (c CommandsByName) Less(i, j int) bool { + return lexicographicLess(c[i].Name, c[j].Name) +} + +func (c CommandsByName) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// FullName returns the full name of the command. +// For subcommands this ensures that parent commands are part of the command path +func (c *Command) FullName() string { + if c.commandNamePath == nil { + return c.Name + } + return strings.Join(c.commandNamePath, " ") +} + +func (cmd *Command) Command(name string) *Command { + for _, c := range cmd.Subcommands { + if c.HasName(name) { + return c + } + } + + return nil +} + +func (c *Command) setup(ctx *Context) { + if c.Command(helpCommand.Name) == nil && !c.HideHelp { + if !c.HideHelpCommand { + c.Subcommands = append(c.Subcommands, helpCommand) + } + } + + if !c.HideHelp && HelpFlag != nil { + // append help to flags + c.appendFlag(HelpFlag) + } + + if ctx.App.UseShortOptionHandling { + c.UseShortOptionHandling = true + } + + c.categories = newCommandCategories() + for _, command := range c.Subcommands { + c.categories.AddCommand(command.Category, command) + } + sort.Sort(c.categories.(*commandCategories)) + + for _, scmd := range c.Subcommands { + if scmd.HelpName == "" { + scmd.HelpName = fmt.Sprintf("%s %s", c.HelpName, scmd.Name) + } + scmd.separator = c.separator + } + + if c.BashComplete == nil { + c.BashComplete = DefaultCompleteWithFlags(c) + } +} + +func (c *Command) Run(cCtx *Context, arguments ...string) (err error) { + + if !c.isRoot { + c.setup(cCtx) + if err := checkDuplicatedCmds(c); err != nil { + return err + } + } + + a := args(arguments) + set, err := c.parseFlags(&a, cCtx.shellComplete) + cCtx.flagSet = set + + if checkCompletions(cCtx) { + return nil + } + + if err != nil { + if c.OnUsageError != nil { + err = c.OnUsageError(cCtx, err, !c.isRoot) + cCtx.App.handleExitCoder(cCtx, err) + return err + } + _, _ = fmt.Fprintf(cCtx.App.Writer, "%s %s\n\n", "Incorrect Usage:", err.Error()) + if cCtx.App.Suggest { + if suggestion, err := c.suggestFlagFromError(err, ""); err == nil { + fmt.Fprintf(cCtx.App.Writer, "%s", suggestion) + } + } + if !c.HideHelp { + if c.isRoot { + _ = ShowAppHelp(cCtx) + } else { + _ = ShowCommandHelp(cCtx.parentContext, c.Name) + } + } + return err + } + + if checkHelp(cCtx) { + return helpCommand.Action(cCtx) + } + + if c.isRoot && !cCtx.App.HideVersion && checkVersion(cCtx) { + ShowVersion(cCtx) + return nil + } + + if c.After != nil && !cCtx.shellComplete { + defer func() { + afterErr := c.After(cCtx) + if afterErr != nil { + cCtx.App.handleExitCoder(cCtx, err) + if err != nil { + err = newMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + cerr := cCtx.checkRequiredFlags(c.Flags) + if cerr != nil { + _ = helpCommand.Action(cCtx) + return cerr + } + + if c.Before != nil && !cCtx.shellComplete { + beforeErr := c.Before(cCtx) + if beforeErr != nil { + cCtx.App.handleExitCoder(cCtx, beforeErr) + err = beforeErr + return err + } + } + + if err = runFlagActions(cCtx, c.Flags); err != nil { + return err + } + + var cmd *Command + args := cCtx.Args() + if args.Present() { + name := args.First() + cmd = c.Command(name) + if cmd == nil { + hasDefault := 
cCtx.App.DefaultCommand != "" + isFlagName := checkStringSliceIncludes(name, cCtx.FlagNames()) + + var ( + isDefaultSubcommand = false + defaultHasSubcommands = false + ) + + if hasDefault { + dc := cCtx.App.Command(cCtx.App.DefaultCommand) + defaultHasSubcommands = len(dc.Subcommands) > 0 + for _, dcSub := range dc.Subcommands { + if checkStringSliceIncludes(name, dcSub.Names()) { + isDefaultSubcommand = true + break + } + } + } + + if isFlagName || (hasDefault && (defaultHasSubcommands && isDefaultSubcommand)) { + argsWithDefault := cCtx.App.argsWithDefaultCommand(args) + if !reflect.DeepEqual(args, argsWithDefault) { + cmd = cCtx.App.rootCommand.Command(argsWithDefault.First()) + } + } + } + } else if c.isRoot && cCtx.App.DefaultCommand != "" { + if dc := cCtx.App.Command(cCtx.App.DefaultCommand); dc != c { + cmd = dc + } + } + + if cmd != nil { + newcCtx := NewContext(cCtx.App, nil, cCtx) + newcCtx.Command = cmd + return cmd.Run(newcCtx, cCtx.Args().Slice()...) + } + + if c.Action == nil { + c.Action = helpCommand.Action + } + + err = c.Action(cCtx) + + cCtx.App.handleExitCoder(cCtx, err) + return err +} + +func (c *Command) newFlagSet() (*flag.FlagSet, error) { + return flagSet(c.Name, c.Flags, c.separator) +} + +func (c *Command) useShortOptionHandling() bool { + return c.UseShortOptionHandling +} + +func (c *Command) suggestFlagFromError(err error, command string) (string, error) { + flag, parseErr := flagFromError(err) + if parseErr != nil { + return "", err + } + + flags := c.Flags + hideHelp := c.HideHelp + if command != "" { + cmd := c.Command(command) + if cmd == nil { + return "", err + } + flags = cmd.Flags + hideHelp = hideHelp || cmd.HideHelp + } + + suggestion := SuggestFlag(flags, flag, hideHelp) + if len(suggestion) == 0 { + return "", err + } + + return fmt.Sprintf(SuggestDidYouMeanTemplate, suggestion) + "\n\n", nil +} + +func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) { + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + if c.SkipFlagParsing { + return set, set.Parse(append([]string{"--"}, args.Tail()...)) + } + + err = parseIter(set, c, args.Tail(), shellComplete) + if err != nil { + return nil, err + } + + err = normalizeFlags(c.Flags, set) + if err != nil { + return nil, err + } + + return set, nil +} + +// Names returns the names including short names and aliases. +func (c *Command) Names() []string { + return append([]string{c.Name}, c.Aliases...) 
+}
+
+// HasName returns true if Command.Name matches given name
+func (c *Command) HasName(name string) bool {
+	for _, n := range c.Names() {
+		if n == name {
+			return true
+		}
+	}
+	return false
+}
+
+// VisibleCategories returns a slice of categories and commands that are
+// Hidden=false
+func (c *Command) VisibleCategories() []CommandCategory {
+	ret := []CommandCategory{}
+	for _, category := range c.categories.Categories() {
+		if visible := func() CommandCategory {
+			if len(category.VisibleCommands()) > 0 {
+				return category
+			}
+			return nil
+		}(); visible != nil {
+			ret = append(ret, visible)
+		}
+	}
+	return ret
+}
+
+// VisibleCommands returns a slice of the Commands with Hidden=false
+func (c *Command) VisibleCommands() []*Command {
+	var ret []*Command
+	for _, command := range c.Subcommands {
+		if !command.Hidden {
+			ret = append(ret, command)
+		}
+	}
+	return ret
+}
+
+// VisibleFlagCategories returns a slice containing all the visible flag categories with the flags they contain
+func (c *Command) VisibleFlagCategories() []VisibleFlagCategory {
+	if c.flagCategories == nil {
+		c.flagCategories = newFlagCategoriesFromFlags(c.Flags)
+	}
+	return c.flagCategories.VisibleCategories()
+}
+
+// VisibleFlags returns a slice of the Flags with Hidden=false
+func (c *Command) VisibleFlags() []Flag {
+	return visibleFlags(c.Flags)
+}
+
+func (c *Command) appendFlag(fl Flag) {
+	if !hasFlag(c.Flags, fl) {
+		c.Flags = append(c.Flags, fl)
+	}
+}
+
+func hasCommand(commands []*Command, command *Command) bool {
+	for _, existing := range commands {
+		if command == existing {
+			return true
+		}
+	}
+
+	return false
+}
+
+func checkDuplicatedCmds(parent *Command) error {
+	seen := make(map[string]struct{})
+	for _, c := range parent.Subcommands {
+		for _, name := range c.Names() {
+			if _, exists := seen[name]; exists {
+				return fmt.Errorf("parent command [%s] has duplicated subcommand name or alias: %s", parent.Name, name)
+			}
+			seen[name] = struct{}{}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go
new file mode 100644
index 000000000..8dd476521
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/context.go
@@ -0,0 +1,272 @@
+package cli
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"strings"
+)
+
+// Context is a type that is passed through to
+// each Handler action in a cli application. Context
+// can be used to retrieve context-specific args and
+// parsed command-line options.
+type Context struct {
+	context.Context
+	App           *App
+	Command       *Command
+	shellComplete bool
+	flagSet       *flag.FlagSet
+	parentContext *Context
+}
+
+// NewContext creates a new context. For use when invoking an App or Command action.
+func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
+	c := &Context{App: app, flagSet: set, parentContext: parentCtx}
+	if parentCtx != nil {
+		c.Context = parentCtx.Context
+		c.shellComplete = parentCtx.shellComplete
+		if parentCtx.flagSet == nil {
+			parentCtx.flagSet = &flag.FlagSet{}
+		}
+	}
+
+	c.Command = &Command{}
+
+	if c.Context == nil {
+		c.Context = context.Background()
+	}
+
+	return c
+}
+
+// NumFlags returns the number of flags set
+func (cCtx *Context) NumFlags() int {
+	return cCtx.flagSet.NFlag()
+}
+
+// Set sets a context flag to a value.
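
A hedged sketch of how these lookups behave from inside an action (`retries` is an illustrative flag name):

	app.Action = func(c *cli.Context) error {
		// Set, IsSet, and Value all resolve through the context lineage,
		// so a flag defined on a parent is reachable from a subcommand.
		if err := c.Set("retries", "5"); err != nil {
			return err
		}
		fmt.Println(c.IsSet("retries")) // true
		fmt.Println(c.Value("retries")) // 5
		fmt.Println(c.Args().First())   // first positional argument
		return nil
	}
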
+func (cCtx *Context) Set(name, value string) error {
+	if fs := cCtx.lookupFlagSet(name); fs != nil {
+		return fs.Set(name, value)
+	}
+
+	return fmt.Errorf("no such flag -%s", name)
+}
+
+// IsSet determines if the flag was actually set
+func (cCtx *Context) IsSet(name string) bool {
+
+	if fs := cCtx.lookupFlagSet(name); fs != nil {
+		isSet := false
+		fs.Visit(func(f *flag.Flag) {
+			if f.Name == name {
+				isSet = true
+			}
+		})
+		if isSet {
+			return true
+		}
+
+		f := cCtx.lookupFlag(name)
+		if f == nil {
+			return false
+		}
+
+		if f.IsSet() {
+			return true
+		}
+
+		// now redo flagset search on aliases
+		aliases := f.Names()
+		fs.Visit(func(f *flag.Flag) {
+			for _, alias := range aliases {
+				if f.Name == alias {
+					isSet = true
+				}
+			}
+		})
+
+		if isSet {
+			return true
+		}
+	}
+
+	return false
+}
+
+// LocalFlagNames returns a slice of flag names used in this context.
+func (cCtx *Context) LocalFlagNames() []string {
+	var names []string
+	cCtx.flagSet.Visit(makeFlagNameVisitor(&names))
+	// Check the flags which have been set via env or file
+	if cCtx.Command != nil && cCtx.Command.Flags != nil {
+		for _, f := range cCtx.Command.Flags {
+			if f.IsSet() {
+				names = append(names, f.Names()...)
+			}
+		}
+	}
+
+	// Sort out the duplicates, since a flag could be set via multiple
+	// paths
+	m := map[string]struct{}{}
+	var unames []string
+	for _, name := range names {
+		if _, ok := m[name]; !ok {
+			m[name] = struct{}{}
+			unames = append(unames, name)
+		}
+	}
+
+	return unames
+}
+
+// FlagNames returns a slice of flag names used by this context and all of
+// its parent contexts.
+func (cCtx *Context) FlagNames() []string {
+	var names []string
+	for _, pCtx := range cCtx.Lineage() {
+		names = append(names, pCtx.LocalFlagNames()...)
+	}
+	return names
+}
+
+// Lineage returns *this* context and all of its ancestor contexts in order from
+// child to parent
+func (cCtx *Context) Lineage() []*Context {
+	var lineage []*Context
+
+	for cur := cCtx; cur != nil; cur = cur.parentContext {
+		lineage = append(lineage, cur)
+	}
+
+	return lineage
+}
+
+// Count returns the number of occurrences of this flag
+func (cCtx *Context) Count(name string) int {
+	if fs := cCtx.lookupFlagSet(name); fs != nil {
+		if cf, ok := fs.Lookup(name).Value.(Countable); ok {
+			return cf.Count()
+		}
+	}
+	return 0
+}
+
+// Value returns the value of the flag corresponding to `name`
+func (cCtx *Context) Value(name string) interface{} {
+	if fs := cCtx.lookupFlagSet(name); fs != nil {
+		return fs.Lookup(name).Value.(flag.Getter).Get()
+	}
+	return nil
+}
+
+// Args returns the command line arguments associated with the context.
+func (cCtx *Context) Args() Args {
+	ret := args(cCtx.flagSet.Args())
+	return &ret
+}
+
+// NArg returns the number of command line arguments.
+func (cCtx *Context) NArg() int {
+	return cCtx.Args().Len()
+}
+
+func (cCtx *Context) lookupFlag(name string) Flag {
+	for _, c := range cCtx.Lineage() {
+		if c.Command == nil {
+			continue
+		}
+
+		for _, f := range c.Command.Flags {
+			for _, n := range f.Names() {
+				if n == name {
+					return f
+				}
+			}
+		}
+	}
+
+	if cCtx.App != nil {
+		for _, f := range cCtx.App.Flags {
+			for _, n := range f.Names() {
+				if n == name {
+					return f
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func (cCtx *Context) lookupFlagSet(name string) *flag.FlagSet {
+	for _, c := range cCtx.Lineage() {
+		if c.flagSet == nil {
+			continue
+		}
+		if f := c.flagSet.Lookup(name); f != nil {
+			return c.flagSet
+		}
+	}
+	cCtx.onInvalidFlag(name)
+	return nil
+}
+
+func (cCtx *Context) checkRequiredFlags(flags []Flag) requiredFlagsErr {
+	var missingFlags []string
+	for _, f := range flags {
+		if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() {
+			var flagPresent bool
+			var flagName string
+
+			flagNames := f.Names()
+			flagName = flagNames[0]
+
+			for _, key := range flagNames {
+				if cCtx.IsSet(strings.TrimSpace(key)) {
+					flagPresent = true
+				}
+			}
+
+			if !flagPresent && flagName != "" {
+				missingFlags = append(missingFlags, flagName)
+			}
+		}
+	}
+
+	if len(missingFlags) != 0 {
+		return &errRequiredFlags{missingFlags: missingFlags}
+	}
+
+	return nil
+}
+
+func (cCtx *Context) onInvalidFlag(name string) {
+	for cCtx != nil {
+		if cCtx.App != nil && cCtx.App.InvalidFlagAccessHandler != nil {
+			cCtx.App.InvalidFlagAccessHandler(cCtx, name)
+			break
+		}
+		cCtx = cCtx.parentContext
+	}
+}
+
+func makeFlagNameVisitor(names *[]string) func(*flag.Flag) {
+	return func(f *flag.Flag) {
+		nameParts := strings.Split(f.Name, ",")
+		name := strings.TrimSpace(nameParts[0])
+
+		for _, part := range nameParts {
+			part = strings.TrimSpace(part)
+			if len(part) > len(name) {
+				name = part
+			}
+		}
+
+		if name != "" {
+			*names = append(*names, name)
+		}
+	}
+}
diff --git a/vendor/github.com/urfave/cli/v2/docs.go b/vendor/github.com/urfave/cli/v2/docs.go
new file mode 100644
index 000000000..6cd0624ae
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/docs.go
@@ -0,0 +1,203 @@
+//go:build !urfave_cli_no_docs
+// +build !urfave_cli_no_docs
+
+package cli
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"text/template"
+
+	"github.com/cpuguy83/go-md2man/v2/md2man"
+)
+
+// ToMarkdown creates a markdown string for the `*App`
+// The function errors if either parsing or writing of the string fails.
+func (a *App) ToMarkdown() (string, error) {
+	var w bytes.Buffer
+	if err := a.writeDocTemplate(&w, 0); err != nil {
+		return "", err
+	}
+	return w.String(), nil
+}
+
+// ToManWithSection creates a man page string with the given section number for the `*App`
+// The function errors if either parsing or writing of the string fails.
+func (a *App) ToManWithSection(sectionNumber int) (string, error) {
+	var w bytes.Buffer
+	if err := a.writeDocTemplate(&w, sectionNumber); err != nil {
+		return "", err
+	}
+	man := md2man.Render(w.Bytes())
+	return string(man), nil
+}
+
+// ToMan creates a man page string for the `*App`
+// The function errors if either parsing or writing of the string fails.
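
Both renderers above fill the same `cliTemplate`; a brief usage sketch (error handling elided for illustration):

	md, _ := app.ToMarkdown()         // markdown, e.g. for ./docs-style pages
	man, _ := app.ToManWithSection(1) // roff via md2man, man section 1
	// ToMan below is shorthand for ToManWithSection(8).
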
+func (a *App) ToMan() (string, error) { + man, err := a.ToManWithSection(8) + return man, err +} + +type cliTemplate struct { + App *App + SectionNum int + Commands []string + GlobalArgs []string + SynopsisArgs []string +} + +func (a *App) writeDocTemplate(w io.Writer, sectionNum int) error { + const name = "cli" + t, err := template.New(name).Parse(MarkdownDocTemplate) + if err != nil { + return err + } + return t.ExecuteTemplate(w, name, &cliTemplate{ + App: a, + SectionNum: sectionNum, + Commands: prepareCommands(a.Commands, 0), + GlobalArgs: prepareArgsWithValues(a.VisibleFlags()), + SynopsisArgs: prepareArgsSynopsis(a.VisibleFlags()), + }) +} + +func prepareCommands(commands []*Command, level int) []string { + var coms []string + for _, command := range commands { + if command.Hidden { + continue + } + + usageText := prepareUsageText(command) + + usage := prepareUsage(command, usageText) + + prepared := fmt.Sprintf("%s %s\n\n%s%s", + strings.Repeat("#", level+2), + strings.Join(command.Names(), ", "), + usage, + usageText, + ) + + flags := prepareArgsWithValues(command.VisibleFlags()) + if len(flags) > 0 { + prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) + } + + coms = append(coms, prepared) + + // recursively iterate subcommands + if len(command.Subcommands) > 0 { + coms = append( + coms, + prepareCommands(command.Subcommands, level+1)..., + ) + } + } + + return coms +} + +func prepareArgsWithValues(flags []Flag) []string { + return prepareFlags(flags, ", ", "**", "**", `""`, true) +} + +func prepareArgsSynopsis(flags []Flag) []string { + return prepareFlags(flags, "|", "[", "]", "[value]", false) +} + +func prepareFlags( + flags []Flag, + sep, opener, closer, value string, + addDetails bool, +) []string { + args := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + modifiedArg := opener + + for _, s := range flag.Names() { + trimmed := strings.TrimSpace(s) + if len(modifiedArg) > len(opener) { + modifiedArg += sep + } + if len(trimmed) > 1 { + modifiedArg += fmt.Sprintf("--%s", trimmed) + } else { + modifiedArg += fmt.Sprintf("-%s", trimmed) + } + } + modifiedArg += closer + if flag.TakesValue() { + modifiedArg += fmt.Sprintf("=%s", value) + } + + if addDetails { + modifiedArg += flagDetails(flag) + } + + args = append(args, modifiedArg+"\n") + + } + sort.Strings(args) + return args +} + +// flagDetails returns a string containing the flags metadata +func flagDetails(flag DocGenerationFlag) string { + description := flag.GetUsage() + if flag.TakesValue() { + defaultText := flag.GetDefaultText() + if defaultText == "" { + defaultText = flag.GetValue() + } + if defaultText != "" { + description += " (default: " + defaultText + ")" + } + } + return ": " + description +} + +func prepareUsageText(command *Command) string { + if command.UsageText == "" { + return "" + } + + // Remove leading and trailing newlines + preparedUsageText := strings.Trim(command.UsageText, "\n") + + var usageText string + if strings.Contains(preparedUsageText, "\n") { + // Format multi-line string as a code block using the 4 space schema to allow for embedded markdown such + // that it will not break the continuous code block. 
+		for _, ln := range strings.Split(preparedUsageText, "\n") {
+			usageText += fmt.Sprintf("    %s\n", ln)
+		}
+	} else {
+		// Style a single line as a note
+		usageText = fmt.Sprintf(">%s\n", preparedUsageText)
+	}
+
+	return usageText
+}
+
+func prepareUsage(command *Command, usageText string) string {
+	if command.Usage == "" {
+		return ""
+	}
+
+	usage := command.Usage + "\n"
+	// Add a newline to the Usage IFF there is a UsageText
+	if usageText != "" {
+		usage += "\n"
+	}
+
+	return usage
+}
diff --git a/vendor/github.com/urfave/cli/v2/errors.go b/vendor/github.com/urfave/cli/v2/errors.go
new file mode 100644
index 000000000..a818727db
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/errors.go
@@ -0,0 +1,178 @@
+package cli
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
+var OsExiter = os.Exit
+
+// ErrWriter is used to write errors to the user. This can be anything
+// implementing the io.Writer interface and defaults to os.Stderr.
+var ErrWriter io.Writer = os.Stderr
+
+// MultiError is an error that wraps multiple errors.
+type MultiError interface {
+	error
+	Errors() []error
+}
+
+// newMultiError creates a new MultiError. Pass in one or more errors.
+func newMultiError(err ...error) MultiError {
+	ret := multiError(err)
+	return &ret
+}
+
+type multiError []error
+
+// Error implements the error interface.
+func (m *multiError) Error() string {
+	errs := make([]string, len(*m))
+	for i, err := range *m {
+		errs[i] = err.Error()
+	}
+
+	return strings.Join(errs, "\n")
+}
+
+// Errors returns a copy of the errors slice
+func (m *multiError) Errors() []error {
+	errs := make([]error, len(*m))
+	// copy by index: appending to a slice pre-sized with make would leave
+	// a nil prefix and double the length
+	for i, err := range *m {
+		errs[i] = err
+	}
+	return errs
+}
+
+type requiredFlagsErr interface {
+	error
+	getMissingFlags() []string
+}
+
+type errRequiredFlags struct {
+	missingFlags []string
+}
+
+func (e *errRequiredFlags) Error() string {
+	numberOfMissingFlags := len(e.missingFlags)
+	if numberOfMissingFlags == 1 {
+		return fmt.Sprintf("Required flag %q not set", e.missingFlags[0])
+	}
+	joinedMissingFlags := strings.Join(e.missingFlags, ", ")
+	return fmt.Sprintf("Required flags %q not set", joinedMissingFlags)
+}
+
+func (e *errRequiredFlags) getMissingFlags() []string {
+	return e.missingFlags
+}
+
+// ErrorFormatter is the interface that will suitably format the error output
+type ErrorFormatter interface {
+	Format(s fmt.State, verb rune)
+}
+
+// ExitCoder is the interface checked by `App` and `Command` for a custom exit
+// code
+type ExitCoder interface {
+	error
+	ExitCode() int
+}
+
+type exitError struct {
+	exitCode int
+	err      error
+}
+
+// NewExitError calls Exit to create a new ExitCoder.
+//
+// Deprecated: This function is a duplicate of Exit and will eventually be removed.
+func NewExitError(message interface{}, exitCode int) ExitCoder {
+	return Exit(message, exitCode)
+}
+
+// Exit wraps a message and exit code into an error, which by default is
+// handled with a call to os.Exit during default error handling.
+//
+// This is the simplest way to trigger a non-zero exit code for an App without
+// having to call os.Exit manually. During testing, this behavior can be avoided
+// by overriding the ExitErrHandler function on an App or the package-global
+// OsExiter function.
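
A hedged sketch of the usual pattern inside an action (`doWork` is an illustrative helper, not part of the vendored source):

	app.Action = func(c *cli.Context) error {
		if err := doWork(); err != nil {
			// the default handler prints this to ErrWriter, then
			// calls OsExiter(3) via HandleExitCoder
			return cli.Exit("work failed: "+err.Error(), 3)
		}
		return nil
	}
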
+func Exit(message interface{}, exitCode int) ExitCoder { + var err error + + switch e := message.(type) { + case ErrorFormatter: + err = fmt.Errorf("%+v", message) + case error: + err = e + default: + err = fmt.Errorf("%+v", message) + } + + return &exitError{ + err: err, + exitCode: exitCode, + } +} + +func (ee *exitError) Error() string { + return ee.err.Error() +} + +func (ee *exitError) ExitCode() int { + return ee.exitCode +} + +func (ee *exitError) Unwrap() error { + return ee.err +} + +// HandleExitCoder handles errors implementing ExitCoder by printing their +// message and calling OsExiter with the given exit code. +// +// If the given error instead implements MultiError, each error will be checked +// for the ExitCoder interface, and OsExiter will be called with the last exit +// code found, or exit code 1 if no ExitCoder is found. +// +// This function is the default error-handling behavior for an App. +func HandleExitCoder(err error) { + if err == nil { + return + } + + if exitErr, ok := err.(ExitCoder); ok { + if err.Error() != "" { + if _, ok := exitErr.(ErrorFormatter); ok { + _, _ = fmt.Fprintf(ErrWriter, "%+v\n", err) + } else { + _, _ = fmt.Fprintln(ErrWriter, err) + } + } + OsExiter(exitErr.ExitCode()) + return + } + + if multiErr, ok := err.(MultiError); ok { + code := handleMultiError(multiErr) + OsExiter(code) + return + } +} + +func handleMultiError(multiErr MultiError) int { + code := 1 + for _, merr := range multiErr.Errors() { + if multiErr2, ok := merr.(MultiError); ok { + code = handleMultiError(multiErr2) + } else if merr != nil { + fmt.Fprintln(ErrWriter, merr) + if exitErr, ok := merr.(ExitCoder); ok { + code = exitErr.ExitCode() + } + } + } + return code +} diff --git a/vendor/github.com/urfave/cli/v2/fish.go b/vendor/github.com/urfave/cli/v2/fish.go new file mode 100644 index 000000000..909dfc5a2 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/fish.go @@ -0,0 +1,196 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// ToFishCompletion creates a fish completion string for the `*App` +// The function errors if either parsing or writing of the string fails. 
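
A sketch of generating and installing the script (the target path is an assumption for illustration; fish searches several completion directories):

	script, err := app.ToFishCompletion()
	if err != nil {
		return err
	}
	err = os.WriteFile("/usr/share/fish/vendor_completions.d/greet.fish", []byte(script), 0o644)
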
+func (a *App) ToFishCompletion() (string, error) { + var w bytes.Buffer + if err := a.writeFishCompletionTemplate(&w); err != nil { + return "", err + } + return w.String(), nil +} + +type fishCompletionTemplate struct { + App *App + Completions []string + AllCommands []string +} + +func (a *App) writeFishCompletionTemplate(w io.Writer) error { + const name = "cli" + t, err := template.New(name).Parse(FishCompletionTemplate) + if err != nil { + return err + } + allCommands := []string{} + + // Add global flags + completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) + + // Add help flag + if !a.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., + ) + } + + // Add version flag + if !a.HideVersion { + completions = append( + completions, + a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., + ) + } + + // Add commands and their flags + completions = append( + completions, + a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., + ) + + return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ + App: a, + Completions: completions, + AllCommands: allCommands, + }) +} + +func (a *App) prepareFishCommands(commands []*Command, allCommands *[]string, previousCommands []string) []string { + completions := []string{} + for _, command := range commands { + if command.Hidden { + continue + } + + var completion strings.Builder + completion.WriteString(fmt.Sprintf( + "complete -r -c %s -n '%s' -a '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + strings.Join(command.Names(), " "), + )) + + if command.Usage != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(command.Usage))) + } + + if !command.HideHelp { + completions = append( + completions, + a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., + ) + } + + *allCommands = append(*allCommands, command.Names()...) 
+ completions = append(completions, completion.String()) + completions = append( + completions, + a.prepareFishFlags(command.VisibleFlags(), command.Names())..., + ) + + // recursively iterate subcommands + if len(command.Subcommands) > 0 { + completions = append( + completions, + a.prepareFishCommands( + command.Subcommands, allCommands, command.Names(), + )..., + ) + } + } + + return completions +} + +func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { + completions := []string{} + for _, f := range flags { + flag, ok := f.(DocGenerationFlag) + if !ok { + continue + } + + completion := &strings.Builder{} + completion.WriteString(fmt.Sprintf( + "complete -c %s -n '%s'", + a.Name, + a.fishSubcommandHelper(previousCommands), + )) + + fishAddFileFlag(f, completion) + + for idx, opt := range flag.Names() { + if idx == 0 { + completion.WriteString(fmt.Sprintf( + " -l %s", strings.TrimSpace(opt), + )) + } else { + completion.WriteString(fmt.Sprintf( + " -s %s", strings.TrimSpace(opt), + )) + + } + } + + if flag.TakesValue() { + completion.WriteString(" -r") + } + + if flag.GetUsage() != "" { + completion.WriteString(fmt.Sprintf(" -d '%s'", + escapeSingleQuotes(flag.GetUsage()))) + } + + completions = append(completions, completion.String()) + } + + return completions +} + +func fishAddFileFlag(flag Flag, completion *strings.Builder) { + switch f := flag.(type) { + case *GenericFlag: + if f.TakesFile { + return + } + case *StringFlag: + if f.TakesFile { + return + } + case *StringSliceFlag: + if f.TakesFile { + return + } + case *PathFlag: + if f.TakesFile { + return + } + } + completion.WriteString(" -f") +} + +func (a *App) fishSubcommandHelper(allCommands []string) string { + fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) + if len(allCommands) > 0 { + fishHelper = fmt.Sprintf( + "__fish_seen_subcommand_from %s", + strings.Join(allCommands, " "), + ) + } + return fishHelper + +} + +func escapeSingleQuotes(input string) string { + return strings.Replace(input, `'`, `\'`, -1) +} diff --git a/vendor/github.com/urfave/cli/v2/flag-spec.yaml b/vendor/github.com/urfave/cli/v2/flag-spec.yaml new file mode 100644 index 000000000..03d82e701 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag-spec.yaml @@ -0,0 +1,131 @@ +# NOTE: this file is used by the tool defined in +# ./cmd/urfave-cli-genflags/main.go which uses the +# `Spec` type that maps to this file structure. 
+flag_types: + bool: + struct_fields: + - name: Count + type: int + pointer: true + - name: DisableDefaultText + type: bool + - name: Action + type: "func(*Context, bool) error" + float64: + struct_fields: + - name: Action + type: "func(*Context, float64) error" + Float64Slice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []float64) error" + int: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, int) error" + IntSlice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []int) error" + int64: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, int64) error" + Int64Slice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []int64) error" + uint: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, uint) error" + UintSlice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []uint) error" + uint64: + struct_fields: + - name: Base + type: int + - name: Action + type: "func(*Context, uint64) error" + Uint64Slice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: Action + type: "func(*Context, []uint64) error" + string: + struct_fields: + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, string) error" + StringSlice: + value_pointer: true + skip_interfaces: + - fmt.Stringer + struct_fields: + - name: separator + type: separatorSpec + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, []string) error" + - name: KeepSpace + type: bool + time.Duration: + struct_fields: + - name: Action + type: "func(*Context, time.Duration) error" + Timestamp: + value_pointer: true + struct_fields: + - name: Layout + type: string + - name: Timezone + type: "*time.Location" + - name: Action + type: "func(*Context, *time.Time) error" + Generic: + no_destination_pointer: true + struct_fields: + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, interface{}) error" + Path: + struct_fields: + - name: TakesFile + type: bool + - name: Action + type: "func(*Context, Path) error" diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go new file mode 100644 index 000000000..4d04de3da --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag.go @@ -0,0 +1,419 @@ +package cli + +import ( + "errors" + "flag" + "fmt" + "io" + "os" + "regexp" + "runtime" + "strings" + "syscall" + "time" +) + +const defaultPlaceholder = "value" + +const ( + defaultSliceFlagSeparator = "," + disableSliceFlagSeparator = false +) + +var ( + slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano()) + + commaWhitespace = regexp.MustCompile("[, ]+.*") +) + +// BashCompletionFlag enables bash-completion for all commands and subcommands +var BashCompletionFlag Flag = &BoolFlag{ + Name: "generate-bash-completion", + Hidden: true, +} + +// VersionFlag prints the version for the application +var VersionFlag Flag = &BoolFlag{ + Name: "version", + Aliases: []string{"v"}, + Usage: "print the version", + DisableDefaultText: true, +} + +// HelpFlag prints the 
help for all commands and subcommands. +// Set to nil to disable the flag. The subcommand +// will still be added unless HideHelp or HideHelpCommand is set to true. +var HelpFlag Flag = &BoolFlag{ + Name: "help", + Aliases: []string{"h"}, + Usage: "show help", + DisableDefaultText: true, +} + +// FlagStringer converts a flag definition to a string. This is used by help +// to display a flag. +var FlagStringer FlagStringFunc = stringifyFlag + +// Serializer is used to circumvent the limitations of flag.FlagSet.Set +type Serializer interface { + Serialize() string +} + +// FlagNamePrefixer converts a full flag name and its placeholder into the help +// message flag prefix. This is used by the default FlagStringer. +var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames + +// FlagEnvHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagEnvHinter FlagEnvHintFunc = withEnvHint + +// FlagFileHinter annotates flag help message with the environment variable +// details. This is used by the default FlagStringer. +var FlagFileHinter FlagFileHintFunc = withFileHint + +// FlagsByName is a slice of Flag. +type FlagsByName []Flag + +func (f FlagsByName) Len() int { + return len(f) +} + +func (f FlagsByName) Less(i, j int) bool { + if len(f[j].Names()) == 0 { + return false + } else if len(f[i].Names()) == 0 { + return true + } + return lexicographicLess(f[i].Names()[0], f[j].Names()[0]) +} + +func (f FlagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// ActionableFlag is an interface that wraps Flag interface and RunAction operation. +type ActionableFlag interface { + Flag + RunAction(*Context) error +} + +// Flag is a common interface related to parsing flags in cli. +// For more advanced flag parsing techniques, it is recommended that +// this interface be implemented. +type Flag interface { + fmt.Stringer + // Apply Flag settings to the given flag set + Apply(*flag.FlagSet) error + Names() []string + IsSet() bool +} + +// RequiredFlag is an interface that allows us to mark flags as required +// it allows flags required flags to be backwards compatible with the Flag interface +type RequiredFlag interface { + Flag + + IsRequired() bool +} + +// DocGenerationFlag is an interface that allows documentation generation for the flag +type DocGenerationFlag interface { + Flag + + // TakesValue returns true if the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. + GetValue() string + + // GetDefaultText returns the default text for this flag + GetDefaultText() string + + // GetEnvVars returns the env vars for this flag + GetEnvVars() []string +} + +// DocGenerationSliceFlag extends DocGenerationFlag for slice-based flags. +type DocGenerationSliceFlag interface { + DocGenerationFlag + + // IsSliceFlag returns true for flags that can be given multiple times. + IsSliceFlag() bool +} + +// VisibleFlag is an interface that allows to check if a flag is visible +type VisibleFlag interface { + Flag + + // IsVisible returns true if the flag is not hidden, otherwise false + IsVisible() bool +} + +// CategorizableFlag is an interface that allows us to potentially +// use a flag in a categorized representation. 
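These interfaces are all a third-party flag type needs to satisfy. A sketch of a hypothetical custom flag (`hexFlag`, not part of this change) implementing `Flag`, `VisibleFlag`, and `DocGenerationFlag`, assuming Go 1.16+ for `flag.FlagSet.Func`:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/urfave/cli/v2"
)

// hexFlag stores an unsigned integer parsed from hexadecimal input.
type hexFlag struct {
	Name  string
	Usage string
	value uint64
	set   bool
}

// cli.Flag: fmt.Stringer + Apply + Names + IsSet.
func (h *hexFlag) String() string  { return cli.FlagStringer(h) }
func (h *hexFlag) Names() []string { return []string{h.Name} }
func (h *hexFlag) IsSet() bool     { return h.set }

func (h *hexFlag) Apply(fs *flag.FlagSet) error {
	fs.Func(h.Name, h.Usage, func(s string) error {
		h.set = true
		_, err := fmt.Sscanf(s, "%x", &h.value)
		return err
	})
	return nil
}

// VisibleFlag, so help output does not hide the flag.
func (h *hexFlag) IsVisible() bool { return true }

// DocGenerationFlag, so the default FlagStringer can describe it.
func (h *hexFlag) TakesValue() bool       { return true }
func (h *hexFlag) GetUsage() string       { return h.Usage }
func (h *hexFlag) GetValue() string       { return fmt.Sprintf("%#x", h.value) }
func (h *hexFlag) GetDefaultText() string { return "0x0" }
func (h *hexFlag) GetEnvVars() []string   { return nil }

func main() {
	mask := &hexFlag{Name: "mask", Usage: "bit mask in hex"}
	app := &cli.App{
		Name:  "flowctl", // hypothetical command name
		Flags: []cli.Flag{mask},
		Action: func(c *cli.Context) error {
			fmt.Printf("mask: %#x\n", mask.value)
			return nil
		},
	}
	_ = app.Run([]string{"flowctl", "--mask", "ff"})
}
```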
+type CategorizableFlag interface { + VisibleFlag + + GetCategory() string +} + +// Countable is an interface to enable detection of flag values which support +// repetitive flags +type Countable interface { + Count() int +} + +func flagSet(name string, flags []Flag, spec separatorSpec) (*flag.FlagSet, error) { + set := flag.NewFlagSet(name, flag.ContinueOnError) + + for _, f := range flags { + if c, ok := f.(customizedSeparator); ok { + c.WithSeparatorSpec(spec) + } + if err := f.Apply(set); err != nil { + return nil, err + } + } + set.SetOutput(io.Discard) + return set, nil +} + +func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { + switch ff.Value.(type) { + case Serializer: + _ = set.Set(name, ff.Value.(Serializer).Serialize()) + default: + _ = set.Set(name, ff.Value.String()) + } +} + +func normalizeFlags(flags []Flag, set *flag.FlagSet) error { + visited := make(map[string]bool) + set.Visit(func(f *flag.Flag) { + visited[f.Name] = true + }) + for _, f := range flags { + parts := f.Names() + if len(parts) == 1 { + continue + } + var ff *flag.Flag + for _, name := range parts { + name = strings.Trim(name, " ") + if visited[name] { + if ff != nil { + return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) + } + ff = set.Lookup(name) + } + } + if ff == nil { + continue + } + for _, name := range parts { + name = strings.Trim(name, " ") + if !visited[name] { + copyFlag(name, ff, set) + } + } + } + return nil +} + +func visibleFlags(fl []Flag) []Flag { + var visible []Flag + for _, f := range fl { + if vf, ok := f.(VisibleFlag); ok && vf.IsVisible() { + visible = append(visible, f) + } + } + return visible +} + +func prefixFor(name string) (prefix string) { + if len(name) == 1 { + prefix = "-" + } else { + prefix = "--" + } + + return +} + +// Returns the placeholder, if any, and the unquoted usage string. +func unquoteUsage(usage string) (string, string) { + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name := usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break + } + } + return "", usage +} + +func prefixedNames(names []string, placeholder string) string { + var prefixed string + for i, name := range names { + if name == "" { + continue + } + + prefixed += prefixFor(name) + name + if placeholder != "" { + prefixed += " " + placeholder + } + if i < len(names)-1 { + prefixed += ", " + } + } + return prefixed +} + +func envFormat(envVars []string, prefix, sep, suffix string) string { + if len(envVars) > 0 { + return fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(envVars, sep), suffix) + } + return "" +} + +func defaultEnvFormat(envVars []string) string { + return envFormat(envVars, "$", ", $", "") +} + +func withEnvHint(envVars []string, str string) string { + envText := "" + if runtime.GOOS != "windows" || os.Getenv("PSHOME") != "" { + envText = defaultEnvFormat(envVars) + } else { + envText = envFormat(envVars, "%", "%, %", "%") + } + return str + envText +} + +func FlagNames(name string, aliases []string) []string { + var ret []string + + for _, part := range append([]string{name}, aliases...) { + // v1 -> v2 migration warning zone: + // Strip off anything after the first found comma or space, which + // *hopefully* makes it a tiny bit more obvious that unexpected behavior is + // caused by using the v1 form of stringly typed "Name". 
+ ret = append(ret, commaWhitespace.ReplaceAllString(part, "")) + } + + return ret +} + +func withFileHint(filePath, str string) string { + fileText := "" + if filePath != "" { + fileText = fmt.Sprintf(" [%s]", filePath) + } + return str + fileText +} + +func formatDefault(format string) string { + return " (default: " + format + ")" +} + +func stringifyFlag(f Flag) string { + // enforce DocGeneration interface on flags to avoid reflection + df, ok := f.(DocGenerationFlag) + if !ok { + return "" + } + + placeholder, usage := unquoteUsage(df.GetUsage()) + needsPlaceholder := df.TakesValue() + + if needsPlaceholder && placeholder == "" { + placeholder = defaultPlaceholder + } + + defaultValueString := "" + + // set default text for all flags except bool flags + // for bool flags display default text if DisableDefaultText is not + // set + if bf, ok := f.(*BoolFlag); !ok || !bf.DisableDefaultText { + if s := df.GetDefaultText(); s != "" { + defaultValueString = fmt.Sprintf(formatDefault("%s"), s) + } + } + + usageWithDefault := strings.TrimSpace(usage + defaultValueString) + + pn := prefixedNames(df.Names(), placeholder) + sliceFlag, ok := f.(DocGenerationSliceFlag) + if ok && sliceFlag.IsSliceFlag() { + pn = pn + " [ " + pn + " ]" + } + + return withEnvHint(df.GetEnvVars(), fmt.Sprintf("%s\t%s", pn, usageWithDefault)) +} + +func hasFlag(flags []Flag, fl Flag) bool { + for _, existing := range flags { + if fl == existing { + return true + } + } + + return false +} + +// Return the first value from a list of environment variables and files +// (which may or may not exist), a description of where the value was found, +// and a boolean which is true if a value was found. +func flagFromEnvOrFile(envVars []string, filePath string) (value string, fromWhere string, found bool) { + for _, envVar := range envVars { + envVar = strings.TrimSpace(envVar) + if value, found := syscall.Getenv(envVar); found { + return value, fmt.Sprintf("environment variable %q", envVar), true + } + } + for _, fileVar := range strings.Split(filePath, ",") { + if fileVar != "" { + if data, err := os.ReadFile(fileVar); err == nil { + return string(data), fmt.Sprintf("file %q", filePath), true + } + } + } + return "", "", false +} + +type customizedSeparator interface { + WithSeparatorSpec(separatorSpec) +} + +type separatorSpec struct { + sep string + disabled bool + customized bool +} + +func (s separatorSpec) flagSplitMultiValues(val string) []string { + var ( + disabled bool = s.disabled + sep string = s.sep + ) + if !s.customized { + disabled = disableSliceFlagSeparator + sep = defaultSliceFlagSeparator + } + if disabled { + return []string{val} + } + + return strings.Split(val, sep) +} diff --git a/vendor/github.com/urfave/cli/v2/flag_bool.go b/vendor/github.com/urfave/cli/v2/flag_bool.go new file mode 100644 index 000000000..01862ea76 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_bool.go @@ -0,0 +1,178 @@ +package cli + +import ( + "errors" + "flag" + "fmt" + "strconv" +) + +// boolValue needs to implement the boolFlag internal interface in flag +// to be able to capture bool fields and values +// +// type boolFlag interface { +// Value +// IsBoolFlag() bool +// } +type boolValue struct { + destination *bool + count *int +} + +func newBoolValue(val bool, p *bool, count *int) *boolValue { + *p = val + return &boolValue{ + destination: p, + count: count, + } +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + err = errors.New("parse error") + return err + } + 
*b.destination = v + if b.count != nil { + *b.count = *b.count + 1 + } + return err +} + +func (b *boolValue) Get() interface{} { return *b.destination } + +func (b *boolValue) String() string { + if b.destination != nil { + return strconv.FormatBool(*b.destination) + } + return strconv.FormatBool(false) +} + +func (b *boolValue) IsBoolFlag() bool { return true } + +func (b *boolValue) Count() int { + if b.count != nil && *b.count > 0 { + return *b.count + } + return 0 +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *BoolFlag) TakesValue() bool { + return false +} + +// GetUsage returns the usage string for the flag +func (f *BoolFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *BoolFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *BoolFlag) GetValue() string { + return "" +} + +// GetDefaultText returns the default text for this flag +func (f *BoolFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%v", f.defaultValue) + } + return fmt.Sprintf("%v", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *BoolFlag) GetEnvVars() []string { + return f.EnvVars +} + +// RunAction executes flag action if set +func (f *BoolFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Bool(f.Name)) + } + + return nil +} + +// Apply populates the flag given the flag set and environment +func (f *BoolFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valBool, err := strconv.ParseBool(val) + + if err != nil { + return fmt.Errorf("could not parse %q as bool value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valBool + } else { + // empty value implies that the env is defined but set to empty string, we have to assume that this is + // what the user wants. If user doesnt want this then the env needs to be deleted or the flag removed from + // file + f.Value = false + } + f.HasBeenSet = true + } + + count := f.Count + dest := f.Destination + + if count == nil { + count = new(int) + } + + // since count will be incremented for each alias as well + // subtract number of aliases from overall count + *count -= len(f.Aliases) + + if dest == nil { + dest = new(bool) + } + + for _, name := range f.Names() { + value := newBoolValue(f.Value, dest, count) + set.Var(value, name, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
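Given the alias-aware counting in `Apply` above, a sketch of a repeatable verbosity flag; the `flowctl` name and flag names are illustrative only:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	var verbosity int

	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.BoolFlag{
				Name:    "verbose",
				Aliases: []string{"v"},
				Count:   &verbosity, // incremented once per occurrence
			},
		},
		Action: func(c *cli.Context) error {
			// "flowctl -v -v -v" yields verbosity == 3.
			fmt.Println("verbosity:", verbosity)
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```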
+func (f *BoolFlag) Get(ctx *Context) bool { + return ctx.Bool(f.Name) +} + +// Bool looks up the value of a local BoolFlag, returns +// false if not found +func (cCtx *Context) Bool(name string) bool { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupBool(name, fs) + } + return false +} + +func lookupBool(name string, set *flag.FlagSet) bool { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseBool(f.Value.String()) + if err != nil { + return false + } + return parsed + } + return false +} diff --git a/vendor/github.com/urfave/cli/v2/flag_duration.go b/vendor/github.com/urfave/cli/v2/flag_duration.go new file mode 100644 index 000000000..e600cc30a --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_duration.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *DurationFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *DurationFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *DurationFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *DurationFlag) GetValue() string { + return f.Value.String() +} + +// GetDefaultText returns the default text for this flag +func (f *DurationFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return f.defaultValue.String() + } + return f.Value.String() +} + +// GetEnvVars returns the env vars for this flag +func (f *DurationFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *DurationFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valDuration, err := time.ParseDuration(val) + + if err != nil { + return fmt.Errorf("could not parse %q as duration value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valDuration + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.DurationVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Duration(name, f.Value, f.Usage) + } + return nil +} + +// Get returns the flag’s value in the given Context. 
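A short sketch of the typical pattern: a duration flag with a default that can be overridden from a (hypothetical) environment variable, read back with `ctx.Duration`:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"time"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.DurationFlag{
				Name:    "timeout",
				Value:   30 * time.Second,            // default
				EnvVars: []string{"FLOWCTL_TIMEOUT"}, // hypothetical variable
			},
		},
		Action: func(c *cli.Context) error {
			// Accepts any time.ParseDuration string, e.g. --timeout 1m30s.
			fmt.Println("timeout:", c.Duration("timeout"))
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```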
+func (f *DurationFlag) Get(ctx *Context) time.Duration { + return ctx.Duration(f.Name) +} + +// RunAction executes flag action if set +func (f *DurationFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Duration(f.Name)) + } + + return nil +} + +// Duration looks up the value of a local DurationFlag, returns +// 0 if not found +func (cCtx *Context) Duration(name string) time.Duration { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupDuration(name, fs) + } + return 0 +} + +func lookupDuration(name string, set *flag.FlagSet) time.Duration { + f := set.Lookup(name) + if f != nil { + parsed, err := time.ParseDuration(f.Value.String()) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_ext.go b/vendor/github.com/urfave/cli/v2/flag_ext.go new file mode 100644 index 000000000..64da59ea9 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_ext.go @@ -0,0 +1,48 @@ +package cli + +import "flag" + +type extFlag struct { + f *flag.Flag +} + +func (e *extFlag) Apply(fs *flag.FlagSet) error { + fs.Var(e.f.Value, e.f.Name, e.f.Usage) + return nil +} + +func (e *extFlag) Names() []string { + return []string{e.f.Name} +} + +func (e *extFlag) IsSet() bool { + return false +} + +func (e *extFlag) String() string { + return FlagStringer(e) +} + +func (e *extFlag) IsVisible() bool { + return true +} + +func (e *extFlag) TakesValue() bool { + return false +} + +func (e *extFlag) GetUsage() string { + return e.f.Usage +} + +func (e *extFlag) GetValue() string { + return e.f.Value.String() +} + +func (e *extFlag) GetDefaultText() string { + return e.f.DefValue +} + +func (e *extFlag) GetEnvVars() []string { + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64.go b/vendor/github.com/urfave/cli/v2/flag_float64.go new file mode 100644 index 000000000..6a4de5c88 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64.go @@ -0,0 +1,107 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Float64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64Flag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Float64Flag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Float64Flag) GetValue() string { + return fmt.Sprintf("%v", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *Float64Flag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%v", f.defaultValue) + } + return fmt.Sprintf("%v", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *Float64Flag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *Float64Flag) Apply(set *flag.FlagSet) error { + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valFloat, err := strconv.ParseFloat(val, 64) + if err != nil { + return fmt.Errorf("could not parse %q as float64 value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valFloat + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Float64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Float64(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. +func (f *Float64Flag) Get(ctx *Context) float64 { + return ctx.Float64(f.Name) +} + +// RunAction executes flag action if set +func (f *Float64Flag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Float64(f.Name)) + } + + return nil +} + +// Float64 looks up the value of a local Float64Flag, returns +// 0 if not found +func (cCtx *Context) Float64(name string) float64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupFloat64(name, fs) + } + return 0 +} + +func lookupFloat64(name string, set *flag.FlagSet) float64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseFloat(f.Value.String(), 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_float64_slice.go b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go new file mode 100644 index 000000000..0bc4612c8 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_float64_slice.go @@ -0,0 +1,216 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Float64Slice wraps []float64 to satisfy flag.Value +type Float64Slice struct { + slice []float64 + separator separatorSpec + hasBeenSet bool +} + +// NewFloat64Slice makes a *Float64Slice with default values +func NewFloat64Slice(defaults ...float64) *Float64Slice { + return &Float64Slice{slice: append([]float64{}, defaults...)} +} + +// clone allocate a copy of self object +func (f *Float64Slice) clone() *Float64Slice { + n := &Float64Slice{ + slice: make([]float64, len(f.slice)), + hasBeenSet: f.hasBeenSet, + } + copy(n.slice, f.slice) + return n +} + +func (f *Float64Slice) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Set parses the value into a float64 and appends it to the list of values +func (f *Float64Slice) Set(value string) error { + if !f.hasBeenSet { + f.slice = []float64{} + f.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &f.slice) + f.hasBeenSet = true + return nil + } + + for _, s := range f.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseFloat(strings.TrimSpace(s), 64) + if err != nil { + return err + } + + f.slice = append(f.slice, tmp) 
+ } + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Float64Slice) String() string { + v := f.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]float64, 0) + } + return fmt.Sprintf("%#v", v) +} + +// Serialize allows Float64Slice to fulfill Serializer +func (f *Float64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(f.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of float64s set by this flag +func (f *Float64Slice) Value() []float64 { + return f.slice +} + +// Get returns the slice of float64s set by this flag +func (f *Float64Slice) Get() interface{} { + return *f +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Float64SliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true if the flag takes a value, otherwise false +func (f *Float64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Float64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Float64SliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Float64SliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strings.TrimRight(strings.TrimRight(fmt.Sprintf("%f", i), "0"), ".")) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *Float64SliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *Float64SliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *Float64SliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]float64, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *Float64Slice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(Float64Slice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as float64 slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *Float64SliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. 
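To illustrate the slice semantics above (repeat the flag, or rely on the default `,` separator), a sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.Float64SliceFlag{
				Name:  "sample-rates",
				Value: cli.NewFloat64Slice(0.5, 1.0), // defaults
			},
		},
		Action: func(c *cli.Context) error {
			// Equivalent invocations:
			//   --sample-rates 0.1 --sample-rates 0.9
			//   --sample-rates 0.1,0.9
			fmt.Println(c.Float64Slice("sample-rates"))
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```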
+func (f *Float64SliceFlag) Get(ctx *Context) []float64 { + return ctx.Float64Slice(f.Name) +} + +// RunAction executes flag action if set +func (f *Float64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Float64Slice(f.Name)) + } + + return nil +} + +// Float64Slice looks up the value of a local Float64SliceFlag, returns +// nil if not found +func (cCtx *Context) Float64Slice(name string) []float64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupFloat64Slice(name, fs) + } + return nil +} + +func lookupFloat64Slice(name string, set *flag.FlagSet) []float64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*Float64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_generic.go b/vendor/github.com/urfave/cli/v2/flag_generic.go new file mode 100644 index 000000000..7528c934c --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_generic.go @@ -0,0 +1,131 @@ +package cli + +import ( + "flag" + "fmt" +) + +// Generic is a generic parseable type identified by a specific flag +type Generic interface { + Set(value string) error + String() string +} + +type stringGeneric struct { + value string +} + +func (s *stringGeneric) Set(value string) error { + s.value = value + return nil +} + +func (s *stringGeneric) String() string { + return s.value +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *GenericFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *GenericFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *GenericFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *GenericFlag) GetValue() string { + if f.Value != nil { + return f.Value.String() + } + return "" +} + +// GetDefaultText returns the default text for this flag +func (f *GenericFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val != nil { + return val.String() + } + + return "" +} + +// GetEnvVars returns the env vars for this flag +func (f *GenericFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply takes the flagset and calls Set on the generic flag with the value +// provided by the user for parsing by the flag +func (f *GenericFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + if f.Value != nil { + f.defaultValue = &stringGeneric{value: f.Value.String()} + f.defaultValueSet = true + } + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q from %s as value for flag %s: %s", val, source, f.Name, err) + } + + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Var(f.Destination, name, f.Usage) + continue + } + set.Var(f.Value, name, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
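Any type with `Set(string) error` and `String() string` can back a `GenericFlag`. A sketch with a hypothetical `logLevel` type:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/urfave/cli/v2"
)

// logLevel is a hypothetical type implementing cli.Generic.
type logLevel struct{ name string }

func (l *logLevel) Set(value string) error {
	switch strings.ToLower(value) {
	case "debug", "info", "warn", "error":
		l.name = strings.ToLower(value)
		return nil
	}
	return fmt.Errorf("unknown log level %q", value)
}

func (l *logLevel) String() string { return l.name }

func main() {
	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.GenericFlag{
				Name:  "log-level",
				Value: &logLevel{name: "info"},
			},
		},
		Action: func(c *cli.Context) error {
			// c.Generic returns the flag.Value; assert back to our type.
			lvl := c.Generic("log-level").(*logLevel)
			fmt.Println("log level:", lvl)
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```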
+func (f *GenericFlag) Get(ctx *Context) interface{} { + return ctx.Generic(f.Name) +} + +// RunAction executes flag action if set +func (f *GenericFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Generic(f.Name)) + } + + return nil +} + +// Generic looks up the value of a local GenericFlag, returns +// nil if not found +func (cCtx *Context) Generic(name string) interface{} { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupGeneric(name, fs) + } + return nil +} + +func lookupGeneric(name string, set *flag.FlagSet) interface{} { + if f := set.Lookup(name); f != nil { + return f.Value + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int.go b/vendor/github.com/urfave/cli/v2/flag_int.go new file mode 100644 index 000000000..750e7ebfc --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int.go @@ -0,0 +1,109 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *IntFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *IntFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *IntFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *IntFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *IntFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *IntFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseInt(val, f.Base, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = int(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.IntVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
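Note that `Base` only affects parsing of values coming from `EnvVars`/`FilePath` (see `Apply` above); command-line values go through the standard `flag` package, which accepts `0x` prefixes. A sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.IntFlag{
				Name:    "mask",
				Base:    16,                        // hex parsing for env/file values
				EnvVars: []string{"FLOWCTL_MASK"}, // hypothetical variable
			},
		},
		Action: func(c *cli.Context) error {
			fmt.Printf("mask: %#x\n", c.Int("mask"))
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```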
+func (f *IntFlag) Get(ctx *Context) int { + return ctx.Int(f.Name) +} + +// RunAction executes flag action if set +func (f *IntFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Int(f.Name)) + } + + return nil +} + +// Int looks up the value of a local IntFlag, returns +// 0 if not found +func (cCtx *Context) Int(name string) int { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupInt(name, fs) + } + return 0 +} + +func lookupInt(name string, set *flag.FlagSet) int { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return int(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64.go b/vendor/github.com/urfave/cli/v2/flag_int64.go new file mode 100644 index 000000000..688c26716 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Int64Flag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Int64Flag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Int64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *Int64Flag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *Int64Flag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *Int64Flag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseInt(val, f.Base, 64) + + if err != nil { + return fmt.Errorf("could not parse %q as int value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Int64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Int64(name, f.Value, f.Usage) + } + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *Int64Flag) Get(ctx *Context) int64 { + return ctx.Int64(f.Name) +} + +// RunAction executes flag action if set +func (f *Int64Flag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Int64(f.Name)) + } + + return nil +} + +// Int64 looks up the value of a local Int64Flag, returns +// 0 if not found +func (cCtx *Context) Int64(name string) int64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupInt64(name, fs) + } + return 0 +} + +func lookupInt64(name string, set *flag.FlagSet) int64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int64_slice.go b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go new file mode 100644 index 000000000..d45c2dd44 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int64_slice.go @@ -0,0 +1,215 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Int64Slice wraps []int64 to satisfy flag.Value +type Int64Slice struct { + slice []int64 + separator separatorSpec + hasBeenSet bool +} + +// NewInt64Slice makes an *Int64Slice with default values +func NewInt64Slice(defaults ...int64) *Int64Slice { + return &Int64Slice{slice: append([]int64{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *Int64Slice) clone() *Int64Slice { + n := &Int64Slice{ + slice: make([]int64, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +func (i *Int64Slice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// Set parses the value into an integer and appends it to the list of values +func (i *Int64Slice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int64{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, tmp) + } + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *Int64Slice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]int64, 0) + } + return fmt.Sprintf("%#v", v) +} + +// Serialize allows Int64Slice to fulfill Serializer +func (i *Int64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *Int64Slice) Value() []int64 { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *Int64Slice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *Int64SliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Int64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Int64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Int64SliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string 
representation and an empty +// string if the flag takes no value at all. +func (f *Int64SliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *Int64SliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *Int64SliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *Int64SliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]int64, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *Int64Slice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(Int64Slice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int64 slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *Int64SliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. 
+func (f *Int64SliceFlag) Get(ctx *Context) []int64 { + return ctx.Int64Slice(f.Name) +} + +// RunAction executes flag action if set +func (f *Int64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Int64Slice(f.Name)) + } + + return nil +} + +// Int64Slice looks up the value of a local Int64SliceFlag, returns +// nil if not found +func (cCtx *Context) Int64Slice(name string) []int64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupInt64Slice(name, fs) + } + return nil +} + +func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*Int64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_int_slice.go b/vendor/github.com/urfave/cli/v2/flag_int_slice.go new file mode 100644 index 000000000..da9c09bc7 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_int_slice.go @@ -0,0 +1,226 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// IntSlice wraps []int to satisfy flag.Value +type IntSlice struct { + slice []int + separator separatorSpec + hasBeenSet bool +} + +// NewIntSlice makes an *IntSlice with default values +func NewIntSlice(defaults ...int) *IntSlice { + return &IntSlice{slice: append([]int{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *IntSlice) clone() *IntSlice { + n := &IntSlice{ + slice: make([]int, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +// TODO: Consistently have specific Set function for Int64 and Float64 ? +// SetInt directly adds an integer to the list of values +func (i *IntSlice) SetInt(value int) { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + i.slice = append(i.slice, value) +} + +func (i *IntSlice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// Set parses the value into an integer and appends it to the list of values +func (i *IntSlice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []int{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, int(tmp)) + } + + return nil +} + +// String returns a readable representation of this value (for usage defaults) +func (i *IntSlice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]int, 0) + } + return fmt.Sprintf("%#v", v) +} + +// Serialize allows IntSlice to fulfill Serializer +func (i *IntSlice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *IntSlice) Value() []int { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *IntSlice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *IntSliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *IntSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the 
usage string for the flag +func (f *IntSliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *IntSliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *IntSliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.Itoa(i)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *IntSliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *IntSliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *IntSliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *IntSliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]int, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *IntSlice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(IntSlice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as int slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *IntSliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. 
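Defaults supplied via `NewIntSlice` are replaced, not appended to, on the first explicit `Set`, per the `hasBeenSet` logic above. A sketch with hypothetical names:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.IntSliceFlag{
				Name:  "ports",
				Value: cli.NewIntSlice(80, 443), // defaults, replaced on first Set
			},
		},
		Action: func(c *cli.Context) error {
			// "--ports 8080,9090" or repeated --ports flags override the defaults.
			fmt.Println("ports:", c.IntSlice("ports"))
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```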
+func (f *IntSliceFlag) Get(ctx *Context) []int { + return ctx.IntSlice(f.Name) +} + +// RunAction executes flag action if set +func (f *IntSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.IntSlice(f.Name)) + } + + return nil +} + +// IntSlice looks up the value of a local IntSliceFlag, returns +// nil if not found +func (cCtx *Context) IntSlice(name string) []int { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupIntSlice(name, fs) + } + return nil +} + +func lookupIntSlice(name string, set *flag.FlagSet) []int { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*IntSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_path.go b/vendor/github.com/urfave/cli/v2/flag_path.go new file mode 100644 index 000000000..76cb35248 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_path.go @@ -0,0 +1,102 @@ +package cli + +import ( + "flag" + "fmt" +) + +type Path = string + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *PathFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *PathFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *PathFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *PathFlag) GetValue() string { + return f.Value +} + +// GetDefaultText returns the default text for this flag +func (f *PathFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + if val == "" { + return val + } + return fmt.Sprintf("%q", val) +} + +// GetEnvVars returns the env vars for this flag +func (f *PathFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *PathFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, _, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
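A sketch of a path flag; `TakesFile` feeds the fish completion generator earlier in this diff so shells offer file names. The `kubeconfig` wiring is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "flowctl", // hypothetical command name
		Flags: []cli.Flag{
			&cli.PathFlag{
				Name:      "kubeconfig",
				TakesFile: true, // lets shell completion offer file names
				EnvVars:   []string{"KUBECONFIG"},
			},
		},
		Action: func(c *cli.Context) error {
			fmt.Println("kubeconfig:", c.Path("kubeconfig"))
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```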
+func (f *PathFlag) Get(ctx *Context) string { + return ctx.Path(f.Name) +} + +// RunAction executes flag action if set +func (f *PathFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Path(f.Name)) + } + + return nil +} + +// Path looks up the value of a local PathFlag, returns +// "" if not found +func (cCtx *Context) Path(name string) string { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupPath(name, fs) + } + + return "" +} + +func lookupPath(name string, set *flag.FlagSet) string { + if f := set.Lookup(name); f != nil { + return f.Value.String() + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string.go b/vendor/github.com/urfave/cli/v2/flag_string.go new file mode 100644 index 000000000..0f73e0621 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string.go @@ -0,0 +1,100 @@ +package cli + +import ( + "flag" + "fmt" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *StringFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *StringFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *StringFlag) GetValue() string { + return f.Value +} + +// GetDefaultText returns the default text for this flag +func (f *StringFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val == "" { + return val + } + return fmt.Sprintf("%q", val) +} + +// GetEnvVars returns the env vars for this flag +func (f *StringFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *StringFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, _, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + f.Value = val + f.HasBeenSet = true + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.StringVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.String(name, f.Value, f.Usage) + } + + return nil +} + +// Get returns the flag’s value in the given Context. 
+func (f *StringFlag) Get(ctx *Context) string { + return ctx.String(f.Name) +} + +// RunAction executes flag action if set +func (f *StringFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.String(f.Name)) + } + + return nil +} + +// String looks up the value of a local StringFlag, returns +// "" if not found +func (cCtx *Context) String(name string) string { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupString(name, fs) + } + return "" +} + +func lookupString(name string, set *flag.FlagSet) string { + if f := set.Lookup(name); f != nil { + return f.Value.String() + } + return "" +} diff --git a/vendor/github.com/urfave/cli/v2/flag_string_slice.go b/vendor/github.com/urfave/cli/v2/flag_string_slice.go new file mode 100644 index 000000000..28f4798f5 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_string_slice.go @@ -0,0 +1,216 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// StringSlice wraps a []string to satisfy flag.Value +type StringSlice struct { + slice []string + separator separatorSpec + hasBeenSet bool + keepSpace bool +} + +// NewStringSlice creates a *StringSlice with default values +func NewStringSlice(defaults ...string) *StringSlice { + return &StringSlice{slice: append([]string{}, defaults...)} +} + +// clone allocate a copy of self object +func (s *StringSlice) clone() *StringSlice { + n := &StringSlice{ + slice: make([]string, len(s.slice)), + hasBeenSet: s.hasBeenSet, + } + copy(n.slice, s.slice) + return n +} + +// Set appends the string value to the list of values +func (s *StringSlice) Set(value string) error { + if !s.hasBeenSet { + s.slice = []string{} + s.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &s.slice) + s.hasBeenSet = true + return nil + } + + for _, t := range s.separator.flagSplitMultiValues(value) { + if !s.keepSpace { + t = strings.TrimSpace(t) + } + s.slice = append(s.slice, t) + } + + return nil +} + +func (s *StringSlice) WithSeparatorSpec(spec separatorSpec) { + s.separator = spec +} + +// String returns a readable representation of this value (for usage defaults) +func (s *StringSlice) String() string { + return fmt.Sprintf("%s", s.slice) +} + +// Serialize allows StringSlice to fulfill Serializer +func (s *StringSlice) Serialize() string { + jsonBytes, _ := json.Marshal(s.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of strings set by this flag +func (s *StringSlice) Value() []string { + return s.slice +} + +// Get returns the slice of strings set by this flag +func (s *StringSlice) Get() interface{} { + return *s +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f *StringSliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *StringSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *StringSliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *StringSliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *StringSliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, s := range f.Value.Value() { + if len(s) > 0 { + defaultVals = append(defaultVals, strconv.Quote(s)) + } + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *StringSliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *StringSliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *StringSliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]string, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *StringSlice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(StringSlice) + setValue.WithSeparatorSpec(f.separator) + } + + setValue.keepSpace = f.KeepSpace + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + for _, s := range f.separator.flagSplitMultiValues(val) { + if !f.KeepSpace { + s = strings.TrimSpace(s) + } + if err := setValue.Set(s); err != nil { + return fmt.Errorf("could not parse %q as string value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *StringSliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. +func (f *StringSliceFlag) Get(ctx *Context) []string { + return ctx.StringSlice(f.Name) +} + +// RunAction executes flag action if set +func (f *StringSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.StringSlice(f.Name)) + } + + return nil +} + +// StringSlice looks up the value of a local StringSliceFlag, returns +// nil if not found +func (cCtx *Context) StringSlice(name string) []string { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupStringSlice(name, fs) + } + return nil +} + +func lookupStringSlice(name string, set *flag.FlagSet) []string { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*StringSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_timestamp.go b/vendor/github.com/urfave/cli/v2/flag_timestamp.go new file mode 100644 index 000000000..b90123087 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_timestamp.go @@ -0,0 +1,205 @@ +package cli + +import ( + "flag" + "fmt" + "time" +) + +// Timestamp wrap to satisfy golang's flag interface. 
+type Timestamp struct {
+	timestamp  *time.Time
+	hasBeenSet bool
+	layout     string
+	location   *time.Location
+}
+
+// Timestamp constructor
+func NewTimestamp(timestamp time.Time) *Timestamp {
+	return &Timestamp{timestamp: &timestamp}
+}
+
+// Set the timestamp value directly
+func (t *Timestamp) SetTimestamp(value time.Time) {
+	if !t.hasBeenSet {
+		t.timestamp = &value
+		t.hasBeenSet = true
+	}
+}
+
+// Set the timestamp string layout for future parsing
+func (t *Timestamp) SetLayout(layout string) {
+	t.layout = layout
+}
+
+// Set perceived timezone of the to-be parsed time string
+func (t *Timestamp) SetLocation(loc *time.Location) {
+	t.location = loc
+}
+
+// Parses the string value to timestamp
+func (t *Timestamp) Set(value string) error {
+	var timestamp time.Time
+	var err error
+
+	if t.location != nil {
+		timestamp, err = time.ParseInLocation(t.layout, value, t.location)
+	} else {
+		timestamp, err = time.Parse(t.layout, value)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	t.timestamp = &timestamp
+	t.hasBeenSet = true
+	return nil
+}
+
+// String returns a readable representation of this value (for usage defaults)
+func (t *Timestamp) String() string {
+	return fmt.Sprintf("%#v", t.timestamp)
+}
+
+// Value returns the timestamp value stored in the flag
+func (t *Timestamp) Value() *time.Time {
+	return t.timestamp
+}
+
+// Get returns the flag structure
+func (t *Timestamp) Get() interface{} {
+	return *t
+}
+
+// clone timestamp
+func (t *Timestamp) clone() *Timestamp {
+	tc := &Timestamp{
+		timestamp:  nil,
+		hasBeenSet: t.hasBeenSet,
+		layout:     t.layout,
+		location:   nil,
+	}
+	if t.timestamp != nil {
+		tts := *t.timestamp
+		tc.timestamp = &tts
+	}
+	if t.location != nil {
+		loc := *t.location
+		tc.location = &loc
+	}
+	return tc
+}
+
+// TakesValue returns true of the flag takes a value, otherwise false
+func (f *TimestampFlag) TakesValue() bool {
+	return true
+}
+
+// GetUsage returns the usage string for the flag
+func (f *TimestampFlag) GetUsage() string {
+	return f.Usage
+}
+
+// GetCategory returns the category for the flag
+func (f *TimestampFlag) GetCategory() string {
+	return f.Category
+}
+
+// GetValue returns the flags value as string representation and an empty
+// string if the flag takes no value at all.
+func (f *TimestampFlag) GetValue() string { + if f.Value != nil && f.Value.timestamp != nil { + return f.Value.timestamp.String() + } + return "" +} + +// GetDefaultText returns the default text for this flag +func (f *TimestampFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + val := f.Value + if f.defaultValueSet { + val = f.defaultValue + } + + if val != nil && val.timestamp != nil { + return val.timestamp.String() + } + + return "" +} + +// GetEnvVars returns the env vars for this flag +func (f *TimestampFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Apply populates the flag given the flag set and environment +func (f *TimestampFlag) Apply(set *flag.FlagSet) error { + if f.Layout == "" { + return fmt.Errorf("timestamp Layout is required") + } + if f.Value == nil { + f.Value = &Timestamp{} + } + f.Value.SetLayout(f.Layout) + f.Value.SetLocation(f.Timezone) + + f.defaultValue = f.Value.clone() + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if err := f.Value.Set(val); err != nil { + return fmt.Errorf("could not parse %q as timestamp value from %s for flag %s: %s", val, source, f.Name, err) + } + f.HasBeenSet = true + } + + if f.Destination != nil { + *f.Destination = *f.Value + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Var(f.Destination, name, f.Usage) + continue + } + + set.Var(f.Value, name, f.Usage) + } + return nil +} + +// Get returns the flag’s value in the given Context. +func (f *TimestampFlag) Get(ctx *Context) *time.Time { + return ctx.Timestamp(f.Name) +} + +// RunAction executes flag action if set +func (f *TimestampFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Timestamp(f.Name)) + } + + return nil +} + +// Timestamp gets the timestamp from a flag name +func (cCtx *Context) Timestamp(name string) *time.Time { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupTimestamp(name, fs) + } + return nil +} + +// Fetches the timestamp value from the local timestampWrap +func lookupTimestamp(name string, set *flag.FlagSet) *time.Time { + f := set.Lookup(name) + if f != nil { + return (f.Value.(*Timestamp)).Value() + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint.go b/vendor/github.com/urfave/cli/v2/flag_uint.go new file mode 100644 index 000000000..8d5b85458 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *UintFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *UintFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *UintFlag) GetCategory() string { + return f.Category +} + +// Apply populates the flag given the flag set and environment +func (f *UintFlag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseUint(val, f.Base, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = uint(valInt) + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != 
nil { + set.UintVar(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint(name, f.Value, f.Usage) + } + + return nil +} + +// RunAction executes flag action if set +func (f *UintFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint(f.Name)) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *UintFlag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *UintFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *UintFlag) GetEnvVars() []string { + return f.EnvVars +} + +// Get returns the flag’s value in the given Context. +func (f *UintFlag) Get(ctx *Context) uint { + return ctx.Uint(f.Name) +} + +// Uint looks up the value of a local UintFlag, returns +// 0 if not found +func (cCtx *Context) Uint(name string) uint { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUint(name, fs) + } + return 0 +} + +func lookupUint(name string, set *flag.FlagSet) uint { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return uint(parsed) + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64.go b/vendor/github.com/urfave/cli/v2/flag_uint64.go new file mode 100644 index 000000000..c356e533b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint64.go @@ -0,0 +1,108 @@ +package cli + +import ( + "flag" + "fmt" + "strconv" +) + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Uint64Flag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Uint64Flag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Uint64Flag) GetCategory() string { + return f.Category +} + +// Apply populates the flag given the flag set and environment +func (f *Uint64Flag) Apply(set *flag.FlagSet) error { + // set default value so that environment wont be able to overwrite it + f.defaultValue = f.Value + f.defaultValueSet = true + + if val, source, found := flagFromEnvOrFile(f.EnvVars, f.FilePath); found { + if val != "" { + valInt, err := strconv.ParseUint(val, f.Base, 64) + if err != nil { + return fmt.Errorf("could not parse %q as uint64 value from %s for flag %s: %s", val, source, f.Name, err) + } + + f.Value = valInt + f.HasBeenSet = true + } + } + + for _, name := range f.Names() { + if f.Destination != nil { + set.Uint64Var(f.Destination, name, f.Value, f.Usage) + continue + } + set.Uint64(name, f.Value, f.Usage) + } + + return nil +} + +// RunAction executes flag action if set +func (f *Uint64Flag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint64(f.Name)) + } + + return nil +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. 
+func (f *Uint64Flag) GetValue() string { + return fmt.Sprintf("%d", f.Value) +} + +// GetDefaultText returns the default text for this flag +func (f *Uint64Flag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + if f.defaultValueSet { + return fmt.Sprintf("%d", f.defaultValue) + } + return fmt.Sprintf("%d", f.Value) +} + +// GetEnvVars returns the env vars for this flag +func (f *Uint64Flag) GetEnvVars() []string { + return f.EnvVars +} + +// Get returns the flag’s value in the given Context. +func (f *Uint64Flag) Get(ctx *Context) uint64 { + return ctx.Uint64(f.Name) +} + +// Uint64 looks up the value of a local Uint64Flag, returns +// 0 if not found +func (cCtx *Context) Uint64(name string) uint64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUint64(name, fs) + } + return 0 +} + +func lookupUint64(name string, set *flag.FlagSet) uint64 { + f := set.Lookup(name) + if f != nil { + parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) + if err != nil { + return 0 + } + return parsed + } + return 0 +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go new file mode 100644 index 000000000..d34201868 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go @@ -0,0 +1,219 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// Uint64Slice wraps []int64 to satisfy flag.Value +type Uint64Slice struct { + slice []uint64 + separator separatorSpec + hasBeenSet bool +} + +// NewUint64Slice makes an *Uint64Slice with default values +func NewUint64Slice(defaults ...uint64) *Uint64Slice { + return &Uint64Slice{slice: append([]uint64{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *Uint64Slice) clone() *Uint64Slice { + n := &Uint64Slice{ + slice: make([]uint64, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +// Set parses the value into an integer and appends it to the list of values +func (i *Uint64Slice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []uint64{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseUint(strings.TrimSpace(s), 0, 64) + if err != nil { + return err + } + + i.slice = append(i.slice, tmp) + } + + return nil +} + +func (i *Uint64Slice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// String returns a readable representation of this value (for usage defaults) +func (i *Uint64Slice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]uint64, 0) + } + str := fmt.Sprintf("%d", v) + str = strings.Replace(str, " ", ", ", -1) + str = strings.Replace(str, "[", "{", -1) + str = strings.Replace(str, "]", "}", -1) + return fmt.Sprintf("[]uint64%s", str) +} + +// Serialize allows Uint64Slice to fulfill Serializer +func (i *Uint64Slice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *Uint64Slice) Value() []uint64 { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *Uint64Slice) Get() interface{} { + return *i +} + +// String returns a 
readable representation of this value +// (for usage defaults) +func (f *Uint64SliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *Uint64SliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *Uint64SliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *Uint64SliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *Uint64SliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatUint(i, 10)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *Uint64SliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *Uint64SliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *Uint64SliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *Uint64SliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]uint64, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *Uint64Slice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(Uint64Slice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as uint64 slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *Uint64SliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. 
+func (f *Uint64SliceFlag) Get(ctx *Context) []uint64 { + return ctx.Uint64Slice(f.Name) +} + +// RunAction executes flag action if set +func (f *Uint64SliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.Uint64Slice(f.Name)) + } + + return nil +} + +// Uint64Slice looks up the value of a local Uint64SliceFlag, returns +// nil if not found +func (cCtx *Context) Uint64Slice(name string) []uint64 { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUint64Slice(name, fs) + } + return nil +} + +func lookupUint64Slice(name string, set *flag.FlagSet) []uint64 { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*Uint64Slice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/flag_uint_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go new file mode 100644 index 000000000..4dc13e126 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go @@ -0,0 +1,230 @@ +package cli + +import ( + "encoding/json" + "flag" + "fmt" + "strconv" + "strings" +) + +// UintSlice wraps []int to satisfy flag.Value +type UintSlice struct { + slice []uint + separator separatorSpec + hasBeenSet bool +} + +// NewUintSlice makes an *UintSlice with default values +func NewUintSlice(defaults ...uint) *UintSlice { + return &UintSlice{slice: append([]uint{}, defaults...)} +} + +// clone allocate a copy of self object +func (i *UintSlice) clone() *UintSlice { + n := &UintSlice{ + slice: make([]uint, len(i.slice)), + hasBeenSet: i.hasBeenSet, + } + copy(n.slice, i.slice) + return n +} + +// TODO: Consistently have specific Set function for Int64 and Float64 ? +// SetInt directly adds an integer to the list of values +func (i *UintSlice) SetUint(value uint) { + if !i.hasBeenSet { + i.slice = []uint{} + i.hasBeenSet = true + } + + i.slice = append(i.slice, value) +} + +// Set parses the value into an integer and appends it to the list of values +func (i *UintSlice) Set(value string) error { + if !i.hasBeenSet { + i.slice = []uint{} + i.hasBeenSet = true + } + + if strings.HasPrefix(value, slPfx) { + // Deserializing assumes overwrite + _ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &i.slice) + i.hasBeenSet = true + return nil + } + + for _, s := range i.separator.flagSplitMultiValues(value) { + tmp, err := strconv.ParseUint(strings.TrimSpace(s), 0, 32) + if err != nil { + return err + } + + i.slice = append(i.slice, uint(tmp)) + } + + return nil +} + +func (i *UintSlice) WithSeparatorSpec(spec separatorSpec) { + i.separator = spec +} + +// String returns a readable representation of this value (for usage defaults) +func (i *UintSlice) String() string { + v := i.slice + if v == nil { + // treat nil the same as zero length non-nil + v = make([]uint, 0) + } + str := fmt.Sprintf("%d", v) + str = strings.Replace(str, " ", ", ", -1) + str = strings.Replace(str, "[", "{", -1) + str = strings.Replace(str, "]", "}", -1) + return fmt.Sprintf("[]uint%s", str) +} + +// Serialize allows UintSlice to fulfill Serializer +func (i *UintSlice) Serialize() string { + jsonBytes, _ := json.Marshal(i.slice) + return fmt.Sprintf("%s%s", slPfx, string(jsonBytes)) +} + +// Value returns the slice of ints set by this flag +func (i *UintSlice) Value() []uint { + return i.slice +} + +// Get returns the slice of ints set by this flag +func (i *UintSlice) Get() interface{} { + return *i +} + +// String returns a readable representation of this value +// (for usage defaults) +func (f 
*UintSliceFlag) String() string { + return FlagStringer(f) +} + +// TakesValue returns true of the flag takes a value, otherwise false +func (f *UintSliceFlag) TakesValue() bool { + return true +} + +// GetUsage returns the usage string for the flag +func (f *UintSliceFlag) GetUsage() string { + return f.Usage +} + +// GetCategory returns the category for the flag +func (f *UintSliceFlag) GetCategory() string { + return f.Category +} + +// GetValue returns the flags value as string representation and an empty +// string if the flag takes no value at all. +func (f *UintSliceFlag) GetValue() string { + var defaultVals []string + if f.Value != nil && len(f.Value.Value()) > 0 { + for _, i := range f.Value.Value() { + defaultVals = append(defaultVals, strconv.FormatUint(uint64(i), 10)) + } + } + return strings.Join(defaultVals, ", ") +} + +// GetDefaultText returns the default text for this flag +func (f *UintSliceFlag) GetDefaultText() string { + if f.DefaultText != "" { + return f.DefaultText + } + return f.GetValue() +} + +// GetEnvVars returns the env vars for this flag +func (f *UintSliceFlag) GetEnvVars() []string { + return f.EnvVars +} + +// IsSliceFlag implements DocGenerationSliceFlag. +func (f *UintSliceFlag) IsSliceFlag() bool { + return true +} + +// Apply populates the flag given the flag set and environment +func (f *UintSliceFlag) Apply(set *flag.FlagSet) error { + // apply any default + if f.Destination != nil && f.Value != nil { + f.Destination.slice = make([]uint, len(f.Value.slice)) + copy(f.Destination.slice, f.Value.slice) + } + + // resolve setValue (what we will assign to the set) + var setValue *UintSlice + switch { + case f.Destination != nil: + setValue = f.Destination + case f.Value != nil: + setValue = f.Value.clone() + default: + setValue = new(UintSlice) + setValue.WithSeparatorSpec(f.separator) + } + + if val, source, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok && val != "" { + for _, s := range f.separator.flagSplitMultiValues(val) { + if err := setValue.Set(strings.TrimSpace(s)); err != nil { + return fmt.Errorf("could not parse %q as uint slice value from %s for flag %s: %s", val, source, f.Name, err) + } + } + + // Set this to false so that we reset the slice if we then set values from + // flags that have already been set by the environment. + setValue.hasBeenSet = false + f.HasBeenSet = true + } + + for _, name := range f.Names() { + set.Var(setValue, name, f.Usage) + } + + return nil +} + +func (f *UintSliceFlag) WithSeparatorSpec(spec separatorSpec) { + f.separator = spec +} + +// Get returns the flag’s value in the given Context. 
+func (f *UintSliceFlag) Get(ctx *Context) []uint { + return ctx.UintSlice(f.Name) +} + +// RunAction executes flag action if set +func (f *UintSliceFlag) RunAction(c *Context) error { + if f.Action != nil { + return f.Action(c, c.UintSlice(f.Name)) + } + + return nil +} + +// UintSlice looks up the value of a local UintSliceFlag, returns +// nil if not found +func (cCtx *Context) UintSlice(name string) []uint { + if fs := cCtx.lookupFlagSet(name); fs != nil { + return lookupUintSlice(name, fs) + } + return nil +} + +func lookupUintSlice(name string, set *flag.FlagSet) []uint { + f := set.Lookup(name) + if f != nil { + if slice, ok := unwrapFlagValue(f.Value).(*UintSlice); ok { + return slice.Value() + } + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/funcs.go b/vendor/github.com/urfave/cli/v2/funcs.go new file mode 100644 index 000000000..e77b0d0a1 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/funcs.go @@ -0,0 +1,47 @@ +package cli + +// BashCompleteFunc is an action to execute when the shell completion flag is set +type BashCompleteFunc func(*Context) + +// BeforeFunc is an action to execute before any subcommands are run, but after +// the context is ready if a non-nil error is returned, no subcommands are run +type BeforeFunc func(*Context) error + +// AfterFunc is an action to execute after any subcommands are run, but after the +// subcommand has finished it is run even if Action() panics +type AfterFunc func(*Context) error + +// ActionFunc is the action to execute when no subcommands are specified +type ActionFunc func(*Context) error + +// CommandNotFoundFunc is executed if the proper command cannot be found +type CommandNotFoundFunc func(*Context, string) + +// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying +// customized usage error messages. This function is able to replace the +// original error messages. If this function is not set, the "Incorrect usage" +// is displayed and the execution is interrupted. +type OnUsageErrorFunc func(cCtx *Context, err error, isSubcommand bool) error + +// InvalidFlagAccessFunc is executed when an invalid flag is accessed from the context. +type InvalidFlagAccessFunc func(*Context, string) + +// ExitErrHandlerFunc is executed if provided in order to handle exitError values +// returned by Actions and Before/After functions. +type ExitErrHandlerFunc func(cCtx *Context, err error) + +// FlagStringFunc is used by the help generation to display a flag, which is +// expected to be a single line. +type FlagStringFunc func(Flag) string + +// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix +// text for a flag's full name. +type FlagNamePrefixFunc func(fullName []string, placeholder string) string + +// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help +// with the environment variable details. +type FlagEnvHintFunc func(envVars []string, str string) string + +// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help +// with the file path details. +type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt new file mode 100644 index 000000000..4b620feeb --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt @@ -0,0 +1,2724 @@ +package cli // import "github.com/urfave/cli/v2" + +Package cli provides a minimal framework for creating and organizing command +line Go applications. 
cli is designed to be easy to understand and write, +the most simple cli application can be written as follows: + + func main() { + (&cli.App{}).Run(os.Args) + } + +Of course this application does not do much, so let's make this an actual +application: + + func main() { + app := &cli.App{ + Name: "greet", + Usage: "say a greeting", + Action: func(c *cli.Context) error { + fmt.Println("Greetings") + return nil + }, + } + + app.Run(os.Args) + } + +VARIABLES + +var ( + SuggestFlag SuggestFlagFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestCommand SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest + SuggestDidYouMeanTemplate string = suggestDidYouMeanTemplate +) +var AppHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}} +{{- if len .Authors}} + +AUTHOR{{template "authorsTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +GLOBAL OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +GLOBAL OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{template "copyrightTemplate" .}}{{end}} +` + AppHelpTemplate is the text template for the Default help topic. cli.go + uses text/template to render templates. You can render custom help text by + setting this variable. + +var CommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{template "usageTemplate" .}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + CommandHelpTemplate is the text template for the command help topic. cli.go + uses text/template to render templates. You can render custom help text by + setting this variable. + +var ErrWriter io.Writer = os.Stderr + ErrWriter is used to write errors to the user. This can be anything + implementing the io.Writer interface and defaults to os.Stderr. + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` +var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }} + +{{end}}# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.Description }} +# DESCRIPTION + +{{ .App.Description }} +{{ end }} +**Usage**: + +` + "```" + `{{ if .App.UsageText }} +{{ .App.UsageText }} +{{ else }} +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] 
+{{ end }}` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` +var OsExiter = os.Exit + OsExiter is the function used when the app exits. If not set defaults to + os.Exit. + +var SubcommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + SubcommandHelpTemplate is the text template for the subcommand help topic. + cli.go uses text/template to render templates. You can render custom help + text by setting this variable. + +var VersionPrinter = printVersion + VersionPrinter prints the version for the App + +var HelpPrinter helpPrinter = printHelp + HelpPrinter is a function that writes the help output. If not set + explicitly, this calls HelpPrinterCustom using only the default template + functions. + + If custom logic for printing help is required, this function can be + overridden. If the ExtraInfo field is defined on an App, this function + should not be modified, as HelpPrinterCustom will be used directly in order + to capture the extra information. + +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + HelpPrinterCustom is a function that writes the help output. It is used as + the default implementation of HelpPrinter, and may be called directly if the + ExtraInfo field is set on an App. + + In the default implementation, if the customFuncs argument contains a + "wrapAt" key, which is a function which takes no arguments and returns an + int, this int value will be used to produce a "wrap" function used by the + default template to wrap long lines. + + +FUNCTIONS + +func DefaultAppComplete(cCtx *Context) + DefaultAppComplete prints the list of subcommands as the default app + completion method + +func DefaultCompleteWithFlags(cmd *Command) func(cCtx *Context) +func FlagNames(name string, aliases []string) []string +func HandleAction(action interface{}, cCtx *Context) (err error) + HandleAction attempts to figure out which Action signature was used. + If it's an ActionFunc or a func with the legacy signature for Action, + the func is run! + +func HandleExitCoder(err error) + HandleExitCoder handles errors implementing ExitCoder by printing their + message and calling OsExiter with the given exit code. + + If the given error instead implements MultiError, each error will be checked + for the ExitCoder interface, and OsExiter will be called with the last exit + code found, or exit code 1 if no ExitCoder is found. + + This function is the default error-handling behavior for an App. + +func ShowAppHelp(cCtx *Context) error + ShowAppHelp is an action that displays the help. + +func ShowAppHelpAndExit(c *Context, exitCode int) + ShowAppHelpAndExit - Prints the list of subcommands for the app and exits + with exit code. 
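The template and printer hooks documented above (AppHelpTemplate, HelpPrinter, HelpPrinterCustom, OsExiter) are plain package-level variables, so an application customizes help output by reassigning them before App.Run is called. A minimal sketch of that pattern, separate from the vendored sources (the app name "demo" and the support URL are illustrative placeholders):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	// Extend the default help template with an extra section.
	cli.AppHelpTemplate += "\nSUPPORT:\n   https://example.com/support\n"

	// Wrap the default printer; passing nil customFuncs makes
	// HelpPrinterCustom fall back to the default template functions.
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "== demo ==")
		cli.HelpPrinterCustom(w, templ, data, nil)
	}

	app := &cli.App{Name: "demo", Usage: "say a greeting"}
	_ = app.Run(os.Args) // `demo --help` now goes through the hooks above
}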
+ +func ShowCommandCompletions(ctx *Context, command string) + ShowCommandCompletions prints the custom completions for a given command + +func ShowCommandHelp(ctx *Context, command string) error + ShowCommandHelp prints help for the given command + +func ShowCommandHelpAndExit(c *Context, command string, code int) + ShowCommandHelpAndExit - exits with code after showing help + +func ShowCompletions(cCtx *Context) + ShowCompletions prints the lists of commands within a given context + +func ShowSubcommandHelp(cCtx *Context) error + ShowSubcommandHelp prints help for the given subcommand + +func ShowSubcommandHelpAndExit(c *Context, exitCode int) + ShowSubcommandHelpAndExit - Prints help for the given subcommand and exits + with exit code. + +func ShowVersion(cCtx *Context) + ShowVersion prints the version number of the App + + +TYPES + +type ActionFunc func(*Context) error + ActionFunc is the action to execute when no subcommands are specified + +type ActionableFlag interface { + Flag + RunAction(*Context) error +} + ActionableFlag is an interface that wraps Flag interface and RunAction + operation. + +type AfterFunc func(*Context) error + AfterFunc is an action to execute after any subcommands are run, but after + the subcommand has finished it is run even if Action() panics + +type App struct { + // The name of the program. Defaults to path.Base(os.Args[0]) + Name string + // Full name of command for help, defaults to Name + HelpName string + // Description of the program. + Usage string + // Text to override the USAGE section of help + UsageText string + // Whether this command supports arguments + Args bool + // Description of the program argument format. + ArgsUsage string + // Version of the program + Version string + // Description of the program + Description string + // DefaultCommand is the (optional) name of a command + // to run if no command names are passed as CLI arguments. + DefaultCommand string + // List of commands to execute + Commands []*Command + // List of flags to parse + Flags []Flag + // Boolean to enable bash completion commands + EnableBashCompletion bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag. + // Ignored if HideHelp is true. 
+ HideHelpCommand bool + // Boolean to hide built-in version flag and the VERSION section of help + HideVersion bool + + // An action to execute when the shell completion flag is set + BashComplete BashCompleteFunc + // An action to execute before any subcommands are run, but after the context is ready + // If a non-nil error is returned, no subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The action to execute when no subcommands are specified + Action ActionFunc + // Execute this function if the proper command cannot be found + CommandNotFound CommandNotFoundFunc + // Execute this function if a usage error occurs + OnUsageError OnUsageErrorFunc + // Execute this function when an invalid flag is accessed from the context + InvalidFlagAccessHandler InvalidFlagAccessFunc + // Compilation date + Compiled time.Time + // List of all authors who contributed + Authors []*Author + // Copyright of the binary if any + Copyright string + // Reader reader to write input to (useful for tests) + Reader io.Reader + // Writer writer to write output to + Writer io.Writer + // ErrWriter writes error output + ErrWriter io.Writer + // ExitErrHandler processes any error encountered while running an App before + // it is returned to the caller. If no function is provided, HandleExitCoder + // is used as the default behavior. + ExitErrHandler ExitErrHandlerFunc + // Other custom info + Metadata map[string]interface{} + // Carries a function which returns app specific info. + ExtraInfo func() map[string]string + // CustomAppHelpTemplate the text template for app help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomAppHelpTemplate string + // SliceFlagSeparator is used to customize the separator for SliceFlag, the default is "," + SliceFlagSeparator string + // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false + DisableSliceFlagSeparator bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + // Enable suggestions for commands and flags + Suggest bool + // Allows global flags set by libraries which use flag.XXXVar(...) directly + // to be parsed through this library + AllowExtFlags bool + // Treat all flags as normal arguments if true + SkipFlagParsing bool + + // Has unexported fields. +} + App is the main structure of a cli application. It is recommended that an + app be created with the cli.NewApp() function + +func NewApp() *App + NewApp creates a new cli Application with some reasonable defaults for Name, + Usage, Version and Action. + +func (a *App) Command(name string) *Command + Command returns the named command on App. Returns nil if the command does + not exist + +func (a *App) Run(arguments []string) (err error) + Run is the entry point to the cli app. Parses the arguments slice and routes + to the proper flag/args combination + +func (a *App) RunAndExitOnError() + RunAndExitOnError calls .Run() and exits non-zero if an error was returned + + Deprecated: instead you should return an error that fulfills cli.ExitCoder + to cli.App.Run. 
This will cause the application to exit with the given error + code in the cli.ExitCoder + +func (a *App) RunAsSubcommand(ctx *Context) (err error) + RunAsSubcommand is for legacy/compatibility purposes only. New code should + only use App.RunContext. This function is slated to be removed in v3. + +func (a *App) RunContext(ctx context.Context, arguments []string) (err error) + RunContext is like Run except it takes a Context that will be passed to + its commands and sub-commands. Through this, you can propagate timeouts and + cancellation requests + +func (a *App) Setup() + Setup runs initialization code to ensure all data structures are ready + for `Run` or inspection prior to `Run`. It is internally called by `Run`, + but will return early if setup has already happened. + +func (a *App) ToFishCompletion() (string, error) + ToFishCompletion creates a fish completion string for the `*App` The + function errors if either parsing or writing of the string fails. + +func (a *App) ToMan() (string, error) + ToMan creates a man page string for the `*App` The function errors if either + parsing or writing of the string fails. + +func (a *App) ToManWithSection(sectionNumber int) (string, error) + ToMan creates a man page string with section number for the `*App` The + function errors if either parsing or writing of the string fails. + +func (a *App) ToMarkdown() (string, error) + ToMarkdown creates a markdown string for the `*App` The function errors if + either parsing or writing of the string fails. + +func (a *App) VisibleCategories() []CommandCategory + VisibleCategories returns a slice of categories and commands that are + Hidden=false + +func (a *App) VisibleCommands() []*Command + VisibleCommands returns a slice of the Commands with Hidden=false + +func (a *App) VisibleFlagCategories() []VisibleFlagCategory + VisibleFlagCategories returns a slice containing all the categories with the + flags they contain + +func (a *App) VisibleFlags() []Flag + VisibleFlags returns a slice of the Flags with Hidden=false + +type Args interface { + // Get returns the nth argument, or else a blank string + Get(n int) string + // First returns the first argument, or else a blank string + First() string + // Tail returns the rest of the arguments (not the first one) + // or else an empty string slice + Tail() []string + // Len returns the length of the wrapped slice + Len() int + // Present checks if there are any arguments present + Present() bool + // Slice returns a copy of the internal slice + Slice() []string +} + +type Author struct { + Name string // The Authors name + Email string // The Authors email +} + Author represents someone who has contributed to a cli project. + +func (a *Author) String() string + String makes Author comply to the Stringer interface, to allow an easy print + in the templating process + +type BashCompleteFunc func(*Context) + BashCompleteFunc is an action to execute when the shell completion flag is + set + +type BeforeFunc func(*Context) error + BeforeFunc is an action to execute before any subcommands are run, but after + the context is ready if a non-nil error is returned, no subcommands are run + +type BoolFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value bool + Destination *bool + + Aliases []string + EnvVars []string + + Count *int + + DisableDefaultText bool + + Action func(*Context, bool) error + // Has unexported fields. 
+} + BoolFlag is a flag with type bool + +func (f *BoolFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *BoolFlag) Get(ctx *Context) bool + Get returns the flag’s value in the given Context. + +func (f *BoolFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *BoolFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *BoolFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *BoolFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *BoolFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *BoolFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *BoolFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *BoolFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *BoolFlag) Names() []string + Names returns the names of the flag + +func (f *BoolFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *BoolFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *BoolFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type CategorizableFlag interface { + VisibleFlag + + GetCategory() string +} + CategorizableFlag is an interface that allows us to potentially use a flag + in a categorized representation. + +type Command struct { + // The name of the command + Name string + // A list of aliases for the command + Aliases []string + // A short description of the usage of this command + Usage string + // Custom text to show on USAGE section of help + UsageText string + // A longer explanation of how the command works + Description string + // Whether this command supports arguments + Args bool + // A short description of the arguments of this command + ArgsUsage string + // The category the command is part of + Category string + // The function to call when checking for bash command completions + BashComplete BashCompleteFunc + // An action to execute before any sub-subcommands are run, but after the context is ready + // If a non-nil error is returned, no sub-subcommands are run + Before BeforeFunc + // An action to execute after any subcommands are run, but after the subcommand has finished + // It is run even if Action() panics + After AfterFunc + // The function to call when this command is invoked + Action ActionFunc + // Execute this function if a usage error occurs. + OnUsageError OnUsageErrorFunc + // List of child commands + Subcommands []*Command + // List of flags to parse + Flags []Flag + + // Treat all flags as normal arguments if true + SkipFlagParsing bool + // Boolean to hide built-in help command and help flag + HideHelp bool + // Boolean to hide built-in help command but keep help flag + // Ignored if HideHelp is true. + HideHelpCommand bool + // Boolean to hide this command from help or completion + Hidden bool + // Boolean to enable short-option handling so user can combine several + // single-character bool arguments into one + // i.e. foobar -o -v -> foobar -ov + UseShortOptionHandling bool + + // Full name of command for help, defaults to full command name, including parent commands. 
+ HelpName string + + // CustomHelpTemplate the text template for the command help topic. + // cli.go uses text/template to render templates. You can + // render custom help text by setting this variable. + CustomHelpTemplate string + + // Has unexported fields. +} + Command is a subcommand for a cli.App. + +func (cmd *Command) Command(name string) *Command + +func (c *Command) FullName() string + FullName returns the full name of the command. For subcommands this ensures + that parent commands are part of the command path + +func (c *Command) HasName(name string) bool + HasName returns true if Command.Name matches given name + +func (c *Command) Names() []string + Names returns the names including short names and aliases. + +func (c *Command) Run(cCtx *Context, arguments ...string) (err error) + +func (c *Command) VisibleCategories() []CommandCategory + VisibleCategories returns a slice of categories and commands that are + Hidden=false + +func (c *Command) VisibleCommands() []*Command + VisibleCommands returns a slice of the Commands with Hidden=false + +func (c *Command) VisibleFlagCategories() []VisibleFlagCategory + VisibleFlagCategories returns a slice containing all the visible flag + categories with the flags they contain + +func (c *Command) VisibleFlags() []Flag + VisibleFlags returns a slice of the Flags with Hidden=false + +type CommandCategories interface { + // AddCommand adds a command to a category, creating a new category if necessary. + AddCommand(category string, command *Command) + // Categories returns a slice of categories sorted by name + Categories() []CommandCategory +} + CommandCategories interface allows for category manipulation + +type CommandCategory interface { + // Name returns the category name string + Name() string + // VisibleCommands returns a slice of the Commands with Hidden=false + VisibleCommands() []*Command +} + CommandCategory is a category containing commands. + +type CommandNotFoundFunc func(*Context, string) + CommandNotFoundFunc is executed if the proper command cannot be found + +type Commands []*Command + +type CommandsByName []*Command + +func (c CommandsByName) Len() int + +func (c CommandsByName) Less(i, j int) bool + +func (c CommandsByName) Swap(i, j int) + +type Context struct { + context.Context + App *App + Command *Command + + // Has unexported fields. +} + Context is a type that is passed through to each Handler action in a cli + application. Context can be used to retrieve context-specific args and + parsed command-line options. + +func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context + NewContext creates a new context. For use in when invoking an App or Command + action. + +func (cCtx *Context) Args() Args + Args returns the command line arguments associated with the context. + +func (cCtx *Context) Bool(name string) bool + Bool looks up the value of a local BoolFlag, returns false if not found + +func (cCtx *Context) Count(name string) int + Count returns the num of occurrences of this flag + +func (cCtx *Context) Duration(name string) time.Duration + Duration looks up the value of a local DurationFlag, returns 0 if not found + +func (cCtx *Context) FlagNames() []string + FlagNames returns a slice of flag names used by the this context and all of + its parent contexts. 
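The typed Context accessors listed here are the usual way an Action reads its parsed flags and positional arguments. A small self-contained sketch, separate from the vendored sources (the command name "greet" and its flag names are illustrative):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "greet",
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "name", Value: "world"},
			&cli.BoolFlag{Name: "shout"},
		},
		Action: func(cCtx *cli.Context) error {
			// Typed lookups return the zero value ("" / false / 0 / nil)
			// when the flag is not found in this context or its parents.
			msg := "hello " + cCtx.String("name")
			if cCtx.Bool("shout") {
				msg = strings.ToUpper(msg)
			}
			fmt.Println(msg, cCtx.Args().Slice())
			return nil
		},
	}
	_ = app.Run(os.Args)
}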
+ +func (cCtx *Context) Float64(name string) float64 + Float64 looks up the value of a local Float64Flag, returns 0 if not found + +func (cCtx *Context) Float64Slice(name string) []float64 + Float64Slice looks up the value of a local Float64SliceFlag, returns nil if + not found + +func (cCtx *Context) Generic(name string) interface{} + Generic looks up the value of a local GenericFlag, returns nil if not found + +func (cCtx *Context) Int(name string) int + Int looks up the value of a local IntFlag, returns 0 if not found + +func (cCtx *Context) Int64(name string) int64 + Int64 looks up the value of a local Int64Flag, returns 0 if not found + +func (cCtx *Context) Int64Slice(name string) []int64 + Int64Slice looks up the value of a local Int64SliceFlag, returns nil if not + found + +func (cCtx *Context) IntSlice(name string) []int + IntSlice looks up the value of a local IntSliceFlag, returns nil if not + found + +func (cCtx *Context) IsSet(name string) bool + IsSet determines if the flag was actually set + +func (cCtx *Context) Lineage() []*Context + Lineage returns *this* context and all of its ancestor contexts in order + from child to parent + +func (cCtx *Context) LocalFlagNames() []string + LocalFlagNames returns a slice of flag names used in this context. + +func (cCtx *Context) NArg() int + NArg returns the number of the command line arguments. + +func (cCtx *Context) NumFlags() int + NumFlags returns the number of flags set + +func (cCtx *Context) Path(name string) string + Path looks up the value of a local PathFlag, returns "" if not found + +func (cCtx *Context) Set(name, value string) error + Set sets a context flag to a value. + +func (cCtx *Context) String(name string) string + String looks up the value of a local StringFlag, returns "" if not found + +func (cCtx *Context) StringSlice(name string) []string + StringSlice looks up the value of a local StringSliceFlag, returns nil if + not found + +func (cCtx *Context) Timestamp(name string) *time.Time + Timestamp gets the timestamp from a flag name + +func (cCtx *Context) Uint(name string) uint + Uint looks up the value of a local UintFlag, returns 0 if not found + +func (cCtx *Context) Uint64(name string) uint64 + Uint64 looks up the value of a local Uint64Flag, returns 0 if not found + +func (cCtx *Context) Uint64Slice(name string) []uint64 + Uint64Slice looks up the value of a local Uint64SliceFlag, returns nil if + not found + +func (cCtx *Context) UintSlice(name string) []uint + UintSlice looks up the value of a local UintSliceFlag, returns nil if not + found + +func (cCtx *Context) Value(name string) interface{} + Value returns the value of the flag corresponding to `name` + +type Countable interface { + Count() int +} + Countable is an interface to enable detection of flag values which support + repetitive flags + +type DocGenerationFlag interface { + Flag + + // TakesValue returns true if the flag takes a value, otherwise false + TakesValue() bool + + // GetUsage returns the usage string for the flag + GetUsage() string + + // GetValue returns the flags value as string representation and an empty + // string if the flag takes no value at all. 
+ GetValue() string + + // GetDefaultText returns the default text for this flag + GetDefaultText() string + + // GetEnvVars returns the env vars for this flag + GetEnvVars() []string +} + DocGenerationFlag is an interface that allows documentation generation for + the flag + +type DocGenerationSliceFlag interface { + DocGenerationFlag + + // IsSliceFlag returns true for flags that can be given multiple times. + IsSliceFlag() bool +} + DocGenerationSliceFlag extends DocGenerationFlag for slice-based flags. + +type DurationFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value time.Duration + Destination *time.Duration + + Aliases []string + EnvVars []string + + Action func(*Context, time.Duration) error + // Has unexported fields. +} + DurationFlag is a flag with type time.Duration + +func (f *DurationFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *DurationFlag) Get(ctx *Context) time.Duration + Get returns the flag’s value in the given Context. + +func (f *DurationFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *DurationFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *DurationFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *DurationFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *DurationFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *DurationFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *DurationFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *DurationFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *DurationFlag) Names() []string + Names returns the names of the flag + +func (f *DurationFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *DurationFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *DurationFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type ErrorFormatter interface { + Format(s fmt.State, verb rune) +} + ErrorFormatter is the interface that will suitably format the error output + +type ExitCoder interface { + error + ExitCode() int +} + ExitCoder is the interface checked by `App` and `Command` for a custom exit + code + +func Exit(message interface{}, exitCode int) ExitCoder + Exit wraps a message and exit code into an error, which by default is + handled with a call to os.Exit during default error handling. + + This is the simplest way to trigger a non-zero exit code for an App + without having to call os.Exit manually. During testing, this behavior + can be avoided by overriding the ExitErrHandler function on an App or the + package-global OsExiter function. + +func NewExitError(message interface{}, exitCode int) ExitCoder + NewExitError calls Exit to create a new ExitCoder. + + Deprecated: This function is a duplicate of Exit and will eventually be + removed. + +type ExitErrHandlerFunc func(cCtx *Context, err error) + ExitErrHandlerFunc is executed if provided in order to handle exitError + values returned by Actions and Before/After functions. 
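The usual way to satisfy the ExitCoder interface described above is cli.Exit; a minimal sketch (the `strict` app and its no-argument check are hypothetical):

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Name: "strict",
            Action: func(cCtx *cli.Context) error {
                if cCtx.NArg() == 0 {
                    // cli.Exit wraps the message and code into an ExitCoder;
                    // default error handling prints the message and calls
                    // os.Exit(86) via the package-global OsExiter.
                    return cli.Exit("no arguments given", 86)
                }
                fmt.Println("ok:", cCtx.Args().Slice())
                return nil
            },
        }
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }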
+
+type Flag interface {
+ fmt.Stringer
+ // Apply Flag settings to the given flag set
+ Apply(*flag.FlagSet) error
+ Names() []string
+ IsSet() bool
+}
+ Flag is a common interface related to parsing flags in cli. For more
+ advanced flag parsing techniques, it is recommended that this interface be
+ implemented.
+
+var BashCompletionFlag Flag = &BoolFlag{
+ Name: "generate-bash-completion",
+ Hidden: true,
+}
+ BashCompletionFlag enables bash-completion for all commands and subcommands
+
+var HelpFlag Flag = &BoolFlag{
+ Name: "help",
+ Aliases: []string{"h"},
+ Usage: "show help",
+ DisableDefaultText: true,
+}
+ HelpFlag prints the help for all commands and subcommands. Set to nil to
+ disable the flag. The subcommand will still be added unless HideHelp or
+ HideHelpCommand is set to true.
+
+var VersionFlag Flag = &BoolFlag{
+ Name: "version",
+ Aliases: []string{"v"},
+ Usage: "print the version",
+ DisableDefaultText: true,
+}
+ VersionFlag prints the version for the application
+
+type FlagCategories interface {
+ // AddFlag adds a flag to a category, creating a new category if necessary.
+ AddFlag(category string, fl Flag)
+ // VisibleCategories returns a slice of visible flag categories sorted by name
+ VisibleCategories() []VisibleFlagCategory
+}
+ FlagCategories interface allows for category manipulation
+
+type FlagEnvHintFunc func(envVars []string, str string) string
+ FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help
+ with the environment variable details.
+
+var FlagEnvHinter FlagEnvHintFunc = withEnvHint
+ FlagEnvHinter annotates flag help message with the environment variable
+ details. This is used by the default FlagStringer.
+
+type FlagFileHintFunc func(filePath, str string) string
+ FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help
+ with the file path details.
+
+var FlagFileHinter FlagFileHintFunc = withFileHint
+ FlagFileHinter annotates flag help message with the file path details.
+ This is used by the default FlagStringer.
+
+type FlagNamePrefixFunc func(fullName []string, placeholder string) string
+ FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix
+ text for a flag's full name.
+
+var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames
+ FlagNamePrefixer converts a full flag name and its placeholder into the help
+ message flag prefix. This is used by the default FlagStringer.
+
+type FlagStringFunc func(Flag) string
+ FlagStringFunc is used by the help generation to display a flag, which is
+ expected to be a single line.
+
+var FlagStringer FlagStringFunc = stringifyFlag
+ FlagStringer converts a flag definition to a string. This is used by help to
+ display a flag.
+
+type FlagsByName []Flag
+ FlagsByName is a slice of Flag.
+
+func (f FlagsByName) Len() int
+
+func (f FlagsByName) Less(i, j int) bool
+
+func (f FlagsByName) Swap(i, j int)
+
+type Float64Flag struct {
+ Name string
+
+ Category string
+ DefaultText string
+ FilePath string
+ Usage string
+
+ Required bool
+ Hidden bool
+ HasBeenSet bool
+
+ Value float64
+ Destination *float64
+
+ Aliases []string
+ EnvVars []string
+
+ Action func(*Context, float64) error
+ // Has unexported fields.
+}
+ Float64Flag is a flag with type float64
+
+func (f *Float64Flag) Apply(set *flag.FlagSet) error
+ Apply populates the flag given the flag set and environment
+
+func (f *Float64Flag) Get(ctx *Context) float64
+ Get returns the flag’s value in the given Context.
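The FlagsByName helper above implements sort.Interface through its Len, Less, and Swap methods, so flag help can be alphabetized with the standard sort package; a brief illustrative sketch:

    package main

    import (
        "log"
        "os"
        "sort"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Name: "sorted",
            Flags: []cli.Flag{
                &cli.StringFlag{Name: "zebra"},
                &cli.StringFlag{Name: "aardvark"},
            },
        }
        // Sorting the flag slice makes the generated help output
        // list flags alphabetically rather than in definition order.
        sort.Sort(cli.FlagsByName(app.Flags))

        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }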
+ +func (f *Float64Flag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Float64Flag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Float64Flag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Float64Flag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Float64Flag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Float64Flag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Float64Flag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Float64Flag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Float64Flag) Names() []string + Names returns the names of the flag + +func (f *Float64Flag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Float64Flag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Float64Flag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Float64Slice struct { + // Has unexported fields. +} + Float64Slice wraps []float64 to satisfy flag.Value + +func NewFloat64Slice(defaults ...float64) *Float64Slice + NewFloat64Slice makes a *Float64Slice with default values + +func (f *Float64Slice) Get() interface{} + Get returns the slice of float64s set by this flag + +func (f *Float64Slice) Serialize() string + Serialize allows Float64Slice to fulfill Serializer + +func (f *Float64Slice) Set(value string) error + Set parses the value into a float64 and appends it to the list of values + +func (f *Float64Slice) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Float64Slice) Value() []float64 + Value returns the slice of float64s set by this flag + +func (f *Float64Slice) WithSeparatorSpec(spec separatorSpec) + +type Float64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Float64Slice + Destination *Float64Slice + + Aliases []string + EnvVars []string + + Action func(*Context, []float64) error + // Has unexported fields. +} + Float64SliceFlag is a flag with type *Float64Slice + +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Float64SliceFlag) Get(ctx *Context) []float64 + Get returns the flag’s value in the given Context. + +func (f *Float64SliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Float64SliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Float64SliceFlag) GetDestination() []float64 + +func (f *Float64SliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Float64SliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Float64SliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
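Slice flags such as Float64SliceFlag accumulate repeated occurrences of the flag on the command line; a minimal sketch (the `stats` app and its `--value` flag are hypothetical):

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Name: "stats",
            Flags: []cli.Flag{
                // Invoked as: stats --value 1.5 --value 2.5 --value 3.0
                &cli.Float64SliceFlag{Name: "value", Aliases: []string{"v"}},
            },
            Action: func(cCtx *cli.Context) error {
                var sum float64
                // Float64Slice returns the accumulated []float64,
                // or nil if the flag was never set.
                for _, v := range cCtx.Float64Slice("value") {
                    sum += v
                }
                fmt.Println("sum:", sum)
                return nil
            },
        }
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }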
+ +func (f *Float64SliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Float64SliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Float64SliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. + +func (f *Float64SliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Float64SliceFlag) Names() []string + Names returns the names of the flag + +func (f *Float64SliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Float64SliceFlag) SetDestination(slice []float64) + +func (f *Float64SliceFlag) SetValue(slice []float64) + +func (f *Float64SliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Float64SliceFlag) TakesValue() bool + TakesValue returns true if the flag takes a value, otherwise false + +func (f *Float64SliceFlag) WithSeparatorSpec(spec separatorSpec) + +type Generic interface { + Set(value string) error + String() string +} + Generic is a generic parseable type identified by a specific flag + +type GenericFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Generic + Destination Generic + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, interface{}) error + // Has unexported fields. +} + GenericFlag is a flag with type Generic + +func (f *GenericFlag) Apply(set *flag.FlagSet) error + Apply takes the flagset and calls Set on the generic flag with the value + provided by the user for parsing by the flag + +func (f *GenericFlag) Get(ctx *Context) interface{} + Get returns the flag’s value in the given Context. + +func (f *GenericFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *GenericFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *GenericFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *GenericFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *GenericFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *GenericFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *GenericFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *GenericFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *GenericFlag) Names() []string + Names returns the names of the flag + +func (f *GenericFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *GenericFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *GenericFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Int64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int64 + Destination *int64 + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, int64) error + // Has unexported fields. 
+} + Int64Flag is a flag with type int64 + +func (f *Int64Flag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Int64Flag) Get(ctx *Context) int64 + Get returns the flag’s value in the given Context. + +func (f *Int64Flag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Int64Flag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Int64Flag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Int64Flag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Int64Flag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Int64Flag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Int64Flag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Int64Flag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Int64Flag) Names() []string + Names returns the names of the flag + +func (f *Int64Flag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Int64Flag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Int64Flag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Int64Slice struct { + // Has unexported fields. +} + Int64Slice wraps []int64 to satisfy flag.Value + +func NewInt64Slice(defaults ...int64) *Int64Slice + NewInt64Slice makes an *Int64Slice with default values + +func (i *Int64Slice) Get() interface{} + Get returns the slice of ints set by this flag + +func (i *Int64Slice) Serialize() string + Serialize allows Int64Slice to fulfill Serializer + +func (i *Int64Slice) Set(value string) error + Set parses the value into an integer and appends it to the list of values + +func (i *Int64Slice) String() string + String returns a readable representation of this value (for usage defaults) + +func (i *Int64Slice) Value() []int64 + Value returns the slice of ints set by this flag + +func (i *Int64Slice) WithSeparatorSpec(spec separatorSpec) + +type Int64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Int64Slice + Destination *Int64Slice + + Aliases []string + EnvVars []string + + Action func(*Context, []int64) error + // Has unexported fields. +} + Int64SliceFlag is a flag with type *Int64Slice + +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Int64SliceFlag) Get(ctx *Context) []int64 + Get returns the flag’s value in the given Context. + +func (f *Int64SliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Int64SliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Int64SliceFlag) GetDestination() []int64 + +func (f *Int64SliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Int64SliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Int64SliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
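Defaults for slice flags are seeded through the wrapper constructors such as NewInt64Slice above; a short sketch with hypothetical names:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Name: "ports",
            Flags: []cli.Flag{
                // NewInt64Slice provides the flag's default values;
                // they are replaced when the flag is set explicitly.
                &cli.Int64SliceFlag{
                    Name:  "port",
                    Value: cli.NewInt64Slice(8080, 8443),
                },
            },
            Action: func(cCtx *cli.Context) error {
                fmt.Println("ports:", cCtx.Int64Slice("port"))
                return nil
            },
        }
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }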
+ +func (f *Int64SliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Int64SliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Int64SliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. + +func (f *Int64SliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Int64SliceFlag) Names() []string + Names returns the names of the flag + +func (f *Int64SliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Int64SliceFlag) SetDestination(slice []int64) + +func (f *Int64SliceFlag) SetValue(slice []int64) + +func (f *Int64SliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Int64SliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *Int64SliceFlag) WithSeparatorSpec(spec separatorSpec) + +type IntFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int + Destination *int + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, int) error + // Has unexported fields. +} + IntFlag is a flag with type int + +func (f *IntFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *IntFlag) Get(ctx *Context) int + Get returns the flag’s value in the given Context. + +func (f *IntFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *IntFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *IntFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *IntFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *IntFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *IntFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *IntFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *IntFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *IntFlag) Names() []string + Names returns the names of the flag + +func (f *IntFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *IntFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *IntFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type IntSlice struct { + // Has unexported fields. +} + IntSlice wraps []int to satisfy flag.Value + +func NewIntSlice(defaults ...int) *IntSlice + NewIntSlice makes an *IntSlice with default values + +func (i *IntSlice) Get() interface{} + Get returns the slice of ints set by this flag + +func (i *IntSlice) Serialize() string + Serialize allows IntSlice to fulfill Serializer + +func (i *IntSlice) Set(value string) error + Set parses the value into an integer and appends it to the list of values + +func (i *IntSlice) SetInt(value int) + TODO: Consistently have specific Set function for Int64 and Float64 ? 
SetInt + directly adds an integer to the list of values + +func (i *IntSlice) String() string + String returns a readable representation of this value (for usage defaults) + +func (i *IntSlice) Value() []int + Value returns the slice of ints set by this flag + +func (i *IntSlice) WithSeparatorSpec(spec separatorSpec) + +type IntSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *IntSlice + Destination *IntSlice + + Aliases []string + EnvVars []string + + Action func(*Context, []int) error + // Has unexported fields. +} + IntSliceFlag is a flag with type *IntSlice + +func (f *IntSliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *IntSliceFlag) Get(ctx *Context) []int + Get returns the flag’s value in the given Context. + +func (f *IntSliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *IntSliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *IntSliceFlag) GetDestination() []int + +func (f *IntSliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *IntSliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *IntSliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *IntSliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *IntSliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *IntSliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. + +func (f *IntSliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *IntSliceFlag) Names() []string + Names returns the names of the flag + +func (f *IntSliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *IntSliceFlag) SetDestination(slice []int) + +func (f *IntSliceFlag) SetValue(slice []int) + +func (f *IntSliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *IntSliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *IntSliceFlag) WithSeparatorSpec(spec separatorSpec) + +type InvalidFlagAccessFunc func(*Context, string) + InvalidFlagAccessFunc is executed when an invalid flag is accessed from the + context. + +type MultiError interface { + error + Errors() []error +} + MultiError is an error that wraps multiple errors. + +type MultiFloat64Flag = SliceFlag[*Float64SliceFlag, []float64, float64] + MultiFloat64Flag extends Float64SliceFlag with support for using slices + directly, as Value and/or Destination. See also SliceFlag. + +type MultiInt64Flag = SliceFlag[*Int64SliceFlag, []int64, int64] + MultiInt64Flag extends Int64SliceFlag with support for using slices + directly, as Value and/or Destination. See also SliceFlag. + +type MultiIntFlag = SliceFlag[*IntSliceFlag, []int, int] + MultiIntFlag extends IntSliceFlag with support for using slices directly, + as Value and/or Destination. See also SliceFlag. + +type MultiStringFlag = SliceFlag[*StringSliceFlag, []string, string] + MultiStringFlag extends StringSliceFlag with support for using slices + directly, as Value and/or Destination. 
See also SliceFlag.
+
+type OnUsageErrorFunc func(cCtx *Context, err error, isSubcommand bool) error
+ OnUsageErrorFunc is executed if a usage error occurs. This is useful for
+ displaying customized usage error messages. This function is able to replace
+ the original error messages. If this function is not set, the "Incorrect
+ usage" message is displayed and the execution is interrupted.
+
+type Path = string
+
+type PathFlag struct {
+ Name string
+
+ Category string
+ DefaultText string
+ FilePath string
+ Usage string
+
+ Required bool
+ Hidden bool
+ HasBeenSet bool
+
+ Value Path
+ Destination *Path
+
+ Aliases []string
+ EnvVars []string
+
+ TakesFile bool
+
+ Action func(*Context, Path) error
+ // Has unexported fields.
+}
+ PathFlag is a flag with type Path
+
+func (f *PathFlag) Apply(set *flag.FlagSet) error
+ Apply populates the flag given the flag set and environment
+
+func (f *PathFlag) Get(ctx *Context) string
+ Get returns the flag’s value in the given Context.
+
+func (f *PathFlag) GetCategory() string
+ GetCategory returns the category for the flag
+
+func (f *PathFlag) GetDefaultText() string
+ GetDefaultText returns the default text for this flag
+
+func (f *PathFlag) GetEnvVars() []string
+ GetEnvVars returns the env vars for this flag
+
+func (f *PathFlag) GetUsage() string
+ GetUsage returns the usage string for the flag
+
+func (f *PathFlag) GetValue() string
+ GetValue returns the flags value as string representation and an empty
+ string if the flag takes no value at all.
+
+func (f *PathFlag) IsRequired() bool
+ IsRequired returns whether or not the flag is required
+
+func (f *PathFlag) IsSet() bool
+ IsSet returns whether or not the flag has been set through env or file
+
+func (f *PathFlag) IsVisible() bool
+ IsVisible returns true if the flag is not hidden, otherwise false
+
+func (f *PathFlag) Names() []string
+ Names returns the names of the flag
+
+func (f *PathFlag) RunAction(c *Context) error
+ RunAction executes flag action if set
+
+func (f *PathFlag) String() string
+ String returns a readable representation of this value (for usage defaults)
+
+func (f *PathFlag) TakesValue() bool
+ TakesValue returns true if the flag takes a value, otherwise false
+
+type RequiredFlag interface {
+ Flag
+
+ IsRequired() bool
+}
+ RequiredFlag is an interface that allows us to mark flags as required;
+ it allows required flags to remain backwards compatible with the Flag
+ interface
+
+type Serializer interface {
+ Serialize() string
+}
+ Serializer is used to circumvent the limitations of flag.FlagSet.Set
+
+type SliceFlag[T SliceFlagTarget[E], S ~[]E, E any] struct {
+ Target T
+ Value S
+ Destination *S
+}
+ SliceFlag extends implementations like StringSliceFlag and IntSliceFlag
+ with support for using slices directly, as Value and/or Destination.
+ See also SliceFlagTarget, MultiStringFlag, MultiFloat64Flag, MultiInt64Flag,
+ MultiIntFlag.
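The generic SliceFlag above, and aliases like MultiStringFlag, let a plain Go slice serve as Value and Destination: Value seeds the defaults, and Destination receives every update. A sketch, with a hypothetical `--tag` flag:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        var dest []string

        app := &cli.App{
            Name: "multi",
            Flags: []cli.Flag{
                // MultiStringFlag = SliceFlag[*StringSliceFlag, []string, string]:
                // the wrapped StringSliceFlag does the parsing, while the
                // plain []string fields carry the data in and out.
                &cli.MultiStringFlag{
                    Target:      &cli.StringSliceFlag{Name: "tag"},
                    Value:       []string{"default"},
                    Destination: &dest,
                },
            },
            Action: func(cCtx *cli.Context) error {
                fmt.Println("tags:", dest)
                return nil
            },
        }
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }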
+ +func (x *SliceFlag[T, S, E]) Apply(set *flag.FlagSet) error + +func (x *SliceFlag[T, S, E]) GetCategory() string + +func (x *SliceFlag[T, S, E]) GetDefaultText() string + +func (x *SliceFlag[T, S, E]) GetDestination() S + +func (x *SliceFlag[T, S, E]) GetEnvVars() []string + +func (x *SliceFlag[T, S, E]) GetUsage() string + +func (x *SliceFlag[T, S, E]) GetValue() string + +func (x *SliceFlag[T, S, E]) IsRequired() bool + +func (x *SliceFlag[T, S, E]) IsSet() bool + +func (x *SliceFlag[T, S, E]) IsVisible() bool + +func (x *SliceFlag[T, S, E]) Names() []string + +func (x *SliceFlag[T, S, E]) SetDestination(slice S) + +func (x *SliceFlag[T, S, E]) SetValue(slice S) + +func (x *SliceFlag[T, S, E]) String() string + +func (x *SliceFlag[T, S, E]) TakesValue() bool + +type SliceFlagTarget[E any] interface { + Flag + RequiredFlag + DocGenerationFlag + VisibleFlag + CategorizableFlag + + // SetValue should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~[]E). + SetValue(slice []E) + // SetDestination should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~*[]E). + SetDestination(slice []E) + // GetDestination should return the current value referenced by any destination, or nil if nil/unset. + GetDestination() []E +} + SliceFlagTarget models a target implementation for use with SliceFlag. The + three methods, SetValue, SetDestination, and GetDestination, are necessary + to propagate Value and Destination, where Value is propagated inwards + (initially), and Destination is propagated outwards (on every update). + +type StringFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value string + Destination *string + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, string) error + // Has unexported fields. +} + StringFlag is a flag with type string + +func (f *StringFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *StringFlag) Get(ctx *Context) string + Get returns the flag’s value in the given Context. + +func (f *StringFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *StringFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *StringFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *StringFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *StringFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
+ +func (f *StringFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *StringFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *StringFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *StringFlag) Names() []string + Names returns the names of the flag + +func (f *StringFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *StringFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *StringFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type StringSlice struct { + // Has unexported fields. +} + StringSlice wraps a []string to satisfy flag.Value + +func NewStringSlice(defaults ...string) *StringSlice + NewStringSlice creates a *StringSlice with default values + +func (s *StringSlice) Get() interface{} + Get returns the slice of strings set by this flag + +func (s *StringSlice) Serialize() string + Serialize allows StringSlice to fulfill Serializer + +func (s *StringSlice) Set(value string) error + Set appends the string value to the list of values + +func (s *StringSlice) String() string + String returns a readable representation of this value (for usage defaults) + +func (s *StringSlice) Value() []string + Value returns the slice of strings set by this flag + +func (s *StringSlice) WithSeparatorSpec(spec separatorSpec) + +type StringSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *StringSlice + Destination *StringSlice + + Aliases []string + EnvVars []string + + TakesFile bool + + Action func(*Context, []string) error + + KeepSpace bool + // Has unexported fields. +} + StringSliceFlag is a flag with type *StringSlice + +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *StringSliceFlag) Get(ctx *Context) []string + Get returns the flag’s value in the given Context. + +func (f *StringSliceFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *StringSliceFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *StringSliceFlag) GetDestination() []string + +func (f *StringSliceFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *StringSliceFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *StringSliceFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *StringSliceFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *StringSliceFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *StringSliceFlag) IsSliceFlag() bool + IsSliceFlag implements DocGenerationSliceFlag. 
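StringSliceFlag values can also arrive through the environment via the EnvVars field; a sketch assuming a hypothetical APP_LANG variable:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Name: "langs",
            Flags: []cli.Flag{
                // With EnvVars set, APP_LANG="go,rust" fills the flag when
                // the command line does not (values split on the default
                // "," separator); IsSet reports either source.
                &cli.StringSliceFlag{
                    Name:    "lang",
                    EnvVars: []string{"APP_LANG"},
                },
            },
            Action: func(cCtx *cli.Context) error {
                if cCtx.IsSet("lang") {
                    fmt.Println("languages:", cCtx.StringSlice("lang"))
                }
                return nil
            },
        }
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }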
+ +func (f *StringSliceFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *StringSliceFlag) Names() []string + Names returns the names of the flag + +func (f *StringSliceFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *StringSliceFlag) SetDestination(slice []string) + +func (f *StringSliceFlag) SetValue(slice []string) + +func (f *StringSliceFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *StringSliceFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +func (f *StringSliceFlag) WithSeparatorSpec(spec separatorSpec) + +type SuggestCommandFunc func(commands []*Command, provided string) string + +type SuggestFlagFunc func(flags []Flag, provided string, hideHelp bool) string + +type Timestamp struct { + // Has unexported fields. +} + Timestamp wrap to satisfy golang's flag interface. + +func NewTimestamp(timestamp time.Time) *Timestamp + Timestamp constructor + +func (t *Timestamp) Get() interface{} + Get returns the flag structure + +func (t *Timestamp) Set(value string) error + Parses the string value to timestamp + +func (t *Timestamp) SetLayout(layout string) + Set the timestamp string layout for future parsing + +func (t *Timestamp) SetLocation(loc *time.Location) + Set perceived timezone of the to-be parsed time string + +func (t *Timestamp) SetTimestamp(value time.Time) + Set the timestamp value directly + +func (t *Timestamp) String() string + String returns a readable representation of this value (for usage defaults) + +func (t *Timestamp) Value() *time.Time + Value returns the timestamp value stored in the flag + +type TimestampFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Timestamp + Destination *Timestamp + + Aliases []string + EnvVars []string + + Layout string + + Timezone *time.Location + + Action func(*Context, *time.Time) error + // Has unexported fields. +} + TimestampFlag is a flag with type *Timestamp + +func (f *TimestampFlag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *TimestampFlag) Get(ctx *Context) *time.Time + Get returns the flag’s value in the given Context. + +func (f *TimestampFlag) GetCategory() string + GetCategory returns the category for the flag + +func (f *TimestampFlag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *TimestampFlag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *TimestampFlag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *TimestampFlag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. 
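TimestampFlag parses its value with the Layout field above, which follows Go's reference-time format, and stores the result in a Timestamp; a sketch with hypothetical names:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/urfave/cli/v2"
    )

    func main() {
        app := &cli.App{
            Name: "when",
            Flags: []cli.Flag{
                // Layout uses Go's reference time; invoked as:
                //   when --meeting 2024-01-02T15:04:05Z
                &cli.TimestampFlag{
                    Name:   "meeting",
                    Layout: "2006-01-02T15:04:05Z07:00",
                },
            },
            Action: func(cCtx *cli.Context) error {
                // Timestamp returns *time.Time, nil when the flag is unset.
                if t := cCtx.Timestamp("meeting"); t != nil {
                    fmt.Println("meeting at", t.UTC())
                }
                return nil
            },
        }
        if err := app.Run(os.Args); err != nil {
            log.Fatal(err)
        }
    }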
+ +func (f *TimestampFlag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *TimestampFlag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *TimestampFlag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *TimestampFlag) Names() []string + Names returns the names of the flag + +func (f *TimestampFlag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *TimestampFlag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *TimestampFlag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Uint64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint64 + Destination *uint64 + + Aliases []string + EnvVars []string + + Base int + + Action func(*Context, uint64) error + // Has unexported fields. +} + Uint64Flag is a flag with type uint64 + +func (f *Uint64Flag) Apply(set *flag.FlagSet) error + Apply populates the flag given the flag set and environment + +func (f *Uint64Flag) Get(ctx *Context) uint64 + Get returns the flag’s value in the given Context. + +func (f *Uint64Flag) GetCategory() string + GetCategory returns the category for the flag + +func (f *Uint64Flag) GetDefaultText() string + GetDefaultText returns the default text for this flag + +func (f *Uint64Flag) GetEnvVars() []string + GetEnvVars returns the env vars for this flag + +func (f *Uint64Flag) GetUsage() string + GetUsage returns the usage string for the flag + +func (f *Uint64Flag) GetValue() string + GetValue returns the flags value as string representation and an empty + string if the flag takes no value at all. + +func (f *Uint64Flag) IsRequired() bool + IsRequired returns whether or not the flag is required + +func (f *Uint64Flag) IsSet() bool + IsSet returns whether or not the flag has been set through env or file + +func (f *Uint64Flag) IsVisible() bool + IsVisible returns true if the flag is not hidden, otherwise false + +func (f *Uint64Flag) Names() []string + Names returns the names of the flag + +func (f *Uint64Flag) RunAction(c *Context) error + RunAction executes flag action if set + +func (f *Uint64Flag) String() string + String returns a readable representation of this value (for usage defaults) + +func (f *Uint64Flag) TakesValue() bool + TakesValue returns true of the flag takes a value, otherwise false + +type Uint64Slice struct { + // Has unexported fields. 
+}
+ Uint64Slice wraps []uint64 to satisfy flag.Value
+
+func NewUint64Slice(defaults ...uint64) *Uint64Slice
+ NewUint64Slice makes an *Uint64Slice with default values
+
+func (i *Uint64Slice) Get() interface{}
+ Get returns the slice of uint64s set by this flag
+
+func (i *Uint64Slice) Serialize() string
+ Serialize allows Uint64Slice to fulfill Serializer
+
+func (i *Uint64Slice) Set(value string) error
+ Set parses the value into an integer and appends it to the list of values
+
+func (i *Uint64Slice) String() string
+ String returns a readable representation of this value (for usage defaults)
+
+func (i *Uint64Slice) Value() []uint64
+ Value returns the slice of uint64s set by this flag
+
+func (i *Uint64Slice) WithSeparatorSpec(spec separatorSpec)
+
+type Uint64SliceFlag struct {
+ Name string
+
+ Category string
+ DefaultText string
+ FilePath string
+ Usage string
+
+ Required bool
+ Hidden bool
+ HasBeenSet bool
+
+ Value *Uint64Slice
+ Destination *Uint64Slice
+
+ Aliases []string
+ EnvVars []string
+
+ Action func(*Context, []uint64) error
+ // Has unexported fields.
+}
+ Uint64SliceFlag is a flag with type *Uint64Slice
+
+func (f *Uint64SliceFlag) Apply(set *flag.FlagSet) error
+ Apply populates the flag given the flag set and environment
+
+func (f *Uint64SliceFlag) Get(ctx *Context) []uint64
+ Get returns the flag’s value in the given Context.
+
+func (f *Uint64SliceFlag) GetCategory() string
+ GetCategory returns the category for the flag
+
+func (f *Uint64SliceFlag) GetDefaultText() string
+ GetDefaultText returns the default text for this flag
+
+func (f *Uint64SliceFlag) GetEnvVars() []string
+ GetEnvVars returns the env vars for this flag
+
+func (f *Uint64SliceFlag) GetUsage() string
+ GetUsage returns the usage string for the flag
+
+func (f *Uint64SliceFlag) GetValue() string
+ GetValue returns the flags value as string representation and an empty
+ string if the flag takes no value at all.
+
+func (f *Uint64SliceFlag) IsRequired() bool
+ IsRequired returns whether or not the flag is required
+
+func (f *Uint64SliceFlag) IsSet() bool
+ IsSet returns whether or not the flag has been set through env or file
+
+func (f *Uint64SliceFlag) IsSliceFlag() bool
+ IsSliceFlag implements DocGenerationSliceFlag.
+
+func (f *Uint64SliceFlag) IsVisible() bool
+ IsVisible returns true if the flag is not hidden, otherwise false
+
+func (f *Uint64SliceFlag) Names() []string
+ Names returns the names of the flag
+
+func (f *Uint64SliceFlag) RunAction(c *Context) error
+ RunAction executes flag action if set
+
+func (f *Uint64SliceFlag) String() string
+ String returns a readable representation of this value (for usage defaults)
+
+func (f *Uint64SliceFlag) TakesValue() bool
+ TakesValue returns true if the flag takes a value, otherwise false
+
+func (f *Uint64SliceFlag) WithSeparatorSpec(spec separatorSpec)
+
+type UintFlag struct {
+ Name string
+
+ Category string
+ DefaultText string
+ FilePath string
+ Usage string
+
+ Required bool
+ Hidden bool
+ HasBeenSet bool
+
+ Value uint
+ Destination *uint
+
+ Aliases []string
+ EnvVars []string
+
+ Base int
+
+ Action func(*Context, uint) error
+ // Has unexported fields.
+}
+ UintFlag is a flag with type uint
+
+func (f *UintFlag) Apply(set *flag.FlagSet) error
+ Apply populates the flag given the flag set and environment
+
+func (f *UintFlag) Get(ctx *Context) uint
+ Get returns the flag’s value in the given Context.
+
+func (f *UintFlag) GetCategory() string
+ GetCategory returns the category for the flag
+
+func (f *UintFlag) GetDefaultText() string
+ GetDefaultText returns the default text for this flag
+
+func (f *UintFlag) GetEnvVars() []string
+ GetEnvVars returns the env vars for this flag
+
+func (f *UintFlag) GetUsage() string
+ GetUsage returns the usage string for the flag
+
+func (f *UintFlag) GetValue() string
+ GetValue returns the flags value as string representation and an empty
+ string if the flag takes no value at all.
+
+func (f *UintFlag) IsRequired() bool
+ IsRequired returns whether or not the flag is required
+
+func (f *UintFlag) IsSet() bool
+ IsSet returns whether or not the flag has been set through env or file
+
+func (f *UintFlag) IsVisible() bool
+ IsVisible returns true if the flag is not hidden, otherwise false
+
+func (f *UintFlag) Names() []string
+ Names returns the names of the flag
+
+func (f *UintFlag) RunAction(c *Context) error
+ RunAction executes flag action if set
+
+func (f *UintFlag) String() string
+ String returns a readable representation of this value (for usage defaults)
+
+func (f *UintFlag) TakesValue() bool
+ TakesValue returns true if the flag takes a value, otherwise false
+
+type UintSlice struct {
+ // Has unexported fields.
+}
+ UintSlice wraps []uint to satisfy flag.Value
+
+func NewUintSlice(defaults ...uint) *UintSlice
+ NewUintSlice makes an *UintSlice with default values
+
+func (i *UintSlice) Get() interface{}
+ Get returns the slice of uints set by this flag
+
+func (i *UintSlice) Serialize() string
+ Serialize allows UintSlice to fulfill Serializer
+
+func (i *UintSlice) Set(value string) error
+ Set parses the value into an integer and appends it to the list of values
+
+func (i *UintSlice) SetUint(value uint)
+ TODO: Consistently have specific Set function for Int64 and Float64 ? SetUint
+ directly adds a uint to the list of values
+
+func (i *UintSlice) String() string
+ String returns a readable representation of this value (for usage defaults)
+
+func (i *UintSlice) Value() []uint
+ Value returns the slice of uints set by this flag
+
+func (i *UintSlice) WithSeparatorSpec(spec separatorSpec)
+
+type UintSliceFlag struct {
+ Name string
+
+ Category string
+ DefaultText string
+ FilePath string
+ Usage string
+
+ Required bool
+ Hidden bool
+ HasBeenSet bool
+
+ Value *UintSlice
+ Destination *UintSlice
+
+ Aliases []string
+ EnvVars []string
+
+ Action func(*Context, []uint) error
+ // Has unexported fields.
+}
+ UintSliceFlag is a flag with type *UintSlice
+
+func (f *UintSliceFlag) Apply(set *flag.FlagSet) error
+ Apply populates the flag given the flag set and environment
+
+func (f *UintSliceFlag) Get(ctx *Context) []uint
+ Get returns the flag’s value in the given Context.
+
+func (f *UintSliceFlag) GetCategory() string
+ GetCategory returns the category for the flag
+
+func (f *UintSliceFlag) GetDefaultText() string
+ GetDefaultText returns the default text for this flag
+
+func (f *UintSliceFlag) GetEnvVars() []string
+ GetEnvVars returns the env vars for this flag
+
+func (f *UintSliceFlag) GetUsage() string
+ GetUsage returns the usage string for the flag
+
+func (f *UintSliceFlag) GetValue() string
+ GetValue returns the flags value as string representation and an empty
+ string if the flag takes no value at all.
+
+func (f *UintSliceFlag) IsRequired() bool
+ IsRequired returns whether or not the flag is required
+
+func (f *UintSliceFlag) IsSet() bool
+ IsSet returns whether or not the flag has been set through env or file
+
+func (f *UintSliceFlag) IsSliceFlag() bool
+ IsSliceFlag implements DocGenerationSliceFlag.
+
+func (f *UintSliceFlag) IsVisible() bool
+ IsVisible returns true if the flag is not hidden, otherwise false
+
+func (f *UintSliceFlag) Names() []string
+ Names returns the names of the flag
+
+func (f *UintSliceFlag) RunAction(c *Context) error
+ RunAction executes flag action if set
+
+func (f *UintSliceFlag) String() string
+ String returns a readable representation of this value (for usage defaults)
+
+func (f *UintSliceFlag) TakesValue() bool
+ TakesValue returns true if the flag takes a value, otherwise false
+
+func (f *UintSliceFlag) WithSeparatorSpec(spec separatorSpec)
+
+type VisibleFlag interface {
+ Flag
+
+ // IsVisible returns true if the flag is not hidden, otherwise false
+ IsVisible() bool
+}
+ VisibleFlag is an interface that allows checking whether a flag is visible
+
+type VisibleFlagCategory interface {
+ // Name returns the category name string
+ Name() string
+ // Flags returns a slice of VisibleFlag sorted by name
+ Flags() []VisibleFlag
+}
+ VisibleFlagCategory is a category containing flags.
+
+package altsrc // import "github.com/urfave/cli/v2/altsrc"
+
+
+FUNCTIONS
+
+func ApplyInputSourceValues(cCtx *cli.Context, inputSourceContext InputSourceContext, flags []cli.Flag) error
+ ApplyInputSourceValues iterates over all provided flags and executes
+ ApplyInputSourceValue on flags implementing the FlagInputSourceExtension
+ interface to initialize these flags to an alternate input source.
+
+func InitInputSource(flags []cli.Flag, createInputSource func() (InputSourceContext, error)) cli.BeforeFunc
+ InitInputSource is used to set up an InputSourceContext on a cli.Command
+ Before method. It will create a new input source based on the func provided.
+ If there is no error, it will then apply the new input source to any flags
+ that are supported by the input source
+
+func InitInputSourceWithContext(flags []cli.Flag, createInputSource func(cCtx *cli.Context) (InputSourceContext, error)) cli.BeforeFunc
+ InitInputSourceWithContext is used to set up an InputSourceContext on
+ a cli.Command Before method. It will create a new input source based on
+ the func provided, potentially using existing cli.Context values to
+ initialize itself. If there is no error, it will then apply the new input
+ source to any flags that are supported by the input source
+
+func NewJSONSourceFromFlagFunc(flag string) func(c *cli.Context) (InputSourceContext, error)
+ NewJSONSourceFromFlagFunc returns a func that takes a cli.Context and
+ returns an InputSourceContext suitable for retrieving config variables from
+ a file containing JSON data with the file name defined by the given flag.
+
+func NewTomlSourceFromFlagFunc(flagFileName string) func(cCtx *cli.Context) (InputSourceContext, error)
+ NewTomlSourceFromFlagFunc creates a new TOML InputSourceContext from a
+ provided flag name and source context.
+
+func NewYamlSourceFromFlagFunc(flagFileName string) func(cCtx *cli.Context) (InputSourceContext, error)
+ NewYamlSourceFromFlagFunc creates a new Yaml InputSourceContext from a
+ provided flag name and source context.
+
+
+TYPES
+
+type BoolFlag struct {
+ *cli.BoolFlag
+ // Has unexported fields.
+} + BoolFlag is the flag type that wraps cli.BoolFlag to allow for other values + to be specified + +func NewBoolFlag(fl *cli.BoolFlag) *BoolFlag + NewBoolFlag creates a new BoolFlag + +func (f *BoolFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + BoolFlag.Apply + +func (f *BoolFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Bool value to the flagSet if required + +type DurationFlag struct { + *cli.DurationFlag + // Has unexported fields. +} + DurationFlag is the flag type that wraps cli.DurationFlag to allow for other + values to be specified + +func NewDurationFlag(fl *cli.DurationFlag) *DurationFlag + NewDurationFlag creates a new DurationFlag + +func (f *DurationFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + DurationFlag.Apply + +func (f *DurationFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Duration value to the flagSet if required + +type FlagInputSourceExtension interface { + cli.Flag + ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error +} + FlagInputSourceExtension is an extension interface of cli.Flag that allows a + value to be set on the existing parsed flags. + +type Float64Flag struct { + *cli.Float64Flag + // Has unexported fields. +} + Float64Flag is the flag type that wraps cli.Float64Flag to allow for other + values to be specified + +func NewFloat64Flag(fl *cli.Float64Flag) *Float64Flag + NewFloat64Flag creates a new Float64Flag + +func (f *Float64Flag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Float64Flag.Apply + +func (f *Float64Flag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Float64 value to the flagSet if required + +type Float64SliceFlag struct { + *cli.Float64SliceFlag + // Has unexported fields. +} + Float64SliceFlag is the flag type that wraps cli.Float64SliceFlag to allow + for other values to be specified + +func NewFloat64SliceFlag(fl *cli.Float64SliceFlag) *Float64SliceFlag + NewFloat64SliceFlag creates a new Float64SliceFlag + +func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Float64SliceFlag.Apply + +func (f *Float64SliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Float64Slice value if required + +type GenericFlag struct { + *cli.GenericFlag + // Has unexported fields. 
+} + GenericFlag is the flag type that wraps cli.GenericFlag to allow for other + values to be specified + +func NewGenericFlag(fl *cli.GenericFlag) *GenericFlag + NewGenericFlag creates a new GenericFlag + +func (f *GenericFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + GenericFlag.Apply + +func (f *GenericFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a generic value to the flagSet if required + +type InputSourceContext interface { + Source() string + + Int(name string) (int, error) + Int64(name string) (int64, error) + Uint(name string) (uint, error) + Uint64(name string) (uint64, error) + Duration(name string) (time.Duration, error) + Float64(name string) (float64, error) + String(name string) (string, error) + StringSlice(name string) ([]string, error) + IntSlice(name string) ([]int, error) + Int64Slice(name string) ([]int64, error) + Float64Slice(name string) ([]float64, error) + Generic(name string) (cli.Generic, error) + Bool(name string) (bool, error) + + // Has unexported methods. +} + InputSourceContext is an interface used to allow other input sources to be + implemented as needed. + + Source returns an identifier for the input source. In case of file source it + should return path to the file. + +func NewJSONSource(data []byte) (InputSourceContext, error) + NewJSONSource returns an InputSourceContext suitable for retrieving config + variables from raw JSON data. + +func NewJSONSourceFromFile(f string) (InputSourceContext, error) + NewJSONSourceFromFile returns an InputSourceContext suitable for retrieving + config variables from a file (or url) containing JSON data. + +func NewJSONSourceFromReader(r io.Reader) (InputSourceContext, error) + NewJSONSourceFromReader returns an InputSourceContext suitable for + retrieving config variables from an io.Reader that returns JSON data. + +func NewTomlSourceFromFile(file string) (InputSourceContext, error) + NewTomlSourceFromFile creates a new TOML InputSourceContext from a filepath. + +func NewYamlSourceFromFile(file string) (InputSourceContext, error) + NewYamlSourceFromFile creates a new Yaml InputSourceContext from a filepath. + +type Int64Flag struct { + *cli.Int64Flag + // Has unexported fields. +} + Int64Flag is the flag type that wraps cli.Int64Flag to allow for other + values to be specified + +func NewInt64Flag(fl *cli.Int64Flag) *Int64Flag + NewInt64Flag creates a new Int64Flag + +func (f *Int64Flag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Int64Flag.Apply + +func (f *Int64Flag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + +type Int64SliceFlag struct { + *cli.Int64SliceFlag + // Has unexported fields. +} + Int64SliceFlag is the flag type that wraps cli.Int64SliceFlag to allow for + other values to be specified + +func NewInt64SliceFlag(fl *cli.Int64SliceFlag) *Int64SliceFlag + NewInt64SliceFlag creates a new Int64SliceFlag + +func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Int64SliceFlag.Apply + +func (f *Int64SliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Int64Slice value if required + +type IntFlag struct { + *cli.IntFlag + // Has unexported fields. 
+}
+ IntFlag is the flag type that wraps cli.IntFlag to allow for other values to
+ be specified
+
+func NewIntFlag(fl *cli.IntFlag) *IntFlag
+ NewIntFlag creates a new IntFlag
+
+func (f *IntFlag) Apply(set *flag.FlagSet) error
+ Apply saves the flagSet for later usage calls, then calls the wrapped
+ IntFlag.Apply
+
+func (f *IntFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error
+ ApplyInputSourceValue applies an int value to the flagSet if required
+
+type IntSliceFlag struct {
+ *cli.IntSliceFlag
+ // Has unexported fields.
+}
+ IntSliceFlag is the flag type that wraps cli.IntSliceFlag to allow for other
+ values to be specified
+
+func NewIntSliceFlag(fl *cli.IntSliceFlag) *IntSliceFlag
+ NewIntSliceFlag creates a new IntSliceFlag
+
+func (f *IntSliceFlag) Apply(set *flag.FlagSet) error
+ Apply saves the flagSet for later usage calls, then calls the wrapped
+ IntSliceFlag.Apply
+
+func (f *IntSliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error
+ ApplyInputSourceValue applies an IntSlice value if required
+
+type MapInputSource struct {
+ // Has unexported fields.
+}
+ MapInputSource implements InputSourceContext to return data from the map
+ that is loaded.
+
+func NewMapInputSource(file string, valueMap map[interface{}]interface{}) *MapInputSource
+ NewMapInputSource creates a new MapInputSource for implementing custom input
+ sources.
+
+func (fsm *MapInputSource) Bool(name string) (bool, error)
+ Bool returns a bool from the map if it exists, otherwise returns false
+
+func (fsm *MapInputSource) Duration(name string) (time.Duration, error)
+ Duration returns a duration from the map if it exists otherwise returns 0
+
+func (fsm *MapInputSource) Float64(name string) (float64, error)
+ Float64 returns a float64 from the map if it exists otherwise returns 0
+
+func (fsm *MapInputSource) Float64Slice(name string) ([]float64, error)
+ Float64Slice returns a []float64 from the map if it exists otherwise
+ returns nil
+
+func (fsm *MapInputSource) Generic(name string) (cli.Generic, error)
+ Generic returns a cli.Generic from the map if it exists otherwise returns
+ nil
+
+func (fsm *MapInputSource) Int(name string) (int, error)
+ Int returns an int from the map if it exists otherwise returns 0
+
+func (fsm *MapInputSource) Int64(name string) (int64, error)
+ Int64 returns an int64 from the map if it exists otherwise returns 0
+
+func (fsm *MapInputSource) Int64Slice(name string) ([]int64, error)
+ Int64Slice returns a []int64 from the map if it exists otherwise returns
+ nil
+
+func (fsm *MapInputSource) IntSlice(name string) ([]int, error)
+ IntSlice returns a []int from the map if it exists otherwise returns nil
+
+func (fsm *MapInputSource) Source() string
+ Source returns the path of the source file
+
+func (fsm *MapInputSource) String(name string) (string, error)
+ String returns a string from the map if it exists otherwise returns an empty
+ string
+
+func (fsm *MapInputSource) StringSlice(name string) ([]string, error)
+ StringSlice returns a []string from the map if it exists otherwise returns
+ nil
+
+func (fsm *MapInputSource) Uint(name string) (uint, error)
+ Uint returns a uint from the map if it exists otherwise returns 0
+
+func (fsm *MapInputSource) Uint64(name string) (uint64, error)
+ Uint64 returns a uint64 from the map if it exists otherwise returns 0
+
+type PathFlag struct {
+ *cli.PathFlag
+ // Has unexported fields.
+} + PathFlag is the flag type that wraps cli.PathFlag to allow for other values + to be specified + +func NewPathFlag(fl *cli.PathFlag) *PathFlag + NewPathFlag creates a new PathFlag + +func (f *PathFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + PathFlag.Apply + +func (f *PathFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a Path value to the flagSet if required + +type StringFlag struct { + *cli.StringFlag + // Has unexported fields. +} + StringFlag is the flag type that wraps cli.StringFlag to allow for other + values to be specified + +func NewStringFlag(fl *cli.StringFlag) *StringFlag + NewStringFlag creates a new StringFlag + +func (f *StringFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + StringFlag.Apply + +func (f *StringFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a String value to the flagSet if required + +type StringSliceFlag struct { + *cli.StringSliceFlag + // Has unexported fields. +} + StringSliceFlag is the flag type that wraps cli.StringSliceFlag to allow for + other values to be specified + +func NewStringSliceFlag(fl *cli.StringSliceFlag) *StringSliceFlag + NewStringSliceFlag creates a new StringSliceFlag + +func (f *StringSliceFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + StringSliceFlag.Apply + +func (f *StringSliceFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + ApplyInputSourceValue applies a StringSlice value to the flagSet if required + +type Uint64Flag struct { + *cli.Uint64Flag + // Has unexported fields. +} + Uint64Flag is the flag type that wraps cli.Uint64Flag to allow for other + values to be specified + +func NewUint64Flag(fl *cli.Uint64Flag) *Uint64Flag + NewUint64Flag creates a new Uint64Flag + +func (f *Uint64Flag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + Uint64Flag.Apply + +func (f *Uint64Flag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + +type UintFlag struct { + *cli.UintFlag + // Has unexported fields. 
+} + UintFlag is the flag type that wraps cli.UintFlag to allow for other values + to be specified + +func NewUintFlag(fl *cli.UintFlag) *UintFlag + NewUintFlag creates a new UintFlag + +func (f *UintFlag) Apply(set *flag.FlagSet) error + Apply saves the flagSet for later usage calls, then calls the wrapped + UintFlag.Apply + +func (f *UintFlag) ApplyInputSourceValue(cCtx *cli.Context, isc InputSourceContext) error + diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go new file mode 100644 index 000000000..640e29045 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -0,0 +1,564 @@ +package cli + +import ( + "fmt" + "io" + "os" + "strings" + "text/tabwriter" + "text/template" + "unicode/utf8" +) + +const ( + helpName = "help" + helpAlias = "h" +) + +// this instance exists to avoid recursion in ShowCommandHelp, which could +// otherwise add a help command again +var helpCommandDontUse = &Command{ + Name: helpName, + Aliases: []string{helpAlias}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", +} + +var helpCommand = &Command{ + Name: helpName, + Aliases: []string{helpAlias}, + Usage: "Shows a list of commands or help for one command", + ArgsUsage: "[command]", + Action: func(cCtx *Context) error { + args := cCtx.Args() + argsPresent := args.First() != "" + firstArg := args.First() + + // This action can be triggered by a "default" action of a command + // or via cmd.Run when cmd == helpCmd. So we have the following possibilities + // + // 1 $ app + // 2 $ app help + // 3 $ app foo + // 4 $ app help foo + // 5 $ app foo help + // 6 $ app foo -h (with no other sub-commands or flags defined) + + // Case 4. When executing a help command, set the context to the parent + // to allow resolution of subsequent args. This will transform + // $ app help foo + // to + // $ app foo + // which will then be handled as case 3 + if cCtx.Command.Name == helpName || cCtx.Command.Name == helpAlias { + cCtx = cCtx.parentContext + } + + // Case 4. $ app help foo + // foo is the command for which help needs to be shown + if argsPresent { + return ShowCommandHelp(cCtx, firstArg) + } + + // Case 1 & 2 + // Special case when running help on main app itself as opposed to individual + // commands/subcommands + if cCtx.parentContext.App == nil { + _ = ShowAppHelp(cCtx) + return nil + } + + // Case 3, 5 + if (len(cCtx.Command.Subcommands) == 1 && !cCtx.Command.HideHelp && !cCtx.Command.HideHelpCommand) || + (len(cCtx.Command.Subcommands) == 0 && cCtx.Command.HideHelp) { + templ := cCtx.Command.CustomHelpTemplate + if templ == "" { + templ = CommandHelpTemplate + } + HelpPrinter(cCtx.App.Writer, templ, cCtx.Command) + return nil + } + + // Case 6: handling is incorporated in the callee itself + return ShowSubcommandHelp(cCtx) + }, +} + +// Prints help for the App or Command +type helpPrinter func(w io.Writer, templ string, data interface{}) + +// Prints help for the App or Command with custom template function. +type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) + +// HelpPrinter is a function that writes the help output. If not set explicitly, +// this calls HelpPrinterCustom using only the default template functions. +// +// If custom logic for printing help is required, this function can be +// overridden. If the ExtraInfo field is defined on an App, this function +// should not be modified, as HelpPrinterCustom will be used directly in order +// to capture the extra information.
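+// +// For example, a minimal override sketch (illustrative only; the banner text +// and the delegation to the default logic are assumptions, not upstream code): +// +// cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { +// fmt.Fprintln(w, "mytool help") // hypothetical banner +// cli.HelpPrinterCustom(w, templ, data, nil) // fall back to the default rendering +// }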
+var HelpPrinter helpPrinter = printHelp + +// HelpPrinterCustom is a function that writes the help output. It is used as +// the default implementation of HelpPrinter, and may be called directly if +// the ExtraInfo field is set on an App. +// +// In the default implementation, if the customFuncs argument contains a +// "wrapAt" key, which is a function that takes no arguments and returns +// an int, this int value will be used to produce a "wrap" function used +// by the default template to wrap long lines. +var HelpPrinterCustom helpPrinterCustom = printHelpCustom + +// VersionPrinter prints the version for the App +var VersionPrinter = printVersion + +// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. +func ShowAppHelpAndExit(c *Context, exitCode int) { + _ = ShowAppHelp(c) + os.Exit(exitCode) +} + +// ShowAppHelp is an action that displays the help. +func ShowAppHelp(cCtx *Context) error { + tpl := cCtx.App.CustomAppHelpTemplate + if tpl == "" { + tpl = AppHelpTemplate + } + + if cCtx.App.ExtraInfo == nil { + HelpPrinter(cCtx.App.Writer, tpl, cCtx.App) + return nil + } + + customAppData := func() map[string]interface{} { + return map[string]interface{}{ + "ExtraInfo": cCtx.App.ExtraInfo, + } + } + HelpPrinterCustom(cCtx.App.Writer, tpl, cCtx.App, customAppData()) + + return nil +} + +// DefaultAppComplete prints the list of subcommands as the default app completion method +func DefaultAppComplete(cCtx *Context) { + DefaultCompleteWithFlags(nil)(cCtx) +} + +func printCommandSuggestions(commands []*Command, writer io.Writer) { + for _, command := range commands { + if command.Hidden { + continue + } + if strings.HasSuffix(os.Getenv("SHELL"), "zsh") { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) + } + } else { + for _, name := range command.Names() { + _, _ = fmt.Fprintf(writer, "%s\n", name) + } + } + } +} + +func cliArgContains(flagName string) bool { + for _, name := range strings.Split(flagName, ",") { + name = strings.TrimSpace(name) + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 + } + flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + for _, a := range os.Args { + if a == flag { + return true + } + } + } + return false +} + +func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { + cur := strings.TrimPrefix(lastArg, "-") + cur = strings.TrimPrefix(cur, "-") + for _, flag := range flags { + if bflag, ok := flag.(*BoolFlag); ok && bflag.Hidden { + continue + } + for _, name := range flag.Names() { + name = strings.TrimSpace(name) + // this gets the total count of utf8 letters in the flag name + count := utf8.RuneCountInString(name) + if count > 2 { + count = 2 // reuse this count to generate single - or -- in flag completion + } + // if the flag name has more than one utf8 letter and the last argument has a -- prefix, + // then skip flag completion for short flags, for example -v or -x + if strings.HasPrefix(lastArg, "--") && count == 1 { + continue + } + // match if the last argument matches this flag and it is not repeated + if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(name) { + flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) + _, _ = fmt.Fprintln(writer, flagCompletion) + } + } + } +} + +func DefaultCompleteWithFlags(cmd *Command) func(cCtx *Context) { + return func(cCtx *Context) { + var lastArg string + + // TODO: This shouldn't depend on os.Args; rather, it should + // depend on root arguments passed to
App + if len(os.Args) > 2 { + lastArg = os.Args[len(os.Args)-2] + } + + if lastArg != "" { + if strings.HasPrefix(lastArg, "-") { + if cmd != nil { + printFlagSuggestions(lastArg, cmd.Flags, cCtx.App.Writer) + + return + } + + printFlagSuggestions(lastArg, cCtx.App.Flags, cCtx.App.Writer) + + return + } + } + + if cmd != nil { + printCommandSuggestions(cmd.Subcommands, cCtx.App.Writer) + return + } + + printCommandSuggestions(cCtx.Command.Subcommands, cCtx.App.Writer) + } +} + +// ShowCommandHelpAndExit - exits with code after showing help +func ShowCommandHelpAndExit(c *Context, command string, code int) { + _ = ShowCommandHelp(c, command) + os.Exit(code) +} + +// ShowCommandHelp prints help for the given command +func ShowCommandHelp(ctx *Context, command string) error { + + commands := ctx.App.Commands + if ctx.Command.Subcommands != nil { + commands = ctx.Command.Subcommands + } + for _, c := range commands { + if c.HasName(command) { + if !ctx.App.HideHelpCommand && !c.HasName(helpName) && len(c.Subcommands) != 0 && c.Command(helpName) == nil { + c.Subcommands = append(c.Subcommands, helpCommandDontUse) + } + if !ctx.App.HideHelp && HelpFlag != nil { + c.appendFlag(HelpFlag) + } + templ := c.CustomHelpTemplate + if templ == "" { + if len(c.Subcommands) == 0 { + templ = CommandHelpTemplate + } else { + templ = SubcommandHelpTemplate + } + } + + HelpPrinter(ctx.App.Writer, templ, c) + + return nil + } + } + + if ctx.App.CommandNotFound == nil { + errMsg := fmt.Sprintf("No help topic for '%v'", command) + if ctx.App.Suggest && SuggestCommand != nil { + if suggestion := SuggestCommand(ctx.Command.Subcommands, command); suggestion != "" { + errMsg += ". " + suggestion + } + } + return Exit(errMsg, 3) + } + + ctx.App.CommandNotFound(ctx, command) + return nil +} + +// ShowSubcommandHelpAndExit - Prints help for the given subcommand and exits with exit code. +func ShowSubcommandHelpAndExit(c *Context, exitCode int) { + _ = ShowSubcommandHelp(c) + os.Exit(exitCode) +} + +// ShowSubcommandHelp prints help for the given subcommand +func ShowSubcommandHelp(cCtx *Context) error { + if cCtx == nil { + return nil + } + // use custom template when provided (fixes #1703) + templ := SubcommandHelpTemplate + if cCtx.Command != nil && cCtx.Command.CustomHelpTemplate != "" { + templ = cCtx.Command.CustomHelpTemplate + } + HelpPrinter(cCtx.App.Writer, templ, cCtx.Command) + return nil +} + +// ShowVersion prints the version number of the App +func ShowVersion(cCtx *Context) { + VersionPrinter(cCtx) +} + +func printVersion(cCtx *Context) { + _, _ = fmt.Fprintf(cCtx.App.Writer, "%v version %v\n", cCtx.App.Name, cCtx.App.Version) +} + +// ShowCompletions prints the lists of commands within a given context +func ShowCompletions(cCtx *Context) { + c := cCtx.Command + if c != nil && c.BashComplete != nil { + c.BashComplete(cCtx) + } +} + +// ShowCommandCompletions prints the custom completions for a given command +func ShowCommandCompletions(ctx *Context, command string) { + c := ctx.Command.Command(command) + if c != nil { + if c.BashComplete != nil { + c.BashComplete(ctx) + } else { + DefaultCompleteWithFlags(c)(ctx) + } + } + +} + +// printHelpCustom is the default implementation of HelpPrinterCustom. +// +// The customFuncs map will be combined with a default template.FuncMap to +// allow using arbitrary functions in template rendering. 
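+// +// As a usage sketch (editorially added; os.Stdout and the app variable are +// assumptions standing in for a real *cli.App), a caller going through the +// exported HelpPrinterCustom can pass a "wrapAt" entry to wrap help output +// at 80 columns: +// +// cli.HelpPrinterCustom(os.Stdout, cli.AppHelpTemplate, app, +// map[string]interface{}{"wrapAt": func() int { return 80 }})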
+func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { + + const maxLineLength = 10000 + + funcMap := template.FuncMap{ + "join": strings.Join, + "subtract": subtract, + "indent": indent, + "nindent": nindent, + "trim": strings.TrimSpace, + "wrap": func(input string, offset int) string { return wrap(input, offset, maxLineLength) }, + "offset": offset, + "offsetCommands": offsetCommands, + } + + if customFuncs["wrapAt"] != nil { + if wa, ok := customFuncs["wrapAt"]; ok { + if waf, ok := wa.(func() int); ok { + wrapAt := waf() + customFuncs["wrap"] = func(input string, offset int) string { + return wrap(input, offset, wrapAt) + } + } + } + } + + for key, value := range customFuncs { + funcMap[key] = value + } + + w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) + t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) + templates := map[string]string{ + "helpNameTemplate": helpNameTemplate, + "usageTemplate": usageTemplate, + "descriptionTemplate": descriptionTemplate, + "visibleCommandTemplate": visibleCommandTemplate, + "copyrightTemplate": copyrightTemplate, + "versionTemplate": versionTemplate, + "visibleFlagCategoryTemplate": visibleFlagCategoryTemplate, + "visibleFlagTemplate": visibleFlagTemplate, + "visibleGlobalFlagCategoryTemplate": strings.Replace(visibleFlagCategoryTemplate, "OPTIONS", "GLOBAL OPTIONS", -1), + "authorsTemplate": authorsTemplate, + "visibleCommandCategoryTemplate": visibleCommandCategoryTemplate, + } + for name, value := range templates { + if _, err := t.New(name).Parse(value); err != nil { + if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + } + } + + err := t.Execute(w, data) + if err != nil { + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. 
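+ // When CLI_TEMPLATE_ERROR_DEBUG is set, the error is still reported on + // ErrWriter below, mirroring the handling of per-template parse errors above.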
+ if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { + _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) + } + return + } + _ = w.Flush() +} + +func printHelp(out io.Writer, templ string, data interface{}) { + HelpPrinterCustom(out, templ, data, nil) +} + +func checkVersion(cCtx *Context) bool { + found := false + for _, name := range VersionFlag.Names() { + if cCtx.Bool(name) { + found = true + } + } + return found +} + +func checkHelp(cCtx *Context) bool { + if HelpFlag == nil { + return false + } + found := false + for _, name := range HelpFlag.Names() { + if cCtx.Bool(name) { + found = true + break + } + } + + return found +} + +func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { + if !a.EnableBashCompletion { + return false, arguments + } + + pos := len(arguments) - 1 + lastArg := arguments[pos] + + if lastArg != "--generate-bash-completion" { + return false, arguments + } + + return true, arguments[:pos] +} + +func checkCompletions(cCtx *Context) bool { + if !cCtx.shellComplete { + return false + } + + if args := cCtx.Args(); args.Present() { + name := args.First() + if cmd := cCtx.Command.Command(name); cmd != nil { + // let the command handle the completion + return false + } + } + + ShowCompletions(cCtx) + return true +} + +func subtract(a, b int) int { + return a - b +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func wrap(input string, offset int, wrapAt int) string { + var ss []string + + lines := strings.Split(input, "\n") + + padding := strings.Repeat(" ", offset) + + for i, line := range lines { + if line == "" { + ss = append(ss, line) + } else { + wrapped := wrapLine(line, offset, wrapAt, padding) + if i == 0 { + ss = append(ss, wrapped) + } else { + ss = append(ss, padding+wrapped) + + } + + } + } + + return strings.Join(ss, "\n") +} + +func wrapLine(input string, offset int, wrapAt int, padding string) string { + if wrapAt <= offset || len(input) <= wrapAt-offset { + return input + } + + lineWidth := wrapAt - offset + words := strings.Fields(input) + if len(words) == 0 { + return input + } + + wrapped := words[0] + spaceLeft := lineWidth - len(wrapped) + for _, word := range words[1:] { + if len(word)+1 > spaceLeft { + wrapped += "\n" + padding + word + spaceLeft = lineWidth - len(word) + } else { + wrapped += " " + word + spaceLeft -= 1 + len(word) + } + } + + return wrapped +} + +func offset(input string, fixed int) int { + return len(input) + fixed +} + +// this function tries to find the max width of the names column +// so say we have the following rows for help +// +// foo1, foo2, foo3 some string here +// bar1, b2 some other string here +// +// We want to offset the 2nd row usage by some amount so that everything +// is aligned +// +// foo1, foo2, foo3 some string here +// bar1, b2 some other string here +// +// to find that offset we find the length of all the rows and use the max +// to calculate the offset +func offsetCommands(cmds []*Command, fixed int) int { + var max int = 0 + for _, cmd := range cmds { + s := strings.Join(cmd.Names(), ", ") + if len(s) > max { + max = len(s) + } + } + return max + fixed +} diff --git a/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt b/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt new file mode 100644 index 000000000..482ad0622 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/mkdocs-reqs.txt @@ -0,0 +1,5 
@@ +mkdocs-git-revision-date-localized-plugin~=1.0 +mkdocs-material-extensions~=1.0 +mkdocs-material~=8.2 +mkdocs~=1.3 +pygments~=2.12 diff --git a/vendor/github.com/urfave/cli/v2/mkdocs.yml b/vendor/github.com/urfave/cli/v2/mkdocs.yml new file mode 100644 index 000000000..f7bfd419e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/mkdocs.yml @@ -0,0 +1,107 @@ +# NOTE: the mkdocs dependencies will need to be installed out of +# band until this whole thing gets more automated: +# +# pip install -r mkdocs-reqs.txt +# + +site_name: urfave/cli +site_url: https://cli.urfave.org/ +repo_url: https://github.com/urfave/cli +edit_uri: edit/main/docs/ +nav: + - Home: + - Welcome: index.md + - Contributing: CONTRIBUTING.md + - Code of Conduct: CODE_OF_CONDUCT.md + - Releasing: RELEASING.md + - Security: SECURITY.md + - Migrate v1 to v2: migrate-v1-to-v2.md + - v2 Manual: + - Getting Started: v2/getting-started.md + - Migrating From Older Releases: v2/migrating-from-older-releases.md + - Examples: + - Greet: v2/examples/greet.md + - Arguments: v2/examples/arguments.md + - Flags: v2/examples/flags.md + - Subcommands: v2/examples/subcommands.md + - Subcommands Categories: v2/examples/subcommands-categories.md + - Exit Codes: v2/examples/exit-codes.md + - Combining Short Options: v2/examples/combining-short-options.md + - Bash Completions: v2/examples/bash-completions.md + - Generated Help Text: v2/examples/generated-help-text.md + - Version Flag: v2/examples/version-flag.md + - Timestamp Flag: v2/examples/timestamp-flag.md + - Suggestions: v2/examples/suggestions.md + - Full API Example: v2/examples/full-api-example.md + - v1 Manual: + - Getting Started: v1/getting-started.md + - Migrating to v2: v1/migrating-to-v2.md + - Examples: + - Greet: v1/examples/greet.md + - Arguments: v1/examples/arguments.md + - Flags: v1/examples/flags.md + - Subcommands: v1/examples/subcommands.md + - Subcommands (Categories): v1/examples/subcommands-categories.md + - Exit Codes: v1/examples/exit-codes.md + - Combining Short Options: v1/examples/combining-short-options.md + - Bash Completions: v1/examples/bash-completions.md + - Generated Help Text: v1/examples/generated-help-text.md + - Version Flag: v1/examples/version-flag.md + +theme: + name: material + palette: + - media: "(prefers-color-scheme: light)" + scheme: default + toggle: + icon: material/brightness-4 + name: dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + toggle: + icon: material/brightness-7 + name: light mode + features: + - content.code.annotate + - navigation.top + - navigation.instant + - navigation.expand + - navigation.sections + - navigation.tabs + - navigation.tabs.sticky +plugins: + - git-revision-date-localized + - search + - tags +# NOTE: this is the recommended configuration from +# https://squidfunk.github.io/mkdocs-material/setup/extensions/#recommended-configuration +markdown_extensions: + - abbr + - admonition + - attr_list + - def_list + - footnotes + - meta + - md_in_html + - toc: + permalink: true + - pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg + - pymdownx.highlight + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde diff --git 
a/vendor/github.com/urfave/cli/v2/parse.go b/vendor/github.com/urfave/cli/v2/parse.go new file mode 100644 index 000000000..d79f15a18 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/parse.go @@ -0,0 +1,102 @@ +package cli + +import ( + "flag" + "strings" +) + +type iterativeParser interface { + newFlagSet() (*flag.FlagSet, error) + useShortOptionHandling() bool +} + +// To enable short-option handling (e.g., "-it" vs "-i -t") we have to +// iteratively catch parsing errors. This way we achieve LR parsing without +// transforming any arguments. Otherwise, there is no way we can discriminate +// combined short options from common arguments that should be left untouched. +// Pass `shellComplete` to continue parsing options on failure during shell +// completion, when the user-supplied options may be incomplete. +func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error { + for { + err := set.Parse(args) + if !ip.useShortOptionHandling() || err == nil { + if shellComplete { + return nil + } + return err + } + + trimmed, trimErr := flagFromError(err) + if trimErr != nil { + return err + } + + // regenerate the initial args with the split short opts + argsWereSplit := false + for i, arg := range args { + // skip args that are not part of the error message + if name := strings.TrimLeft(arg, "-"); name != trimmed { + continue + } + + // if we can't split, the error was accurate + shortOpts := splitShortOptions(set, arg) + if len(shortOpts) == 1 { + return err + } + + // swap current argument with the split version + // do not include args that parsed correctly so far as it would + // trigger Value.Set() on those args and would result in + // duplicates for slice type flags + args = append(shortOpts, args[i+1:]...) + argsWereSplit = true + break + } + + // This should be an impossible-to-reach code path, but in case the arg + // splitting failed to happen, this will prevent infinite loops + if !argsWereSplit { + return err + } + } +} + +const providedButNotDefinedErrMsg = "flag provided but not defined: -" + +// flagFromError tries to parse a provided flag from an error message. If the +// parsing fails, it returns the input error and an empty string +func flagFromError(err error) (string, error) { + errStr := err.Error() + trimmed := strings.TrimPrefix(errStr, providedButNotDefinedErrMsg) + if errStr == trimmed { + return "", err + } + return trimmed, nil +} + +func splitShortOptions(set *flag.FlagSet, arg string) []string { + shortFlagsExist := func(s string) bool { + for _, c := range s[1:] { + if f := set.Lookup(string(c)); f == nil { + return false + } + } + return true + } + + if !isSplittable(arg) || !shortFlagsExist(arg) { + return []string{arg} + } + + separated := make([]string, 0, len(arg)-1) + for _, flagChar := range arg[1:] { + separated = append(separated, "-"+string(flagChar)) + } + + return separated +} + +func isSplittable(flagArg string) bool { + return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 +} diff --git a/vendor/github.com/urfave/cli/v2/sliceflag.go b/vendor/github.com/urfave/cli/v2/sliceflag.go new file mode 100644 index 000000000..b2ca59041 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/sliceflag.go @@ -0,0 +1,290 @@ +package cli + +import ( + "flag" + "reflect" +) + +type ( + // SliceFlag extends implementations like StringSliceFlag and IntSliceFlag with support for using slices directly, + // as Value and/or Destination.
+ // See also SliceFlagTarget, MultiStringFlag, MultiFloat64Flag, MultiInt64Flag, MultiIntFlag. + SliceFlag[T SliceFlagTarget[E], S ~[]E, E any] struct { + Target T + Value S + Destination *S + } + + // SliceFlagTarget models a target implementation for use with SliceFlag. + // The three methods, SetValue, SetDestination, and GetDestination, are necessary to propagate Value and + // Destination, where Value is propagated inwards (initially), and Destination is propagated outwards (on every + // update). + SliceFlagTarget[E any] interface { + Flag + RequiredFlag + DocGenerationFlag + VisibleFlag + CategorizableFlag + + // SetValue should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~[]E). + SetValue(slice []E) + // SetDestination should propagate the given slice to the target, ideally as a new value. + // Note that a nil slice should nil/clear any existing value (modelled as ~*[]E). + SetDestination(slice []E) + // GetDestination should return the current value referenced by any destination, or nil if nil/unset. + GetDestination() []E + } + + // MultiStringFlag extends StringSliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. + MultiStringFlag = SliceFlag[*StringSliceFlag, []string, string] + + // MultiFloat64Flag extends Float64SliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. + MultiFloat64Flag = SliceFlag[*Float64SliceFlag, []float64, float64] + + // MultiInt64Flag extends Int64SliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. + MultiInt64Flag = SliceFlag[*Int64SliceFlag, []int64, int64] + + // MultiIntFlag extends IntSliceFlag with support for using slices directly, as Value and/or Destination. + // See also SliceFlag. 
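+ // + // For example (an illustrative sketch; the flag name and variable are + // hypothetical), a MultiIntFlag can bind directly to a plain []int: + // + // var ports []int + // f := &cli.MultiIntFlag{Target: &cli.IntSliceFlag{Name: "port"}, Destination: &ports}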
+ MultiIntFlag = SliceFlag[*IntSliceFlag, []int, int] + + flagValueHook struct { + value Generic + hook func() + } +) + +var ( + // compile time assertions + + _ SliceFlagTarget[string] = (*StringSliceFlag)(nil) + _ SliceFlagTarget[string] = (*SliceFlag[*StringSliceFlag, []string, string])(nil) + _ SliceFlagTarget[string] = (*MultiStringFlag)(nil) + _ SliceFlagTarget[float64] = (*MultiFloat64Flag)(nil) + _ SliceFlagTarget[int64] = (*MultiInt64Flag)(nil) + _ SliceFlagTarget[int] = (*MultiIntFlag)(nil) + + _ Generic = (*flagValueHook)(nil) + _ Serializer = (*flagValueHook)(nil) +) + +func (x *SliceFlag[T, S, E]) Apply(set *flag.FlagSet) error { + x.Target.SetValue(x.convertSlice(x.Value)) + + destination := x.Destination + if destination == nil { + x.Target.SetDestination(nil) + + return x.Target.Apply(set) + } + + x.Target.SetDestination(x.convertSlice(*destination)) + + return applyFlagValueHook(set, x.Target.Apply, func() { + *destination = x.Target.GetDestination() + }) +} + +func (x *SliceFlag[T, S, E]) convertSlice(slice S) []E { + result := make([]E, len(slice)) + copy(result, slice) + return result +} + +func (x *SliceFlag[T, S, E]) SetValue(slice S) { + x.Value = slice +} + +func (x *SliceFlag[T, S, E]) SetDestination(slice S) { + if slice != nil { + x.Destination = &slice + } else { + x.Destination = nil + } +} + +func (x *SliceFlag[T, S, E]) GetDestination() S { + if destination := x.Destination; destination != nil { + return *destination + } + return nil +} + +func (x *SliceFlag[T, S, E]) String() string { return x.Target.String() } +func (x *SliceFlag[T, S, E]) Names() []string { return x.Target.Names() } +func (x *SliceFlag[T, S, E]) IsSet() bool { return x.Target.IsSet() } +func (x *SliceFlag[T, S, E]) IsRequired() bool { return x.Target.IsRequired() } +func (x *SliceFlag[T, S, E]) TakesValue() bool { return x.Target.TakesValue() } +func (x *SliceFlag[T, S, E]) GetUsage() string { return x.Target.GetUsage() } +func (x *SliceFlag[T, S, E]) GetValue() string { return x.Target.GetValue() } +func (x *SliceFlag[T, S, E]) GetDefaultText() string { return x.Target.GetDefaultText() } +func (x *SliceFlag[T, S, E]) GetEnvVars() []string { return x.Target.GetEnvVars() } +func (x *SliceFlag[T, S, E]) IsVisible() bool { return x.Target.IsVisible() } +func (x *SliceFlag[T, S, E]) GetCategory() string { return x.Target.GetCategory() } + +func (x *flagValueHook) Set(value string) error { + if err := x.value.Set(value); err != nil { + return err + } + x.hook() + return nil +} + +func (x *flagValueHook) String() string { + // note: this is necessary due to the way Go's flag package handles defaults + isZeroValue := func(f flag.Value, v string) bool { + /* + https://cs.opensource.google/go/go/+/refs/tags/go1.18.3:src/flag/flag.go;drc=2580d0e08d5e9f979b943758d3c49877fb2324cb;l=453 + + Copyright (c) 2009 The Go Authors. All rights reserved. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. 
nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + // Build a zero value of the flag's Value type, and see if the + // result of calling its String method equals the value passed in. + // This works unless the Value type is itself an interface type. + typ := reflect.TypeOf(f) + var z reflect.Value + if typ.Kind() == reflect.Pointer { + z = reflect.New(typ.Elem()) + } else { + z = reflect.Zero(typ) + } + return v == z.Interface().(flag.Value).String() + } + if x.value != nil { + // only return non-empty if not the same string as returned by the zero value + if s := x.value.String(); !isZeroValue(x.value, s) { + return s + } + } + return `` +} + +func (x *flagValueHook) Serialize() string { + if value, ok := x.value.(Serializer); ok { + return value.Serialize() + } + return x.String() +} + +// applyFlagValueHook wraps calls apply then wraps flags to call a hook function on update and after initial apply. +func applyFlagValueHook(set *flag.FlagSet, apply func(set *flag.FlagSet) error, hook func()) error { + if apply == nil || set == nil || hook == nil { + panic(`invalid input`) + } + var tmp flag.FlagSet + if err := apply(&tmp); err != nil { + return err + } + tmp.VisitAll(func(f *flag.Flag) { set.Var(&flagValueHook{value: f.Value, hook: hook}, f.Name, f.Usage) }) + hook() + return nil +} + +// newSliceFlagValue is for implementing SliceFlagTarget.SetValue and SliceFlagTarget.SetDestination. +// It's e.g. as part of StringSliceFlag.SetValue, using the factory NewStringSlice. +func newSliceFlagValue[R any, S ~[]E, E any](factory func(defaults ...E) *R, defaults S) *R { + if defaults == nil { + return nil + } + return factory(defaults...) +} + +// unwrapFlagValue strips any/all *flagValueHook wrappers. 
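+// Since applyFlagValueHook wraps each flag.Value in a flagValueHook, callers +// that need the concrete value (for example, a type assertion such as +// unwrapFlagValue(f.Value).(*StringSlice), where f is a hypothetical flag) +// must unwrap first.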
+func unwrapFlagValue(v flag.Value) flag.Value { + for { + h, ok := v.(*flagValueHook) + if !ok { + return v + } + v = h.value + } +} + +// NOTE: the methods below are in this file to make use of the build constraint + +func (f *Float64SliceFlag) SetValue(slice []float64) { + f.Value = newSliceFlagValue(NewFloat64Slice, slice) +} + +func (f *Float64SliceFlag) SetDestination(slice []float64) { + f.Destination = newSliceFlagValue(NewFloat64Slice, slice) +} + +func (f *Float64SliceFlag) GetDestination() []float64 { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} + +func (f *Int64SliceFlag) SetValue(slice []int64) { + f.Value = newSliceFlagValue(NewInt64Slice, slice) +} + +func (f *Int64SliceFlag) SetDestination(slice []int64) { + f.Destination = newSliceFlagValue(NewInt64Slice, slice) +} + +func (f *Int64SliceFlag) GetDestination() []int64 { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} + +func (f *IntSliceFlag) SetValue(slice []int) { + f.Value = newSliceFlagValue(NewIntSlice, slice) +} + +func (f *IntSliceFlag) SetDestination(slice []int) { + f.Destination = newSliceFlagValue(NewIntSlice, slice) +} + +func (f *IntSliceFlag) GetDestination() []int { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} + +func (f *StringSliceFlag) SetValue(slice []string) { + f.Value = newSliceFlagValue(NewStringSlice, slice) +} + +func (f *StringSliceFlag) SetDestination(slice []string) { + f.Destination = newSliceFlagValue(NewStringSlice, slice) +} + +func (f *StringSliceFlag) GetDestination() []string { + if destination := f.Destination; destination != nil { + return destination.Value() + } + return nil +} diff --git a/vendor/github.com/urfave/cli/v2/sort.go b/vendor/github.com/urfave/cli/v2/sort.go new file mode 100644 index 000000000..23d1c2f77 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/sort.go @@ -0,0 +1,29 @@ +package cli + +import "unicode" + +// lexicographicLess compares strings alphabetically considering case. 
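+// For example, it orders "apple" before "Banana" (lowered 'a' < 'b'), falls +// back to the raw rune comparison when the lowered runes are equal, and +// finally to the plain string comparison i < j when one string is a prefix of +// the other.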
+func lexicographicLess(i, j string) bool { + iRunes := []rune(i) + jRunes := []rune(j) + + lenShared := len(iRunes) + if lenShared > len(jRunes) { + lenShared = len(jRunes) + } + + for index := 0; index < lenShared; index++ { + ir := iRunes[index] + jr := jRunes[index] + + if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { + return lir < ljr + } + + if ir != jr { + return ir < jr + } + } + + return i < j +} diff --git a/vendor/github.com/urfave/cli/v2/suggestions.go b/vendor/github.com/urfave/cli/v2/suggestions.go new file mode 100644 index 000000000..9d2b7a81e --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/suggestions.go @@ -0,0 +1,68 @@ +//go:build !urfave_cli_no_suggest +// +build !urfave_cli_no_suggest + +package cli + +import ( + "fmt" + + "github.com/xrash/smetrics" +) + +func init() { + SuggestFlag = suggestFlag + SuggestCommand = suggestCommand +} + +func jaroWinkler(a, b string) float64 { + // magic values are from https://github.com/xrash/smetrics/blob/039620a656736e6ad994090895784a7af15e0b80/jaro-winkler.go#L8 + const ( + boostThreshold = 0.7 + prefixSize = 4 + ) + return smetrics.JaroWinkler(a, b, boostThreshold, prefixSize) +} + +func suggestFlag(flags []Flag, provided string, hideHelp bool) string { + distance := 0.0 + suggestion := "" + + for _, flag := range flags { + flagNames := flag.Names() + if !hideHelp && HelpFlag != nil { + flagNames = append(flagNames, HelpFlag.Names()...) + } + for _, name := range flagNames { + newDistance := jaroWinkler(name, provided) + if newDistance > distance { + distance = newDistance + suggestion = name + } + } + } + + if len(suggestion) == 1 { + suggestion = "-" + suggestion + } else if len(suggestion) > 1 { + suggestion = "--" + suggestion + } + + return suggestion +} + +// suggestCommand takes a list of commands and a provided string to suggest a +// command name +func suggestCommand(commands []*Command, provided string) (suggestion string) { + distance := 0.0 + for _, command := range commands { + for _, name := range append(command.Names(), helpName, helpAlias) { + newDistance := jaroWinkler(name, provided) + if newDistance > distance { + distance = newDistance + suggestion = name + } + } + } + + return fmt.Sprintf(SuggestDidYouMeanTemplate, suggestion) +} diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go new file mode 100644 index 000000000..5748f4c20 --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/template.go @@ -0,0 +1,143 @@ +package cli + +var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}` +var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}` +var descriptionTemplate = `{{wrap .Description 3}}` +var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: + {{range $index, $author := .Authors}}{{if $index}} + {{end}}{{$author}}{{end}}` +var visibleCommandTemplate = `{{ $cv := offsetCommands .VisibleCommands 5}}{{range .VisibleCommands}} + {{$s := join .Names ", "}}{{$s}}{{ $sp := subtract $cv (offset $s 3) }}{{ indent $sp ""}}{{wrap .Usage $cv}}{{end}}` +var visibleCommandCategoryTemplate = `{{range .VisibleCategories}}{{if .Name}} + {{.Name}}:{{range .VisibleCommands}} + {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{template "visibleCommandTemplate" .}}{{end}}{{end}}` +var 
visibleFlagCategoryTemplate = `{{range .VisibleFlagCategories}} + {{if .Name}}{{.Name}} + + {{end}}{{$flglen := len .Flags}}{{range $i, $e := .Flags}}{{if eq (subtract $flglen $i) 1}}{{$e}} +{{else}}{{$e}} + {{end}}{{end}}{{end}}` + +var visibleFlagTemplate = `{{range $i, $e := .VisibleFlags}} + {{wrap $e.String 6}}{{end}}` + +var versionTemplate = `{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}` + +var copyrightTemplate = `{{wrap .Copyright 3}}` + +// AppHelpTemplate is the text template for the Default help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var AppHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + +VERSION: + {{.Version}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}} +{{- if len .Authors}} + +AUTHOR{{template "authorsTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +GLOBAL OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +GLOBAL OPTIONS:{{template "visibleFlagTemplate" .}}{{end}}{{if .Copyright}} + +COPYRIGHT: + {{template "copyrightTemplate" .}}{{end}} +` + +// CommandHelpTemplate is the text template for the command help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var CommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{template "usageTemplate" .}}{{if .Category}} + +CATEGORY: + {{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + +// SubcommandHelpTemplate is the text template for the subcommand help topic. +// cli.go uses text/template to render templates. You can +// render custom help text by setting this variable. +var SubcommandHelpTemplate = `NAME: + {{template "helpNameTemplate" .}} + +USAGE: + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}} + +DESCRIPTION: + {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} + +COMMANDS:{{template "visibleCommandCategoryTemplate" .}}{{end}}{{if .VisibleFlagCategories}} + +OPTIONS:{{template "visibleFlagCategoryTemplate" .}}{{else if .VisibleFlags}} + +OPTIONS:{{template "visibleFlagTemplate" .}}{{end}} +` + +var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }} + +{{end}}# NAME + +{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} + +# SYNOPSIS + +{{ .App.Name }} +{{ if .SynopsisArgs }} +` + "```" + ` +{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` +{{ end }}{{ if .App.Description }} +# DESCRIPTION + +{{ .App.Description }} +{{ end }} +**Usage**: + +` + "```" + `{{ if .App.UsageText }} +{{ .App.UsageText }} +{{ else }} +{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] 
+{{ end }}` + "```" + ` +{{ if .GlobalArgs }} +# GLOBAL OPTIONS +{{ range $v := .GlobalArgs }} +{{ $v }}{{ end }} +{{ end }}{{ if .Commands }} +# COMMANDS +{{ range $v := .Commands }} +{{ $v }}{{ end }}{{ end }}` + +var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion + +function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' + for i in (commandline -opc) + if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} + return 1 + end + end + return 0 +end + +{{ range $v := .Completions }}{{ $v }} +{{ end }}` diff --git a/vendor/github.com/urfave/cli/v2/zz_generated.flags.go b/vendor/github.com/urfave/cli/v2/zz_generated.flags.go new file mode 100644 index 000000000..f2fc8c88b --- /dev/null +++ b/vendor/github.com/urfave/cli/v2/zz_generated.flags.go @@ -0,0 +1,865 @@ +// WARNING: this file is generated. DO NOT EDIT + +package cli + +import "time" + +// Float64SliceFlag is a flag with type *Float64Slice +type Float64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Float64Slice + Destination *Float64Slice + + Aliases []string + EnvVars []string + + defaultValue *Float64Slice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []float64) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Float64SliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64SliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Float64SliceFlag) IsVisible() bool { + return !f.Hidden +} + +// GenericFlag is a flag with type Generic +type GenericFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Generic + Destination Generic + + Aliases []string + EnvVars []string + + defaultValue Generic + defaultValueSet bool + + TakesFile bool + + Action func(*Context, interface{}) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *GenericFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *GenericFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *GenericFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *GenericFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *GenericFlag) IsVisible() bool { + return !f.Hidden +} + +// Int64SliceFlag is a flag with type *Int64Slice +type Int64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Int64Slice + Destination *Int64Slice + + Aliases []string + EnvVars []string + + defaultValue *Int64Slice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []int64) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64SliceFlag) IsSet() 
bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Int64SliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64SliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Int64SliceFlag) IsVisible() bool { + return !f.Hidden +} + +// IntSliceFlag is a flag with type *IntSlice +type IntSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *IntSlice + Destination *IntSlice + + Aliases []string + EnvVars []string + + defaultValue *IntSlice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []int) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *IntSliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntSliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *IntSliceFlag) IsVisible() bool { + return !f.Hidden +} + +// PathFlag is a flag with type Path +type PathFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value Path + Destination *Path + + Aliases []string + EnvVars []string + + defaultValue Path + defaultValueSet bool + + TakesFile bool + + Action func(*Context, Path) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *PathFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *PathFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *PathFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *PathFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *PathFlag) IsVisible() bool { + return !f.Hidden +} + +// StringSliceFlag is a flag with type *StringSlice +type StringSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *StringSlice + Destination *StringSlice + + Aliases []string + EnvVars []string + + defaultValue *StringSlice + defaultValueSet bool + + separator separatorSpec + + TakesFile bool + + Action func(*Context, []string) error + + KeepSpace bool +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *StringSliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringSliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *StringSliceFlag) IsVisible() bool { + return !f.Hidden +} + +// TimestampFlag is a flag with type *Timestamp +type TimestampFlag struct { + Name string + + Category 
string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Timestamp + Destination *Timestamp + + Aliases []string + EnvVars []string + + defaultValue *Timestamp + defaultValueSet bool + + Layout string + + Timezone *time.Location + + Action func(*Context, *time.Time) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *TimestampFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *TimestampFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *TimestampFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *TimestampFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *TimestampFlag) IsVisible() bool { + return !f.Hidden +} + +// Uint64SliceFlag is a flag with type *Uint64Slice +type Uint64SliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *Uint64Slice + Destination *Uint64Slice + + Aliases []string + EnvVars []string + + defaultValue *Uint64Slice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []uint64) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Uint64SliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Uint64SliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Uint64SliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Uint64SliceFlag) IsVisible() bool { + return !f.Hidden +} + +// UintSliceFlag is a flag with type *UintSlice +type UintSliceFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value *UintSlice + Destination *UintSlice + + Aliases []string + EnvVars []string + + defaultValue *UintSlice + defaultValueSet bool + + separator separatorSpec + + Action func(*Context, []uint) error +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *UintSliceFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *UintSliceFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *UintSliceFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *UintSliceFlag) IsVisible() bool { + return !f.Hidden +} + +// BoolFlag is a flag with type bool +type BoolFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value bool + Destination *bool + + Aliases []string + EnvVars []string + + defaultValue bool + defaultValueSet bool + + Count *int + + DisableDefaultText bool + + Action func(*Context, bool) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *BoolFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns 
whether or not the flag has been set through env or file +func (f *BoolFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *BoolFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *BoolFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *BoolFlag) IsVisible() bool { + return !f.Hidden +} + +// Float64Flag is a flag with type float64 +type Float64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value float64 + Destination *float64 + + Aliases []string + EnvVars []string + + defaultValue float64 + defaultValueSet bool + + Action func(*Context, float64) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Float64Flag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Float64Flag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Float64Flag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Float64Flag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Float64Flag) IsVisible() bool { + return !f.Hidden +} + +// IntFlag is a flag with type int +type IntFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int + Destination *int + + Aliases []string + EnvVars []string + + defaultValue int + defaultValueSet bool + + Base int + + Action func(*Context, int) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *IntFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *IntFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *IntFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *IntFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *IntFlag) IsVisible() bool { + return !f.Hidden +} + +// Int64Flag is a flag with type int64 +type Int64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value int64 + Destination *int64 + + Aliases []string + EnvVars []string + + defaultValue int64 + defaultValueSet bool + + Base int + + Action func(*Context, int64) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Int64Flag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Int64Flag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Int64Flag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Int64Flag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not 
hidden, otherwise false +func (f *Int64Flag) IsVisible() bool { + return !f.Hidden +} + +// StringFlag is a flag with type string +type StringFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value string + Destination *string + + Aliases []string + EnvVars []string + + defaultValue string + defaultValueSet bool + + TakesFile bool + + Action func(*Context, string) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *StringFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *StringFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *StringFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *StringFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *StringFlag) IsVisible() bool { + return !f.Hidden +} + +// DurationFlag is a flag with type time.Duration +type DurationFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value time.Duration + Destination *time.Duration + + Aliases []string + EnvVars []string + + defaultValue time.Duration + defaultValueSet bool + + Action func(*Context, time.Duration) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *DurationFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *DurationFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *DurationFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *DurationFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *DurationFlag) IsVisible() bool { + return !f.Hidden +} + +// UintFlag is a flag with type uint +type UintFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint + Destination *uint + + Aliases []string + EnvVars []string + + defaultValue uint + defaultValueSet bool + + Base int + + Action func(*Context, uint) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *UintFlag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *UintFlag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *UintFlag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *UintFlag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *UintFlag) IsVisible() bool { + return !f.Hidden +} + +// Uint64Flag is a flag with type uint64 +type Uint64Flag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + Required bool + Hidden bool + HasBeenSet bool + + Value uint64 + Destination *uint64 + + Aliases 
[]string + EnvVars []string + + defaultValue uint64 + defaultValueSet bool + + Base int + + Action func(*Context, uint64) error +} + +// String returns a readable representation of this value (for usage defaults) +func (f *Uint64Flag) String() string { + return FlagStringer(f) +} + +// IsSet returns whether or not the flag has been set through env or file +func (f *Uint64Flag) IsSet() bool { + return f.HasBeenSet +} + +// Names returns the names of the flag +func (f *Uint64Flag) Names() []string { + return FlagNames(f.Name, f.Aliases) +} + +// IsRequired returns whether or not the flag is required +func (f *Uint64Flag) IsRequired() bool { + return f.Required +} + +// IsVisible returns true if the flag is not hidden, otherwise false +func (f *Uint64Flag) IsVisible() bool { + return !f.Hidden +} + +// vim:ro diff --git a/vendor/github.com/vishvananda/netlink/.gitignore b/vendor/github.com/vishvananda/netlink/.gitignore new file mode 100644 index 000000000..66f8fb502 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/.gitignore @@ -0,0 +1,2 @@ +.idea/ +.vscode/ diff --git a/vendor/github.com/vishvananda/netlink/CHANGELOG.md b/vendor/github.com/vishvananda/netlink/CHANGELOG.md new file mode 100644 index 000000000..b11e59ff6 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/CHANGELOG.md @@ -0,0 +1,5 @@ +# Changelog + +## 1.0.0 (2018-03-15) + +Initial release tagging \ No newline at end of file diff --git a/vendor/github.com/vishvananda/netlink/LICENSE b/vendor/github.com/vishvananda/netlink/LICENSE new file mode 100644 index 000000000..9f64db858 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/vishvananda/netlink/Makefile b/vendor/github.com/vishvananda/netlink/Makefile new file mode 100644 index 000000000..a0e68e7a9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/Makefile @@ -0,0 +1,30 @@ +DIRS := \ + . \ + nl + +DEPS = \ + github.com/vishvananda/netns \ + golang.org/x/sys/unix + +uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))) +testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go)))) +goroot = $(addprefix ../../../,$(1)) +unroot = $(subst ../../../,,$(1)) +fmt = $(addprefix fmt-,$(1)) + +all: test + +$(call goroot,$(DEPS)): + go get $(call unroot,$@) + +.PHONY: $(call testdirs,$(DIRS)) +$(call testdirs,$(DIRS)): + go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@ + +$(call fmt,$(call testdirs,$(DIRS))): + ! gofmt -l $(subst fmt-,,$@)/*.go | grep -q . 
+ +.PHONY: fmt +fmt: $(call fmt,$(call testdirs,$(DIRS))) + +test: fmt $(call goroot,$(DEPS)) $(call testdirs,$(DIRS)) diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md new file mode 100644 index 000000000..0128bc67d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/README.md @@ -0,0 +1,92 @@ +# netlink - netlink library for go # + +![Build Status](https://github.com/vishvananda/netlink/actions/workflows/main.yml/badge.svg) [![GoDoc](https://godoc.org/github.com/vishvananda/netlink?status.svg)](https://godoc.org/github.com/vishvananda/netlink) + +The netlink package provides a simple netlink library for Go. Netlink +is the interface a user-space program in Linux uses to communicate with +the kernel. It can be used to add and remove interfaces, set IP addresses +and routes, and configure ipsec. Netlink communication requires elevated +privileges, so in most cases this code needs to be run as root. Since +low-level netlink messages are inscrutable at best, the library attempts +to provide an API that is loosely modeled on the CLI provided by iproute2. +Actions like `ip link add` will be accomplished via a similarly named +function like AddLink(). This library began its life as a fork of the +netlink functionality in +[docker/libcontainer](https://github.com/docker/libcontainer) but was +heavily rewritten to improve testability, performance, and to add new +functionality like ipsec xfrm handling. + +## Local Build and Test ## + +You can use the go get command: + + go get github.com/vishvananda/netlink + +Testing dependencies: + + go get github.com/vishvananda/netns + +Testing (requires root): + + sudo -E go test github.com/vishvananda/netlink + +## Examples ## + +Add a new bridge and add eth1 into it: + +```go +package main + +import ( + "fmt" + "github.com/vishvananda/netlink" +) + +func main() { + la := netlink.NewLinkAttrs() + la.Name = "foo" + mybridge := &netlink.Bridge{LinkAttrs: la} + err := netlink.LinkAdd(mybridge) + if err != nil { + fmt.Printf("could not add %s: %v\n", la.Name, err) + } + eth1, _ := netlink.LinkByName("eth1") + netlink.LinkSetMaster(eth1, mybridge) +} + +``` +Note the `NewLinkAttrs` constructor: it sets default values in the structure. For now +it sets only `TxQLen` to `-1`, so the kernel will choose the default itself. If you're +using simple initialization (`LinkAttrs{Name: "foo"}`), `TxQLen` will be set to +`0` unless you specify it like `LinkAttrs{Name: "foo", TxQLen: 1000}`. + +Add a new IP address to the loopback: + +```go +package main + +import ( + "github.com/vishvananda/netlink" +) + +func main() { + lo, _ := netlink.LinkByName("lo") + addr, _ := netlink.ParseAddr("169.254.169.254/32") + netlink.AddrAdd(lo, addr) +} + +``` + +## Future Work ## + +Many pieces of netlink are not yet fully supported in the high-level +interface. Aspects of virtually all of the high-level objects are still missing. +Many of the underlying primitives are there, so it's a matter of putting +the right fields into the high-level objects and making sure that they +are serialized and deserialized correctly in the Add and List methods. + +There are also a few pieces of low level netlink functionality that still +need to be implemented. Routing rules are not yet in place, nor are some of the +more advanced link types. Hopefully there is decent structure and testing +in place to make these fairly straightforward to add.
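+For reference, here is one more sketch combining the calls above: bring a
+link up and add a route through it (the device name and route are
+illustrative, and errors are ignored for brevity):
+
+```go
+package main
+
+import (
+	"github.com/vishvananda/netlink"
+)
+
+func main() {
+	eth1, _ := netlink.LinkByName("eth1")
+	netlink.LinkSetUp(eth1)
+	dst, _ := netlink.ParseIPNet("10.0.0.0/24")
+	netlink.RouteAdd(&netlink.Route{LinkIndex: eth1.Attrs().Index, Dst: dst})
+}
+```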
+ diff --git a/vendor/github.com/vishvananda/netlink/addr.go b/vendor/github.com/vishvananda/netlink/addr.go new file mode 100644 index 000000000..653f540db --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/addr.go @@ -0,0 +1,57 @@ +package netlink + +import ( + "fmt" + "net" + "strings" +) + +// Addr represents an IP address from netlink. Netlink ip addresses +// include a mask, so it stores the address as a net.IPNet. +type Addr struct { + *net.IPNet + Label string + Flags int + Scope int + Peer *net.IPNet + Broadcast net.IP + PreferedLft int + ValidLft int + LinkIndex int +} + +// String returns $ip/$netmask $label +func (a Addr) String() string { + return strings.TrimSpace(fmt.Sprintf("%s %s", a.IPNet, a.Label)) +} + +// ParseAddr parses the string representation of an address in the +// form $ip/$netmask $label. The label portion is optional +func ParseAddr(s string) (*Addr, error) { + label := "" + parts := strings.Split(s, " ") + if len(parts) > 1 { + s = parts[0] + label = parts[1] + } + m, err := ParseIPNet(s) + if err != nil { + return nil, err + } + return &Addr{IPNet: m, Label: label}, nil +} + +// Equal returns true if both Addrs have the same net.IPNet value. +func (a Addr) Equal(x Addr) bool { + sizea, _ := a.Mask.Size() + sizeb, _ := x.Mask.Size() + // ignore label for comparison + return a.IP.Equal(x.IP) && sizea == sizeb +} + +func (a Addr) PeerEqual(x Addr) bool { + sizea, _ := a.Peer.Mask.Size() + sizeb, _ := x.Peer.Mask.Size() + // ignore label for comparison + return a.Peer.IP.Equal(x.Peer.IP) && sizea == sizeb +} diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go new file mode 100644 index 000000000..218ab2379 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -0,0 +1,425 @@ +package netlink + +import ( + "fmt" + "net" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +// AddrAdd will add an IP address to a link device. +// +// Equivalent to: `ip addr add $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. +func AddrAdd(link Link, addr *Addr) error { + return pkgHandle.AddrAdd(link, addr) +} + +// AddrAdd will add an IP address to a link device. +// +// Equivalent to: `ip addr add $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. +func (h *Handle) AddrAdd(link Link, addr *Addr) error { + req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + return h.addrHandle(link, addr, req) +} + +// AddrReplace will replace (or, if not present, add) an IP address on a link device. +// +// Equivalent to: `ip addr replace $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. +func AddrReplace(link Link, addr *Addr) error { + return pkgHandle.AddrReplace(link, addr) +} + +// AddrReplace will replace (or, if not present, add) an IP address on a link device. +// +// Equivalent to: `ip addr replace $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. 
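+//
+// A minimal usage sketch (the device name and address are illustrative):
+//
+//	eth0, _ := netlink.LinkByName("eth0")
+//	addr, _ := netlink.ParseAddr("192.168.1.10/24")
+//	_ = netlink.AddrReplace(eth0, addr)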
+func (h *Handle) AddrReplace(link Link, addr *Addr) error { + req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) + return h.addrHandle(link, addr, req) +} + +// AddrDel will delete an IP address from a link device. +// +// Equivalent to: `ip addr del $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. +func AddrDel(link Link, addr *Addr) error { + return pkgHandle.AddrDel(link, addr) +} + +// AddrDel will delete an IP address from a link device. +// Equivalent to: `ip addr del $addr dev $link` +// +// If `addr` is an IPv4 address and the broadcast address is not given, it +// will be automatically computed based on the IP mask if /30 or larger. +func (h *Handle) AddrDel(link Link, addr *Addr) error { + req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) + return h.addrHandle(link, addr, req) +} + +func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error { + family := nl.GetIPFamily(addr.IP) + msg := nl.NewIfAddrmsg(family) + msg.Scope = uint8(addr.Scope) + if link == nil { + msg.Index = uint32(addr.LinkIndex) + } else { + base := link.Attrs() + if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { + return fmt.Errorf("label must begin with interface name") + } + h.ensureIndex(base) + msg.Index = uint32(base.Index) + } + mask := addr.Mask + if addr.Peer != nil { + mask = addr.Peer.Mask + } + prefixlen, masklen := mask.Size() + msg.Prefixlen = uint8(prefixlen) + req.AddData(msg) + + var localAddrData []byte + if family == FAMILY_V4 { + localAddrData = addr.IP.To4() + } else { + localAddrData = addr.IP.To16() + } + + localData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData) + req.AddData(localData) + var peerAddrData []byte + if addr.Peer != nil { + if family == FAMILY_V4 { + peerAddrData = addr.Peer.IP.To4() + } else { + peerAddrData = addr.Peer.IP.To16() + } + } else { + peerAddrData = localAddrData + } + + addressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData) + req.AddData(addressData) + + if addr.Flags != 0 { + if addr.Flags <= 0xff { + msg.IfAddrmsg.Flags = uint8(addr.Flags) + } else { + b := make([]byte, 4) + native.PutUint32(b, uint32(addr.Flags)) + flagsData := nl.NewRtAttr(unix.IFA_FLAGS, b) + req.AddData(flagsData) + } + } + + if family == FAMILY_V4 { + // Automatically set the broadcast address if it is unset and the + // subnet is large enough to sensibly have one (/30 or larger). + // See: RFC 3021 + if addr.Broadcast == nil && prefixlen < 31 { + calcBroadcast := make(net.IP, masklen/8) + for i := range localAddrData { + calcBroadcast[i] = localAddrData[i] | ^mask[i] + } + addr.Broadcast = calcBroadcast + } + + if addr.Broadcast != nil { + req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + } + + if addr.Label != "" { + labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + req.AddData(labelData) + } + } + + // 0 is the default value for these attributes. However, 0 means "expired", while the least-surprising default + // value should be "forever". 
To compensate for that, only add the attributes if at least one of the values is + // non-zero, which means the caller has explicitly set them + if addr.ValidLft > 0 || addr.PreferedLft > 0 { + cachedata := nl.IfaCacheInfo{unix.IfaCacheinfo{ + Valid: uint32(addr.ValidLft), + Prefered: uint32(addr.PreferedLft), + }} + req.AddData(nl.NewRtAttr(unix.IFA_CACHEINFO, cachedata.Serialize())) + } + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// AddrList gets a list of IP addresses in the system. +// Equivalent to: `ip addr show`. +// The list can be filtered by link and ip family. +func AddrList(link Link, family int) ([]Addr, error) { + return pkgHandle.AddrList(link, family) +} + +// AddrList gets a list of IP addresses in the system. +// Equivalent to: `ip addr show`. +// The list can be filtered by link and ip family. +func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { + req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) + msg := nl.NewIfAddrmsg(family) + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) + if err != nil { + return nil, err + } + + indexFilter := 0 + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + indexFilter = base.Index + } + + var res []Addr + for _, m := range msgs { + addr, msgFamily, err := parseAddr(m) + if err != nil { + return res, err + } + + if link != nil && addr.LinkIndex != indexFilter { + // Ignore messages from other interfaces + continue + } + + if family != FAMILY_ALL && msgFamily != family { + continue + } + + res = append(res, addr) + } + + return res, nil +} + +func parseAddr(m []byte) (addr Addr, family int, err error) { + msg := nl.DeserializeIfAddrmsg(m) + + family = -1 + addr.LinkIndex = -1 + + attrs, err1 := nl.ParseRouteAttr(m[msg.Len():]) + if err1 != nil { + err = err1 + return + } + + family = int(msg.Family) + addr.LinkIndex = int(msg.Index) + + var local, dst *net.IPNet + for _, attr := range attrs { + switch attr.Attr.Type { + case unix.IFA_ADDRESS: + dst = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), + } + case unix.IFA_LOCAL: + // iproute2 manual: + // If a peer address is specified, the local address + // cannot have a prefix length. The network prefix is + // associated with the peer rather than with the local + // address. + n := 8 * len(attr.Value) + local = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(n, n), + } + case unix.IFA_BROADCAST: + addr.Broadcast = attr.Value + case unix.IFA_LABEL: + addr.Label = string(attr.Value[:len(attr.Value)-1]) + case unix.IFA_FLAGS: + addr.Flags = int(native.Uint32(attr.Value[0:4])) + case unix.IFA_CACHEINFO: + ci := nl.DeserializeIfaCacheInfo(attr.Value) + addr.PreferedLft = int(ci.Prefered) + addr.ValidLft = int(ci.Valid) + } + } + + // libnl addr.c comment: + // IPv6 sends the local address as IFA_ADDRESS with no + // IFA_LOCAL, IPv4 sends both IFA_LOCAL and IFA_ADDRESS + // with IFA_ADDRESS being the peer address if they differ + // + // But obviously, as there are IPv6 PtP addresses, too, + // IFA_LOCAL should also be handled for IPv6. 
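+	// Accordingly, prefer IFA_LOCAL as the address when it is present and
+	// treat a differing IFA_ADDRESS as the peer; otherwise fall back to
+	// IFA_ADDRESS alone.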
+ if local != nil { + if family == FAMILY_V4 && dst != nil && local.IP.Equal(dst.IP) { + addr.IPNet = dst + } else { + addr.IPNet = local + addr.Peer = dst + } + } else { + addr.IPNet = dst + } + + addr.Scope = int(msg.Scope) + + return +} + +type AddrUpdate struct { + LinkAddress net.IPNet + LinkIndex int + Flags int + Scope int + PreferedLft int + ValidLft int + NewAddr bool // true=added false=deleted +} + +// AddrSubscribe takes a chan down which notifications will be sent +// when addresses change. Close the 'done' chan to stop subscription. +func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error { + return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) +} + +// AddrSubscribeAt works like AddrSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). +func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error { + return addrSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) +} + +// AddrSubscribeOptions contains a set of options to use with +// AddrSubscribeWithOptions. +type AddrSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval +} + +// AddrSubscribeWithOptions works like AddrSubscribe but enables the caller to +// provide additional options that modify the behavior. Currently, the +// namespace can be provided as well as an error callback. +func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) +} + +func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvBufForce bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) + if err != nil { + return err + } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvBufForce) + if err != nil { + return err + } + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + if listExisting { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETADDR, + unix.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(infmsg) + if err := s.Send(req); err != nil { + return err + } + } + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + if cberr != nil { + cberr(fmt.Errorf("Receive failed: %v", + err)) + } + return + } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } + for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + continue + } + if cberr != nil { + cberr(fmt.Errorf("error message: %v", + syscall.Errno(-error))) + } + continue + } + msgType := m.Header.Type + if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { + if cberr != nil { + cberr(fmt.Errorf("bad message
type: %d", msgType)) + } + continue + } + + addr, _, err := parseAddr(m.Data) + if err != nil { + if cberr != nil { + cberr(fmt.Errorf("could not parse address: %v", err)) + } + continue + } + + ch <- AddrUpdate{LinkAddress: *addr.IPNet, + LinkIndex: addr.LinkIndex, + NewAddr: msgType == unix.RTM_NEWADDR, + Flags: addr.Flags, + Scope: addr.Scope, + PreferedLft: addr.PreferedLft, + ValidLft: addr.ValidLft} + } + } + }() + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/bpf_linux.go b/vendor/github.com/vishvananda/netlink/bpf_linux.go new file mode 100644 index 000000000..96befbfe0 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/bpf_linux.go @@ -0,0 +1,77 @@ +package netlink + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +type BpfProgType uint32 + +const ( + BPF_PROG_TYPE_UNSPEC BpfProgType = iota + BPF_PROG_TYPE_SOCKET_FILTER + BPF_PROG_TYPE_KPROBE + BPF_PROG_TYPE_SCHED_CLS + BPF_PROG_TYPE_SCHED_ACT + BPF_PROG_TYPE_TRACEPOINT + BPF_PROG_TYPE_XDP + BPF_PROG_TYPE_PERF_EVENT + BPF_PROG_TYPE_CGROUP_SKB + BPF_PROG_TYPE_CGROUP_SOCK + BPF_PROG_TYPE_LWT_IN + BPF_PROG_TYPE_LWT_OUT + BPF_PROG_TYPE_LWT_XMIT + BPF_PROG_TYPE_SOCK_OPS + BPF_PROG_TYPE_SK_SKB + BPF_PROG_TYPE_CGROUP_DEVICE + BPF_PROG_TYPE_SK_MSG + BPF_PROG_TYPE_RAW_TRACEPOINT + BPF_PROG_TYPE_CGROUP_SOCK_ADDR + BPF_PROG_TYPE_LWT_SEG6LOCAL + BPF_PROG_TYPE_LIRC_MODE2 + BPF_PROG_TYPE_SK_REUSEPORT + BPF_PROG_TYPE_FLOW_DISSECTOR + BPF_PROG_TYPE_CGROUP_SYSCTL + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE + BPF_PROG_TYPE_CGROUP_SOCKOPT + BPF_PROG_TYPE_TRACING + BPF_PROG_TYPE_STRUCT_OPS + BPF_PROG_TYPE_EXT + BPF_PROG_TYPE_LSM + BPF_PROG_TYPE_SK_LOOKUP +) + +type BPFAttr struct { + ProgType uint32 + InsnCnt uint32 + Insns uintptr + License uintptr + LogLevel uint32 + LogSize uint32 + LogBuf uintptr + KernVersion uint32 +} + +// loadSimpleBpf loads a trivial bpf program for testing purposes. +func loadSimpleBpf(progType BpfProgType, ret uint32) (int, error) { + insns := []uint64{ + 0x00000000000000b7 | (uint64(ret) << 32), + 0x0000000000000095, + } + license := []byte{'A', 'S', 'L', '2', '\x00'} + attr := BPFAttr{ + ProgType: uint32(progType), + InsnCnt: uint32(len(insns)), + Insns: uintptr(unsafe.Pointer(&insns[0])), + License: uintptr(unsafe.Pointer(&license[0])), + } + fd, _, errno := unix.Syscall(unix.SYS_BPF, + 5, /* bpf cmd */ + uintptr(unsafe.Pointer(&attr)), + unsafe.Sizeof(attr)) + if errno != 0 { + return 0, errno + } + return int(fd), nil +} diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go new file mode 100644 index 000000000..6c340b0ce --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -0,0 +1,149 @@ +package netlink + +import ( + "fmt" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// BridgeVlanList gets a map of device id to bridge vlan infos. +// Equivalent to: `bridge vlan show` +func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { + return pkgHandle.BridgeVlanList() +} + +// BridgeVlanList gets a map of device id to bridge vlan infos. 
+// Equivalent to: `bridge vlan show` +func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req.AddData(msg) + req.AddData(nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if err != nil { + return nil, err + } + ret := make(map[int32][]*nl.BridgeVlanInfo) + for _, m := range msgs { + msg := nl.DeserializeIfInfomsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case unix.IFLA_AF_SPEC: + //nested attr + nestAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, fmt.Errorf("failed to parse nested attr %v", err) + } + for _, nestAttr := range nestAttrs { + switch nestAttr.Attr.Type { + case nl.IFLA_BRIDGE_VLAN_INFO: + vlanInfo := nl.DeserializeBridgeVlanInfo(nestAttr.Value) + ret[msg.Index] = append(ret[msg.Index], vlanInfo) + } + } + } + } + } + return ret, nil +} + +// BridgeVlanAdd adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanAdd(link, vid, pvid, untagged, self, master) +} + +// BridgeVlanAdd adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, 0, pvid, untagged, self, master) +} + +// BridgeVlanAddRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanAddRange(link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanAddRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanDel deletes a vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanDel(link, vid, pvid, untagged, self, master) +} + +// BridgeVlanDel deletes a vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, 0, pvid, untagged, self, master) +} + +// BridgeVlanDelRange deletes a vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return pkgHandle.BridgeVlanDelRange(link, vid, vidEnd, pvid, untagged, self, master) +} + +// BridgeVlanDelRange deletes a vlan filter entry +// Equivalent to: `bridge vlan
del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` +func (h *Handle) BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, pvid, untagged, self, master) +} + +func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + msg.Index = int32(base.Index) + req.AddData(msg) + + br := nl.NewRtAttr(unix.IFLA_AF_SPEC, nil) + var flags uint16 + if self { + flags |= nl.BRIDGE_FLAGS_SELF + } + if master { + flags |= nl.BRIDGE_FLAGS_MASTER + } + if flags > 0 { + br.AddRtAttr(nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags)) + } + vlanInfo := &nl.BridgeVlanInfo{Vid: vid} + if pvid { + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID + } + if untagged { + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED + } + + if vidEnd != 0 { + vlanEndInfo := &nl.BridgeVlanInfo{Vid: vidEnd} + vlanEndInfo.Flags = vlanInfo.Flags + + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_BEGIN + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + + vlanEndInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_END + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanEndInfo.Serialize()) + } else { + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + } + + req.AddData(br) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} diff --git a/vendor/github.com/vishvananda/netlink/chain.go b/vendor/github.com/vishvananda/netlink/chain.go new file mode 100644 index 000000000..1d1c144e9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain.go @@ -0,0 +1,22 @@ +package netlink + +import ( + "fmt" +) + +// Chain contains the attributes of a Chain +type Chain struct { + Parent uint32 + Chain uint32 +} + +func (c Chain) String() string { + return fmt.Sprintf("{Parent: %d, Chain: %d}", c.Parent, c.Chain) +} + +func NewChain(parent uint32, chain uint32) Chain { + return Chain{ + Parent: parent, + Chain: chain, + } +} diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go new file mode 100644 index 000000000..d9f441613 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain_linux.go @@ -0,0 +1,112 @@ +package netlink + +import ( + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// ChainDel will delete a chain from the system. +func ChainDel(link Link, chain Chain) error { + // Equivalent to: `tc chain del $chain` + return pkgHandle.ChainDel(link, chain) +} + +// ChainDel will delete a chain from the system. +// Equivalent to: `tc chain del $chain` +func (h *Handle) ChainDel(link Link, chain Chain) error { + return h.chainModify(unix.RTM_DELCHAIN, 0, link, chain) +} + +// ChainAdd will add a chain to the system. +// Equivalent to: `tc chain add` +func ChainAdd(link Link, chain Chain) error { + return pkgHandle.ChainAdd(link, chain) +} + +// ChainAdd will add a chain to the system. 
+// Equivalent to: `tc chain add` +func (h *Handle) ChainAdd(link Link, chain Chain) error { + return h.chainModify( + unix.RTM_NEWCHAIN, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, + link, + chain) +} + +func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error { + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: chain.Parent, + } + req.AddData(msg) + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(chain.Chain))) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func ChainList(link Link, parent uint32) ([]Chain, error) { + return pkgHandle.ChainList(link, parent) +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { + req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: parent, + } + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) + if err != nil { + return nil, err + } + + var res []Chain + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip chains from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + var chain Chain + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_CHAIN: + chain.Chain = native.Uint32(attr.Value) + chain.Parent = parent + } + } + res = append(res, chain) + } + + return res, nil +} diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go new file mode 100644 index 000000000..e686f6745 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/class.go @@ -0,0 +1,241 @@ +package netlink + +import ( + "fmt" +) + +// Class is the interface implemented by all classes +type Class interface { + Attrs() *ClassAttrs + Type() string +} + +// Generic networking statistics for netlink users. +// This file contains "gnet_" prefixed structs and relevant functions. +// See Documentation/networking/gen_stats.txt in Linux source code for more details. + +// GnetStatsBasic Ref: struct gnet_stats_basic { ... } +type GnetStatsBasic struct { + Bytes uint64 // number of seen bytes + Packets uint32 // number of seen packets +} + +// GnetStatsRateEst Ref: struct gnet_stats_rate_est { ... } +type GnetStatsRateEst struct { + Bps uint32 // current byte rate + Pps uint32 // current packet rate +} + +// GnetStatsRateEst64 Ref: struct gnet_stats_rate_est64 { ... } +type GnetStatsRateEst64 struct { + Bps uint64 // current byte rate + Pps uint64 // current packet rate +} + +// GnetStatsQueue Ref: struct gnet_stats_queue { ... } +type GnetStatsQueue struct { + Qlen uint32 // queue length + Backlog uint32 // backlog size of queue + Drops uint32 // number of dropped packets + Requeues uint32 // number of requeues + Overlimits uint32 // number of enqueues over the limit +} + +// ClassStatistics representation based on generic networking statistics for netlink.
+// See Documentation/networking/gen_stats.txt in Linux source code for more details. +type ClassStatistics struct { + Basic *GnetStatsBasic + Queue *GnetStatsQueue + RateEst *GnetStatsRateEst + BasicHw *GnetStatsBasic // Hardware statistics, added in kernel 4.20 +} + +// NewClassStatistics constructs a ClassStatistics struct whose fields are all initialized to 0. +func NewClassStatistics() *ClassStatistics { + return &ClassStatistics{ + Basic: &GnetStatsBasic{}, + Queue: &GnetStatsQueue{}, + RateEst: &GnetStatsRateEst{}, + BasicHw: &GnetStatsBasic{}, + } +} + +// ClassAttrs represents a netlink class. A class is associated with a link, +// has a handle and a parent. The root class of a device should have a +// parent == HANDLE_ROOT. +type ClassAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Leaf uint32 + Statistics *ClassStatistics +} + +func (q ClassAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf) +} + +// HtbClassAttrs stores the attributes of an HTB class +type HtbClassAttrs struct { + // TODO handle all attributes + Rate uint64 + Ceil uint64 + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func (q HtbClassAttrs) String() string { + return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer) +} + +// HtbClass represents an Htb class +type HtbClass struct { + ClassAttrs + Rate uint64 + Ceil uint64 + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func (q HtbClass) String() string { + return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer) +} + +// Attrs returns the class attributes +func (q *HtbClass) Attrs() *ClassAttrs { + return &q.ClassAttrs +} + +// Type returns the class type +func (q *HtbClass) Type() string { + return "htb" +} + +// GenericClass classes represent types that are not currently understood +// by this netlink library. +type GenericClass struct { + ClassAttrs + ClassType string +} + +// Attrs returns the class attributes +func (class *GenericClass) Attrs() *ClassAttrs { + return &class.ClassAttrs +} + +// Type returns the class type +func (class *GenericClass) Type() string { + return class.ClassType +} + +// ServiceCurve is a nondecreasing function of some time unit, returning the amount of service +// (an allowed or allocated amount of bandwidth) at some specific point in time. Its purpose +// should be intuitively obvious: if a class was allowed to transfer not less than the amount +// specified by its service curve, then the service curve is not violated. +type ServiceCurve struct { + m1 uint32 + d uint32 + m2 uint32 +} + +// Attrs returns the parameters of the service curve +func (c *ServiceCurve) Attrs() (uint32, uint32, uint32) { + return c.m1, c.d, c.m2 +} + +// Burst returns the burst rate (m1) of the curve +func (c *ServiceCurve) Burst() uint32 { + return c.m1 +} + +// Delay returns the delay (d) of the curve +func (c *ServiceCurve) Delay() uint32 { + return c.d +} + +// Rate returns the rate (m2) of the curve +func (c *ServiceCurve) Rate() uint32 { + return c.m2 +} + +// HfscClass is a representation of the HFSC class +type HfscClass struct { + ClassAttrs + Rsc ServiceCurve + Fsc ServiceCurve + Usc ServiceCurve +} + +// SetUsc sets the USC curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds.
+func (hfsc *HfscClass) SetUsc(m1 uint32, d uint32, m2 uint32) { + hfsc.Usc = ServiceCurve{m1: m1, d: d, m2: m2} +} + +// SetFsc sets the Fsc curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. +func (hfsc *HfscClass) SetFsc(m1 uint32, d uint32, m2 uint32) { + hfsc.Fsc = ServiceCurve{m1: m1, d: d, m2: m2} +} + +// SetRsc sets the Rsc curve. The bandwidth (m1 and m2) is specified in bits and the delay in +// seconds. +func (hfsc *HfscClass) SetRsc(m1 uint32, d uint32, m2 uint32) { + hfsc.Rsc = ServiceCurve{m1: m1, d: d, m2: m2} +} + +// SetSC implements the SC from the `tc` CLI. This function behaves the same as if one would set the +// SC through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. +func (hfsc *HfscClass) SetSC(m1 uint32, d uint32, m2 uint32) { + hfsc.SetRsc(m1, d, m2) + hfsc.SetFsc(m1, d, m2) +} + +// SetUL implements the UL from the `tc` CLI. This function behaves the same as if one would set the +// UL through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. +func (hfsc *HfscClass) SetUL(m1 uint32, d uint32, m2 uint32) { + hfsc.SetUsc(m1, d, m2) +} + +// SetLS implements the LS from the `tc` CLI. This function behaves the same as if one would set the +// LS through the `tc` command-line tool. This means bandwidth (m1 and m2) is specified in bits and +// the delay in ms. +func (hfsc *HfscClass) SetLS(m1 uint32, d uint32, m2 uint32) { + hfsc.SetFsc(m1, d, m2) +} + +// NewHfscClass returns a new HFSC struct with the set parameters +func NewHfscClass(attrs ClassAttrs) *HfscClass { + return &HfscClass{ + ClassAttrs: attrs, + Rsc: ServiceCurve{}, + Fsc: ServiceCurve{}, + Usc: ServiceCurve{}, + } +} + +// String() returns a string that contains the information and attributes of the HFSC class +func (hfsc *HfscClass) String() string { + return fmt.Sprintf( + "{%s -- {RSC: {m1=%d d=%d m2=%d}} {FSC: {m1=%d d=%d m2=%d}} {USC: {m1=%d d=%d m2=%d}}}", + hfsc.Attrs(), hfsc.Rsc.m1*8, hfsc.Rsc.d, hfsc.Rsc.m2*8, hfsc.Fsc.m1*8, hfsc.Fsc.d, hfsc.Fsc.m2*8, hfsc.Usc.m1*8, hfsc.Usc.d, hfsc.Usc.m2*8, + ) +} + +// Attrs returns the Hfsc parameters +func (hfsc *HfscClass) Attrs() *ClassAttrs { + return &hfsc.ClassAttrs +} + +// Type returns the type of the class +func (hfsc *HfscClass) Type() string { + return "hfsc" +} diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go new file mode 100644 index 000000000..a82eb09de --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -0,0 +1,400 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// Internal tc_stats representation in Go struct. +// This is for internal use only, to deserialize the payload of rtattr. +// After the deserialization, this should be converted into the canonical stats +// struct, ClassStatistics, in case of statistics of a class. +// Ref: struct tc_stats { ...
} +type tcStats struct { + Bytes uint64 // Number of enqueued bytes + Packets uint32 // Number of enqueued packets + Drops uint32 // Packets dropped because of lack of resources + Overlimits uint32 // Number of throttle events when this flow goes out of allocated bandwidth + Bps uint32 // Current flow byte rate + Pps uint32 // Current flow packet rate + Qlen uint32 + Backlog uint32 +} + +// NewHtbClass NOTE: function is in here because it uses other Linux functions +func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass { + mtu := 1600 + rate := cattrs.Rate / 8 + ceil := cattrs.Ceil / 8 + buffer := cattrs.Buffer + cbuffer := cattrs.Cbuffer + + if ceil == 0 { + ceil = rate + } + + if buffer == 0 { + buffer = uint32(float64(rate)/Hz() + float64(mtu)) + } + buffer = Xmittime(rate, buffer) + + if cbuffer == 0 { + cbuffer = uint32(float64(ceil)/Hz() + float64(mtu)) + } + cbuffer = Xmittime(ceil, cbuffer) + + return &HtbClass{ + ClassAttrs: attrs, + Rate: rate, + Ceil: ceil, + Buffer: buffer, + Cbuffer: cbuffer, + Level: 0, + Prio: cattrs.Prio, + Quantum: cattrs.Quantum, + } +} + +// ClassDel will delete a class from the system. +// Equivalent to: `tc class del $class` +func ClassDel(class Class) error { + return pkgHandle.ClassDel(class) +} + +// ClassDel will delete a class from the system. +// Equivalent to: `tc class del $class` +func (h *Handle) ClassDel(class Class) error { + return h.classModify(unix.RTM_DELTCLASS, 0, class) +} + +// ClassChange will change a class in place +// Equivalent to: `tc class change $class` +// The parent and handle MUST NOT be changed. +func ClassChange(class Class) error { + return pkgHandle.ClassChange(class) +} + +// ClassChange will change a class in place +// Equivalent to: `tc class change $class` +// The parent and handle MUST NOT be changed. +func (h *Handle) ClassChange(class Class) error { + return h.classModify(unix.RTM_NEWTCLASS, 0, class) +} + +// ClassReplace will replace a class in the system. +// Equivalent to: `tc class replace $class` +// The handle MAY be changed. +// If a class already exists with this parent/handle pair, the class is changed. +// If a class does not already exist with this parent/handle, a new class is created. +func ClassReplace(class Class) error { + return pkgHandle.ClassReplace(class) +} + +// ClassReplace will replace a class in the system. +// Equivalent to: `tc class replace $class` +// The handle MAY be changed. +// If a class already exists with this parent/handle pair, the class is changed. +// If a class does not already exist with this parent/handle, a new class is created. +func (h *Handle) ClassReplace(class Class) error { + return h.classModify(unix.RTM_NEWTCLASS, unix.NLM_F_CREATE, class) +} + +// ClassAdd will add a class to the system. +// Equivalent to: `tc class add $class` +func ClassAdd(class Class) error { + return pkgHandle.ClassAdd(class) +} + +// ClassAdd will add a class to the system.
+// Equivalent to: `tc class add $class` +func (h *Handle) ClassAdd(class Class) error { + return h.classModify( + unix.RTM_NEWTCLASS, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, + class, + ) +} + +func (h *Handle) classModify(cmd, flags int, class Class) error { + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + base := class.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + } + req.AddData(msg) + + if cmd != unix.RTM_DELTCLASS { + if err := classPayload(req, class); err != nil { + return err + } + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func classPayload(req *nl.NetlinkRequest, class Class) error { + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(class.Type()))) + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + switch class.Type() { + case "htb": + htb := class.(*HtbClass) + opt := nl.TcHtbCopt{} + opt.Buffer = htb.Buffer + opt.Cbuffer = htb.Cbuffer + opt.Quantum = htb.Quantum + opt.Level = htb.Level + opt.Prio = htb.Prio + // TODO: Handle Debug properly. For now default to 0 + /* Calculate {R,C}Tab and set Rate and Ceil */ + cellLog := -1 + ccellLog := -1 + linklayer := nl.LINKLAYER_ETHERNET + mtu := 1600 + var rtab [256]uint32 + var ctab [256]uint32 + tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)} + if CalcRtable(&tcrate, rtab[:], cellLog, uint32(mtu), linklayer) < 0 { + return errors.New("HTB: failed to calculate rate table") + } + opt.Rate = tcrate + tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)} + if CalcRtable(&tcceil, ctab[:], ccellLog, uint32(mtu), linklayer) < 0 { + return errors.New("HTB: failed to calculate ceil rate table") + } + opt.Ceil = tcceil + options.AddRtAttr(nl.TCA_HTB_PARMS, opt.Serialize()) + options.AddRtAttr(nl.TCA_HTB_RTAB, SerializeRtab(rtab)) + options.AddRtAttr(nl.TCA_HTB_CTAB, SerializeRtab(ctab)) + if htb.Rate >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_HTB_RATE64, nl.Uint64Attr(htb.Rate)) + } + if htb.Ceil >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_HTB_CEIL64, nl.Uint64Attr(htb.Ceil)) + } + case "hfsc": + hfsc := class.(*HfscClass) + opt := nl.HfscCopt{} + rm1, rd, rm2 := hfsc.Rsc.Attrs() + opt.Rsc.Set(rm1/8, rd, rm2/8) + fm1, fd, fm2 := hfsc.Fsc.Attrs() + opt.Fsc.Set(fm1/8, fd, fm2/8) + um1, ud, um2 := hfsc.Usc.Attrs() + opt.Usc.Set(um1/8, ud, um2/8) + options.AddRtAttr(nl.TCA_HFSC_RSC, nl.SerializeHfscCurve(&opt.Rsc)) + options.AddRtAttr(nl.TCA_HFSC_FSC, nl.SerializeHfscCurve(&opt.Fsc)) + options.AddRtAttr(nl.TCA_HFSC_USC, nl.SerializeHfscCurve(&opt.Usc)) + } + req.AddData(options) + return nil +} + +// ClassList gets a list of classes in the system. +// Equivalent to: `tc class show`. +// Generally returns nothing if link and parent are not specified. +func ClassList(link Link, parent uint32) ([]Class, error) { + return pkgHandle.ClassList(link, parent) +} + +// ClassList gets a list of classes in the system. +// Equivalent to: `tc class show`. +// Generally returns nothing if link and parent are not specified. 
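+//
+// A minimal usage sketch (the link name and parent handle are illustrative):
+//
+//	eth0, _ := netlink.LinkByName("eth0")
+//	classes, _ := netlink.ClassList(eth0, netlink.MakeHandle(1, 0))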
+func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { + req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Parent: parent, + } + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + msg.Ifindex = int32(base.Index) + } + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) + if err != nil { + return nil, err + } + + var res []Class + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := ClassAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + Statistics: nil, + } + + var class Class + classType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + classType = string(attr.Value[:len(attr.Value)-1]) + switch classType { + case "htb": + class = &HtbClass{} + case "hfsc": + class = &HfscClass{} + default: + class = &GenericClass{ClassType: classType} + } + case nl.TCA_OPTIONS: + switch classType { + case "htb": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + _, err = parseHtbClassData(class, data) + if err != nil { + return nil, err + } + case "hfsc": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + _, err = parseHfscClassData(class, data) + if err != nil { + return nil, err + } + } + // For backward compatibility. + case nl.TCA_STATS: + base.Statistics, err = parseTcStats(attr.Value) + if err != nil { + return nil, err + } + case nl.TCA_STATS2: + base.Statistics, err = parseTcStats2(attr.Value) + if err != nil { + return nil, err + } + } + } + *class.Attrs() = base + res = append(res, class) + } + + return res, nil +} + +func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { + htb := class.(*HtbClass) + detailed := false + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_HTB_PARMS: + opt := nl.DeserializeTcHtbCopt(datum.Value) + htb.Rate = uint64(opt.Rate.Rate) + htb.Ceil = uint64(opt.Ceil.Rate) + htb.Buffer = opt.Buffer + htb.Cbuffer = opt.Cbuffer + htb.Quantum = opt.Quantum + htb.Level = opt.Level + htb.Prio = opt.Prio + case nl.TCA_HTB_RATE64: + htb.Rate = native.Uint64(datum.Value[0:8]) + case nl.TCA_HTB_CEIL64: + htb.Ceil = native.Uint64(datum.Value[0:8]) + } + } + return detailed, nil +} + +func parseHfscClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { + hfsc := class.(*HfscClass) + detailed := false + for _, datum := range data { + m1, d, m2 := nl.DeserializeHfscCurve(datum.Value).Attrs() + switch datum.Attr.Type { + case nl.TCA_HFSC_RSC: + hfsc.Rsc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} + case nl.TCA_HFSC_FSC: + hfsc.Fsc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} + case nl.TCA_HFSC_USC: + hfsc.Usc = ServiceCurve{m1: m1 * 8, d: d, m2: m2 * 8} + } + } + return detailed, nil +} + +func parseTcStats(data []byte) (*ClassStatistics, error) { + buf := &bytes.Buffer{} + buf.Write(data) + tcStats := &tcStats{} + if err := binary.Read(buf, native, tcStats); err != nil { + return nil, err + } + + stats := NewClassStatistics() + stats.Basic.Bytes = tcStats.Bytes + stats.Basic.Packets = tcStats.Packets + stats.Queue.Qlen = tcStats.Qlen + stats.Queue.Backlog = tcStats.Backlog + stats.Queue.Drops = tcStats.Drops + stats.Queue.Overlimits = tcStats.Overlimits + stats.RateEst.Bps = tcStats.Bps + stats.RateEst.Pps = tcStats.Pps + + return stats, nil +} + +func 
parseGnetStats(data []byte, gnetStats interface{}) error { + buf := &bytes.Buffer{} + buf.Write(data) + return binary.Read(buf, native, gnetStats) +} + +func parseTcStats2(data []byte) (*ClassStatistics, error) { + rtAttrs, err := nl.ParseRouteAttr(data) + if err != nil { + return nil, err + } + stats := NewClassStatistics() + for _, datum := range rtAttrs { + switch datum.Attr.Type { + case nl.TCA_STATS_BASIC: + if err := parseGnetStats(datum.Value, stats.Basic); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.Basic with: %v\n%s", + err, hex.Dump(datum.Value)) + } + case nl.TCA_STATS_QUEUE: + if err := parseGnetStats(datum.Value, stats.Queue); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.Queue with: %v\n%s", + err, hex.Dump(datum.Value)) + } + case nl.TCA_STATS_RATE_EST: + if err := parseGnetStats(datum.Value, stats.RateEst); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.RateEst with: %v\n%s", + err, hex.Dump(datum.Value)) + } + case nl.TCA_STATS_BASIC_HW: + if err := parseGnetStats(datum.Value, stats.BasicHw); err != nil { + return nil, fmt.Errorf("Failed to parse ClassStatistics.BasicHw with: %v\n%s", + err, hex.Dump(datum.Value)) + } + } + } + + return stats, nil +} diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go new file mode 100644 index 000000000..ba022453b --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -0,0 +1,902 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "net" + "time" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// ConntrackTableType Conntrack table for the netlink operation +type ConntrackTableType uint8 + +const ( + // ConntrackTable Conntrack table + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK 1 + ConntrackTable = 1 + // ConntrackExpectTable Conntrack expect table + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2 + ConntrackExpectTable = 2 +) + +const ( + // backward compatibility with golang 1.6 which does not have io.SeekCurrent + seekCurrent = 1 +) + +// InetFamily Family type +type InetFamily uint8 + +// -L [table] [options] List conntrack or expectation table +// -G [table] parameters Get conntrack or expectation + +// -I [table] parameters Create a conntrack or expectation +// -U [table] parameters Update a conntrack +// -E [table] [options] Show events + +// -C [table] Show counter +// -S Show statistics + +// ConntrackTableList returns the flow list of a table of a specific family +// conntrack -L [table] [options] List conntrack or expectation table +func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { + return pkgHandle.ConntrackTableList(table, family) +} + +// ConntrackTableFlush flushes all the flows of a specified table +// conntrack -F [table] Flush table +// The flush operation applies to all the family types +func ConntrackTableFlush(table ConntrackTableType) error { + return pkgHandle.ConntrackTableFlush(table) +} + +// ConntrackCreate creates a new conntrack flow in the desired table +// conntrack -I [table] Create a conntrack or expectation +func ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error { + return pkgHandle.ConntrackCreate(table, family, flow) +} + +// 
ConntrackUpdate updates an existing conntrack flow in the desired table
+// conntrack -U [table] Update a conntrack
+func ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+	return pkgHandle.ConntrackUpdate(table, family, flow)
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table based on the filter
+// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [ConntrackDeleteFilters] instead.
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
+	return pkgHandle.ConntrackDeleteFilters(table, family, filter)
+}
+
+// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
+	return pkgHandle.ConntrackDeleteFilters(table, family, filters...)
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options] List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+	res, err := h.dumpConntrackTable(table, family)
+	if err != nil {
+		return nil, err
+	}
+
+	// Deserialize all the flows
+	var result []*ConntrackFlow
+	for _, dataRaw := range res {
+		result = append(result, parseRawData(dataRaw))
+	}
+
+	return result, nil
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+	req := h.newConntrackRequest(table, unix.AF_INET, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK)
+	_, err := req.Execute(unix.NETLINK_NETFILTER, 0)
+	return err
+}
+
+// ConntrackCreate creates a new conntrack flow in the desired table using the handle
+// conntrack -I [table] Create a conntrack or expectation
+func (h *Handle) ConntrackCreate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+	req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_CREATE)
+	attr, err := flow.toNlData()
+	if err != nil {
+		return err
+	}
+
+	for _, a := range attr {
+		req.AddData(a)
+	}
+
+	_, err = req.Execute(unix.NETLINK_NETFILTER, 0)
+	return err
+}
+
+// ConntrackUpdate updates an existing conntrack flow in the desired table using the handle
+// conntrack -U [table] Update a conntrack
+func (h *Handle) ConntrackUpdate(table ConntrackTableType, family InetFamily, flow *ConntrackFlow) error {
+	req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_NEW, unix.NLM_F_ACK|unix.NLM_F_REPLACE)
+	attr, err := flow.toNlData()
+	if err != nil {
+		return err
+	}
+
+	for _, a := range attr {
+		req.AddData(a)
+	}
+
+	_, err = req.Execute(unix.NETLINK_NETFILTER, 0)
+	return err
+}
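A minimal usage sketch for the filter-based delete path (editorial example, not part of the vendored code). It deletes IPv4 TCP flows from `10.0.0.0/24` to destination port `8080`; note that `AddProtocol` must be called before `AddPort`, since the port filter is only valid once a Layer 4 protocol is known:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	f := &netlink.ConntrackFilter{}
	// The Layer 4 protocol must be set before ports can be filtered.
	if err := f.AddProtocol(unix.IPPROTO_TCP); err != nil {
		log.Fatal(err)
	}
	if err := f.AddPort(netlink.ConntrackOrigDstPort, 8080); err != nil {
		log.Fatal(err)
	}
	_, ipNet, err := net.ParseCIDR("10.0.0.0/24")
	if err != nil {
		log.Fatal(err)
	}
	if err := f.AddIPNet(netlink.ConntrackOrigSrcIP, ipNet); err != nil {
		log.Fatal(err)
	}
	deleted, err := netlink.ConntrackDeleteFilters(netlink.ConntrackTable, unix.AF_INET, f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deleted %d flows\n", deleted)
}
```

+
+// ConntrackDeleteFilter deletes entries on the specified table based on the filter using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [Handle.ConntrackDeleteFilters] instead.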
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { + return h.ConntrackDeleteFilters(table, family, filter) +} + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + res, err := h.dumpConntrackTable(table, family) + if err != nil { + return 0, err + } + + var matched uint + for _, dataRaw := range res { + flow := parseRawData(dataRaw) + for _, filter := range filters { + if match := filter.MatchConntrackFlow(flow); match { + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already + req2.AddRawData(dataRaw[4:]) + req2.Execute(unix.NETLINK_NETFILTER, 0) + matched++ + // flow is already deleted, no need to match on other filters and continue to the next flow. + break + } + } + } + + return matched, nil +} + +func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily, operation, flags int) *nl.NetlinkRequest { + // Create the Netlink request object + req := h.newNetlinkRequest((int(table)<<8)|operation, flags) + // Add the netfilter header + msg := &nl.Nfgenmsg{ + NfgenFamily: uint8(family), + Version: nl.NFNETLINK_V0, + ResId: 0, + } + req.AddData(msg) + return req +} + +func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) { + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, unix.NLM_F_DUMP) + return req.Execute(unix.NETLINK_NETFILTER, 0) +} + +// ProtoInfo wraps an L4-protocol structure - roughly corresponds to the +// __nfct_protoinfo union found in libnetfilter_conntrack/include/internal/object.h. +// Currently, only protocol names, and TCP state is supported. +type ProtoInfo interface { + Protocol() string +} + +// ProtoInfoTCP corresponds to the `tcp` struct of the __nfct_protoinfo union. +// Only TCP state is currently supported. +type ProtoInfoTCP struct { + State uint8 +} +// Protocol returns "tcp". +func (*ProtoInfoTCP) Protocol() string {return "tcp"} +func (p *ProtoInfoTCP) toNlData() ([]*nl.RtAttr, error) { + ctProtoInfo := nl.NewRtAttr(unix.NLA_F_NESTED | nl.CTA_PROTOINFO, []byte{}) + ctProtoInfoTCP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_PROTOINFO_TCP, []byte{}) + ctProtoInfoTCPState := nl.NewRtAttr(nl.CTA_PROTOINFO_TCP_STATE, nl.Uint8Attr(p.State)) + ctProtoInfoTCP.AddChild(ctProtoInfoTCPState) + ctProtoInfo.AddChild(ctProtoInfoTCP) + + return []*nl.RtAttr{ctProtoInfo}, nil +} + +// ProtoInfoSCTP only supports the protocol name. +type ProtoInfoSCTP struct {} +// Protocol returns "sctp". +func (*ProtoInfoSCTP) Protocol() string {return "sctp"} + +// ProtoInfoDCCP only supports the protocol name. +type ProtoInfoDCCP struct {} +// Protocol returns "dccp". 
+func (*ProtoInfoDCCP) Protocol() string {return "dccp"} + +// The full conntrack flow structure is very complicated and can be found in the file: +// http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h +// For the time being, the structure below allows to parse and extract the base information of a flow +type IPTuple struct { + Bytes uint64 + DstIP net.IP + DstPort uint16 + Packets uint64 + Protocol uint8 + SrcIP net.IP + SrcPort uint16 +} + +// toNlData generates the inner fields of a nested tuple netlink datastructure +// does not generate the "nested"-flagged outer message. +func (t *IPTuple) toNlData(family uint8) ([]*nl.RtAttr, error) { + + var srcIPsFlag, dstIPsFlag int + if family == nl.FAMILY_V4 { + srcIPsFlag = nl.CTA_IP_V4_SRC + dstIPsFlag = nl.CTA_IP_V4_DST + } else if family == nl.FAMILY_V6 { + srcIPsFlag = nl.CTA_IP_V6_SRC + dstIPsFlag = nl.CTA_IP_V6_DST + } else { + return []*nl.RtAttr{}, fmt.Errorf("couldn't generate netlink message for tuple due to unrecognized FamilyType '%d'", family) + } + + ctTupleIP := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_IP, nil) + ctTupleIPSrc := nl.NewRtAttr(srcIPsFlag, t.SrcIP) + ctTupleIP.AddChild(ctTupleIPSrc) + ctTupleIPDst := nl.NewRtAttr(dstIPsFlag, t.DstIP) + ctTupleIP.AddChild(ctTupleIPDst) + + ctTupleProto := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_PROTO, nil) + ctTupleProtoNum := nl.NewRtAttr(nl.CTA_PROTO_NUM, []byte{t.Protocol}) + ctTupleProto.AddChild(ctTupleProtoNum) + ctTupleProtoSrcPort := nl.NewRtAttr(nl.CTA_PROTO_SRC_PORT, nl.BEUint16Attr(t.SrcPort)) + ctTupleProto.AddChild(ctTupleProtoSrcPort) + ctTupleProtoDstPort := nl.NewRtAttr(nl.CTA_PROTO_DST_PORT, nl.BEUint16Attr(t.DstPort)) + ctTupleProto.AddChild(ctTupleProtoDstPort, ) + + return []*nl.RtAttr{ctTupleIP, ctTupleProto}, nil +} + +type ConntrackFlow struct { + FamilyType uint8 + Forward IPTuple + Reverse IPTuple + Mark uint32 + Zone uint16 + TimeStart uint64 + TimeStop uint64 + TimeOut uint32 + Labels []byte + ProtoInfo ProtoInfo +} + +func (s *ConntrackFlow) String() string { + // conntrack cmd output: + // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 labels=0x00000000050012ac4202010000000000 zone=100 + // start=2019-07-26 01:26:21.557800506 +0000 UTC stop=1970-01-01 00:00:00 +0000 UTC timeout=30(sec) + start := time.Unix(0, int64(s.TimeStart)) + stop := time.Unix(0, int64(s.TimeStop)) + timeout := int32(s.TimeOut) + res := fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x ", + nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol, + s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, s.Forward.Packets, s.Forward.Bytes, + s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes, + s.Mark) + if len(s.Labels) > 0 { + res += fmt.Sprintf("labels=0x%x ", s.Labels) + } + if s.Zone != 0 { + res += fmt.Sprintf("zone=%d ", s.Zone) + } + res += fmt.Sprintf("start=%v stop=%v timeout=%d(sec)", start, stop, timeout) + return res +} + +// toNlData generates netlink messages representing the flow. 
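Before the serializer below, a sketch of how a flow is assembled and inserted with `ConntrackCreate` (editorial example; the addresses, ports, and the TCP state value `3`, the kernel's ESTABLISHED conntrack state, are illustrative assumptions):

```go
package main

import (
	"log"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

func main() {
	flow := &netlink.ConntrackFlow{
		FamilyType: unix.AF_INET,
		Forward: netlink.IPTuple{
			SrcIP:    net.ParseIP("10.0.0.1").To4(),
			DstIP:    net.ParseIP("10.0.0.2").To4(),
			SrcPort:  33000,
			DstPort:  80,
			Protocol: unix.IPPROTO_TCP,
		},
		Reverse: netlink.IPTuple{
			SrcIP:    net.ParseIP("10.0.0.2").To4(),
			DstIP:    net.ParseIP("10.0.0.1").To4(),
			SrcPort:  80,
			DstPort:  33000,
			Protocol: unix.IPPROTO_TCP,
		},
		// TCP flows are expected to carry a protoinfo state; 3 corresponds
		// to ESTABLISHED in the kernel's TCP conntrack enum (assumption).
		ProtoInfo: &netlink.ProtoInfoTCP{State: 3},
		TimeOut:   120,
	}
	if err := netlink.ConntrackCreate(netlink.ConntrackTable, unix.AF_INET, flow); err != nil {
		log.Fatal(err)
	}
}
```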
+func (s *ConntrackFlow) toNlData() ([]*nl.RtAttr, error) {
+	var payload []*nl.RtAttr
+	// The message is built as a list of nested netlink attributes:
+	// CTA_TUPLE_ORIG and CTA_TUPLE_REPLY (each nesting CTA_TUPLE_IP with the
+	// source/destination addresses and CTA_TUPLE_PROTO with the protocol
+	// number and ports), followed by CTA_MARK, CTA_TIMEOUT and, optionally,
+	// CTA_PROTOINFO.
+
+	// CTA_TUPLE_ORIG
+	ctTupleOrig := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_ORIG, nil)
+	forwardFlowAttrs, err := s.Forward.toNlData(s.FamilyType)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't generate netlink data for conntrack forward flow: %w", err)
+	}
+	for _, a := range forwardFlowAttrs {
+		ctTupleOrig.AddChild(a)
+	}
+
+	// CTA_TUPLE_REPLY
+	ctTupleReply := nl.NewRtAttr(unix.NLA_F_NESTED|nl.CTA_TUPLE_REPLY, nil)
+	reverseFlowAttrs, err := s.Reverse.toNlData(s.FamilyType)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't generate netlink data for conntrack reverse flow: %w", err)
+	}
+	for _, a := range reverseFlowAttrs {
+		ctTupleReply.AddChild(a)
+	}
+
+	ctMark := nl.NewRtAttr(nl.CTA_MARK, nl.BEUint32Attr(s.Mark))
+	ctTimeout := nl.NewRtAttr(nl.CTA_TIMEOUT, nl.BEUint32Attr(s.TimeOut))
+
+	payload = append(payload, ctTupleOrig, ctTupleReply, ctMark, ctTimeout)
+
+	if s.ProtoInfo != nil {
+		switch p := s.ProtoInfo.(type) {
+		case *ProtoInfoTCP:
+			attrs, err := p.toNlData()
+			if err != nil {
+				return nil, fmt.Errorf("couldn't generate netlink data for conntrack flow's TCP protoinfo: %w", err)
+			}
+			payload = append(payload, attrs...)
+		default:
+			return nil, errors.New("couldn't generate netlink data for conntrack: field 'ProtoInfo' only supports TCP or nil")
+		}
+	}
+
+	return payload, nil
+}
+
+// parseIpTuple parses the ip tuple structure.
+// The message is a pair of nested attributes: CTA_TUPLE_IP, holding the
+// source and destination IP attributes, followed by CTA_TUPLE_PROTO, holding
+// the protocol number and the source and destination ports.
+func parseIpTuple(reader *bytes.Reader, tpl *IPTuple) uint8 {
+	for i := 0; i < 2; i++ {
+		_, t, _, v := parseNfAttrTLV(reader)
+		switch t {
+		case nl.CTA_IP_V4_SRC, nl.CTA_IP_V6_SRC:
+			tpl.SrcIP = v
+		case nl.CTA_IP_V4_DST, nl.CTA_IP_V6_DST:
+			tpl.DstIP = v
+		}
+	}
+	// Get total length of nested protocol-specific info.
+	_, _, protoInfoTotalLen := parseNfAttrTL(reader)
+	_, t, l, v := parseNfAttrTLV(reader)
+	// Track the number of bytes read.
+	protoInfoBytesRead := uint16(nl.SizeofNfattr) + l
+	if t == nl.CTA_PROTO_NUM {
+		tpl.Protocol = uint8(v[0])
+	}
+	// We only parse TCP & UDP headers. Skip the others.
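+	// Everything inside CTA_TUPLE_PROTO is TLV-encoded; protoInfoTotalLen is
+	// the length of the whole nested attribute and protoInfoBytesRead counts
+	// what has been consumed so far, so the final Seek can jump over any
+	// attributes (and padding) this parser does not understand.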
+ if tpl.Protocol != unix.IPPROTO_TCP && tpl.Protocol != unix.IPPROTO_UDP { + // skip the rest + bytesRemaining := protoInfoTotalLen - protoInfoBytesRead + reader.Seek(int64(bytesRemaining), seekCurrent) + return tpl.Protocol + } + // Skip 3 bytes of padding + reader.Seek(3, seekCurrent) + protoInfoBytesRead += 3 + for i := 0; i < 2; i++ { + _, t, _ := parseNfAttrTL(reader) + protoInfoBytesRead += uint16(nl.SizeofNfattr) + switch t { + case nl.CTA_PROTO_SRC_PORT: + parseBERaw16(reader, &tpl.SrcPort) + protoInfoBytesRead += 2 + case nl.CTA_PROTO_DST_PORT: + parseBERaw16(reader, &tpl.DstPort) + protoInfoBytesRead += 2 + } + // Skip 2 bytes of padding + reader.Seek(2, seekCurrent) + protoInfoBytesRead += 2 + } + // Skip any remaining/unknown parts of the message + bytesRemaining := protoInfoTotalLen - protoInfoBytesRead + reader.Seek(int64(bytesRemaining), seekCurrent) + + return tpl.Protocol +} + +func parseNfAttrTLV(r *bytes.Reader) (isNested bool, attrType, len uint16, value []byte) { + isNested, attrType, len = parseNfAttrTL(r) + + value = make([]byte, len) + binary.Read(r, binary.BigEndian, &value) + return isNested, attrType, len, value +} + +func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) { + binary.Read(r, nl.NativeEndian(), &len) + len -= nl.SizeofNfattr + + binary.Read(r, nl.NativeEndian(), &attrType) + isNested = (attrType & nl.NLA_F_NESTED) == nl.NLA_F_NESTED + attrType = attrType & (nl.NLA_F_NESTED - 1) + return isNested, attrType, len +} + +// skipNfAttrValue seeks `r` past attr of length `len`. +// Maintains buffer alignment. +// Returns length of the seek performed. +func skipNfAttrValue(r *bytes.Reader, len uint16) uint16 { + len = (len + nl.NLA_ALIGNTO - 1) & ^(nl.NLA_ALIGNTO - 1) + r.Seek(int64(len), seekCurrent) + return len +} + +func parseBERaw16(r *bytes.Reader, v *uint16) { + binary.Read(r, binary.BigEndian, v) +} + +func parseBERaw32(r *bytes.Reader, v *uint32) { + binary.Read(r, binary.BigEndian, v) +} + +func parseBERaw64(r *bytes.Reader, v *uint64) { + binary.Read(r, binary.BigEndian, v) +} + +func parseRaw32(r *bytes.Reader, v *uint32) { + binary.Read(r, nl.NativeEndian(), v) +} + +func parseByteAndPacketCounters(r *bytes.Reader) (bytes, packets uint64) { + for i := 0; i < 2; i++ { + switch _, t, _ := parseNfAttrTL(r); t { + case nl.CTA_COUNTERS_BYTES: + parseBERaw64(r, &bytes) + case nl.CTA_COUNTERS_PACKETS: + parseBERaw64(r, &packets) + default: + return + } + } + return +} + +// when the flow is alive, only the timestamp_start is returned in structure +func parseTimeStamp(r *bytes.Reader, readSize uint16) (tstart, tstop uint64) { + var numTimeStamps int + oneItem := nl.SizeofNfattr + 8 // 4 bytes attr header + 8 bytes timestamp + if readSize == uint16(oneItem) { + numTimeStamps = 1 + } else if readSize == 2*uint16(oneItem) { + numTimeStamps = 2 + } else { + return + } + for i := 0; i < numTimeStamps; i++ { + switch _, t, _ := parseNfAttrTL(r); t { + case nl.CTA_TIMESTAMP_START: + parseBERaw64(r, &tstart) + case nl.CTA_TIMESTAMP_STOP: + parseBERaw64(r, &tstop) + default: + return + } + } + return + +} + +func parseProtoInfoTCPState(r *bytes.Reader) (s uint8) { + binary.Read(r, binary.BigEndian, &s) + r.Seek(nl.SizeofNfattr - 1, seekCurrent) + return s +} + +// parseProtoInfoTCP reads the entire nested protoinfo structure, but only parses the state attr. 
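+// Unknown attributes are skipped with skipNfAttrValue, which rounds the
+// length up to the 4-byte netlink alignment so the reader stays on an
+// attribute boundary.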
+func parseProtoInfoTCP(r *bytes.Reader, attrLen uint16) (*ProtoInfoTCP) { + p := new(ProtoInfoTCP) + bytesRead := 0 + for bytesRead < int(attrLen) { + _, t, l := parseNfAttrTL(r) + bytesRead += nl.SizeofNfattr + + switch t { + case nl.CTA_PROTOINFO_TCP_STATE: + p.State = parseProtoInfoTCPState(r) + bytesRead += nl.SizeofNfattr + default: + bytesRead += int(skipNfAttrValue(r, l)) + } + } + + return p +} + +func parseProtoInfo(r *bytes.Reader, attrLen uint16) (p ProtoInfo) { + bytesRead := 0 + for bytesRead < int(attrLen) { + _, t, l := parseNfAttrTL(r) + bytesRead += nl.SizeofNfattr + + switch t { + case nl.CTA_PROTOINFO_TCP: + p = parseProtoInfoTCP(r, l) + bytesRead += int(l) + // No inner fields of DCCP / SCTP currently supported. + case nl.CTA_PROTOINFO_DCCP: + p = new(ProtoInfoDCCP) + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + case nl.CTA_PROTOINFO_SCTP: + p = new(ProtoInfoSCTP) + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + default: + skipped := skipNfAttrValue(r, l) + bytesRead += int(skipped) + } + } + + return p +} + +func parseTimeOut(r *bytes.Reader) (ttimeout uint32) { + parseBERaw32(r, &ttimeout) + return +} + +func parseConnectionMark(r *bytes.Reader) (mark uint32) { + parseBERaw32(r, &mark) + return +} + +func parseConnectionLabels(r *bytes.Reader) (label []byte) { + label = make([]byte, 16) // netfilter defines 128 bit labels value + binary.Read(r, nl.NativeEndian(), &label) + return +} + +func parseConnectionZone(r *bytes.Reader) (zone uint16) { + parseBERaw16(r, &zone) + r.Seek(2, seekCurrent) + return +} + +func parseRawData(data []byte) *ConntrackFlow { + s := &ConntrackFlow{} + // First there is the Nfgenmsg header + // consume only the family field + reader := bytes.NewReader(data) + binary.Read(reader, nl.NativeEndian(), &s.FamilyType) + + // skip rest of the Netfilter header + reader.Seek(3, seekCurrent) + // The message structure is the following: + // 4 bytes + // 4 bytes + // flow information of the forward flow + // 4 bytes + // 4 bytes + // flow information of the reverse flow + for reader.Len() > 0 { + if nested, t, l := parseNfAttrTL(reader); nested { + switch t { + case nl.CTA_TUPLE_ORIG: + if nested, t, l = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + parseIpTuple(reader, &s.Forward) + } + case nl.CTA_TUPLE_REPLY: + if nested, t, l = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP { + parseIpTuple(reader, &s.Reverse) + } else { + // Header not recognized skip it + skipNfAttrValue(reader, l) + } + case nl.CTA_COUNTERS_ORIG: + s.Forward.Bytes, s.Forward.Packets = parseByteAndPacketCounters(reader) + case nl.CTA_COUNTERS_REPLY: + s.Reverse.Bytes, s.Reverse.Packets = parseByteAndPacketCounters(reader) + case nl.CTA_TIMESTAMP: + s.TimeStart, s.TimeStop = parseTimeStamp(reader, l) + case nl.CTA_PROTOINFO: + s.ProtoInfo = parseProtoInfo(reader, l) + default: + skipNfAttrValue(reader, l) + } + } else { + switch t { + case nl.CTA_MARK: + s.Mark = parseConnectionMark(reader) + case nl.CTA_LABELS: + s.Labels = parseConnectionLabels(reader) + case nl.CTA_TIMEOUT: + s.TimeOut = parseTimeOut(reader) + case nl.CTA_ID, nl.CTA_STATUS, nl.CTA_USE: + skipNfAttrValue(reader, l) + case nl.CTA_ZONE: + s.Zone = parseConnectionZone(reader) + default: + skipNfAttrValue(reader, l) + } + } + } + return s +} + +// Conntrack parameters and options: +// -n, --src-nat ip source NAT ip +// -g, --dst-nat ip destination NAT ip +// -j, --any-nat ip source or destination NAT ip +// -m, --mark mark Set mark +// -c, --secmark secmark Set 
selinux secmark +// -e, --event-mask eventmask Event mask, eg. NEW,DESTROY +// -z, --zero Zero counters while listing +// -o, --output type[,...] Output format, eg. xml +// -l, --label label[,...] conntrack labels + +// Common parameters and options: +// -s, --src, --orig-src ip Source address from original direction +// -d, --dst, --orig-dst ip Destination address from original direction +// -r, --reply-src ip Source address from reply direction +// -q, --reply-dst ip Destination address from reply direction +// -p, --protonum proto Layer 4 Protocol, eg. 'tcp' +// -f, --family proto Layer 3 Protocol, eg. 'ipv6' +// -t, --timeout timeout Set timeout +// -u, --status status Set status, eg. ASSURED +// -w, --zone value Set conntrack zone +// --orig-zone value Set zone for original direction +// --reply-zone value Set zone for reply direction +// -b, --buffer-size Netlink socket buffer size +// --mask-src ip Source mask address +// --mask-dst ip Destination mask address + +// Layer 4 Protocol common parameters and options: +// TCP, UDP, SCTP, UDPLite and DCCP +// --sport, --orig-port-src port Source port in original direction +// --dport, --orig-port-dst port Destination port in original direction + +// Filter types +type ConntrackFilterType uint8 + +const ( + ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction + ConntrackOrigDstIP // -orig-dst ip Destination address from original direction + ConntrackReplySrcIP // --reply-src ip Reply Source IP + ConntrackReplyDstIP // --reply-dst ip Reply Destination IP + ConntrackReplyAnyIP // Match source or destination reply IP + ConntrackOrigSrcPort // --orig-port-src port Source port in original direction + ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction + ConntrackMatchLabels // --label label1,label2 Labels used in entry + ConntrackUnmatchLabels // --label label1,label2 Labels not used in entry + ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP + ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP + ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP +) + +type CustomConntrackFilter interface { + // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches + // the filter or false otherwise + MatchConntrackFlow(flow *ConntrackFlow) bool +} + +type ConntrackFilter struct { + ipNetFilter map[ConntrackFilterType]*net.IPNet + portFilter map[ConntrackFilterType]uint16 + protoFilter uint8 + labelFilter map[ConntrackFilterType][][]byte + zoneFilter *uint16 +} + +// AddIPNet adds a IP subnet to the conntrack filter +func (f *ConntrackFilter) AddIPNet(tp ConntrackFilterType, ipNet *net.IPNet) error { + if ipNet == nil { + return fmt.Errorf("Filter attribute empty") + } + if f.ipNetFilter == nil { + f.ipNetFilter = make(map[ConntrackFilterType]*net.IPNet) + } + if _, ok := f.ipNetFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.ipNetFilter[tp] = ipNet + return nil +} + +// AddIP adds an IP to the conntrack filter +func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error { + if ip == nil { + return fmt.Errorf("Filter attribute empty") + } + return f.AddIPNet(tp, NewIPNet(ip)) +} + +// AddPort adds a Port to the conntrack filter if the Layer 4 protocol allows it +func (f *ConntrackFilter) AddPort(tp ConntrackFilterType, port uint16) error { + switch f.protoFilter { + // TCP, UDP, DCCP, SCTP, UDPLite + case 6, 17, 33, 
132, 136: + default: + return fmt.Errorf("Filter attribute not available without a valid Layer 4 protocol: %d", f.protoFilter) + } + + if f.portFilter == nil { + f.portFilter = make(map[ConntrackFilterType]uint16) + } + if _, ok := f.portFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.portFilter[tp] = port + return nil +} + +// AddProtocol adds the Layer 4 protocol to the conntrack filter +func (f *ConntrackFilter) AddProtocol(proto uint8) error { + if f.protoFilter != 0 { + return errors.New("Filter attribute already present") + } + f.protoFilter = proto + return nil +} + +// AddLabels adds the provided list (zero or more) of labels to the conntrack filter +// ConntrackFilterType here can be either: +// 1. ConntrackMatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` contains ALL the provided labels +// it is considered a match. This can be used when you want to match flows that contain +// one or more labels. +// 2. ConntrackUnmatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` does NOT contain ALL the provided labels +// it is considered a match. This can be used when you want to match flows that don't contain +// one or more labels. +func (f *ConntrackFilter) AddLabels(tp ConntrackFilterType, labels [][]byte) error { + if len(labels) == 0 { + return errors.New("Invalid length for provided labels") + } + if f.labelFilter == nil { + f.labelFilter = make(map[ConntrackFilterType][][]byte) + } + if _, ok := f.labelFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.labelFilter[tp] = labels + return nil +} + +// AddZone adds a zone to the conntrack filter +func (f *ConntrackFilter) AddZone(zone uint16) error { + if f.zoneFilter != nil { + return errors.New("Filter attribute already present") + } + f.zoneFilter = &zone + return nil +} + +// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter +// false otherwise +func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { + if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 && len(f.labelFilter) == 0 && f.zoneFilter == nil { + // empty filter always not match + return false + } + + // -p, --protonum proto Layer 4 Protocol, eg. 
'tcp' + if f.protoFilter != 0 && flow.Forward.Protocol != f.protoFilter { + // different Layer 4 protocol always not match + return false + } + + // Conntrack zone filter + if f.zoneFilter != nil && *f.zoneFilter != flow.Zone { + return false + } + + match := true + + // IP conntrack filter + if len(f.ipNetFilter) > 0 { + // -orig-src ip Source address from original direction + if elem, found := f.ipNetFilter[ConntrackOrigSrcIP]; found { + match = match && elem.Contains(flow.Forward.SrcIP) + } + + // -orig-dst ip Destination address from original direction + if elem, found := f.ipNetFilter[ConntrackOrigDstIP]; match && found { + match = match && elem.Contains(flow.Forward.DstIP) + } + + // -src-nat ip Source NAT ip + if elem, found := f.ipNetFilter[ConntrackReplySrcIP]; match && found { + match = match && elem.Contains(flow.Reverse.SrcIP) + } + + // -dst-nat ip Destination NAT ip + if elem, found := f.ipNetFilter[ConntrackReplyDstIP]; match && found { + match = match && elem.Contains(flow.Reverse.DstIP) + } + + // Match source or destination reply IP + if elem, found := f.ipNetFilter[ConntrackReplyAnyIP]; match && found { + match = match && (elem.Contains(flow.Reverse.SrcIP) || elem.Contains(flow.Reverse.DstIP)) + } + } + + // Layer 4 Port filter + if len(f.portFilter) > 0 { + // -orig-port-src port Source port from original direction + if elem, found := f.portFilter[ConntrackOrigSrcPort]; match && found { + match = match && elem == flow.Forward.SrcPort + } + + // -orig-port-dst port Destination port from original direction + if elem, found := f.portFilter[ConntrackOrigDstPort]; match && found { + match = match && elem == flow.Forward.DstPort + } + } + + // Label filter + if len(f.labelFilter) > 0 { + if len(flow.Labels) > 0 { + // --label label1,label2 in conn entry; + // every label passed should be contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackMatchLabels]; match && found { + for _, label := range elem { + match = match && (bytes.Contains(flow.Labels, label)) + } + } + // --label label1,label2 in conn entry; + // every label passed should be not contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackUnmatchLabels]; match && found { + for _, label := range elem { + match = match && !(bytes.Contains(flow.Labels, label)) + } + } + } else { + // flow doesn't contain labels, so it doesn't contain or notContain any provided matches + match = false + } + } + + return match +} + +var _ CustomConntrackFilter = (*ConntrackFilter)(nil) diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go new file mode 100644 index 000000000..0bfdf422d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go @@ -0,0 +1,72 @@ +// +build !linux + +package netlink + +// ConntrackTableType Conntrack table for the netlink operation +type ConntrackTableType uint8 + +// InetFamily Family type +type InetFamily uint8 + +// ConntrackFlow placeholder +type ConntrackFlow struct{} + +// CustomConntrackFilter placeholder +type CustomConntrackFilter struct{} + +// ConntrackFilter placeholder +type ConntrackFilter struct{} + +// ConntrackTableList returns the flow list of a table of a specific family +// conntrack -L [table] [options] List conntrack or expectation table +func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { + return nil, ErrNotImplemented +} + +// ConntrackTableFlush flushes all 
the flows of a specified table
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func ConntrackTableFlush(table ConntrackTableType) error {
+	return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table based on the filter
+// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [ConntrackDeleteFilters] instead.
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+	return 0, ErrNotImplemented
+}
+
+// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) {
+	return 0, ErrNotImplemented
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options] List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+	return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+	return ErrNotImplemented
+}
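Since every stub in this file returns `ErrNotImplemented`, portable callers can branch on that sentinel. A small editorial sketch (the raw values `1` and `2` stand in for the conntrack table and `AF_INET`, because the named constants are only declared in the Linux build):

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	flows, err := netlink.ConntrackTableList(netlink.ConntrackTableType(1), netlink.InetFamily(2))
	if errors.Is(err, netlink.ErrNotImplemented) {
		fmt.Println("conntrack is only supported on Linux")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d flows\n", len(flows))
}
```

+
+// ConntrackDeleteFilter deletes entries on the specified table based on the filter using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+//
+// Deprecated: use [Handle.ConntrackDeleteFilters] instead.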
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) { + return 0, ErrNotImplemented +} + +// ConntrackDeleteFilters deletes entries on the specified table matching any of the specified filters using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilters(table ConntrackTableType, family InetFamily, filters ...CustomConntrackFilter) (uint, error) { + return 0, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/devlink_linux.go b/vendor/github.com/vishvananda/netlink/devlink_linux.go new file mode 100644 index 000000000..d98801dbb --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/devlink_linux.go @@ -0,0 +1,1159 @@ +package netlink + +import ( + "fmt" + "net" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// DevlinkDevEswitchAttr represents device's eswitch attributes +type DevlinkDevEswitchAttr struct { + Mode string + InlineMode string + EncapMode string +} + +// DevlinkDevAttrs represents device attributes +type DevlinkDevAttrs struct { + Eswitch DevlinkDevEswitchAttr +} + +// DevlinkDevice represents device and its attributes +type DevlinkDevice struct { + BusName string + DeviceName string + Attrs DevlinkDevAttrs +} + +// DevlinkPortFn represents port function and its attributes +type DevlinkPortFn struct { + HwAddr net.HardwareAddr + State uint8 + OpState uint8 +} + +// DevlinkPortFnSetAttrs represents attributes to set +type DevlinkPortFnSetAttrs struct { + FnAttrs DevlinkPortFn + HwAddrValid bool + StateValid bool +} + +// DevlinkPort represents port and its attributes +type DevlinkPort struct { + BusName string + DeviceName string + PortIndex uint32 + PortType uint16 + NetdeviceName string + NetdevIfIndex uint32 + RdmaDeviceName string + PortFlavour uint16 + Fn *DevlinkPortFn +} + +type DevLinkPortAddAttrs struct { + Controller uint32 + SfNumber uint32 + PortIndex uint32 + PfNumber uint16 + SfNumberValid bool + PortIndexValid bool + ControllerValid bool +} + +// DevlinkDeviceInfo represents devlink info +type DevlinkDeviceInfo struct { + Driver string + SerialNumber string + BoardID string + FwApp string + FwAppBoundleID string + FwAppName string + FwBoundleID string + FwMgmt string + FwMgmtAPI string + FwMgmtBuild string + FwNetlist string + FwNetlistBuild string + FwPsidAPI string + FwUndi string +} + +// DevlinkResource represents a device resource +type DevlinkResource struct { + Name string + ID uint64 + Size uint64 + SizeNew uint64 + SizeMin uint64 + SizeMax uint64 + SizeGranularity uint64 + PendingChange bool + Unit uint8 + SizeValid bool + OCCValid bool + OCCSize uint64 + Parent *DevlinkResource + Children []DevlinkResource +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkResource, returns error if occured +func (dlr *DevlinkResource) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error { + var attr syscall.NetlinkRouteAttr + var ok bool + + // mandatory attributes + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_ID] + if !ok { + return fmt.Errorf("missing resource id") + } + dlr.ID = native.Uint64(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_NAME] + if !ok { + return fmt.Errorf("missing resource name") + } + dlr.Name = nl.BytesToString(attr.Value) + + attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE] + if !ok { + return fmt.Errorf("missing resource size") + } + dlr.Size = native.Uint64(attr.Value) + 
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_GRAN]
+	if !ok {
+		return fmt.Errorf("missing resource size granularity")
+	}
+	dlr.SizeGranularity = native.Uint64(attr.Value)
+
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_UNIT]
+	if !ok {
+		return fmt.Errorf("missing resource unit")
+	}
+	dlr.Unit = uint8(attr.Value[0])
+
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MIN]
+	if !ok {
+		return fmt.Errorf("missing resource size min")
+	}
+	dlr.SizeMin = native.Uint64(attr.Value)
+
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_MAX]
+	if !ok {
+		return fmt.Errorf("missing resource size max")
+	}
+	dlr.SizeMax = native.Uint64(attr.Value)
+
+	// optional attributes
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_OCC]
+	if ok {
+		dlr.OCCSize = native.Uint64(attr.Value)
+		dlr.OCCValid = true
+	}
+
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_VALID]
+	if ok {
+		dlr.SizeValid = uint8(attr.Value[0]) != 0
+	}
+
+	dlr.SizeNew = dlr.Size
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_SIZE_NEW]
+	if ok {
+		dlr.SizeNew = native.Uint64(attr.Value)
+	}
+
+	dlr.PendingChange = dlr.Size != dlr.SizeNew
+
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST]
+	if ok {
+		// handle nested resources recursively
+		subResources, err := nl.ParseRouteAttr(attr.Value)
+		if err != nil {
+			return err
+		}
+
+		for _, subresource := range subResources {
+			resource := DevlinkResource{Parent: dlr}
+			attrs, err := nl.ParseRouteAttrAsMap(subresource.Value)
+			if err != nil {
+				return err
+			}
+			err = resource.parseAttributes(attrs)
+			if err != nil {
+				return fmt.Errorf("failed to parse child resource, parent:%s. %w", dlr.Name, err)
+			}
+			dlr.Children = append(dlr.Children, resource)
+		}
+	}
+	return nil
+}
+
+// DevlinkResources represents all devlink resources of a devlink device
+type DevlinkResources struct {
+	Bus       string
+	Device    string
+	Resources []DevlinkResource
+}
+
+// parseAttributes parses provided Netlink Attributes and populates DevlinkResources, returns an error if one occurred
+func (dlrs *DevlinkResources) parseAttributes(attrs map[uint16]syscall.NetlinkRouteAttr) error {
+	var attr syscall.NetlinkRouteAttr
+	var ok bool
+
+	// Bus
+	attr, ok = attrs[nl.DEVLINK_ATTR_BUS_NAME]
+	if !ok {
+		return fmt.Errorf("missing bus name")
+	}
+	dlrs.Bus = nl.BytesToString(attr.Value)
+
+	// Device
+	attr, ok = attrs[nl.DEVLINK_ATTR_DEV_NAME]
+	if !ok {
+		return fmt.Errorf("missing device name")
+	}
+	dlrs.Device = nl.BytesToString(attr.Value)
+
+	// Resource List
+	attr, ok = attrs[nl.DEVLINK_ATTR_RESOURCE_LIST]
+	if !ok {
+		return fmt.Errorf("missing resource list")
+	}
+
+	resourceAttrs, err := nl.ParseRouteAttr(attr.Value)
+	if err != nil {
+		return err
+	}
+
+	for _, resourceAttr := range resourceAttrs {
+		resource := DevlinkResource{}
+		attrs, err := nl.ParseRouteAttrAsMap(resourceAttr.Value)
+		if err != nil {
+			return err
+		}
+		err = resource.parseAttributes(attrs)
+		if err != nil {
+			return fmt.Errorf("failed to parse root resources, %w", err)
+		}
+		dlrs.Resources = append(dlrs.Resources, resource)
+	}
+
+	return nil
+}
+
+// DevlinkParam represents a parameter of the device
+type DevlinkParam struct {
+	Name      string
+	IsGeneric bool
+	Type      uint8 // possible values are in nl.DEVLINK_PARAM_TYPE_* constants
+	Values    []DevlinkParamValue
+}
+
+// DevlinkParamValue contains values of the parameter
+// Data field contains a specific type which can be cast by using info from the DevlinkParam.Type field
+type DevlinkParamValue struct {
+	rawData []byte
+	Data    interface{}
+	CMODE   uint8 // possible values are in nl.DEVLINK_PARAM_CMODE_*
constants +} + +// parseAttributes parses provided Netlink Attributes and populates DevlinkParam, returns error if occured +func (dlp *DevlinkParam) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + var valuesList [][]syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM: + nattrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + for _, nattr := range nattrs { + switch nattr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM_NAME: + dlp.Name = nl.BytesToString(nattr.Value) + case nl.DEVLINK_ATTR_PARAM_GENERIC: + dlp.IsGeneric = true + case nl.DEVLINK_ATTR_PARAM_TYPE: + if len(nattr.Value) == 1 { + dlp.Type = nattr.Value[0] + } + case nl.DEVLINK_ATTR_PARAM_VALUES_LIST: + nnattrs, err := nl.ParseRouteAttr(nattr.Value) + if err != nil { + return err + } + valuesList = append(valuesList, nnattrs) + } + } + } + } + for _, valAttr := range valuesList { + v := DevlinkParamValue{} + if err := v.parseAttributes(valAttr, dlp.Type); err != nil { + return err + } + dlp.Values = append(dlp.Values, v) + } + return nil +} + +func (dlpv *DevlinkParamValue) parseAttributes(attrs []syscall.NetlinkRouteAttr, paramType uint8) error { + for _, attr := range attrs { + nattrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return err + } + var rawData []byte + for _, nattr := range nattrs { + switch nattr.Attr.Type { + case nl.DEVLINK_ATTR_PARAM_VALUE_DATA: + rawData = nattr.Value + case nl.DEVLINK_ATTR_PARAM_VALUE_CMODE: + if len(nattr.Value) == 1 { + dlpv.CMODE = nattr.Value[0] + } + } + } + switch paramType { + case nl.DEVLINK_PARAM_TYPE_U8: + dlpv.Data = uint8(0) + if rawData != nil && len(rawData) == 1 { + dlpv.Data = uint8(rawData[0]) + } + case nl.DEVLINK_PARAM_TYPE_U16: + dlpv.Data = uint16(0) + if rawData != nil { + dlpv.Data = native.Uint16(rawData) + } + case nl.DEVLINK_PARAM_TYPE_U32: + dlpv.Data = uint32(0) + if rawData != nil { + dlpv.Data = native.Uint32(rawData) + } + case nl.DEVLINK_PARAM_TYPE_STRING: + dlpv.Data = "" + if rawData != nil { + dlpv.Data = nl.BytesToString(rawData) + } + case nl.DEVLINK_PARAM_TYPE_BOOL: + dlpv.Data = rawData != nil + } + } + return nil +} + +func parseDevLinkDeviceList(msgs [][]byte) ([]*DevlinkDevice, error) { + devices := make([]*DevlinkDevice, 0, len(msgs)) + for _, m := range msgs { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + dev := &DevlinkDevice{} + if err = dev.parseAttributes(attrs); err != nil { + return nil, err + } + devices = append(devices, dev) + } + return devices, nil +} + +func eswitchStringToMode(modeName string) (uint16, error) { + if modeName == "legacy" { + return nl.DEVLINK_ESWITCH_MODE_LEGACY, nil + } else if modeName == "switchdev" { + return nl.DEVLINK_ESWITCH_MODE_SWITCHDEV, nil + } else { + return 0xffff, fmt.Errorf("invalid switchdev mode") + } +} + +func parseEswitchMode(mode uint16) string { + var eswitchMode = map[uint16]string{ + nl.DEVLINK_ESWITCH_MODE_LEGACY: "legacy", + nl.DEVLINK_ESWITCH_MODE_SWITCHDEV: "switchdev", + } + if eswitchMode[mode] == "" { + return "unknown" + } else { + return eswitchMode[mode] + } +} + +func parseEswitchInlineMode(inlinemode uint8) string { + var eswitchInlineMode = map[uint8]string{ + nl.DEVLINK_ESWITCH_INLINE_MODE_NONE: "none", + nl.DEVLINK_ESWITCH_INLINE_MODE_LINK: "link", + nl.DEVLINK_ESWITCH_INLINE_MODE_NETWORK: "network", + nl.DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: "transport", + } + if eswitchInlineMode[inlinemode] == "" { + return "unknown" + } else 
{ + return eswitchInlineMode[inlinemode] + } +} + +func parseEswitchEncapMode(encapmode uint8) string { + var eswitchEncapMode = map[uint8]string{ + nl.DEVLINK_ESWITCH_ENCAP_MODE_NONE: "disable", + nl.DEVLINK_ESWITCH_ENCAP_MODE_BASIC: "enable", + } + if eswitchEncapMode[encapmode] == "" { + return "unknown" + } else { + return eswitchEncapMode[encapmode] + } +} + +func (d *DevlinkDevice) parseAttributes(attrs []syscall.NetlinkRouteAttr) error { + for _, a := range attrs { + switch a.Attr.Type { + case nl.DEVLINK_ATTR_BUS_NAME: + d.BusName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_DEV_NAME: + d.DeviceName = string(a.Value[:len(a.Value)-1]) + case nl.DEVLINK_ATTR_ESWITCH_MODE: + d.Attrs.Eswitch.Mode = parseEswitchMode(native.Uint16(a.Value)) + case nl.DEVLINK_ATTR_ESWITCH_INLINE_MODE: + d.Attrs.Eswitch.InlineMode = parseEswitchInlineMode(uint8(a.Value[0])) + case nl.DEVLINK_ATTR_ESWITCH_ENCAP_MODE: + d.Attrs.Eswitch.EncapMode = parseEswitchEncapMode(uint8(a.Value[0])) + } + } + return nil +} + +func (dev *DevlinkDevice) parseEswitchAttrs(msgs [][]byte) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return + } + dev.parseAttributes(attrs) +} + +func (h *Handle) getEswitchAttrs(family *GenlFamily, dev *DevlinkDevice) { + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_ESWITCH_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(family.ID), unix.NLM_F_REQUEST|unix.NLM_F_ACK) + req.AddData(msg) + + b := make([]byte, len(dev.BusName)+1) + copy(b, dev.BusName) + data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) + req.AddData(data) + + b = make([]byte, len(dev.DeviceName)+1) + copy(b, dev.DeviceName) + data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) + req.AddData(data) + + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return + } + dev.parseEswitchAttrs(msgs) +} + +// DevLinkGetDeviceList provides a pointer to devlink devices and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetDeviceList() ([]*DevlinkDevice, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, err + } + msg := &nl.Genlmsg{ + Command: nl.DEVLINK_CMD_GET, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP) + req.AddData(msg) + msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + devices, err := parseDevLinkDeviceList(msgs) + if err != nil { + return nil, err + } + for _, d := range devices { + h.getEswitchAttrs(f, d) + } + return devices, nil +} + +// DevLinkGetDeviceList provides a pointer to devlink devices and nil error, +// otherwise returns an error code. 
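As a usage sketch for the device dump plus the eswitch attributes populated above (editorial example; it assumes a devlink-capable NIC is present, and the package-level wrapper follows below):

```go
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	devs, err := netlink.DevLinkGetDeviceList()
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range devs {
		// Eswitch attributes are filled in by the extra DEVLINK_CMD_ESWITCH_GET
		// query that DevLinkGetDeviceList issues per device.
		fmt.Printf("%s/%s mode=%s encap=%s\n",
			d.BusName, d.DeviceName, d.Attrs.Eswitch.Mode, d.Attrs.Eswitch.EncapMode)
	}
}
```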
+func DevLinkGetDeviceList() ([]*DevlinkDevice, error) { + return pkgHandle.DevLinkGetDeviceList() +} + +func parseDevlinkDevice(msgs [][]byte) (*DevlinkDevice, error) { + m := msgs[0] + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + dev := &DevlinkDevice{} + if err = dev.parseAttributes(attrs); err != nil { + return nil, err + } + return dev, nil +} + +func (h *Handle) createCmdReq(cmd uint8, bus string, device string) (*GenlFamily, *nl.NetlinkRequest, error) { + f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME) + if err != nil { + return nil, nil, err + } + + msg := &nl.Genlmsg{ + Command: cmd, + Version: nl.GENL_DEVLINK_VERSION, + } + req := h.newNetlinkRequest(int(f.ID), + unix.NLM_F_REQUEST|unix.NLM_F_ACK) + req.AddData(msg) + + b := make([]byte, len(bus)+1) + copy(b, bus) + data := nl.NewRtAttr(nl.DEVLINK_ATTR_BUS_NAME, b) + req.AddData(data) + + b = make([]byte, len(device)+1) + copy(b, device) + data = nl.NewRtAttr(nl.DEVLINK_ATTR_DEV_NAME, b) + req.AddData(data) + + return f, req, nil +} + +// DevlinkGetDeviceByName provides a pointer to devlink device and nil error, +// otherwise returns an error code. +func (h *Handle) DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) { + f, req, err := h.createCmdReq(nl.DEVLINK_CMD_GET, Bus, Device) + if err != nil { + return nil, err + } + + respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + dev, err := parseDevlinkDevice(respmsg) + if err == nil { + h.getEswitchAttrs(f, dev) + } + return dev, err +} + +// DevlinkGetDeviceByName provides a pointer to devlink device and nil error, +// otherwise returns an error code. +func DevLinkGetDeviceByName(Bus string, Device string) (*DevlinkDevice, error) { + return pkgHandle.DevLinkGetDeviceByName(Bus, Device) +} + +// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or +// returns an error code. +// Equivalent to: `devlink dev eswitch set $dev mode switchdev` +// Equivalent to: `devlink dev eswitch set $dev mode legacy` +func (h *Handle) DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error { + mode, err := eswitchStringToMode(NewMode) + if err != nil { + return err + } + + _, req, err := h.createCmdReq(nl.DEVLINK_CMD_ESWITCH_SET, Dev.BusName, Dev.DeviceName) + if err != nil { + return err + } + + req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_ESWITCH_MODE, nl.Uint16Attr(mode))) + + _, err = req.Execute(unix.NETLINK_GENERIC, 0) + return err +} + +// DevLinkSetEswitchMode sets eswitch mode if able to set successfully or +// returns an error code. 
+// Equivalent to: `devlink dev eswitch set $dev mode switchdev`
+// Equivalent to: `devlink dev eswitch set $dev mode legacy`
+func DevLinkSetEswitchMode(Dev *DevlinkDevice, NewMode string) error {
+	return pkgHandle.DevLinkSetEswitchMode(Dev, NewMode)
+}
+
+func (port *DevlinkPort) parseAttributes(attrs []syscall.NetlinkRouteAttr) error {
+	for _, a := range attrs {
+		switch a.Attr.Type {
+		case nl.DEVLINK_ATTR_BUS_NAME:
+			port.BusName = string(a.Value[:len(a.Value)-1])
+		case nl.DEVLINK_ATTR_DEV_NAME:
+			port.DeviceName = string(a.Value[:len(a.Value)-1])
+		case nl.DEVLINK_ATTR_PORT_INDEX:
+			port.PortIndex = native.Uint32(a.Value)
+		case nl.DEVLINK_ATTR_PORT_TYPE:
+			port.PortType = native.Uint16(a.Value)
+		case nl.DEVLINK_ATTR_PORT_NETDEV_NAME:
+			port.NetdeviceName = string(a.Value[:len(a.Value)-1])
+		case nl.DEVLINK_ATTR_PORT_NETDEV_IFINDEX:
+			port.NetdevIfIndex = native.Uint32(a.Value)
+		case nl.DEVLINK_ATTR_PORT_IBDEV_NAME:
+			port.RdmaDeviceName = string(a.Value[:len(a.Value)-1])
+		case nl.DEVLINK_ATTR_PORT_FLAVOUR:
+			port.PortFlavour = native.Uint16(a.Value)
+		case nl.DEVLINK_ATTR_PORT_FUNCTION:
+			port.Fn = &DevlinkPortFn{}
+			for nested := range nl.ParseAttributes(a.Value) {
+				switch nested.Type {
+				case nl.DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR:
+					port.Fn.HwAddr = nested.Value[:]
+				case nl.DEVLINK_PORT_FN_ATTR_STATE:
+					port.Fn.State = uint8(nested.Value[0])
+				case nl.DEVLINK_PORT_FN_ATTR_OPSTATE:
+					port.Fn.OpState = uint8(nested.Value[0])
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func parseDevLinkAllPortList(msgs [][]byte) ([]*DevlinkPort, error) {
+	ports := make([]*DevlinkPort, 0, len(msgs))
+	for _, m := range msgs {
+		attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+		if err != nil {
+			return nil, err
+		}
+		port := &DevlinkPort{}
+		if err = port.parseAttributes(attrs); err != nil {
+			return nil, err
+		}
+		ports = append(ports, port)
+	}
+	return ports, nil
+}
+
+// DevLinkGetAllPortList provides a pointer to devlink ports and nil error,
+// otherwise returns an error code.
+func (h *Handle) DevLinkGetAllPortList() ([]*DevlinkPort, error) {
+	f, err := h.GenlFamilyGet(nl.GENL_DEVLINK_NAME)
+	if err != nil {
+		return nil, err
+	}
+	msg := &nl.Genlmsg{
+		Command: nl.DEVLINK_CMD_PORT_GET,
+		Version: nl.GENL_DEVLINK_VERSION,
+	}
+	req := h.newNetlinkRequest(int(f.ID),
+		unix.NLM_F_REQUEST|unix.NLM_F_ACK|unix.NLM_F_DUMP)
+	req.AddData(msg)
+	msgs, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+	ports, err := parseDevLinkAllPortList(msgs)
+	if err != nil {
+		return nil, err
+	}
+	return ports, nil
+}
+
+// DevLinkGetAllPortList provides a pointer to devlink ports and nil error,
+// otherwise returns an error code.
+func DevLinkGetAllPortList() ([]*DevlinkPort, error) {
+	return pkgHandle.DevLinkGetAllPortList()
+}
+
+func parseDevlinkPortMsg(msgs [][]byte) (*DevlinkPort, error) {
+	m := msgs[0]
+	attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+	if err != nil {
+		return nil, err
+	}
+	port := &DevlinkPort{}
+	if err = port.parseAttributes(attrs); err != nil {
+		return nil, err
+	}
+	return port, nil
+}
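A short editorial sketch of the port dump (no arguments; it lists every devlink port in the system):

```go
package main

import (
	"fmt"
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	ports, err := netlink.DevLinkGetAllPortList()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range ports {
		fmt.Printf("%s/%s port %d flavour %d netdev %q\n",
			p.BusName, p.DeviceName, p.PortIndex, p.PortFlavour, p.NetdeviceName)
		// The port function block is only present for ports that expose one.
		if p.Fn != nil {
			fmt.Printf("  function hwaddr %s state %d\n", p.Fn.HwAddr, p.Fn.State)
		}
	}
}
```

+
+// DevLinkGetPortByIndex provides a pointer to a devlink port and nil error,
+// otherwise returns an error code.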
+func (h *Handle) DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) {
+
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_GET, Bus, Device)
+	if err != nil {
+		return nil, err
+	}
+
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex)))
+
+	respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+	port, err := parseDevlinkPortMsg(respmsg)
+	return port, err
+}
+
+// DevlinkGetDeviceResources returns devlink device resources
+func DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) {
+	return pkgHandle.DevlinkGetDeviceResources(bus, device)
+}
+
+// DevlinkGetDeviceResources returns devlink device resources
+func (h *Handle) DevlinkGetDeviceResources(bus string, device string) (*DevlinkResources, error) {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_RESOURCE_DUMP, bus, device)
+	if err != nil {
+		return nil, err
+	}
+
+	respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	var resources DevlinkResources
+	for _, m := range respmsg {
+		attrs, err := nl.ParseRouteAttrAsMap(m[nl.SizeofGenlmsg:])
+		if err != nil {
+			return nil, err
+		}
+		resources.parseAttributes(attrs)
+	}
+
+	return &resources, nil
+}
+
+// DevlinkGetDeviceParams returns parameters for devlink device
+// Equivalent to: `devlink dev param show <bus-name>/<device-name>`
+func (h *Handle) DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device)
+	if err != nil {
+		return nil, err
+	}
+	req.Flags |= unix.NLM_F_DUMP
+	respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+	var params []*DevlinkParam
+	for _, m := range respmsg {
+		attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:])
+		if err != nil {
+			return nil, err
+		}
+		p := &DevlinkParam{}
+		if err := p.parseAttributes(attrs); err != nil {
+			return nil, err
+		}
+		params = append(params, p)
+	}
+
+	return params, nil
+}
+
+// DevlinkGetDeviceParams returns parameters for devlink device
+// Equivalent to: `devlink dev param show <bus-name>/<device-name>`
+func DevlinkGetDeviceParams(bus string, device string) ([]*DevlinkParam, error) {
+	return pkgHandle.DevlinkGetDeviceParams(bus, device)
+}
+
+// DevlinkGetDeviceParamByName returns a specific parameter for devlink device
+// Equivalent to: `devlink dev param show <bus-name>/<device-name> name <param-name>`
+func (h *Handle) DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_GET, bus, device)
+	if err != nil {
+		return nil, err
+	}
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param)))
+	respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+	if len(respmsg) == 0 {
+		return nil, fmt.Errorf("unexpected response")
+	}
+	attrs, err := nl.ParseRouteAttr(respmsg[0][nl.SizeofGenlmsg:])
+	if err != nil {
+		return nil, err
+	}
+	p := &DevlinkParam{}
+	if err := p.parseAttributes(attrs); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// DevlinkGetDeviceParamByName returns a specific parameter for devlink device
+// Equivalent to: `devlink dev param show <bus-name>/<device-name> name <param-name>`
+func DevlinkGetDeviceParamByName(bus string, device string, param string) (*DevlinkParam, error) {
+	return pkgHandle.DevlinkGetDeviceParamByName(bus, device, param)
+}
+
+// DevlinkSetDeviceParam sets a specific parameter for a devlink device
+// Equivalent to: `devlink dev param set <bus-name>/<device-name> name <param-name> cmode <cmode> value <value>`
+// cmode argument should contain a valid cmode value as uint8, modes are defined in nl.DEVLINK_PARAM_CMODE_* constants
+// value argument should have one of the following types: uint8, uint16, uint32, string, bool
+func (h *Handle) DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error {
+	// retrieve the param type
+	p, err := h.DevlinkGetDeviceParamByName(bus, device, param)
+	if err != nil {
+		return fmt.Errorf("failed to get device param: %v", err)
+	}
+	paramType := p.Type
+
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PARAM_SET, bus, device)
+	if err != nil {
+		return err
+	}
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_TYPE, nl.Uint8Attr(paramType)))
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_NAME, nl.ZeroTerminated(param)))
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_CMODE, nl.Uint8Attr(cmode)))
+
+	var valueAsBytes []byte
+	switch paramType {
+	case nl.DEVLINK_PARAM_TYPE_U8:
+		v, ok := value.(uint8)
+		if !ok {
+			return fmt.Errorf("unexpected value type required: uint8, actual: %T", value)
+		}
+		valueAsBytes = nl.Uint8Attr(v)
+	case nl.DEVLINK_PARAM_TYPE_U16:
+		v, ok := value.(uint16)
+		if !ok {
+			return fmt.Errorf("unexpected value type required: uint16, actual: %T", value)
+		}
+		valueAsBytes = nl.Uint16Attr(v)
+	case nl.DEVLINK_PARAM_TYPE_U32:
+		v, ok := value.(uint32)
+		if !ok {
+			return fmt.Errorf("unexpected value type required: uint32, actual: %T", value)
+		}
+		valueAsBytes = nl.Uint32Attr(v)
+	case nl.DEVLINK_PARAM_TYPE_STRING:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected value type required: string, actual: %T", value)
+		}
+		valueAsBytes = nl.ZeroTerminated(v)
+	case nl.DEVLINK_PARAM_TYPE_BOOL:
+		v, ok := value.(bool)
+		if !ok {
+			return fmt.Errorf("unexpected value type required: bool, actual: %T", value)
+		}
+		if v {
+			valueAsBytes = []byte{}
+		}
+	default:
+		return fmt.Errorf("unsupported parameter type: %d", paramType)
+	}
+	if valueAsBytes != nil {
+		req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PARAM_VALUE_DATA, valueAsBytes))
+	}
+	_, err = req.Execute(unix.NETLINK_GENERIC, 0)
+	return err
+}
+
+// DevlinkSetDeviceParam sets a specific parameter for a devlink device
+// Equivalent to: `devlink dev param set <bus-name>/<device-name> name <param-name> cmode <cmode> value <value>`
+// cmode argument should contain a valid cmode value as uint8, modes are defined in nl.DEVLINK_PARAM_CMODE_* constants
+// value argument should have one of the following types: uint8, uint16, uint32, string, bool
+func DevlinkSetDeviceParam(bus string, device string, param string, cmode uint8, value interface{}) error {
+	return pkgHandle.DevlinkSetDeviceParam(bus, device, param, cmode, value)
+}
+
+// DevLinkGetPortByIndex provides a pointer to a devlink port and nil error,
+// otherwise returns an error code.
+func DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) {
+	return pkgHandle.DevLinkGetPortByIndex(Bus, Device, PortIndex)
+}
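A hedged sketch of setting a device parameter (editorial example: the `pci/0000:03:00.0` address and the generic `enable_sriov` parameter name are assumptions; the cmode constants live in the library's public `nl` subpackage). The parameter's wire type is discovered from the device first, so only the Go value's dynamic type has to line up:

```go
package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	// Assumption: a PCI device at 0000:03:00.0 exposing the generic
	// "enable_sriov" boolean parameter in driverinit cmode.
	err := netlink.DevlinkSetDeviceParam("pci", "0000:03:00.0", "enable_sriov",
		nl.DEVLINK_PARAM_CMODE_DRIVERINIT, true)
	if err != nil {
		log.Fatal(err)
	}
}
```

+
+// DevLinkPortAdd adds a devlink port and returns a port on success
+// otherwise returns nil port and an error code.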
+// DevLinkGetPortByIndex provides a pointer to devlink port and nil error,
+// otherwise returns an error code.
+func DevLinkGetPortByIndex(Bus string, Device string, PortIndex uint32) (*DevlinkPort, error) {
+	return pkgHandle.DevLinkGetPortByIndex(Bus, Device, PortIndex)
+}
+
+// DevLinkPortAdd adds a devlink port and returns a port on success,
+// otherwise returns nil port and an error code.
+func (h *Handle) DevLinkPortAdd(Bus string, Device string, Flavour uint16, Attrs DevLinkPortAddAttrs) (*DevlinkPort, error) {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_NEW, Bus, Device)
+	if err != nil {
+		return nil, err
+	}
+
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_FLAVOUR, nl.Uint16Attr(Flavour)))
+
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_PCI_PF_NUMBER, nl.Uint16Attr(Attrs.PfNumber)))
+	if Flavour == nl.DEVLINK_PORT_FLAVOUR_PCI_SF && Attrs.SfNumberValid {
+		req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_PCI_SF_NUMBER, nl.Uint32Attr(Attrs.SfNumber)))
+	}
+	if Attrs.PortIndexValid {
+		req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(Attrs.PortIndex)))
+	}
+	if Attrs.ControllerValid {
+		req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, nl.Uint32Attr(Attrs.Controller)))
+	}
+	respmsg, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+	port, err := parseDevlinkPortMsg(respmsg)
+	return port, err
+}
+
+// DevLinkPortAdd adds a devlink port and returns a port on success,
+// otherwise returns nil port and an error code.
+func DevLinkPortAdd(Bus string, Device string, Flavour uint16, Attrs DevLinkPortAddAttrs) (*DevlinkPort, error) {
+	return pkgHandle.DevLinkPortAdd(Bus, Device, Flavour, Attrs)
+}
+
+// DevLinkPortDel deletes a devlink port and returns success or error code.
+func (h *Handle) DevLinkPortDel(Bus string, Device string, PortIndex uint32) error {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_DEL, Bus, Device)
+	if err != nil {
+		return err
+	}
+
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex)))
+	_, err = req.Execute(unix.NETLINK_GENERIC, 0)
+	return err
+}
+
+// DevLinkPortDel deletes a devlink port and returns success or error code.
+func DevLinkPortDel(Bus string, Device string, PortIndex uint32) error {
+	return pkgHandle.DevLinkPortDel(Bus, Device, PortIndex)
+}
+
+// DevlinkPortFnSet sets one or more port function attributes specified by the attribute mask.
+// It returns 0 on success or error code.
+func (h *Handle) DevlinkPortFnSet(Bus string, Device string, PortIndex uint32, FnAttrs DevlinkPortFnSetAttrs) error {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_PORT_SET, Bus, Device)
+	if err != nil {
+		return err
+	}
+
+	req.AddData(nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_INDEX, nl.Uint32Attr(PortIndex)))
+
+	fnAttr := nl.NewRtAttr(nl.DEVLINK_ATTR_PORT_FUNCTION|unix.NLA_F_NESTED, nil)
+
+	if FnAttrs.HwAddrValid {
+		fnAttr.AddRtAttr(nl.DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, []byte(FnAttrs.FnAttrs.HwAddr))
+	}
+
+	if FnAttrs.StateValid {
+		fnAttr.AddRtAttr(nl.DEVLINK_PORT_FN_ATTR_STATE, nl.Uint8Attr(FnAttrs.FnAttrs.State))
+	}
+	req.AddData(fnAttr)
+
+	_, err = req.Execute(unix.NETLINK_GENERIC, 0)
+	return err
+}
+
+// DevlinkPortFnSet sets one or more port function attributes specified by the attribute mask.
+// It returns 0 on success or error code.
+func DevlinkPortFnSet(Bus string, Device string, PortIndex uint32, FnAttrs DevlinkPortFnSetAttrs) error {
+	return pkgHandle.DevlinkPortFnSet(Bus, Device, PortIndex, FnAttrs)
+}
+
+// devlinkInfoGetter is a function responsible for getting the devlink info message;
+// it is introduced for test purposes.
+type devlinkInfoGetter func(bus, device string) ([]byte, error)
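Editor's note (not part of the patch): a hedged sketch of the port lifecycle above — creating a PCI SF port and assigning its function a MAC address. The bus/device, SF number, and MAC are made up, and the `DevlinkPortFn`/`PortIndex` field names are assumptions based on the accessors used in this file.

    // Create a PCI SF port on PF 0.
    attrs := netlink.DevLinkPortAddAttrs{
        PfNumber:      0,
        SfNumber:      5,
        SfNumberValid: true,
    }
    port, err := netlink.DevLinkPortAdd("pci", "0000:03:00.0", nl.DEVLINK_PORT_FLAVOUR_PCI_SF, attrs)
    if err != nil {
        log.Fatal(err)
    }

    // Give the new port function a hardware address.
    hw, _ := net.ParseMAC("00:11:22:33:44:55")
    fn := netlink.DevlinkPortFnSetAttrs{
        FnAttrs:     netlink.DevlinkPortFn{HwAddr: hw},
        HwAddrValid: true,
    }
    if err := netlink.DevlinkPortFnSet("pci", "0000:03:00.0", port.PortIndex, fn); err != nil {
        log.Fatal(err)
    }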
+// DevlinkGetDeviceInfoByName returns devlink info for the selected device,
+// otherwise returns an error code.
+// Equivalent to: `devlink dev info $dev`
+func (h *Handle) DevlinkGetDeviceInfoByName(Bus string, Device string, getInfoMsg devlinkInfoGetter) (*DevlinkDeviceInfo, error) {
+	info, err := h.DevlinkGetDeviceInfoByNameAsMap(Bus, Device, getInfoMsg)
+	if err != nil {
+		return nil, err
+	}
+
+	return parseInfoData(info), nil
+}
+
+// DevlinkGetDeviceInfoByName returns devlink info for the selected device,
+// otherwise returns an error code.
+// Equivalent to: `devlink dev info $dev`
+func DevlinkGetDeviceInfoByName(Bus string, Device string) (*DevlinkDeviceInfo, error) {
+	return pkgHandle.DevlinkGetDeviceInfoByName(Bus, Device, pkgHandle.getDevlinkInfoMsg)
+}
+
+// DevlinkGetDeviceInfoByNameAsMap returns devlink info for the selected device as a map,
+// otherwise returns an error code.
+// Equivalent to: `devlink dev info $dev`
+func (h *Handle) DevlinkGetDeviceInfoByNameAsMap(Bus string, Device string, getInfoMsg devlinkInfoGetter) (map[string]string, error) {
+	response, err := getInfoMsg(Bus, Device)
+	if err != nil {
+		return nil, err
+	}
+
+	info, err := parseInfoMsg(response)
+	if err != nil {
+		return nil, err
+	}
+
+	return info, nil
+}
+
+// DevlinkGetDeviceInfoByNameAsMap returns devlink info for the selected device as a map,
+// otherwise returns an error code.
+// Equivalent to: `devlink dev info $dev`
+func DevlinkGetDeviceInfoByNameAsMap(Bus string, Device string) (map[string]string, error) {
+	return pkgHandle.DevlinkGetDeviceInfoByNameAsMap(Bus, Device, pkgHandle.getDevlinkInfoMsg)
+}
+
+// GetDevlinkInfo returns devlink info for the target device,
+// otherwise returns an error code.
+func (d *DevlinkDevice) GetDevlinkInfo() (*DevlinkDeviceInfo, error) {
+	return pkgHandle.DevlinkGetDeviceInfoByName(d.BusName, d.DeviceName, pkgHandle.getDevlinkInfoMsg)
+}
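Editor's note (not part of the patch): a short usage sketch of the info API above — the device name is hypothetical; the map form keeps any version keys that the typed struct does not model.

    package main

    import (
        "fmt"

        "github.com/vishvananda/netlink"
    )

    func main() {
        // Equivalent to `devlink dev info pci/0000:03:00.0` (hypothetical device).
        info, err := netlink.DevlinkGetDeviceInfoByName("pci", "0000:03:00.0")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println("driver:", info.Driver, "serial:", info.SerialNumber, "fw.mgmt:", info.FwMgmt)

        m, _ := netlink.DevlinkGetDeviceInfoByNameAsMap("pci", "0000:03:00.0")
        for k, v := range m {
            fmt.Println(k, "=", v)
        }
    }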
+// GetDevlinkInfoAsMap returns devlink info for the target device as a map,
+// otherwise returns an error code.
+func (d *DevlinkDevice) GetDevlinkInfoAsMap() (map[string]string, error) {
+	return pkgHandle.DevlinkGetDeviceInfoByNameAsMap(d.BusName, d.DeviceName, pkgHandle.getDevlinkInfoMsg)
+}
+
+func (h *Handle) getDevlinkInfoMsg(bus, device string) ([]byte, error) {
+	_, req, err := h.createCmdReq(nl.DEVLINK_CMD_INFO_GET, bus, device)
+	if err != nil {
+		return nil, err
+	}
+
+	response, err := req.Execute(unix.NETLINK_GENERIC, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(response) < 1 {
+		return nil, fmt.Errorf("getDevlinkInfoMsg: message too short")
+	}
+
+	return response[0], nil
+}
+
+func parseInfoMsg(msg []byte) (map[string]string, error) {
+	if len(msg) < nl.SizeofGenlmsg {
+		return nil, fmt.Errorf("parseInfoMsg: message too short")
+	}
+
+	info := make(map[string]string)
+	err := collectInfoData(msg[nl.SizeofGenlmsg:], info)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return info, nil
+}
+
+func collectInfoData(msg []byte, data map[string]string) error {
+	attrs, err := nl.ParseRouteAttr(msg)
+	if err != nil {
+		return err
+	}
+
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case nl.DEVLINK_ATTR_INFO_DRIVER_NAME:
+			data["driver"] = parseInfoValue(attr.Value)
+		case nl.DEVLINK_ATTR_INFO_SERIAL_NUMBER:
+			data["serialNumber"] = parseInfoValue(attr.Value)
+		case nl.DEVLINK_ATTR_INFO_VERSION_RUNNING, nl.DEVLINK_ATTR_INFO_VERSION_FIXED,
+			nl.DEVLINK_ATTR_INFO_VERSION_STORED:
+			key, value, err := getNestedInfoData(attr.Value)
+			if err != nil {
+				return err
+			}
+			data[key] = value
+		}
+	}
+
+	if len(data) == 0 {
+		return fmt.Errorf("collectInfoData: could not read attributes")
+	}
+
+	return nil
+}
+
+func getNestedInfoData(msg []byte) (string, string, error) {
+	nestedAttrs, err := nl.ParseRouteAttr(msg)
+
+	var key, value string
+
+	if err != nil {
+		return "", "", err
+	}
+
+	if len(nestedAttrs) != 2 {
+		return "", "", fmt.Errorf("getNestedInfoData: too few attributes in nested structure")
+	}
+
+	for _, nestedAttr := range nestedAttrs {
+		switch nestedAttr.Attr.Type {
+		case nl.DEVLINK_ATTR_INFO_VERSION_NAME:
+			key = parseInfoValue(nestedAttr.Value)
+		case nl.DEVLINK_ATTR_INFO_VERSION_VALUE:
+			value = parseInfoValue(nestedAttr.Value)
+		}
+	}
+
+	if key == "" {
+		return "", "", fmt.Errorf("getNestedInfoData: key not found")
+	}
+
+	if value == "" {
+		return "", "", fmt.Errorf("getNestedInfoData: value not found")
+	}
+
+	return key, value, nil
+}
+
+func parseInfoData(data map[string]string) *DevlinkDeviceInfo {
+	info := new(DevlinkDeviceInfo)
+	for key, value := range data {
+		switch key {
+		case "driver":
+			info.Driver = value
+		case "serialNumber":
+			info.SerialNumber = value
+		case "board.id":
+			info.BoardID = value
+		case "fw.app":
+			info.FwApp = value
+		case "fw.app.bundle_id":
+			info.FwAppBoundleID = value
+		case "fw.app.name":
+			info.FwAppName = value
+		case "fw.bundle_id":
+			info.FwBoundleID = value
+		case "fw.mgmt":
+			info.FwMgmt = value
+		case "fw.mgmt.api":
+			info.FwMgmtAPI = value
+		case "fw.mgmt.build":
+			info.FwMgmtBuild = value
+		case "fw.netlist":
+			info.FwNetlist = value
+		case "fw.netlist.build":
+			info.FwNetlistBuild = value
+		case "fw.psid.api":
+			info.FwPsidAPI = value
+		case "fw.undi":
+			info.FwUndi = value
+		}
+	}
+	return info
+}
+
+func parseInfoValue(value []byte) string {
+	v := strings.ReplaceAll(string(value), "\x00", "")
+	return strings.TrimSpace(v)
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
new file mode 100644
index 000000000..84e1ca7a4
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -0,0 +1,462 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+)
+
+type Filter interface {
+	Attrs() *FilterAttrs
+	Type() string
+}
+
+// FilterAttrs represents a netlink filter. A filter is associated with a link,
+// has a handle and a parent. The root filter of a device should have a
+// parent == HANDLE_ROOT.
+type FilterAttrs struct {
+	LinkIndex int
+	Handle    uint32
+	Parent    uint32
+	Priority  uint16 // lower is higher priority
+	Protocol  uint16 // unix.ETH_P_*
+	Chain     *uint32
+}
+
+func (q FilterAttrs) String() string {
+	return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Priority: %d, Protocol: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Priority, q.Protocol)
+}
+
+type TcAct int32
+
+const (
+	TC_ACT_EXT_SHIFT    = 28
+	TC_ACT_EXT_VAL_MASK = (1 << TC_ACT_EXT_SHIFT) - 1
+)
+
+const (
+	TC_ACT_UNSPEC     TcAct = -1
+	TC_ACT_OK         TcAct = 0
+	TC_ACT_RECLASSIFY TcAct = 1
+	TC_ACT_SHOT       TcAct = 2
+	TC_ACT_PIPE       TcAct = 3
+	TC_ACT_STOLEN     TcAct = 4
+	TC_ACT_QUEUED     TcAct = 5
+	TC_ACT_REPEAT     TcAct = 6
+	TC_ACT_REDIRECT   TcAct = 7
+	TC_ACT_JUMP       TcAct = 0x10000000
+)
+
+func getTcActExt(local int32) int32 {
+	return local << TC_ACT_EXT_SHIFT
+}
+
+func getTcActGotoChain() TcAct {
+	return TcAct(getTcActExt(2))
+}
+
+func getTcActExtOpcode(combined int32) int32 {
+	return combined & (^TC_ACT_EXT_VAL_MASK)
+}
+
+func TcActExtCmp(combined int32, opcode int32) bool {
+	return getTcActExtOpcode(combined) == opcode
+}
+
+func (a TcAct) String() string {
+	switch a {
+	case TC_ACT_UNSPEC:
+		return "unspec"
+	case TC_ACT_OK:
+		return "ok"
+	case TC_ACT_RECLASSIFY:
+		return "reclassify"
+	case TC_ACT_SHOT:
+		return "shot"
+	case TC_ACT_PIPE:
+		return "pipe"
+	case TC_ACT_STOLEN:
+		return "stolen"
+	case TC_ACT_QUEUED:
+		return "queued"
+	case TC_ACT_REPEAT:
+		return "repeat"
+	case TC_ACT_REDIRECT:
+		return "redirect"
+	case TC_ACT_JUMP:
+		return "jump"
+	}
+	if TcActExtCmp(int32(a), int32(getTcActGotoChain())) {
+		return "goto"
+	}
+	return fmt.Sprintf("0x%x", int32(a))
+}
+
+type TcPolAct int32
+
+const (
+	TC_POLICE_UNSPEC     TcPolAct = TcPolAct(TC_ACT_UNSPEC)
+	TC_POLICE_OK         TcPolAct = TcPolAct(TC_ACT_OK)
+	TC_POLICE_RECLASSIFY TcPolAct = TcPolAct(TC_ACT_RECLASSIFY)
+	TC_POLICE_SHOT       TcPolAct = TcPolAct(TC_ACT_SHOT)
+	TC_POLICE_PIPE       TcPolAct = TcPolAct(TC_ACT_PIPE)
+)
+
+func (a TcPolAct) String() string {
+	switch a {
+	case TC_POLICE_UNSPEC:
+		return "unspec"
+	case TC_POLICE_OK:
+		return "ok"
+	case TC_POLICE_RECLASSIFY:
+		return "reclassify"
+	case TC_POLICE_SHOT:
+		return "shot"
+	case TC_POLICE_PIPE:
+		return "pipe"
+	}
+	return fmt.Sprintf("0x%x", int32(a))
+}
+
+type ActionAttrs struct {
+	Index      int
+	Capab      int
+	Action     TcAct
+	Refcnt     int
+	Bindcnt    int
+	Statistics *ActionStatistic
+	Timestamp  *ActionTimestamp
+}
+
+func (q ActionAttrs) String() string {
+	return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt)
+}
+
+type ActionTimestamp struct {
+	Installed uint64
+	LastUsed  uint64
+	Expires   uint64
+	FirstUsed uint64
+}
+
+func (t ActionTimestamp) String() string {
+	return fmt.Sprintf("Installed %d LastUsed %d Expires %d FirstUsed %d", t.Installed, t.LastUsed, t.Expires, t.FirstUsed)
+}
+
+type ActionStatistic ClassStatistics
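Editor's note (not part of the patch): a small sketch of how the extended-opcode helpers above are meant to be used — checking whether an action value decoded from the kernel is a "goto chain" opcode. The value here is illustrative; "goto chain" is encoded as extended opcode 2 in the top bits, with the target chain in the low 28 bits (`TC_ACT_EXT_VAL_MASK`), which is exactly how the `gact` parser later in this patch extracts `Chain`.

    package main

    import (
        "fmt"

        "github.com/vishvananda/netlink"
    )

    func main() {
        gotoChain := int32(2 << netlink.TC_ACT_EXT_SHIFT)
        combined := gotoChain | 7 // hypothetical "goto chain 7" from the kernel

        if netlink.TcActExtCmp(combined, gotoChain) {
            fmt.Println("goto chain", combined&netlink.TC_ACT_EXT_VAL_MASK)
        }
    }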
+// Action represents an action in any supported filter.
+type Action interface {
+	Attrs() *ActionAttrs
+	Type() string
+}
+
+type GenericAction struct {
+	ActionAttrs
+	Chain int32
+}
+
+func (action *GenericAction) Type() string {
+	return "generic"
+}
+
+func (action *GenericAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+type BpfAction struct {
+	ActionAttrs
+	Fd   int
+	Name string
+}
+
+func (action *BpfAction) Type() string {
+	return "bpf"
+}
+
+func (action *BpfAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+type ConnmarkAction struct {
+	ActionAttrs
+	Zone uint16
+}
+
+func (action *ConnmarkAction) Type() string {
+	return "connmark"
+}
+
+func (action *ConnmarkAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewConnmarkAction() *ConnmarkAction {
+	return &ConnmarkAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
+
+type CsumUpdateFlags uint32
+
+const (
+	TCA_CSUM_UPDATE_FLAG_IPV4HDR CsumUpdateFlags = 1
+	TCA_CSUM_UPDATE_FLAG_ICMP    CsumUpdateFlags = 2
+	TCA_CSUM_UPDATE_FLAG_IGMP    CsumUpdateFlags = 4
+	TCA_CSUM_UPDATE_FLAG_TCP     CsumUpdateFlags = 8
+	TCA_CSUM_UPDATE_FLAG_UDP     CsumUpdateFlags = 16
+	TCA_CSUM_UPDATE_FLAG_UDPLITE CsumUpdateFlags = 32
+	TCA_CSUM_UPDATE_FLAG_SCTP    CsumUpdateFlags = 64
+)
+
+type CsumAction struct {
+	ActionAttrs
+	UpdateFlags CsumUpdateFlags
+}
+
+func (action *CsumAction) Type() string {
+	return "csum"
+}
+
+func (action *CsumAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewCsumAction() *CsumAction {
+	return &CsumAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
+
+type MirredAct uint8
+
+func (a MirredAct) String() string {
+	switch a {
+	case TCA_EGRESS_REDIR:
+		return "egress redir"
+	case TCA_EGRESS_MIRROR:
+		return "egress mirror"
+	case TCA_INGRESS_REDIR:
+		return "ingress redir"
+	case TCA_INGRESS_MIRROR:
+		return "ingress mirror"
+	}
+	return "unknown"
+}
+
+const (
+	TCA_EGRESS_REDIR   MirredAct = 1 /* packet redirect to EGRESS*/
+	TCA_EGRESS_MIRROR  MirredAct = 2 /* mirror packet to EGRESS */
+	TCA_INGRESS_REDIR  MirredAct = 3 /* packet redirect to INGRESS*/
+	TCA_INGRESS_MIRROR MirredAct = 4 /* mirror packet to INGRESS */
+)
+
+type MirredAction struct {
+	ActionAttrs
+	MirredAction MirredAct
+	Ifindex      int
+}
+
+func (action *MirredAction) Type() string {
+	return "mirred"
+}
+
+func (action *MirredAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewMirredAction(redirIndex int) *MirredAction {
+	return &MirredAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_STOLEN,
+		},
+		MirredAction: TCA_EGRESS_REDIR,
+		Ifindex:      redirIndex,
+	}
+}
+
+type TunnelKeyAct int8
+
+const (
+	TCA_TUNNEL_KEY_SET   TunnelKeyAct = 1 // set tunnel key
+	TCA_TUNNEL_KEY_UNSET TunnelKeyAct = 2 // unset tunnel key
+)
+
+type TunnelKeyAction struct {
+	ActionAttrs
+	Action   TunnelKeyAct
+	SrcAddr  net.IP
+	DstAddr  net.IP
+	KeyID    uint32
+	DestPort uint16
+}
+
+func (action *TunnelKeyAction) Type() string {
+	return "tunnel_key"
+}
+
+func (action *TunnelKeyAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewTunnelKeyAction() *TunnelKeyAction {
+	return &TunnelKeyAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
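Editor's note (not part of the patch): a minimal sketch of the common mirred pattern — redirecting matched traffic to another interface. The device name is hypothetical; the filter types that carry the action appear later in this file.

    package main

    import (
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        ifb, err := netlink.LinkByName("ifb0") // assumes ifb0 already exists
        if err != nil {
            log.Fatal(err)
        }
        // Defaults to TC_ACT_STOLEN + TCA_EGRESS_REDIR, i.e. "redirect to ifindex".
        act := netlink.NewMirredAction(ifb.Attrs().Index)
        _ = act // attach it through a filter's Actions slice (filter types follow below)
    }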
+type SkbEditAction struct {
+	ActionAttrs
+	QueueMapping *uint16
+	PType        *uint16
+	Priority     *uint32
+	Mark         *uint32
+	Mask         *uint32
+}
+
+func (action *SkbEditAction) Type() string {
+	return "skbedit"
+}
+
+func (action *SkbEditAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewSkbEditAction() *SkbEditAction {
+	return &SkbEditAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
+
+type PoliceAction struct {
+	ActionAttrs
+	Rate            uint32 // in byte per second
+	Burst           uint32 // in byte
+	RCellLog        int
+	Mtu             uint32
+	Mpu             uint16 // in byte
+	PeakRate        uint32 // in byte per second
+	PCellLog        int
+	AvRate          uint32 // in byte per second
+	Overhead        uint16
+	LinkLayer       int
+	ExceedAction    TcPolAct
+	NotExceedAction TcPolAct
+}
+
+func (action *PoliceAction) Type() string {
+	return "police"
+}
+
+func (action *PoliceAction) Attrs() *ActionAttrs {
+	return &action.ActionAttrs
+}
+
+func NewPoliceAction() *PoliceAction {
+	return &PoliceAction{
+		RCellLog:        -1,
+		PCellLog:        -1,
+		LinkLayer:       1, // ETHERNET
+		ExceedAction:    TC_POLICE_RECLASSIFY,
+		NotExceedAction: TC_POLICE_OK,
+	}
+}
+
+// MatchAll filters match all packets
+type MatchAll struct {
+	FilterAttrs
+	ClassId uint32
+	Actions []Action
+}
+
+func (filter *MatchAll) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+func (filter *MatchAll) Type() string {
+	return "matchall"
+}
+
+type FwFilter struct {
+	FilterAttrs
+	ClassId uint32
+	InDev   string
+	Mask    uint32
+	Police  *PoliceAction
+	Actions []Action
+}
+
+func (filter *FwFilter) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+func (filter *FwFilter) Type() string {
+	return "fw"
+}
+
+type BpfFilter struct {
+	FilterAttrs
+	ClassId      uint32
+	Fd           int
+	Name         string
+	DirectAction bool
+	Id           int
+	Tag          string
+}
+
+func (filter *BpfFilter) Type() string {
+	return "bpf"
+}
+
+func (filter *BpfFilter) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+// GenericFilter filters represent types that are not currently understood
+// by this netlink library.
+type GenericFilter struct {
+	FilterAttrs
+	FilterType string
+}
+
+func (filter *GenericFilter) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+func (filter *GenericFilter) Type() string {
+	return filter.FilterType
+}
+
+type PeditAction struct {
+	ActionAttrs
+	Proto      uint8
+	SrcMacAddr net.HardwareAddr
+	DstMacAddr net.HardwareAddr
+	SrcIP      net.IP
+	DstIP      net.IP
+	SrcPort    uint16
+	DstPort    uint16
+}
+
+func (p *PeditAction) Attrs() *ActionAttrs {
+	return &p.ActionAttrs
+}
+
+func (p *PeditAction) Type() string {
+	return "pedit"
+}
+
+func NewPeditAction() *PeditAction {
+	return &PeditAction{
+		ActionAttrs: ActionAttrs{
+			Action: TC_ACT_PIPE,
+		},
+	}
+}
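Editor's note (not part of the patch): a hedged sketch of populating the pedit action above for header rewriting — the addresses and port are placeholders, and `unix.IPPROTO_TCP` is assumed for the port rewrite. The encoder in filter_linux.go (below) serializes whichever fields are set.

    package main

    import (
        "net"

        "github.com/vishvananda/netlink"
        "golang.org/x/sys/unix"
    )

    func main() {
        // Rewrite the destination IP and TCP port of matched packets.
        pedit := netlink.NewPeditAction()
        pedit.Proto = unix.IPPROTO_TCP
        pedit.DstIP = net.ParseIP("10.0.0.2")
        pedit.DstPort = 8080
        _ = pedit // append to a filter's Actions slice
    }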
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
new file mode 100644
index 000000000..87cd18f8e
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -0,0 +1,1120 @@
+package netlink
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+	"golang.org/x/sys/unix"
+)
+
+// Constants used in TcU32Sel.Flags.
+const (
+	TC_U32_TERMINAL  = nl.TC_U32_TERMINAL
+	TC_U32_OFFSET    = nl.TC_U32_OFFSET
+	TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
+	TC_U32_EAT       = nl.TC_U32_EAT
+)
+
+// Sel of the U32 filters that contains multiple TcU32Key. This is the type
+// alias and the frontend representation of nl.TcU32Sel. It is serialized into
+// canonical nl.TcU32Sel with the appropriate endianness.
+type TcU32Sel = nl.TcU32Sel
+
+// TcU32Key is contained in Sel of the U32 filters. This is the type alias and the
+// frontend representation of nl.TcU32Key. It is serialized into canonical
+// nl.TcU32Sel with the appropriate endianness.
+type TcU32Key = nl.TcU32Key
+
+// U32 filters on many packet related properties
+type U32 struct {
+	FilterAttrs
+	ClassId    uint32
+	Divisor    uint32 // Divisor MUST be power of 2.
+	Hash       uint32
+	Link       uint32
+	RedirIndex int
+	Sel        *TcU32Sel
+	Actions    []Action
+	Police     *PoliceAction
+}
+
+func (filter *U32) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+func (filter *U32) Type() string {
+	return "u32"
+}
+
+type Flower struct {
+	FilterAttrs
+	DestIP        net.IP
+	DestIPMask    net.IPMask
+	SrcIP         net.IP
+	SrcIPMask     net.IPMask
+	EthType       uint16
+	EncDestIP     net.IP
+	EncDestIPMask net.IPMask
+	EncSrcIP      net.IP
+	EncSrcIPMask  net.IPMask
+	EncDestPort   uint16
+	EncKeyId      uint32
+	SkipHw        bool
+	SkipSw        bool
+	IPProto       *nl.IPProto
+	DestPort      uint16
+	SrcPort       uint16
+
+	Actions []Action
+}
+
+func (filter *Flower) Attrs() *FilterAttrs {
+	return &filter.FilterAttrs
+}
+
+func (filter *Flower) Type() string {
+	return "flower"
+}
+
+func (filter *Flower) encodeIP(parent *nl.RtAttr, ip net.IP, mask net.IPMask, v4Type, v6Type int, v4MaskType, v6MaskType int) {
+	ipType := v4Type
+	maskType := v4MaskType
+
+	encodeMask := mask
+	if mask == nil {
+		encodeMask = net.CIDRMask(32, 32)
+	}
+	v4IP := ip.To4()
+	if v4IP == nil {
+		ipType = v6Type
+		maskType = v6MaskType
+		if mask == nil {
+			encodeMask = net.CIDRMask(128, 128)
+		}
+	} else {
+		ip = v4IP
+	}
+
+	parent.AddRtAttr(ipType, ip)
+	parent.AddRtAttr(maskType, encodeMask)
+}
+
+func (filter *Flower) encode(parent *nl.RtAttr) error {
+	if filter.EthType != 0 {
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_ETH_TYPE, htons(filter.EthType))
+	}
+	if filter.SrcIP != nil {
+		filter.encodeIP(parent, filter.SrcIP, filter.SrcIPMask,
+			nl.TCA_FLOWER_KEY_IPV4_SRC, nl.TCA_FLOWER_KEY_IPV6_SRC,
+			nl.TCA_FLOWER_KEY_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_IPV6_SRC_MASK)
+	}
+	if filter.DestIP != nil {
+		filter.encodeIP(parent, filter.DestIP, filter.DestIPMask,
+			nl.TCA_FLOWER_KEY_IPV4_DST, nl.TCA_FLOWER_KEY_IPV6_DST,
+			nl.TCA_FLOWER_KEY_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_IPV6_DST_MASK)
+	}
+	if filter.EncSrcIP != nil {
+		filter.encodeIP(parent, filter.EncSrcIP, filter.EncSrcIPMask,
+			nl.TCA_FLOWER_KEY_ENC_IPV4_SRC, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC,
+			nl.TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK)
+	}
+	if filter.EncDestIP != nil {
+		filter.encodeIP(parent, filter.EncDestIP, filter.EncDestIPMask,
+			nl.TCA_FLOWER_KEY_ENC_IPV4_DST, nl.TCA_FLOWER_KEY_ENC_IPV6_DST,
+			nl.TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_DST_MASK)
+	}
+	if filter.EncDestPort != 0 {
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_UDP_DST_PORT, htons(filter.EncDestPort))
+	}
+	if filter.EncKeyId != 0 {
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId))
+	}
+	if filter.IPProto != nil {
+		ipproto := *filter.IPProto
+		parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize())
+		if filter.SrcPort != 0 {
+			switch ipproto {
+			case nl.IPPROTO_TCP:
+				parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_SRC, htons(filter.SrcPort))
+			case nl.IPPROTO_UDP:
+				parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_SRC, htons(filter.SrcPort))
+			case nl.IPPROTO_SCTP:
+				parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_SRC, htons(filter.SrcPort))
+			}
+		}
+		if filter.DestPort != 0 {
+			switch ipproto {
+			case nl.IPPROTO_TCP:
+				parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_DST, htons(filter.DestPort))
+			case nl.IPPROTO_UDP:
+				parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_DST, htons(filter.DestPort))
+			case nl.IPPROTO_SCTP:
+				parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_DST, htons(filter.DestPort))
+			}
+		}
+	}
+
+	var flags uint32 = 0
+	if filter.SkipHw {
+		flags |= nl.TCA_CLS_FLAGS_SKIP_HW
+	}
+	if filter.SkipSw {
+		flags |= nl.TCA_CLS_FLAGS_SKIP_SW
+	}
+	parent.AddRtAttr(nl.TCA_FLOWER_FLAGS, htonl(flags))
+
+	actionsAttr := parent.AddRtAttr(nl.TCA_FLOWER_ACT, nil)
+	if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error {
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.TCA_FLOWER_KEY_ETH_TYPE:
+			filter.EthType = ntohs(datum.Value)
+		case nl.TCA_FLOWER_KEY_IPV4_SRC, nl.TCA_FLOWER_KEY_IPV6_SRC:
+			filter.SrcIP = datum.Value
+		case nl.TCA_FLOWER_KEY_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_IPV6_SRC_MASK:
+			filter.SrcIPMask = datum.Value
+		case nl.TCA_FLOWER_KEY_IPV4_DST, nl.TCA_FLOWER_KEY_IPV6_DST:
+			filter.DestIP = datum.Value
+		case nl.TCA_FLOWER_KEY_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_IPV6_DST_MASK:
+			filter.DestIPMask = datum.Value
+		case nl.TCA_FLOWER_KEY_ENC_IPV4_SRC, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC:
+			filter.EncSrcIP = datum.Value
+		case nl.TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK:
+			filter.EncSrcIPMask = datum.Value
+		case nl.TCA_FLOWER_KEY_ENC_IPV4_DST, nl.TCA_FLOWER_KEY_ENC_IPV6_DST:
+			filter.EncDestIP = datum.Value
+		case nl.TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, nl.TCA_FLOWER_KEY_ENC_IPV6_DST_MASK:
+			filter.EncDestIPMask = datum.Value
+		case nl.TCA_FLOWER_KEY_ENC_UDP_DST_PORT:
+			filter.EncDestPort = ntohs(datum.Value)
+		case nl.TCA_FLOWER_KEY_ENC_KEY_ID:
+			filter.EncKeyId = ntohl(datum.Value)
+		case nl.TCA_FLOWER_KEY_IP_PROTO:
+			val := new(nl.IPProto)
+			*val = nl.IPProto(datum.Value[0])
+			filter.IPProto = val
+		case nl.TCA_FLOWER_KEY_TCP_SRC, nl.TCA_FLOWER_KEY_UDP_SRC, nl.TCA_FLOWER_KEY_SCTP_SRC:
+			filter.SrcPort = ntohs(datum.Value)
+		case nl.TCA_FLOWER_KEY_TCP_DST, nl.TCA_FLOWER_KEY_UDP_DST, nl.TCA_FLOWER_KEY_SCTP_DST:
+			filter.DestPort = ntohs(datum.Value)
+		case nl.TCA_FLOWER_ACT:
+			tables, err := nl.ParseRouteAttr(datum.Value)
+			if err != nil {
+				return err
+			}
+			filter.Actions, err = parseActions(tables)
+			if err != nil {
+				return err
+			}
+		case nl.TCA_FLOWER_FLAGS:
+			attr := nl.DeserializeUint32Bitfield(datum.Value)
+			skipSw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_SW
+			skipHw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_HW
+			if skipSw != 0 {
+				filter.SkipSw = true
+			}
+			if skipHw != 0 {
+				filter.SkipHw = true
+			}
+		}
+	}
+	return nil
+}
+
+// FilterDel will delete a filter from the system.
+// Equivalent to: `tc filter del $filter`
+func FilterDel(filter Filter) error {
+	return pkgHandle.FilterDel(filter)
+}
+
+// FilterDel will delete a filter from the system.
+// Equivalent to: `tc filter del $filter`
+func (h *Handle) FilterDel(filter Filter) error {
+	return h.filterModify(filter, unix.RTM_DELTFILTER, 0)
+}
+
+// FilterAdd will add a filter to the system.
+// Equivalent to: `tc filter add $filter`
+func FilterAdd(filter Filter) error {
+	return pkgHandle.FilterAdd(filter)
+}
+
+// FilterAdd will add a filter to the system.
+// Equivalent to: `tc filter add $filter`
+func (h *Handle) FilterAdd(filter Filter) error {
+	return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL)
+}
+
+// FilterReplace will replace a filter.
+// Equivalent to: `tc filter replace $filter`
+func FilterReplace(filter Filter) error {
+	return pkgHandle.FilterReplace(filter)
+}
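Editor's note (not part of the patch): a hedged sketch tying the flower filter to `FilterAdd`. The device, parent handle, and match are illustrative; `netlink.HANDLE_MIN_INGRESS` is assumed to be exported by this library, and a clsact/ingress qdisc is assumed to be installed on the link.

    package main

    import (
        "log"

        "github.com/vishvananda/netlink"
        "github.com/vishvananda/netlink/nl"
        "golang.org/x/sys/unix"
    )

    func main() {
        eth0, err := netlink.LinkByName("eth0")
        if err != nil {
            log.Fatal(err)
        }
        proto := nl.IPPROTO_TCP
        drop := &netlink.GenericAction{ActionAttrs: netlink.ActionAttrs{Action: netlink.TC_ACT_SHOT}}
        f := &netlink.Flower{
            FilterAttrs: netlink.FilterAttrs{
                LinkIndex: eth0.Attrs().Index,
                Parent:    netlink.HANDLE_MIN_INGRESS,
                Priority:  1,
                Protocol:  unix.ETH_P_IP,
            },
            EthType:  unix.ETH_P_IP,
            IPProto:  &proto,
            DestPort: 443, // drop inbound TCP/443 (illustrative)
            Actions:  []netlink.Action{drop},
        }
        if err := netlink.FilterAdd(f); err != nil {
            log.Fatal(err)
        }
    }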
+// FilterReplace will replace a filter.
+// Equivalent to: `tc filter replace $filter`
+func (h *Handle) FilterReplace(filter Filter) error {
+	return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE)
+}
+
+func (h *Handle) filterModify(filter Filter, proto, flags int) error {
+	req := h.newNetlinkRequest(proto, flags|unix.NLM_F_ACK)
+	base := filter.Attrs()
+	msg := &nl.TcMsg{
+		Family:  nl.FAMILY_ALL,
+		Ifindex: int32(base.LinkIndex),
+		Handle:  base.Handle,
+		Parent:  base.Parent,
+		Info:    MakeHandle(base.Priority, nl.Swap16(base.Protocol)),
+	}
+	req.AddData(msg)
+	if filter.Attrs().Chain != nil {
+		req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(*filter.Attrs().Chain)))
+	}
+	req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type())))
+
+	options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+
+	switch filter := filter.(type) {
+	case *U32:
+		sel := filter.Sel
+		if sel == nil {
+			// match all
+			sel = &nl.TcU32Sel{
+				Nkeys: 1,
+				Flags: nl.TC_U32_TERMINAL,
+			}
+			sel.Keys = append(sel.Keys, nl.TcU32Key{})
+		}
+
+		if native != networkOrder {
+			// Copy TcU32Sel.
+			cSel := *sel
+			keys := make([]nl.TcU32Key, cap(sel.Keys))
+			copy(keys, sel.Keys)
+			cSel.Keys = keys
+			sel = &cSel
+
+			// Handle the endianness of attributes
+			sel.Offmask = native.Uint16(htons(sel.Offmask))
+			sel.Hmask = native.Uint32(htonl(sel.Hmask))
+			for i, key := range sel.Keys {
+				sel.Keys[i].Mask = native.Uint32(htonl(key.Mask))
+				sel.Keys[i].Val = native.Uint32(htonl(key.Val))
+			}
+		}
+		sel.Nkeys = uint8(len(sel.Keys))
+		options.AddRtAttr(nl.TCA_U32_SEL, sel.Serialize())
+		if filter.ClassId != 0 {
+			options.AddRtAttr(nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId))
+		}
+		if filter.Divisor != 0 {
+			if (filter.Divisor-1)&filter.Divisor != 0 {
+				return fmt.Errorf("illegal divisor %d. Must be a power of 2", filter.Divisor)
+			}
+			options.AddRtAttr(nl.TCA_U32_DIVISOR, nl.Uint32Attr(filter.Divisor))
+		}
+		if filter.Hash != 0 {
+			options.AddRtAttr(nl.TCA_U32_HASH, nl.Uint32Attr(filter.Hash))
+		}
+		if filter.Link != 0 {
+			options.AddRtAttr(nl.TCA_U32_LINK, nl.Uint32Attr(filter.Link))
+		}
+		if filter.Police != nil {
+			police := options.AddRtAttr(nl.TCA_U32_POLICE, nil)
+			if err := encodePolice(police, filter.Police); err != nil {
+				return err
+			}
+		}
+		actionsAttr := options.AddRtAttr(nl.TCA_U32_ACT, nil)
+		// backwards compatibility
+		if filter.RedirIndex != 0 {
+			filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...)
+		}
+		if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
+			return err
+		}
+	case *FwFilter:
+		if filter.Mask != 0 {
+			b := make([]byte, 4)
+			native.PutUint32(b, filter.Mask)
+			options.AddRtAttr(nl.TCA_FW_MASK, b)
+		}
+		if filter.InDev != "" {
+			options.AddRtAttr(nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev))
+		}
+		if filter.Police != nil {
+			police := options.AddRtAttr(nl.TCA_FW_POLICE, nil)
+			if err := encodePolice(police, filter.Police); err != nil {
+				return err
+			}
+		}
+		if filter.ClassId != 0 {
+			b := make([]byte, 4)
+			native.PutUint32(b, filter.ClassId)
+			options.AddRtAttr(nl.TCA_FW_CLASSID, b)
+		}
+		actionsAttr := options.AddRtAttr(nl.TCA_FW_ACT, nil)
+		if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
+			return err
+		}
+	case *BpfFilter:
+		var bpfFlags uint32
+		if filter.ClassId != 0 {
+			options.AddRtAttr(nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId))
+		}
+		if filter.Fd >= 0 {
+			options.AddRtAttr(nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd))))
+		}
+		if filter.Name != "" {
+			options.AddRtAttr(nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name))
+		}
+		if filter.DirectAction {
+			bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT
+		}
+		options.AddRtAttr(nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
+	case *MatchAll:
+		actionsAttr := options.AddRtAttr(nl.TCA_MATCHALL_ACT, nil)
+		if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
+			return err
+		}
+		if filter.ClassId != 0 {
+			options.AddRtAttr(nl.TCA_MATCHALL_CLASSID, nl.Uint32Attr(filter.ClassId))
+		}
+	case *Flower:
+		if err := filter.encode(options); err != nil {
+			return err
+		}
+	}
+	req.AddData(options)
+	_, err := req.Execute(unix.NETLINK_ROUTE, 0)
+	return err
+}
+
+// FilterList gets a list of filters in the system.
+// Equivalent to: `tc filter show`.
+// Generally returns nothing if link and parent are not specified.
+func FilterList(link Link, parent uint32) ([]Filter, error) {
+	return pkgHandle.FilterList(link, parent)
+}
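Editor's note (not part of the patch): a rough sketch of a matchall filter carrying a police action through the encoding path above — rates and device are made up, and `netlink.HANDLE_MIN_EGRESS` is assumed to be exported like its ingress counterpart. The rate-table math is handled internally by `encodePolice`.

    package main

    import (
        "log"

        "github.com/vishvananda/netlink"
        "golang.org/x/sys/unix"
    )

    func main() {
        eth0, err := netlink.LinkByName("eth0")
        if err != nil {
            log.Fatal(err)
        }
        police := netlink.NewPoliceAction()
        police.Rate = 1024 * 1024 // bytes per second
        police.Burst = 64 * 1024  // bytes
        police.ExceedAction = netlink.TC_POLICE_SHOT

        f := &netlink.MatchAll{
            FilterAttrs: netlink.FilterAttrs{
                LinkIndex: eth0.Attrs().Index,
                Parent:    netlink.HANDLE_MIN_EGRESS,
                Priority:  1,
                Protocol:  unix.ETH_P_ALL,
            },
            Actions: []netlink.Action{police},
        }
        if err := netlink.FilterAdd(f); err != nil {
            log.Fatal(err)
        }
    }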
+// FilterList gets a list of filters in the system.
+// Equivalent to: `tc filter show`.
+// Generally returns nothing if link and parent are not specified.
+func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) {
+	req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP)
+	msg := &nl.TcMsg{
+		Family: nl.FAMILY_ALL,
+		Parent: parent,
+	}
+	if link != nil {
+		base := link.Attrs()
+		h.ensureIndex(base)
+		msg.Ifindex = int32(base.Index)
+	}
+	req.AddData(msg)
+
+	msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []Filter
+	for _, m := range msgs {
+		msg := nl.DeserializeTcMsg(m)
+
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		base := FilterAttrs{
+			LinkIndex: int(msg.Ifindex),
+			Handle:    msg.Handle,
+			Parent:    msg.Parent,
+		}
+		base.Priority, base.Protocol = MajorMinor(msg.Info)
+		base.Protocol = nl.Swap16(base.Protocol)
+
+		var filter Filter
+		filterType := ""
+		detailed := false
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case nl.TCA_KIND:
+				filterType = string(attr.Value[:len(attr.Value)-1])
+				switch filterType {
+				case "u32":
+					filter = &U32{}
+				case "fw":
+					filter = &FwFilter{}
+				case "bpf":
+					filter = &BpfFilter{}
+				case "matchall":
+					filter = &MatchAll{}
+				case "flower":
+					filter = &Flower{}
+				default:
+					filter = &GenericFilter{FilterType: filterType}
+				}
+			case nl.TCA_OPTIONS:
+				data, err := nl.ParseRouteAttr(attr.Value)
+				if err != nil {
+					return nil, err
+				}
+				switch filterType {
+				case "u32":
+					detailed, err = parseU32Data(filter, data)
+					if err != nil {
+						return nil, err
+					}
+				case "fw":
+					detailed, err = parseFwData(filter, data)
+					if err != nil {
+						return nil, err
+					}
+				case "bpf":
+					detailed, err = parseBpfData(filter, data)
+					if err != nil {
+						return nil, err
+					}
+				case "matchall":
+					detailed, err = parseMatchAllData(filter, data)
+					if err != nil {
+						return nil, err
+					}
+				case "flower":
+					detailed, err = parseFlowerData(filter, data)
+					if err != nil {
+						return nil, err
+					}
+				default:
+					detailed = true
+				}
+			case nl.TCA_CHAIN:
+				val := new(uint32)
+				*val = native.Uint32(attr.Value)
+				base.Chain = val
+			}
+		}
+		// only return the detailed version of the filter
+		if detailed {
+			*filter.Attrs() = base
+			res = append(res, filter)
+		}
+	}
+
+	return res, nil
+}
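Editor's note (not part of the patch): a short usage sketch for the dump path above (device name illustrative).

    package main

    import (
        "fmt"
        "log"

        "github.com/vishvananda/netlink"
    )

    func main() {
        link, err := netlink.LinkByName("eth0")
        if err != nil {
            log.Fatal(err)
        }
        filters, err := netlink.FilterList(link, netlink.HANDLE_MIN_INGRESS)
        if err != nil {
            log.Fatal(err)
        }
        for _, f := range filters {
            fmt.Printf("%s: %v\n", f.Type(), f.Attrs())
        }
    }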
+func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) {
+	tcgen.Index = uint32(attrs.Index)
+	tcgen.Capab = uint32(attrs.Capab)
+	tcgen.Action = int32(attrs.Action)
+	tcgen.Refcnt = int32(attrs.Refcnt)
+	tcgen.Bindcnt = int32(attrs.Bindcnt)
+}
+
+func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) {
+	attrs.Index = int(tcgen.Index)
+	attrs.Capab = int(tcgen.Capab)
+	attrs.Action = TcAct(tcgen.Action)
+	attrs.Refcnt = int(tcgen.Refcnt)
+	attrs.Bindcnt = int(tcgen.Bindcnt)
+}
+
+func toTimeStamp(tcf *nl.Tcf) *ActionTimestamp {
+	return &ActionTimestamp{
+		Installed: tcf.Install,
+		LastUsed:  tcf.LastUse,
+		Expires:   tcf.Expires,
+		FirstUsed: tcf.FirstUse}
+}
+
+func encodePolice(attr *nl.RtAttr, action *PoliceAction) error {
+	var rtab [256]uint32
+	var ptab [256]uint32
+	police := nl.TcPolice{}
+	police.Index = uint32(action.Attrs().Index)
+	police.Bindcnt = int32(action.Attrs().Bindcnt)
+	police.Capab = uint32(action.Attrs().Capab)
+	police.Refcnt = int32(action.Attrs().Refcnt)
+	police.Rate.Rate = action.Rate
+	police.PeakRate.Rate = action.PeakRate
+	police.Action = int32(action.ExceedAction)
+
+	if police.Rate.Rate != 0 {
+		police.Rate.Mpu = action.Mpu
+		police.Rate.Overhead = action.Overhead
+		if CalcRtable(&police.Rate, rtab[:], action.RCellLog, action.Mtu, action.LinkLayer) < 0 {
+			return errors.New("TBF: failed to calculate rate table")
+		}
+		police.Burst = Xmittime(uint64(police.Rate.Rate), action.Burst)
+	}
+
+	police.Mtu = action.Mtu
+	if police.PeakRate.Rate != 0 {
+		police.PeakRate.Mpu = action.Mpu
+		police.PeakRate.Overhead = action.Overhead
+		if CalcRtable(&police.PeakRate, ptab[:], action.PCellLog, action.Mtu, action.LinkLayer) < 0 {
+			return errors.New("POLICE: failed to calculate peak rate table")
+		}
+	}
+
+	attr.AddRtAttr(nl.TCA_POLICE_TBF, police.Serialize())
+	if police.Rate.Rate != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_RATE, SerializeRtab(rtab))
+	}
+	if police.PeakRate.Rate != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_PEAKRATE, SerializeRtab(ptab))
+	}
+	if action.AvRate != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_AVRATE, nl.Uint32Attr(action.AvRate))
+	}
+	if action.NotExceedAction != 0 {
+		attr.AddRtAttr(nl.TCA_POLICE_RESULT, nl.Uint32Attr(uint32(action.NotExceedAction)))
+	}
+
+	return nil
+}
+
+func EncodeActions(attr *nl.RtAttr, actions []Action) error {
+	tabIndex := int(nl.TCA_ACT_TAB)
+
+	for _, action := range actions {
+		switch action := action.(type) {
+		default:
+			return fmt.Errorf("unknown action type %s", action.Type())
+		case *PoliceAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("police"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			if err := encodePolice(aopts, action); err != nil {
+				return err
+			}
+		case *MirredAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			mirred := nl.TcMirred{
+				Eaction: int32(action.MirredAction),
+				Ifindex: uint32(action.Ifindex),
+			}
+			toTcGen(action.Attrs(), &mirred.TcGen)
+			aopts.AddRtAttr(nl.TCA_MIRRED_PARMS, mirred.Serialize())
+		case *TunnelKeyAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("tunnel_key"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			tun := nl.TcTunnelKey{
+				Action: int32(action.Action),
+			}
+			toTcGen(action.Attrs(), &tun.TcGen)
+			aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_PARMS, tun.Serialize())
+			if action.Action == TCA_TUNNEL_KEY_SET {
+				aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_KEY_ID, htonl(action.KeyID))
+				if v4 := action.SrcAddr.To4(); v4 != nil {
+					aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC, v4[:])
+				} else if v6 := action.SrcAddr.To16(); v6 != nil {
+					aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, v6[:])
+				} else {
+					return fmt.Errorf("invalid src addr %s for tunnel_key action", action.SrcAddr)
+				}
+				if v4 := action.DstAddr.To4(); v4 != nil {
+					aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV4_DST, v4[:])
+				} else if v6 := action.DstAddr.To16(); v6 != nil {
+					aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, v6[:])
+				} else {
+					return fmt.Errorf("invalid dst addr %s for tunnel_key action", action.DstAddr)
+				}
+				if action.DestPort != 0 {
+					aopts.AddRtAttr(nl.TCA_TUNNEL_KEY_ENC_DST_PORT, htons(action.DestPort))
+				}
+			}
+		case *SkbEditAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("skbedit"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			skbedit := nl.TcSkbEdit{}
+			toTcGen(action.Attrs(), &skbedit.TcGen)
+			aopts.AddRtAttr(nl.TCA_SKBEDIT_PARMS, skbedit.Serialize())
+			if action.QueueMapping != nil {
+				aopts.AddRtAttr(nl.TCA_SKBEDIT_QUEUE_MAPPING, nl.Uint16Attr(*action.QueueMapping))
+			}
+			if action.Priority != nil {
+				aopts.AddRtAttr(nl.TCA_SKBEDIT_PRIORITY, nl.Uint32Attr(*action.Priority))
+			}
+			if action.PType != nil {
+				aopts.AddRtAttr(nl.TCA_SKBEDIT_PTYPE, nl.Uint16Attr(*action.PType))
+			}
+			if action.Mark != nil {
+				aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark))
+			}
+			if action.Mask != nil {
+				aopts.AddRtAttr(nl.TCA_SKBEDIT_MASK, nl.Uint32Attr(*action.Mask))
+			}
+		case *ConnmarkAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("connmark"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			connmark := nl.TcConnmark{
+				Zone: action.Zone,
+			}
+			toTcGen(action.Attrs(), &connmark.TcGen)
+			aopts.AddRtAttr(nl.TCA_CONNMARK_PARMS, connmark.Serialize())
+		case *CsumAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("csum"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			csum := nl.TcCsum{
+				UpdateFlags: uint32(action.UpdateFlags),
+			}
+			toTcGen(action.Attrs(), &csum.TcGen)
+			aopts.AddRtAttr(nl.TCA_CSUM_PARMS, csum.Serialize())
+		case *BpfAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			gen := nl.TcGen{}
+			toTcGen(action.Attrs(), &gen)
+			aopts.AddRtAttr(nl.TCA_ACT_BPF_PARMS, gen.Serialize())
+			aopts.AddRtAttr(nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd)))
+			aopts.AddRtAttr(nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name))
+		case *GenericAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("gact"))
+			aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil)
+			gen := nl.TcGen{}
+			toTcGen(action.Attrs(), &gen)
+			aopts.AddRtAttr(nl.TCA_GACT_PARMS, gen.Serialize())
+		case *PeditAction:
+			table := attr.AddRtAttr(tabIndex, nil)
+			tabIndex++
+			pedit := nl.TcPedit{}
+			if action.SrcMacAddr != nil {
+				pedit.SetEthSrc(action.SrcMacAddr)
+			}
+			if action.DstMacAddr != nil {
+				pedit.SetEthDst(action.DstMacAddr)
+			}
+			if action.SrcIP != nil {
+				pedit.SetSrcIP(action.SrcIP)
+			}
+			if action.DstIP != nil {
+				pedit.SetDstIP(action.DstIP)
+			}
+			if action.SrcPort != 0 {
+				pedit.SetSrcPort(action.SrcPort, action.Proto)
+			}
+			if action.DstPort != 0 {
+				pedit.SetDstPort(action.DstPort, action.Proto)
+			}
+			pedit.Encode(table)
+		}
+	}
+	return nil
+}
+
+func parsePolice(data syscall.NetlinkRouteAttr, police *PoliceAction) {
+	switch data.Attr.Type {
+	case nl.TCA_POLICE_RESULT:
+		police.NotExceedAction = TcPolAct(native.Uint32(data.Value[0:4]))
+	case nl.TCA_POLICE_AVRATE:
+		police.AvRate = native.Uint32(data.Value[0:4])
+	case nl.TCA_POLICE_TBF:
+		p := *nl.DeserializeTcPolice(data.Value)
+		police.ActionAttrs = ActionAttrs{}
+		police.Attrs().Index = int(p.Index)
+		police.Attrs().Bindcnt = int(p.Bindcnt)
+		police.Attrs().Capab = int(p.Capab)
+		police.Attrs().Refcnt = int(p.Refcnt)
+		police.ExceedAction = TcPolAct(p.Action)
+		police.Rate = p.Rate.Rate
+		police.PeakRate = p.PeakRate.Rate
+		police.Burst = Xmitsize(uint64(p.Rate.Rate), p.Burst)
+		police.Mtu = p.Mtu
+		police.LinkLayer = int(p.Rate.Linklayer) & nl.TC_LINKLAYER_MASK
+		police.Overhead = p.Rate.Overhead
+	}
+}
+
+func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) {
+	var actions []Action
+	for _, table := range tables {
+		var action Action
+		var actionType string
+		var actionnStatistic *ActionStatistic
+		var actionTimestamp *ActionTimestamp
+		aattrs, err := nl.ParseRouteAttr(table.Value)
+		if err != nil {
+			return nil, err
+		}
+	nextattr:
+		for _, aattr := range aattrs {
+			switch aattr.Attr.Type {
+			case nl.TCA_KIND:
+				actionType = string(aattr.Value[:len(aattr.Value)-1])
+				// only parse if the action is mirred or bpf
+				switch actionType {
+				case "mirred":
+					action = &MirredAction{}
+				case "bpf":
+					action = &BpfAction{}
+				case "connmark":
+					action = &ConnmarkAction{}
+				case "csum":
+					action = &CsumAction{}
+				case "gact":
+					action = &GenericAction{}
+				case "tunnel_key":
+					action = &TunnelKeyAction{}
+				case "skbedit":
+					action = &SkbEditAction{}
+				case "police":
+					action = &PoliceAction{}
+				case "pedit":
+					action = &PeditAction{}
+				default:
+					break nextattr
+				}
+			case nl.TCA_OPTIONS:
+				adata, err := nl.ParseRouteAttr(aattr.Value)
+				if err != nil {
+					return nil, err
+				}
+				for _, adatum := range adata {
+					switch actionType {
+					case "mirred":
+						switch adatum.Attr.Type {
+						case nl.TCA_MIRRED_PARMS:
+							mirred := *nl.DeserializeTcMirred(adatum.Value)
+							action.(*MirredAction).ActionAttrs = ActionAttrs{}
+							toAttrs(&mirred.TcGen, action.Attrs())
+							action.(*MirredAction).Ifindex = int(mirred.Ifindex)
+							action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction)
+						case nl.TCA_MIRRED_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+
+					case "tunnel_key":
+						switch adatum.Attr.Type {
+						case nl.TCA_TUNNEL_KEY_PARMS:
+							tun := *nl.DeserializeTunnelKey(adatum.Value)
+							action.(*TunnelKeyAction).ActionAttrs = ActionAttrs{}
+							toAttrs(&tun.TcGen, action.Attrs())
+							action.(*TunnelKeyAction).Action = TunnelKeyAct(tun.Action)
+						case nl.TCA_TUNNEL_KEY_ENC_KEY_ID:
+							action.(*TunnelKeyAction).KeyID = networkOrder.Uint32(adatum.Value[0:4])
+						case nl.TCA_TUNNEL_KEY_ENC_IPV6_SRC, nl.TCA_TUNNEL_KEY_ENC_IPV4_SRC:
+							action.(*TunnelKeyAction).SrcAddr = adatum.Value[:]
+						case nl.TCA_TUNNEL_KEY_ENC_IPV6_DST, nl.TCA_TUNNEL_KEY_ENC_IPV4_DST:
+							action.(*TunnelKeyAction).DstAddr = adatum.Value[:]
+						case nl.TCA_TUNNEL_KEY_ENC_DST_PORT:
+							action.(*TunnelKeyAction).DestPort = ntohs(adatum.Value)
+						case nl.TCA_TUNNEL_KEY_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+					case "skbedit":
+						switch adatum.Attr.Type {
+						case nl.TCA_SKBEDIT_PARMS:
+							skbedit := *nl.DeserializeSkbEdit(adatum.Value)
+							action.(*SkbEditAction).ActionAttrs = ActionAttrs{}
+							toAttrs(&skbedit.TcGen, action.Attrs())
+						case nl.TCA_SKBEDIT_MARK:
+							mark := native.Uint32(adatum.Value[0:4])
+							action.(*SkbEditAction).Mark = &mark
+						case nl.TCA_SKBEDIT_MASK:
+							mask := native.Uint32(adatum.Value[0:4])
+							action.(*SkbEditAction).Mask = &mask
+						case nl.TCA_SKBEDIT_PRIORITY:
+							priority := native.Uint32(adatum.Value[0:4])
+							action.(*SkbEditAction).Priority = &priority
+						case nl.TCA_SKBEDIT_PTYPE:
+							ptype := native.Uint16(adatum.Value[0:2])
+							action.(*SkbEditAction).PType = &ptype
+						case nl.TCA_SKBEDIT_QUEUE_MAPPING:
+							mapping := native.Uint16(adatum.Value[0:2])
+							action.(*SkbEditAction).QueueMapping = &mapping
+						case nl.TCA_SKBEDIT_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+					case "bpf":
+						switch adatum.Attr.Type {
+						case nl.TCA_ACT_BPF_PARMS:
+							gen := *nl.DeserializeTcGen(adatum.Value)
+							toAttrs(&gen, action.Attrs())
+						case nl.TCA_ACT_BPF_FD:
+							action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4]))
+						case nl.TCA_ACT_BPF_NAME:
+							action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1])
+						case nl.TCA_ACT_BPF_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+					case "connmark":
+						switch adatum.Attr.Type {
+						case nl.TCA_CONNMARK_PARMS:
+							connmark := *nl.DeserializeTcConnmark(adatum.Value)
+							action.(*ConnmarkAction).ActionAttrs = ActionAttrs{}
+							toAttrs(&connmark.TcGen, action.Attrs())
+							action.(*ConnmarkAction).Zone = connmark.Zone
+						case nl.TCA_CONNMARK_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+					case "csum":
+						switch adatum.Attr.Type {
+						case nl.TCA_CSUM_PARMS:
+							csum := *nl.DeserializeTcCsum(adatum.Value)
+							action.(*CsumAction).ActionAttrs = ActionAttrs{}
+							toAttrs(&csum.TcGen, action.Attrs())
+							action.(*CsumAction).UpdateFlags = CsumUpdateFlags(csum.UpdateFlags)
+						case nl.TCA_CSUM_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+					case "gact":
+						switch adatum.Attr.Type {
+						case nl.TCA_GACT_PARMS:
+							gen := *nl.DeserializeTcGen(adatum.Value)
+							toAttrs(&gen, action.Attrs())
+							if action.Attrs().Action.String() == "goto" {
+								action.(*GenericAction).Chain = TC_ACT_EXT_VAL_MASK & gen.Action
+							}
+						case nl.TCA_GACT_TM:
+							tcTs := nl.DeserializeTcf(adatum.Value)
+							actionTimestamp = toTimeStamp(tcTs)
+						}
+					case "police":
+						parsePolice(adatum, action.(*PoliceAction))
+					}
+				}
+			case nl.TCA_ACT_STATS:
+				s, err := parseTcStats2(aattr.Value)
+				if err != nil {
+					return nil, err
+				}
+				actionnStatistic = (*ActionStatistic)(s)
+			}
+		}
+		action.Attrs().Statistics = actionnStatistic
+		action.Attrs().Timestamp = actionTimestamp
+		actions = append(actions, action)
+	}
+	return actions, nil
+}
+
+func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+	u32 := filter.(*U32)
+	detailed := false
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.TCA_U32_SEL:
+			detailed = true
+			sel := nl.DeserializeTcU32Sel(datum.Value)
+			u32.Sel = sel
+			if native != networkOrder {
+				// Handle the endianness of attributes
+				u32.Sel.Offmask = native.Uint16(htons(sel.Offmask))
+				u32.Sel.Hmask = native.Uint32(htonl(sel.Hmask))
+				for i, key := range u32.Sel.Keys {
+					u32.Sel.Keys[i].Mask = native.Uint32(htonl(key.Mask))
+					u32.Sel.Keys[i].Val = native.Uint32(htonl(key.Val))
+				}
+			}
+		case nl.TCA_U32_ACT:
+			tables, err := nl.ParseRouteAttr(datum.Value)
+			if err != nil {
+				return detailed, err
+			}
+			u32.Actions, err = parseActions(tables)
+			if err != nil {
+				return detailed, err
+			}
+			for _, action := range u32.Actions {
+				if action, ok := action.(*MirredAction); ok {
+					u32.RedirIndex = int(action.Ifindex)
+				}
+			}
+		case nl.TCA_U32_POLICE:
+			var police PoliceAction
+			adata, _ := nl.ParseRouteAttr(datum.Value)
+			for _, aattr := range adata {
+				parsePolice(aattr, &police)
+			}
+			u32.Police = &police
+		case nl.TCA_U32_CLASSID:
+			u32.ClassId = native.Uint32(datum.Value)
+		case nl.TCA_U32_DIVISOR:
+			u32.Divisor = native.Uint32(datum.Value)
+		case nl.TCA_U32_HASH:
+			u32.Hash = native.Uint32(datum.Value)
+		case nl.TCA_U32_LINK:
+			u32.Link = native.Uint32(datum.Value)
+		}
+	}
+	return detailed, nil
+}
+
+func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+	fw := filter.(*FwFilter)
+	detailed := true
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.TCA_FW_MASK:
+			fw.Mask = native.Uint32(datum.Value[0:4])
+		case nl.TCA_FW_CLASSID:
+			fw.ClassId = native.Uint32(datum.Value[0:4])
+		case nl.TCA_FW_INDEV:
+			fw.InDev = string(datum.Value[:len(datum.Value)-1])
+		case nl.TCA_FW_POLICE:
+			var police PoliceAction
+			adata, _ := nl.ParseRouteAttr(datum.Value)
+			for _, aattr := range adata {
+				parsePolice(aattr, &police)
+			}
+			fw.Police = &police
+		case nl.TCA_FW_ACT:
+			tables, err := nl.ParseRouteAttr(datum.Value)
+			if err != nil {
+				return detailed, err
+			}
+			fw.Actions, err = parseActions(tables)
+			if err != nil {
+				return detailed, err
+			}
+		}
+	}
+	return detailed, nil
+}
+
+func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+	bpf := filter.(*BpfFilter)
+	detailed := true
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.TCA_BPF_FD:
+			bpf.Fd = int(native.Uint32(datum.Value[0:4]))
+		case nl.TCA_BPF_NAME:
+			bpf.Name = string(datum.Value[:len(datum.Value)-1])
+		case nl.TCA_BPF_CLASSID:
+			bpf.ClassId = native.Uint32(datum.Value[0:4])
+		case nl.TCA_BPF_FLAGS:
+			flags := native.Uint32(datum.Value[0:4])
+			if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 {
+				bpf.DirectAction = true
+			}
+		case nl.TCA_BPF_ID:
+			bpf.Id = int(native.Uint32(datum.Value[0:4]))
+		case nl.TCA_BPF_TAG:
+			bpf.Tag = hex.EncodeToString(datum.Value)
+		}
+	}
+	return detailed, nil
+}
+
+func parseMatchAllData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+	matchall := filter.(*MatchAll)
+	detailed := true
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.TCA_MATCHALL_CLASSID:
+			matchall.ClassId = native.Uint32(datum.Value[0:4])
+		case nl.TCA_MATCHALL_ACT:
+			tables, err := nl.ParseRouteAttr(datum.Value)
+			if err != nil {
+				return detailed, err
+			}
+			matchall.Actions, err = parseActions(tables)
+			if err != nil {
+				return detailed, err
+			}
+		}
+	}
+	return detailed, nil
+}
+
+func parseFlowerData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) {
+	return true, filter.(*Flower).decode(data)
+}
+
+func AlignToAtm(size uint) uint {
+	var linksize, cells int
+	cells = int(size / nl.ATM_CELL_PAYLOAD)
+	if (size % nl.ATM_CELL_PAYLOAD) > 0 {
+		cells++
+	}
+	linksize = cells * nl.ATM_CELL_SIZE
+	return uint(linksize)
+}
+
+func AdjustSize(sz uint, mpu uint, linklayer int) uint {
+	if sz < mpu {
+		sz = mpu
+	}
+	switch linklayer {
+	case nl.LINKLAYER_ATM:
+		return AlignToAtm(sz)
+	default:
+		return sz
+	}
+}
+
+func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, linklayer int) int {
+	bps := rate.Rate
+	mpu := rate.Mpu
+	var sz uint
+	if mtu == 0 {
+		mtu = 2047
+	}
+	if cellLog < 0 {
+		cellLog = 0
+		for (mtu >> uint(cellLog)) > 255 {
+			cellLog++
+		}
+	}
+	for i := 0; i < 256; i++ {
+		sz = AdjustSize(uint((i+1)<= nl.IPSET_ERR_PRIVATE {
+			err = nl.IPSetError(uintptr(errno))
+		}
+	}
+	return
+}
+
+func ipsetUnserialize(msgs [][]byte) (result IPSetResult) {
+	for _, msg := range msgs {
+		result.unserialize(msg)
+	}
+	return result
+}
+
+func (result *IPSetResult) unserialize(msg []byte) {
+	result.Nfgenmsg = nl.DeserializeNfgenmsg(msg)
+
+	for attr := range nl.ParseAttributes(msg[4:]) {
+		switch attr.Type {
+		case nl.IPSET_ATTR_PROTOCOL:
+			result.Protocol = attr.Value[0]
+		case nl.IPSET_ATTR_SETNAME:
+			result.SetName = nl.BytesToString(attr.Value)
+		case nl.IPSET_ATTR_COMMENT:
+			result.Comment = nl.BytesToString(attr.Value)
+		case nl.IPSET_ATTR_TYPENAME:
+			result.TypeName = nl.BytesToString(attr.Value)
+		case nl.IPSET_ATTR_REVISION:
+			result.Revision = attr.Value[0]
+		case nl.IPSET_ATTR_FAMILY:
+			result.Family = attr.Value[0]
+		case nl.IPSET_ATTR_FLAGS:
+			result.Flags = attr.Value[0]
+		case nl.IPSET_ATTR_DATA | nl.NLA_F_NESTED:
+			result.parseAttrData(attr.Value)
+		case nl.IPSET_ATTR_ADT | nl.NLA_F_NESTED:
+			result.parseAttrADT(attr.Value)
+		case nl.IPSET_ATTR_PROTOCOL_MIN:
+			result.ProtocolMinVersion = attr.Value[0]
+		case nl.IPSET_ATTR_MARKMASK:
+			result.MarkMask = attr.Uint32()
+		default:
+			log.Printf("unknown ipset attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK)
+		}
+	}
+}
+
+func (result *IPSetResult) parseAttrData(data []byte) {
+	for attr := range nl.ParseAttributes(data) {
+		switch attr.Type {
+		case nl.IPSET_ATTR_HASHSIZE | nl.NLA_F_NET_BYTEORDER:
+			result.HashSize = attr.Uint32()
+		case nl.IPSET_ATTR_MAXELEM | nl.NLA_F_NET_BYTEORDER:
+			result.MaxElements = attr.Uint32()
+		case nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER:
+			val := attr.Uint32()
+			result.Timeout = &val
+		case nl.IPSET_ATTR_ELEMENTS | nl.NLA_F_NET_BYTEORDER:
+			result.NumEntries = attr.Uint32()
+		case nl.IPSET_ATTR_REFERENCES | nl.NLA_F_NET_BYTEORDER:
+			result.References = attr.Uint32()
+		case nl.IPSET_ATTR_MEMSIZE | nl.NLA_F_NET_BYTEORDER:
+			result.SizeInMemory = attr.Uint32()
+		case nl.IPSET_ATTR_CADT_FLAGS | nl.NLA_F_NET_BYTEORDER:
+			result.CadtFlags = attr.Uint32()
+		case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED:
+			for nested := range nl.ParseAttributes(attr.Value) {
+				switch nested.Type {
+				case nl.IPSET_ATTR_IP | nl.NLA_F_NET_BYTEORDER:
+					result.Entries = append(result.Entries, IPSetEntry{IP: nested.Value})
+				case nl.IPSET_ATTR_IP:
+					result.IPFrom = nested.Value
+				default:
+					log.Printf("unknown nested ipset data attribute from kernel: %+v %v", nested, nested.Type&nl.NLA_TYPE_MASK)
+				}
+			}
+		case nl.IPSET_ATTR_IP_TO | nl.NLA_F_NESTED:
+			for nested := range nl.ParseAttributes(attr.Value) {
+				switch nested.Type {
+				case nl.IPSET_ATTR_IP:
+					result.IPTo = nested.Value
+				default:
+					log.Printf("unknown nested ipset data attribute from kernel: %+v %v", nested, nested.Type&nl.NLA_TYPE_MASK)
+				}
+			}
+		case nl.IPSET_ATTR_PORT_FROM | nl.NLA_F_NET_BYTEORDER:
+			result.PortFrom = networkOrder.Uint16(attr.Value)
+		case nl.IPSET_ATTR_PORT_TO | nl.NLA_F_NET_BYTEORDER:
+			result.PortTo = networkOrder.Uint16(attr.Value)
+		case nl.IPSET_ATTR_CADT_LINENO | nl.NLA_F_NET_BYTEORDER:
+			result.LineNo = attr.Uint32()
+		case nl.IPSET_ATTR_COMMENT:
+			result.Comment = nl.BytesToString(attr.Value)
+		case nl.IPSET_ATTR_MARKMASK:
+			result.MarkMask = attr.Uint32()
+		default:
+			log.Printf("unknown ipset data attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK)
+		}
+	}
+}
+
+func (result *IPSetResult) parseAttrADT(data []byte) {
+	for attr := range nl.ParseAttributes(data) {
+		switch attr.Type {
+		case nl.IPSET_ATTR_DATA | nl.NLA_F_NESTED:
+			result.Entries = append(result.Entries, parseIPSetEntry(attr.Value))
+		default:
+			log.Printf("unknown ADT attribute from kernel: %+v %v", attr, attr.Type&nl.NLA_TYPE_MASK)
+		}
+	}
+}
+
+func parseIPSetEntry(data []byte) (entry IPSetEntry) {
+	for attr := range nl.ParseAttributes(data) {
+		switch attr.Type {
+		case nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER:
+			val := attr.Uint32()
+			entry.Timeout = &val
+		case nl.IPSET_ATTR_BYTES | nl.NLA_F_NET_BYTEORDER:
+			val := attr.Uint64()
+			entry.Bytes = &val
+		case nl.IPSET_ATTR_PACKETS | nl.NLA_F_NET_BYTEORDER:
+			val := attr.Uint64()
+			entry.Packets = &val
+		case nl.IPSET_ATTR_ETHER:
+			entry.MAC = net.HardwareAddr(attr.Value)
+		case nl.IPSET_ATTR_IP:
+			entry.IP = net.IP(attr.Value)
+		case nl.IPSET_ATTR_COMMENT:
+			entry.Comment = nl.BytesToString(attr.Value)
+		case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED:
+			for attr := range nl.ParseAttributes(attr.Value) {
+				switch attr.Type {
+				case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6:
+					entry.IP = net.IP(attr.Value)
+				default:
+					log.Printf("unknown nested ADT attribute from kernel: %+v", attr)
+				}
+			}
+		case nl.IPSET_ATTR_IP2 | nl.NLA_F_NESTED:
+			for attr := range nl.ParseAttributes(attr.Value) {
+				switch attr.Type {
+				case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6:
+					entry.IP2 = net.IP(attr.Value)
+				default:
+					log.Printf("unknown nested ADT attribute from kernel: %+v", attr)
+				}
+			}
+		case nl.IPSET_ATTR_CIDR:
+			entry.CIDR = attr.Value[0]
+		case nl.IPSET_ATTR_CIDR2:
+			entry.CIDR2 = attr.Value[0]
+		case nl.IPSET_ATTR_PORT | nl.NLA_F_NET_BYTEORDER:
+			val := networkOrder.Uint16(attr.Value)
+			entry.Port = &val
+		case nl.IPSET_ATTR_PROTO:
+			val := attr.Value[0]
+			entry.Protocol = &val
+		case nl.IPSET_ATTR_IFACE:
+			entry.IFace = nl.BytesToString(attr.Value)
+		case nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER:
+			val := attr.Uint32()
+			entry.Mark = &val
+		default:
+			log.Printf("unknown ADT attribute from kernel: %+v", attr)
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
new file mode 100644
index 000000000..f820cdb67
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -0,0 +1,1417 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+)
+
+// Link represents a link device from netlink. Shared link attributes
+// like name may be retrieved using the Attrs() method. Unique data
+// can be retrieved by casting the object to the proper type.
+type Link interface {
+	Attrs() *LinkAttrs
+	Type() string
+}
+
+type (
+	NsPid int
+	NsFd  int
+)
+
+// LinkAttrs represents data shared by most link types
+type LinkAttrs struct {
+	Index          int
+	MTU            int
+	TxQLen         int // Transmit Queue Length
+	Name           string
+	HardwareAddr   net.HardwareAddr
+	Flags          net.Flags
+	RawFlags       uint32
+	ParentIndex    int         // index of the parent link device
+	MasterIndex    int         // must be the index of a bridge
+	Namespace      interface{} // nil | NsPid | NsFd
+	Alias          string
+	AltNames       []string
+	Statistics     *LinkStatistics
+	Promisc        int
+	Allmulti       int
+	Multi          int
+	Xdp            *LinkXdp
+	EncapType      string
+	Protinfo       *Protinfo
+	OperState      LinkOperState
+	PhysSwitchID   int
+	NetNsID        int
+	NumTxQueues    int
+	NumRxQueues    int
+	TSOMaxSegs     uint32
+	TSOMaxSize     uint32
+	GSOMaxSegs     uint32
+	GSOMaxSize     uint32
+	GROMaxSize     uint32
+	GSOIPv4MaxSize uint32
+	GROIPv4MaxSize uint32
+	Vfs            []VfInfo // virtual functions available on link
+	Group          uint32
+	PermHWAddr     net.HardwareAddr
+	Slave          LinkSlave
+}
+
+// LinkSlave represents a slave device.
+type LinkSlave interface {
+	SlaveType() string
+}
+
+// VfInfo represents configuration of virtual function
+type VfInfo struct {
+	ID        int
+	Mac       net.HardwareAddr
+	Vlan      int
+	Qos       int
+	VlanProto int
+	TxRate    int // IFLA_VF_TX_RATE Max TxRate
+	Spoofchk  bool
+	LinkState uint32
+	MaxTxRate uint32 // IFLA_VF_RATE Max TxRate
+	MinTxRate uint32 // IFLA_VF_RATE Min TxRate
+	RxPackets uint64
+	TxPackets uint64
+	RxBytes   uint64
+	TxBytes   uint64
+	Multicast uint64
+	Broadcast uint64
+	RxDropped uint64
+	TxDropped uint64
+
+	RssQuery uint32
+	Trust    uint32
+}
+
+// LinkOperState represents the values of the IFLA_OPERSTATE link
+// attribute, which contains the RFC2863 state of the interface.
+type LinkOperState uint8
+
+const (
+	OperUnknown        = iota // Status can't be determined.
+	OperNotPresent            // Some component is missing.
+	OperDown                  // Down.
+	OperLowerLayerDown        // Down due to state of lower layer.
+	OperTesting               // In some test mode.
+	OperDormant               // Not up but pending an external event.
+	OperUp                    // Up, ready to send packets.
+) + +func (s LinkOperState) String() string { + switch s { + case OperNotPresent: + return "not-present" + case OperDown: + return "down" + case OperLowerLayerDown: + return "lower-layer-down" + case OperTesting: + return "testing" + case OperDormant: + return "dormant" + case OperUp: + return "up" + default: + return "unknown" + } +} + +// NewLinkAttrs returns LinkAttrs structure filled with default values +func NewLinkAttrs() LinkAttrs { + return LinkAttrs{ + NetNsID: -1, + TxQLen: -1, + } +} + +type LinkStatistics LinkStatistics64 + +/* +Ref: struct rtnl_link_stats {...} +*/ +type LinkStatistics32 struct { + RxPackets uint32 + TxPackets uint32 + RxBytes uint32 + TxBytes uint32 + RxErrors uint32 + TxErrors uint32 + RxDropped uint32 + TxDropped uint32 + Multicast uint32 + Collisions uint32 + RxLengthErrors uint32 + RxOverErrors uint32 + RxCrcErrors uint32 + RxFrameErrors uint32 + RxFifoErrors uint32 + RxMissedErrors uint32 + TxAbortedErrors uint32 + TxCarrierErrors uint32 + TxFifoErrors uint32 + TxHeartbeatErrors uint32 + TxWindowErrors uint32 + RxCompressed uint32 + TxCompressed uint32 +} + +func (s32 LinkStatistics32) to64() *LinkStatistics64 { + return &LinkStatistics64{ + RxPackets: uint64(s32.RxPackets), + TxPackets: uint64(s32.TxPackets), + RxBytes: uint64(s32.RxBytes), + TxBytes: uint64(s32.TxBytes), + RxErrors: uint64(s32.RxErrors), + TxErrors: uint64(s32.TxErrors), + RxDropped: uint64(s32.RxDropped), + TxDropped: uint64(s32.TxDropped), + Multicast: uint64(s32.Multicast), + Collisions: uint64(s32.Collisions), + RxLengthErrors: uint64(s32.RxLengthErrors), + RxOverErrors: uint64(s32.RxOverErrors), + RxCrcErrors: uint64(s32.RxCrcErrors), + RxFrameErrors: uint64(s32.RxFrameErrors), + RxFifoErrors: uint64(s32.RxFifoErrors), + RxMissedErrors: uint64(s32.RxMissedErrors), + TxAbortedErrors: uint64(s32.TxAbortedErrors), + TxCarrierErrors: uint64(s32.TxCarrierErrors), + TxFifoErrors: uint64(s32.TxFifoErrors), + TxHeartbeatErrors: uint64(s32.TxHeartbeatErrors), + TxWindowErrors: uint64(s32.TxWindowErrors), + RxCompressed: uint64(s32.RxCompressed), + TxCompressed: uint64(s32.TxCompressed), + } +} + +/* +Ref: struct rtnl_link_stats64 {...} +*/ +type LinkStatistics64 struct { + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + RxErrors uint64 + TxErrors uint64 + RxDropped uint64 + TxDropped uint64 + Multicast uint64 + Collisions uint64 + RxLengthErrors uint64 + RxOverErrors uint64 + RxCrcErrors uint64 + RxFrameErrors uint64 + RxFifoErrors uint64 + RxMissedErrors uint64 + TxAbortedErrors uint64 + TxCarrierErrors uint64 + TxFifoErrors uint64 + TxHeartbeatErrors uint64 + TxWindowErrors uint64 + RxCompressed uint64 + TxCompressed uint64 +} + +type LinkXdp struct { + Fd int + Attached bool + AttachMode uint32 + Flags uint32 + ProgId uint32 +} + +// Device links cannot be created via netlink. 
+// These links are links created by udev like 'lo' and 'eth0'
+type Device struct {
+	LinkAttrs
+}
+
+func (device *Device) Attrs() *LinkAttrs {
+	return &device.LinkAttrs
+}
+
+func (device *Device) Type() string {
+	return "device"
+}
+
+// Dummy links are dummy ethernet devices
+type Dummy struct {
+	LinkAttrs
+}
+
+func (dummy *Dummy) Attrs() *LinkAttrs {
+	return &dummy.LinkAttrs
+}
+
+func (dummy *Dummy) Type() string {
+	return "dummy"
+}
+
+// Ifb links are advanced dummy devices for packet filtering
+type Ifb struct {
+	LinkAttrs
+}
+
+func (ifb *Ifb) Attrs() *LinkAttrs {
+	return &ifb.LinkAttrs
+}
+
+func (ifb *Ifb) Type() string {
+	return "ifb"
+}
+
+// Bridge links are simple linux bridges
+type Bridge struct {
+	LinkAttrs
+	MulticastSnooping *bool
+	AgeingTime        *uint32
+	HelloTime         *uint32
+	VlanFiltering     *bool
+	VlanDefaultPVID   *uint16
+	GroupFwdMask      *uint16
+}
+
+func (bridge *Bridge) Attrs() *LinkAttrs {
+	return &bridge.LinkAttrs
+}
+
+func (bridge *Bridge) Type() string {
+	return "bridge"
+}
+
+// Vlan links have ParentIndex set in their Attrs()
+type Vlan struct {
+	LinkAttrs
+	VlanId       int
+	VlanProtocol VlanProtocol
+}
+
+func (vlan *Vlan) Attrs() *LinkAttrs {
+	return &vlan.LinkAttrs
+}
+
+func (vlan *Vlan) Type() string {
+	return "vlan"
+}
+
+type MacvlanMode uint16
+
+const (
+	MACVLAN_MODE_DEFAULT MacvlanMode = iota
+	MACVLAN_MODE_PRIVATE
+	MACVLAN_MODE_VEPA
+	MACVLAN_MODE_BRIDGE
+	MACVLAN_MODE_PASSTHRU
+	MACVLAN_MODE_SOURCE
+)
+
+// Macvlan links have ParentIndex set in their Attrs()
+type Macvlan struct {
+	LinkAttrs
+	Mode MacvlanMode
+
+	// MACAddrs is only populated for Macvlan SOURCE links
+	MACAddrs []net.HardwareAddr
+
+	BCQueueLen     uint32
+	UsedBCQueueLen uint32
+}
+
+func (macvlan *Macvlan) Attrs() *LinkAttrs {
+	return &macvlan.LinkAttrs
+}
+
+func (macvlan *Macvlan) Type() string {
+	return "macvlan"
+}
+
+// Macvtap - macvtap is a virtual interface based on macvlan
+type Macvtap struct {
+	Macvlan
+}
+
+func (macvtap Macvtap) Type() string {
+	return "macvtap"
+}
+
+type TuntapMode uint16
+type TuntapFlag uint16
+
+// Tuntap links are created via /dev/tun/tap, but can be destroyed via netlink
+type Tuntap struct {
+	LinkAttrs
+	Mode       TuntapMode
+	Flags      TuntapFlag
+	NonPersist bool
+	Queues     int
+	Fds        []*os.File
+	Owner      uint32
+	Group      uint32
+}
+
+func (tuntap *Tuntap) Attrs() *LinkAttrs {
+	return &tuntap.LinkAttrs
+}
+
+func (tuntap *Tuntap) Type() string {
+	return "tuntap"
+}
+
+type NetkitMode uint32
+
+const (
+	NETKIT_MODE_L2 NetkitMode = iota
+	NETKIT_MODE_L3
+)
+
+type NetkitPolicy int
+
+const (
+	NETKIT_POLICY_FORWARD   NetkitPolicy = 0
+	NETKIT_POLICY_BLACKHOLE NetkitPolicy = 2
+)
+
+func (n *Netkit) IsPrimary() bool {
+	return n.isPrimary
+}
+
+// SetPeerAttrs will not take effect if trying to modify an existing netkit device
+func (n *Netkit) SetPeerAttrs(Attrs *LinkAttrs) {
+	n.peerLinkAttrs = *Attrs
+}
+
+type Netkit struct {
+	LinkAttrs
+	Mode          NetkitMode
+	Policy        NetkitPolicy
+	PeerPolicy    NetkitPolicy
+	isPrimary     bool
+	peerLinkAttrs LinkAttrs
+}
+
+func (n *Netkit) Attrs() *LinkAttrs {
+	return &n.LinkAttrs
+}
+
+func (n *Netkit) Type() string {
+	return "netkit"
+}
+
+// Veth devices must specify PeerName on create
+type Veth struct {
+	LinkAttrs
+	PeerName         string // veth on create only
+	PeerHardwareAddr net.HardwareAddr
+	PeerNamespace    interface{}
+}
+
+func (veth *Veth) Attrs() *LinkAttrs {
+	return &veth.LinkAttrs
+}
+
+func (veth *Veth) Type() string {
+	return "veth"
+}
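The concrete link types above all embed `LinkAttrs` and satisfy the `Link` interface, so they can be passed directly to the package's `LinkAdd` and `LinkSet*` helpers defined in `link_linux.go` below. A minimal usage sketch, not part of the patch, assuming a process with CAP_NET_ADMIN; the device names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	// Create a bridge, then a veth pair, and enslave one veth end to it.
	br := &netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: "br-demo"}}
	if err := netlink.LinkAdd(br); err != nil {
		fmt.Println("bridge add:", err) // requires CAP_NET_ADMIN
		return
	}
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth-demo0"},
		PeerName:  "veth-demo1", // Veth devices must specify PeerName on create
	}
	if err := netlink.LinkAdd(veth); err != nil {
		fmt.Println("veth add:", err)
		return
	}
	// Equivalent to `ip link set veth-demo0 master br-demo`.
	if err := netlink.LinkSetMaster(veth, br); err != nil {
		fmt.Println("set master:", err)
		return
	}
	_ = netlink.LinkSetUp(veth)
	_ = netlink.LinkSetUp(br)
}
```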
+
+// Wireguard represents links of type "wireguard", see https://www.wireguard.com/
+type Wireguard struct {
+	LinkAttrs
+}
+
+func (wg *Wireguard) Attrs() *LinkAttrs {
+	return &wg.LinkAttrs
+}
+
+func (wg *Wireguard) Type() string {
+	return "wireguard"
+}
+
+// GenericLink links represent types that are not currently understood
+// by this netlink library.
+type GenericLink struct {
+	LinkAttrs
+	LinkType string
+}
+
+func (generic *GenericLink) Attrs() *LinkAttrs {
+	return &generic.LinkAttrs
+}
+
+func (generic *GenericLink) Type() string {
+	return generic.LinkType
+}
+
+type Vxlan struct {
+	LinkAttrs
+	VxlanId        int
+	VtepDevIndex   int
+	SrcAddr        net.IP
+	Group          net.IP
+	TTL            int
+	TOS            int
+	Learning       bool
+	Proxy          bool
+	RSC            bool
+	L2miss         bool
+	L3miss         bool
+	UDPCSum        bool
+	UDP6ZeroCSumTx bool
+	UDP6ZeroCSumRx bool
+	NoAge          bool
+	GBP            bool
+	FlowBased      bool
+	Age            int
+	Limit          int
+	Port           int
+	PortLow        int
+	PortHigh       int
+}
+
+func (vxlan *Vxlan) Attrs() *LinkAttrs {
+	return &vxlan.LinkAttrs
+}
+
+func (vxlan *Vxlan) Type() string {
+	return "vxlan"
+}
+
+type IPVlanMode uint16
+
+const (
+	IPVLAN_MODE_L2 IPVlanMode = iota
+	IPVLAN_MODE_L3
+	IPVLAN_MODE_L3S
+	IPVLAN_MODE_MAX
+)
+
+type IPVlanFlag uint16
+
+const (
+	IPVLAN_FLAG_BRIDGE IPVlanFlag = iota
+	IPVLAN_FLAG_PRIVATE
+	IPVLAN_FLAG_VEPA
+)
+
+type IPVlan struct {
+	LinkAttrs
+	Mode IPVlanMode
+	Flag IPVlanFlag
+}
+
+func (ipvlan *IPVlan) Attrs() *LinkAttrs {
+	return &ipvlan.LinkAttrs
+}
+
+func (ipvlan *IPVlan) Type() string {
+	return "ipvlan"
+}
+
+// IPVtap - IPVtap is a virtual interface based on ipvlan
+type IPVtap struct {
+	IPVlan
+}
+
+func (ipvtap *IPVtap) Attrs() *LinkAttrs {
+	return &ipvtap.LinkAttrs
+}
+
+func (ipvtap IPVtap) Type() string {
+	return "ipvtap"
+}
+
+// VlanProtocol type
+type VlanProtocol int
+
+func (p VlanProtocol) String() string {
+	s, ok := VlanProtocolToString[p]
+	if !ok {
+		return fmt.Sprintf("VlanProtocol(%d)", p)
+	}
+	return s
+}
+
+// StringToVlanProtocol returns vlan protocol, or unknown if s is invalid.
+func StringToVlanProtocol(s string) VlanProtocol {
+	mode, ok := StringToVlanProtocolMap[s]
+	if !ok {
+		return VLAN_PROTOCOL_UNKNOWN
+	}
+	return mode
+}
+
+// VlanProtocol possible values
+const (
+	VLAN_PROTOCOL_UNKNOWN VlanProtocol = 0
+	VLAN_PROTOCOL_8021Q   VlanProtocol = 0x8100
+	VLAN_PROTOCOL_8021AD  VlanProtocol = 0x88A8
+)
+
+var VlanProtocolToString = map[VlanProtocol]string{
+	VLAN_PROTOCOL_8021Q:  "802.1q",
+	VLAN_PROTOCOL_8021AD: "802.1ad",
+}
+
+var StringToVlanProtocolMap = map[string]VlanProtocol{
+	"802.1q":  VLAN_PROTOCOL_8021Q,
+	"802.1ad": VLAN_PROTOCOL_8021AD,
+}
+
+// BondMode type
+type BondMode int
+
+func (b BondMode) String() string {
+	s, ok := bondModeToString[b]
+	if !ok {
+		return fmt.Sprintf("BondMode(%d)", b)
+	}
+	return s
+}
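The `Vlan` and `VlanProtocol` types combine as follows when creating an 802.1q subinterface. A minimal sketch, assuming a parent device named `eth0` exists and the process has CAP_NET_ADMIN:

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	parent, err := netlink.LinkByName("eth0") // assumes eth0 exists
	if err != nil {
		fmt.Println("parent lookup:", err)
		return
	}
	attrs := netlink.NewLinkAttrs()
	attrs.Name = "eth0.100"
	attrs.ParentIndex = parent.Attrs().Index // Vlan links have ParentIndex set
	vlan := &netlink.Vlan{
		LinkAttrs:    attrs,
		VlanId:       100,
		VlanProtocol: netlink.VLAN_PROTOCOL_8021Q,
	}
	if err := netlink.LinkAdd(vlan); err != nil {
		fmt.Println("vlan add:", err) // requires CAP_NET_ADMIN
	}
}
```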
+
+// StringToBondMode returns bond mode, or unknown if s is invalid.
+func StringToBondMode(s string) BondMode {
+	mode, ok := StringToBondModeMap[s]
+	if !ok {
+		return BOND_MODE_UNKNOWN
+	}
+	return mode
+}
+
+// Possible BondMode
+const (
+	BOND_MODE_BALANCE_RR BondMode = iota
+	BOND_MODE_ACTIVE_BACKUP
+	BOND_MODE_BALANCE_XOR
+	BOND_MODE_BROADCAST
+	BOND_MODE_802_3AD
+	BOND_MODE_BALANCE_TLB
+	BOND_MODE_BALANCE_ALB
+	BOND_MODE_UNKNOWN
+)
+
+var bondModeToString = map[BondMode]string{
+	BOND_MODE_BALANCE_RR:    "balance-rr",
+	BOND_MODE_ACTIVE_BACKUP: "active-backup",
+	BOND_MODE_BALANCE_XOR:   "balance-xor",
+	BOND_MODE_BROADCAST:     "broadcast",
+	BOND_MODE_802_3AD:       "802.3ad",
+	BOND_MODE_BALANCE_TLB:   "balance-tlb",
+	BOND_MODE_BALANCE_ALB:   "balance-alb",
+}
+var StringToBondModeMap = map[string]BondMode{
+	"balance-rr":    BOND_MODE_BALANCE_RR,
+	"active-backup": BOND_MODE_ACTIVE_BACKUP,
+	"balance-xor":   BOND_MODE_BALANCE_XOR,
+	"broadcast":     BOND_MODE_BROADCAST,
+	"802.3ad":       BOND_MODE_802_3AD,
+	"balance-tlb":   BOND_MODE_BALANCE_TLB,
+	"balance-alb":   BOND_MODE_BALANCE_ALB,
+}
+
+// BondArpValidate type
+type BondArpValidate int
+
+// Possible BondArpValidate value
+const (
+	BOND_ARP_VALIDATE_NONE BondArpValidate = iota
+	BOND_ARP_VALIDATE_ACTIVE
+	BOND_ARP_VALIDATE_BACKUP
+	BOND_ARP_VALIDATE_ALL
+)
+
+var bondArpValidateToString = map[BondArpValidate]string{
+	BOND_ARP_VALIDATE_NONE:   "none",
+	BOND_ARP_VALIDATE_ACTIVE: "active",
+	BOND_ARP_VALIDATE_BACKUP: "backup",
+	BOND_ARP_VALIDATE_ALL:    "all",
+}
+var StringToBondArpValidateMap = map[string]BondArpValidate{
+	"none":   BOND_ARP_VALIDATE_NONE,
+	"active": BOND_ARP_VALIDATE_ACTIVE,
+	"backup": BOND_ARP_VALIDATE_BACKUP,
+	"all":    BOND_ARP_VALIDATE_ALL,
+}
+
+func (b BondArpValidate) String() string {
+	s, ok := bondArpValidateToString[b]
+	if !ok {
+		return fmt.Sprintf("BondArpValidate(%d)", b)
+	}
+	return s
+}
+
+// BondPrimaryReselect type
+type BondPrimaryReselect int
+
+// Possible BondPrimaryReselect value
+const (
+	BOND_PRIMARY_RESELECT_ALWAYS BondPrimaryReselect = iota
+	BOND_PRIMARY_RESELECT_BETTER
+	BOND_PRIMARY_RESELECT_FAILURE
+)
+
+var bondPrimaryReselectToString = map[BondPrimaryReselect]string{
+	BOND_PRIMARY_RESELECT_ALWAYS:  "always",
+	BOND_PRIMARY_RESELECT_BETTER:  "better",
+	BOND_PRIMARY_RESELECT_FAILURE: "failure",
+}
+var StringToBondPrimaryReselectMap = map[string]BondPrimaryReselect{
+	"always":  BOND_PRIMARY_RESELECT_ALWAYS,
+	"better":  BOND_PRIMARY_RESELECT_BETTER,
+	"failure": BOND_PRIMARY_RESELECT_FAILURE,
+}
+
+func (b BondPrimaryReselect) String() string {
+	s, ok := bondPrimaryReselectToString[b]
+	if !ok {
+		return fmt.Sprintf("BondPrimaryReselect(%d)", b)
+	}
+	return s
+}
+
+// BondArpAllTargets type
+type BondArpAllTargets int
+
+// Possible BondArpAllTargets value
+const (
+	BOND_ARP_ALL_TARGETS_ANY BondArpAllTargets = iota
+	BOND_ARP_ALL_TARGETS_ALL
+)
+
+var bondArpAllTargetsToString = map[BondArpAllTargets]string{
+	BOND_ARP_ALL_TARGETS_ANY: "any",
+	BOND_ARP_ALL_TARGETS_ALL: "all",
+}
+var StringToBondArpAllTargetsMap = map[string]BondArpAllTargets{
+	"any": BOND_ARP_ALL_TARGETS_ANY,
+	"all": BOND_ARP_ALL_TARGETS_ALL,
+}
+
+func (b BondArpAllTargets) String() string {
+	s, ok := bondArpAllTargetsToString[b]
+	if !ok {
+		return fmt.Sprintf("BondArpAllTargets(%d)", b)
+	}
+	return s
+}
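These bond option enums are consumed through the `Bond` struct defined further down; `NewLinkBond` pre-fills every option with `-1` ("unset") so that only explicitly assigned options are serialized into the netlink request. A minimal sketch of creating an 802.3ad bond, again assuming CAP_NET_ADMIN and a hypothetical device name:

```go
package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func main() {
	attrs := netlink.NewLinkAttrs()
	attrs.Name = "bond-demo"
	bond := netlink.NewLinkBond(attrs)
	bond.Mode = netlink.StringToBondMode("802.3ad")
	bond.LacpRate = netlink.BOND_LACP_RATE_FAST
	bond.XmitHashPolicy = netlink.StringToBondXmitHashPolicy("layer3+4")
	bond.Miimon = 100 // MII link-monitoring interval, in milliseconds

	if err := netlink.LinkAdd(bond); err != nil {
		fmt.Println("bond add:", err) // requires CAP_NET_ADMIN
	}
}
```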
"none", + BOND_FAIL_OVER_MAC_ACTIVE: "active", + BOND_FAIL_OVER_MAC_FOLLOW: "follow", +} +var StringToBondFailOverMacMap = map[string]BondFailOverMac{ + "none": BOND_FAIL_OVER_MAC_NONE, + "active": BOND_FAIL_OVER_MAC_ACTIVE, + "follow": BOND_FAIL_OVER_MAC_FOLLOW, +} + +func (b BondFailOverMac) String() string { + s, ok := bondFailOverMacToString[b] + if !ok { + return fmt.Sprintf("BondFailOverMac(%d)", b) + } + return s +} + +// BondXmitHashPolicy type +type BondXmitHashPolicy int + +func (b BondXmitHashPolicy) String() string { + s, ok := bondXmitHashPolicyToString[b] + if !ok { + return fmt.Sprintf("XmitHashPolicy(%d)", b) + } + return s +} + +// StringToBondXmitHashPolicy returns bond lacp arte, or unknown is the s is invalid. +func StringToBondXmitHashPolicy(s string) BondXmitHashPolicy { + lacp, ok := StringToBondXmitHashPolicyMap[s] + if !ok { + return BOND_XMIT_HASH_POLICY_UNKNOWN + } + return lacp +} + +// Possible BondXmitHashPolicy value +const ( + BOND_XMIT_HASH_POLICY_LAYER2 BondXmitHashPolicy = iota + BOND_XMIT_HASH_POLICY_LAYER3_4 + BOND_XMIT_HASH_POLICY_LAYER2_3 + BOND_XMIT_HASH_POLICY_ENCAP2_3 + BOND_XMIT_HASH_POLICY_ENCAP3_4 + BOND_XMIT_HASH_POLICY_VLAN_SRCMAC + BOND_XMIT_HASH_POLICY_UNKNOWN +) + +var bondXmitHashPolicyToString = map[BondXmitHashPolicy]string{ + BOND_XMIT_HASH_POLICY_LAYER2: "layer2", + BOND_XMIT_HASH_POLICY_LAYER3_4: "layer3+4", + BOND_XMIT_HASH_POLICY_LAYER2_3: "layer2+3", + BOND_XMIT_HASH_POLICY_ENCAP2_3: "encap2+3", + BOND_XMIT_HASH_POLICY_ENCAP3_4: "encap3+4", + BOND_XMIT_HASH_POLICY_VLAN_SRCMAC: "vlan+srcmac", +} +var StringToBondXmitHashPolicyMap = map[string]BondXmitHashPolicy{ + "layer2": BOND_XMIT_HASH_POLICY_LAYER2, + "layer3+4": BOND_XMIT_HASH_POLICY_LAYER3_4, + "layer2+3": BOND_XMIT_HASH_POLICY_LAYER2_3, + "encap2+3": BOND_XMIT_HASH_POLICY_ENCAP2_3, + "encap3+4": BOND_XMIT_HASH_POLICY_ENCAP3_4, + "vlan+srcmac": BOND_XMIT_HASH_POLICY_VLAN_SRCMAC, +} + +// BondLacpRate type +type BondLacpRate int + +func (b BondLacpRate) String() string { + s, ok := bondLacpRateToString[b] + if !ok { + return fmt.Sprintf("LacpRate(%d)", b) + } + return s +} + +// StringToBondLacpRate returns bond lacp arte, or unknown is the s is invalid. 
+
+// StringToBondLacpRate returns bond lacp rate, or unknown if s is invalid.
+func StringToBondLacpRate(s string) BondLacpRate {
+	lacp, ok := StringToBondLacpRateMap[s]
+	if !ok {
+		return BOND_LACP_RATE_UNKNOWN
+	}
+	return lacp
+}
+
+// Possible BondLacpRate value
+const (
+	BOND_LACP_RATE_SLOW BondLacpRate = iota
+	BOND_LACP_RATE_FAST
+	BOND_LACP_RATE_UNKNOWN
+)
+
+var bondLacpRateToString = map[BondLacpRate]string{
+	BOND_LACP_RATE_SLOW: "slow",
+	BOND_LACP_RATE_FAST: "fast",
+}
+var StringToBondLacpRateMap = map[string]BondLacpRate{
+	"slow": BOND_LACP_RATE_SLOW,
+	"fast": BOND_LACP_RATE_FAST,
+}
+
+// BondAdSelect type
+type BondAdSelect int
+
+// Possible BondAdSelect value
+const (
+	BOND_AD_SELECT_STABLE BondAdSelect = iota
+	BOND_AD_SELECT_BANDWIDTH
+	BOND_AD_SELECT_COUNT
+)
+
+var bondAdSelectToString = map[BondAdSelect]string{
+	BOND_AD_SELECT_STABLE:    "stable",
+	BOND_AD_SELECT_BANDWIDTH: "bandwidth",
+	BOND_AD_SELECT_COUNT:     "count",
+}
+var StringToBondAdSelectMap = map[string]BondAdSelect{
+	"stable":    BOND_AD_SELECT_STABLE,
+	"bandwidth": BOND_AD_SELECT_BANDWIDTH,
+	"count":     BOND_AD_SELECT_COUNT,
+}
+
+func (b BondAdSelect) String() string {
+	s, ok := bondAdSelectToString[b]
+	if !ok {
+		return fmt.Sprintf("BondAdSelect(%d)", b)
+	}
+	return s
+}
+
+// BondAdInfo represents ad info for bond
+type BondAdInfo struct {
+	AggregatorId int
+	NumPorts     int
+	ActorKey     int
+	PartnerKey   int
+	PartnerMac   net.HardwareAddr
+}
+
+// Bond representation
+type Bond struct {
+	LinkAttrs
+	Mode            BondMode
+	ActiveSlave     int
+	Miimon          int
+	UpDelay         int
+	DownDelay       int
+	UseCarrier      int
+	ArpInterval     int
+	ArpIpTargets    []net.IP
+	ArpValidate     BondArpValidate
+	ArpAllTargets   BondArpAllTargets
+	Primary         int
+	PrimaryReselect BondPrimaryReselect
+	FailOverMac     BondFailOverMac
+	XmitHashPolicy  BondXmitHashPolicy
+	ResendIgmp      int
+	NumPeerNotif    int
+	AllSlavesActive int
+	MinLinks        int
+	LpInterval      int
+	PacketsPerSlave int
+	LacpRate        BondLacpRate
+	AdSelect        BondAdSelect
+	// Looking at the iproute tool, AdInfo can only be retrieved. It can't be set.
+	AdInfo         *BondAdInfo
+	AdActorSysPrio int
+	AdUserPortKey  int
+	AdActorSystem  net.HardwareAddr
+	TlbDynamicLb   int
+}
+
+func NewLinkBond(atr LinkAttrs) *Bond {
+	return &Bond{
+		LinkAttrs:       atr,
+		Mode:            -1,
+		ActiveSlave:     -1,
+		Miimon:          -1,
+		UpDelay:         -1,
+		DownDelay:       -1,
+		UseCarrier:      -1,
+		ArpInterval:     -1,
+		ArpIpTargets:    nil,
+		ArpValidate:     -1,
+		ArpAllTargets:   -1,
+		Primary:         -1,
+		PrimaryReselect: -1,
+		FailOverMac:     -1,
+		XmitHashPolicy:  -1,
+		ResendIgmp:      -1,
+		NumPeerNotif:    -1,
+		AllSlavesActive: -1,
+		MinLinks:        -1,
+		LpInterval:      -1,
+		PacketsPerSlave: -1,
+		LacpRate:        -1,
+		AdSelect:        -1,
+		AdActorSysPrio:  -1,
+		AdUserPortKey:   -1,
+		AdActorSystem:   nil,
+		TlbDynamicLb:    -1,
+	}
+}
+
+// Flag mask for bond options. The corresponding bit in Bond.Flagmask must be set for an option to work.
+const (
+	BOND_MODE_MASK uint64 = 1 << (1 + iota)
+	BOND_ACTIVE_SLAVE_MASK
+	BOND_MIIMON_MASK
+	BOND_UPDELAY_MASK
+	BOND_DOWNDELAY_MASK
+	BOND_USE_CARRIER_MASK
+	BOND_ARP_INTERVAL_MASK
+	BOND_ARP_VALIDATE_MASK
+	BOND_ARP_ALL_TARGETS_MASK
+	BOND_PRIMARY_MASK
+	BOND_PRIMARY_RESELECT_MASK
+	BOND_FAIL_OVER_MAC_MASK
+	BOND_XMIT_HASH_POLICY_MASK
+	BOND_RESEND_IGMP_MASK
+	BOND_NUM_PEER_NOTIF_MASK
+	BOND_ALL_SLAVES_ACTIVE_MASK
+	BOND_MIN_LINKS_MASK
+	BOND_LP_INTERVAL_MASK
+	BOND_PACKETS_PER_SLAVE_MASK
+	BOND_LACP_RATE_MASK
+	BOND_AD_SELECT_MASK
+)
+
+// Attrs implementation.
+func (bond *Bond) Attrs() *LinkAttrs {
+	return &bond.LinkAttrs
+}
+
+// Type implementation for Bond.
+func (bond *Bond) Type() string { + return "bond" +} + +// BondSlaveState represents the values of the IFLA_BOND_SLAVE_STATE bond slave +// attribute, which contains the state of the bond slave. +type BondSlaveState uint8 + +const ( + //BondStateActive Link is active. + BondStateActive BondSlaveState = iota + //BondStateBackup Link is backup. + BondStateBackup +) + +func (s BondSlaveState) String() string { + switch s { + case BondStateActive: + return "ACTIVE" + case BondStateBackup: + return "BACKUP" + default: + return strconv.Itoa(int(s)) + } +} + +// BondSlaveMiiStatus represents the values of the IFLA_BOND_SLAVE_MII_STATUS bond slave +// attribute, which contains the status of MII link monitoring +type BondSlaveMiiStatus uint8 + +const ( + //BondLinkUp link is up and running. + BondLinkUp BondSlaveMiiStatus = iota + //BondLinkFail link has just gone down. + BondLinkFail + //BondLinkDown link has been down for too long time. + BondLinkDown + //BondLinkBack link is going back. + BondLinkBack +) + +func (s BondSlaveMiiStatus) String() string { + switch s { + case BondLinkUp: + return "UP" + case BondLinkFail: + return "GOING_DOWN" + case BondLinkDown: + return "DOWN" + case BondLinkBack: + return "GOING_BACK" + default: + return strconv.Itoa(int(s)) + } +} + +type BondSlave struct { + State BondSlaveState + MiiStatus BondSlaveMiiStatus + LinkFailureCount uint32 + PermHardwareAddr net.HardwareAddr + QueueId uint16 + AggregatorId uint16 + AdActorOperPortState uint8 + AdPartnerOperPortState uint16 +} + +func (b *BondSlave) SlaveType() string { + return "bond" +} + +type VrfSlave struct { + Table uint32 +} + +func (v *VrfSlave) SlaveType() string { + return "vrf" +} + +// Geneve devices must specify RemoteIP and ID (VNI) on create +// https://github.com/torvalds/linux/blob/47ec5303d73ea344e84f46660fff693c57641386/drivers/net/geneve.c#L1209-L1223 +type Geneve struct { + LinkAttrs + ID uint32 // vni + Remote net.IP + Ttl uint8 + Tos uint8 + Dport uint16 + UdpCsum uint8 + UdpZeroCsum6Tx uint8 + UdpZeroCsum6Rx uint8 + Link uint32 + FlowBased bool + InnerProtoInherit bool + Df GeneveDf +} + +func (geneve *Geneve) Attrs() *LinkAttrs { + return &geneve.LinkAttrs +} + +func (geneve *Geneve) Type() string { + return "geneve" +} + +type GeneveDf uint8 + +const ( + GENEVE_DF_UNSET GeneveDf = iota + GENEVE_DF_SET + GENEVE_DF_INHERIT + GENEVE_DF_MAX +) + +// Gretap devices must specify LocalIP and RemoteIP on create +type Gretap struct { + LinkAttrs + IKey uint32 + OKey uint32 + EncapSport uint16 + EncapDport uint16 + Local net.IP + Remote net.IP + IFlags uint16 + OFlags uint16 + PMtuDisc uint8 + Ttl uint8 + Tos uint8 + EncapType uint16 + EncapFlags uint16 + Link uint32 + FlowBased bool +} + +func (gretap *Gretap) Attrs() *LinkAttrs { + return &gretap.LinkAttrs +} + +func (gretap *Gretap) Type() string { + if gretap.Local.To4() == nil { + return "ip6gretap" + } + return "gretap" +} + +type Iptun struct { + LinkAttrs + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + Link uint32 + Local net.IP + Remote net.IP + EncapSport uint16 + EncapDport uint16 + EncapType uint16 + EncapFlags uint16 + FlowBased bool + Proto uint8 +} + +func (iptun *Iptun) Attrs() *LinkAttrs { + return &iptun.LinkAttrs +} + +func (iptun *Iptun) Type() string { + return "ipip" +} + +type Ip6tnl struct { + LinkAttrs + Link uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + Flags uint32 + Proto uint8 + FlowInfo uint32 + EncapLimit uint8 + EncapType uint16 + EncapFlags uint16 + EncapSport uint16 + EncapDport uint16 + FlowBased bool 
+}
+
+func (ip6tnl *Ip6tnl) Attrs() *LinkAttrs {
+	return &ip6tnl.LinkAttrs
+}
+
+func (ip6tnl *Ip6tnl) Type() string {
+	return "ip6tnl"
+}
+
+// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L84
+type TunnelEncapType uint16
+
+const (
+	None TunnelEncapType = iota
+	FOU
+	GUE
+)
+
+// from https://elixir.bootlin.com/linux/v5.15.4/source/include/uapi/linux/if_tunnel.h#L91
+type TunnelEncapFlag uint16
+
+const (
+	CSum    TunnelEncapFlag = 1 << 0
+	CSum6                   = 1 << 1
+	RemCSum                 = 1 << 2
+)
+
+// from https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/ip6_tunnel.h#L12
+type IP6TunnelFlag uint16
+
+const (
+	IP6_TNL_F_IGN_ENCAP_LIMIT    IP6TunnelFlag = 0x1  // don't add encapsulation limit if one isn't present in inner packet
+	IP6_TNL_F_USE_ORIG_TCLASS                  = 0x2  // copy the traffic class field from the inner packet
+	IP6_TNL_F_USE_ORIG_FLOWLABEL               = 0x4  // copy the flowlabel from the inner packet
+	IP6_TNL_F_MIP6_DEV                         = 0x8  // being used for Mobile IPv6
+	IP6_TNL_F_RCV_DSCP_COPY                    = 0x10 // copy DSCP from the outer packet
+	IP6_TNL_F_USE_ORIG_FWMARK                  = 0x20 // copy fwmark from inner packet
+	IP6_TNL_F_ALLOW_LOCAL_REMOTE               = 0x40 // allow remote endpoint on the local node
+)
+
+type Sittun struct {
+	LinkAttrs
+	Link       uint32
+	Ttl        uint8
+	Tos        uint8
+	PMtuDisc   uint8
+	Proto      uint8
+	Local      net.IP
+	Remote     net.IP
+	EncapLimit uint8
+	EncapType  uint16
+	EncapFlags uint16
+	EncapSport uint16
+	EncapDport uint16
+}
+
+func (sittun *Sittun) Attrs() *LinkAttrs {
+	return &sittun.LinkAttrs
+}
+
+func (sittun *Sittun) Type() string {
+	return "sit"
+}
+
+type Vti struct {
+	LinkAttrs
+	IKey   uint32
+	OKey   uint32
+	Link   uint32
+	Local  net.IP
+	Remote net.IP
+}
+
+func (vti *Vti) Attrs() *LinkAttrs {
+	return &vti.LinkAttrs
+}
+
+func (vti *Vti) Type() string {
+	if vti.Local.To4() == nil {
+		return "vti6"
+	}
+	return "vti"
+}
+
+type Gretun struct {
+	LinkAttrs
+	Link       uint32
+	IFlags     uint16
+	OFlags     uint16
+	IKey       uint32
+	OKey       uint32
+	Local      net.IP
+	Remote     net.IP
+	Ttl        uint8
+	Tos        uint8
+	PMtuDisc   uint8
+	EncapType  uint16
+	EncapFlags uint16
+	EncapSport uint16
+	EncapDport uint16
+	FlowBased  bool
+}
+
+func (gretun *Gretun) Attrs() *LinkAttrs {
+	return &gretun.LinkAttrs
+}
+
+func (gretun *Gretun) Type() string {
+	if gretun.Local.To4() == nil {
+		return "ip6gre"
+	}
+	return "gre"
+}
+
+type Vrf struct {
+	LinkAttrs
+	Table uint32
+}
+
+func (vrf *Vrf) Attrs() *LinkAttrs {
+	return &vrf.LinkAttrs
+}
+
+func (vrf *Vrf) Type() string {
+	return "vrf"
+}
+
+type GTP struct {
+	LinkAttrs
+	FD0         int
+	FD1         int
+	Role        int
+	PDPHashsize int
+}
+
+func (gtp *GTP) Attrs() *LinkAttrs {
+	return &gtp.LinkAttrs
+}
+
+func (gtp *GTP) Type() string {
+	return "gtp"
+}
+
+// Virtual XFRM Interfaces
+//
+// Named "xfrmi" to prevent confusion with XFRM objects
+type Xfrmi struct {
+	LinkAttrs
+	Ifid uint32
+}
+
+func (xfrm *Xfrmi) Attrs() *LinkAttrs {
+	return &xfrm.LinkAttrs
+}
+
+func (xfrm *Xfrmi) Type() string {
+	return "xfrm"
+}
+
+// IPoIB interface
+
+type IPoIBMode uint16
+
+func (m *IPoIBMode) String() string {
+	str, ok := iPoIBModeToString[*m]
+	if !ok {
+		return fmt.Sprintf("mode(%d)", *m)
+	}
+	return str
+}
+
+const (
+	IPOIB_MODE_DATAGRAM = iota
+	IPOIB_MODE_CONNECTED
+)
+
+var iPoIBModeToString = map[IPoIBMode]string{
+	IPOIB_MODE_DATAGRAM:  "datagram",
+	IPOIB_MODE_CONNECTED: "connected",
+}
+
+var StringToIPoIBMode = map[string]IPoIBMode{
+	"datagram":  IPOIB_MODE_DATAGRAM,
+	"connected": IPOIB_MODE_CONNECTED,
+}
+
+const (
+	CAN_STATE_ERROR_ACTIVE = iota
+	CAN_STATE_ERROR_WARNING
+
CAN_STATE_ERROR_PASSIVE + CAN_STATE_BUS_OFF + CAN_STATE_STOPPED + CAN_STATE_SLEEPING +) + +type Can struct { + LinkAttrs + + BitRate uint32 + SamplePoint uint32 + TimeQuanta uint32 + PropagationSegment uint32 + PhaseSegment1 uint32 + PhaseSegment2 uint32 + SyncJumpWidth uint32 + BitRatePreScaler uint32 + + Name string + TimeSegment1Min uint32 + TimeSegment1Max uint32 + TimeSegment2Min uint32 + TimeSegment2Max uint32 + SyncJumpWidthMax uint32 + BitRatePreScalerMin uint32 + BitRatePreScalerMax uint32 + BitRatePreScalerInc uint32 + + ClockFrequency uint32 + + State uint32 + + Mask uint32 + Flags uint32 + + TxError uint16 + RxError uint16 + + RestartMs uint32 +} + +func (can *Can) Attrs() *LinkAttrs { + return &can.LinkAttrs +} + +func (can *Can) Type() string { + return "can" +} + +type IPoIB struct { + LinkAttrs + Pkey uint16 + Mode IPoIBMode + Umcast uint16 +} + +func (ipoib *IPoIB) Attrs() *LinkAttrs { + return &ipoib.LinkAttrs +} + +func (ipoib *IPoIB) Type() string { + return "ipoib" +} + +type BareUDP struct { + LinkAttrs + Port uint16 + EtherType uint16 + SrcPortMin uint16 + MultiProto bool +} + +func (bareudp *BareUDP) Attrs() *LinkAttrs { + return &bareudp.LinkAttrs +} + +func (bareudp *BareUDP) Type() string { + return "bareudp" +} + +// iproute2 supported devices; +// vlan | veth | vcan | dummy | ifb | macvlan | macvtap | +// bridge | bond | ipoib | ip6tnl | ipip | sit | vxlan | +// gre | gretap | ip6gre | ip6gretap | vti | vti6 | nlmon | +// bond_slave | ipvlan | xfrm | bareudp + +// LinkNotFoundError wraps the various not found errors when +// getting/reading links. This is intended for better error +// handling by dependent code so that "not found error" can +// be distinguished from other errors +type LinkNotFoundError struct { + error +} diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go new file mode 100644 index 000000000..d713612a9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -0,0 +1,3967 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const ( + SizeofLinkStats32 = 0x5c + SizeofLinkStats64 = 0xb8 +) + +const ( + TUNTAP_MODE_TUN TuntapMode = unix.IFF_TUN + TUNTAP_MODE_TAP TuntapMode = unix.IFF_TAP + TUNTAP_DEFAULTS TuntapFlag = unix.IFF_TUN_EXCL | unix.IFF_ONE_QUEUE + TUNTAP_VNET_HDR TuntapFlag = unix.IFF_VNET_HDR + TUNTAP_TUN_EXCL TuntapFlag = unix.IFF_TUN_EXCL + TUNTAP_NO_PI TuntapFlag = unix.IFF_NO_PI + TUNTAP_ONE_QUEUE TuntapFlag = unix.IFF_ONE_QUEUE + TUNTAP_MULTI_QUEUE TuntapFlag = unix.IFF_MULTI_QUEUE + TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI +) + +var StringToTuntapModeMap = map[string]TuntapMode{ + "tun": TUNTAP_MODE_TUN, + "tap": TUNTAP_MODE_TAP, +} + +func (ttm TuntapMode) String() string { + switch ttm { + case TUNTAP_MODE_TUN: + return "tun" + case TUNTAP_MODE_TAP: + return "tap" + } + return "unknown" +} + +const ( + VF_LINK_STATE_AUTO uint32 = 0 + VF_LINK_STATE_ENABLE uint32 = 1 + VF_LINK_STATE_DISABLE uint32 = 2 +) + +var macvlanModes = [...]uint32{ + 0, + nl.MACVLAN_MODE_PRIVATE, + nl.MACVLAN_MODE_VEPA, + nl.MACVLAN_MODE_BRIDGE, + nl.MACVLAN_MODE_PASSTHRU, + nl.MACVLAN_MODE_SOURCE, +} + +func ensureIndex(link *LinkAttrs) { + if link != nil && link.Index == 0 { + newlink, _ := LinkByName(link.Name) + if newlink != nil { + 
link.Index = newlink.Attrs().Index + } + } +} + +func (h *Handle) ensureIndex(link *LinkAttrs) { + if link != nil && link.Index == 0 { + newlink, _ := h.LinkByName(link.Name) + if newlink != nil { + link.Index = newlink.Attrs().Index + } + } +} + +func (h *Handle) LinkSetARPOff(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change |= unix.IFF_NOARP + msg.Flags |= unix.IFF_NOARP + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func LinkSetARPOff(link Link) error { + return pkgHandle.LinkSetARPOff(link) +} + +func (h *Handle) LinkSetARPOn(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change |= unix.IFF_NOARP + msg.Flags &= ^uint32(unix.IFF_NOARP) + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func LinkSetARPOn(link Link) error { + return pkgHandle.LinkSetARPOn(link) +} + +func (h *Handle) SetPromiscOn(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_PROMISC + msg.Flags = unix.IFF_PROMISC + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetAllmulticastOn enables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast on` +func LinkSetAllmulticastOn(link Link) error { + return pkgHandle.LinkSetAllmulticastOn(link) +} + +// LinkSetAllmulticastOn enables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast on` +func (h *Handle) LinkSetAllmulticastOn(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_ALLMULTI + msg.Flags = unix.IFF_ALLMULTI + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetAllmulticastOff disables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast off` +func LinkSetAllmulticastOff(link Link) error { + return pkgHandle.LinkSetAllmulticastOff(link) +} + +// LinkSetAllmulticastOff disables the reception of all hardware multicast packets for the link device. +// Equivalent to: `ip link set $link allmulticast off` +func (h *Handle) LinkSetAllmulticastOff(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_ALLMULTI + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetMulticastOn enables the reception of multicast packets for the link device. +// Equivalent to: `ip link set $link multicast on` +func LinkSetMulticastOn(link Link) error { + return pkgHandle.LinkSetMulticastOn(link) +} + +// LinkSetMulticastOn enables the reception of multicast packets for the link device. 
+// Equivalent to: `ip link set $link multicast on`
+func (h *Handle) LinkSetMulticastOn(link Link) error {
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+	msg.Change = unix.IFF_MULTICAST
+	msg.Flags = unix.IFF_MULTICAST
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(unix.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetMulticastOff disables the reception of multicast packets for the link device.
+// Equivalent to: `ip link set $link multicast off`
+func LinkSetMulticastOff(link Link) error {
+	return pkgHandle.LinkSetMulticastOff(link)
+}
+
+// LinkSetMulticastOff disables the reception of multicast packets for the link device.
+// Equivalent to: `ip link set $link multicast off`
+func (h *Handle) LinkSetMulticastOff(link Link) error {
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+	msg.Change = unix.IFF_MULTICAST
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	_, err := req.Execute(unix.NETLINK_ROUTE, 0)
+	return err
+}
+
+func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error {
+	return pkgHandle.MacvlanMACAddrAdd(link, addr)
+}
+
+func (h *Handle) MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error {
+	return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_ADD)
+}
+
+func MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error {
+	return pkgHandle.MacvlanMACAddrDel(link, addr)
+}
+
+func (h *Handle) MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error {
+	return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_DEL)
+}
+
+func MacvlanMACAddrFlush(link Link) error {
+	return pkgHandle.MacvlanMACAddrFlush(link)
+}
+
+func (h *Handle) MacvlanMACAddrFlush(link Link) error {
+	return h.macvlanMACAddrChange(link, nil, nl.MACVLAN_MACADDR_FLUSH)
+}
+
+func MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error {
+	return pkgHandle.MacvlanMACAddrSet(link, addrs)
+}
+
+func (h *Handle) MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error {
+	return h.macvlanMACAddrChange(link, addrs, nl.MACVLAN_MACADDR_SET)
+}
+
+func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode uint32) error {
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil)
+	linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
+	inner := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
+
+	// IFLA_MACVLAN_MACADDR_MODE = mode
+	b := make([]byte, 4)
+	native.PutUint32(b, mode)
+	inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR_MODE, b)
+
+	// populate message with MAC addrs, if necessary
+	switch mode {
+	case nl.MACVLAN_MACADDR_ADD, nl.MACVLAN_MACADDR_DEL:
+		if len(addrs) == 1 {
+			inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0]))
+		}
+	case nl.MACVLAN_MACADDR_SET:
+		mad := inner.AddRtAttr(nl.IFLA_MACVLAN_MACADDR_DATA, nil)
+		for _, addr := range addrs {
+			mad.AddRtAttr(nl.IFLA_MACVLAN_MACADDR, []byte(addr))
+		}
+	}
+
+	req.AddData(linkInfo)
+
+	_, err := req.Execute(unix.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetMacvlanMode sets the mode of a macvlan or macvtap link device.
+// Note that passthrough mode cannot be changed to or from; such attempts will fail.
+// Equivalent to: `ip link set $link type (macvlan|macvtap) mode $mode +func LinkSetMacvlanMode(link Link, mode MacvlanMode) error { + return pkgHandle.LinkSetMacvlanMode(link, mode) +} + +// LinkSetMacvlanMode sets the mode of the macvlan or macvtap link device. +// Note that passthrough mode cannot be set to and from and will fail. +// Equivalent to: `ip link set $link type (macvlan|macvtap) mode $mode +func (h *Handle) LinkSetMacvlanMode(link Link, mode MacvlanMode) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[mode])) + + req.AddData(linkInfo) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func BridgeSetMcastSnoop(link Link, on bool) error { + return pkgHandle.BridgeSetMcastSnoop(link, on) +} + +func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { + bridge := link.(*Bridge) + bridge.MulticastSnooping = &on + return h.linkModify(bridge, unix.NLM_F_ACK) +} + +func BridgeSetVlanFiltering(link Link, on bool) error { + return pkgHandle.BridgeSetVlanFiltering(link, on) +} + +func (h *Handle) BridgeSetVlanFiltering(link Link, on bool) error { + bridge := link.(*Bridge) + bridge.VlanFiltering = &on + return h.linkModify(bridge, unix.NLM_F_ACK) +} + +func BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + return pkgHandle.BridgeSetVlanDefaultPVID(link, pvid) +} + +func (h *Handle) BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + bridge := link.(*Bridge) + bridge.VlanDefaultPVID = &pvid + return h.linkModify(bridge, unix.NLM_F_ACK) +} + +func SetPromiscOn(link Link) error { + return pkgHandle.SetPromiscOn(link) +} + +func (h *Handle) SetPromiscOff(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_PROMISC + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func SetPromiscOff(link Link) error { + return pkgHandle.SetPromiscOff(link) +} + +// LinkSetUp enables the link device. +// Equivalent to: `ip link set $link up` +func LinkSetUp(link Link) error { + return pkgHandle.LinkSetUp(link) +} + +// LinkSetUp enables the link device. +// Equivalent to: `ip link set $link up` +func (h *Handle) LinkSetUp(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetDown disables link device. +// Equivalent to: `ip link set $link down` +func LinkSetDown(link Link) error { + return pkgHandle.LinkSetDown(link) +} + +// LinkSetDown disables link device. 
+// Equivalent to: `ip link set $link down` +func (h *Handle) LinkSetDown(link Link) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Change = unix.IFF_UP + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetMTU sets the mtu of the link device. +// Equivalent to: `ip link set $link mtu $mtu` +func LinkSetMTU(link Link, mtu int) error { + return pkgHandle.LinkSetMTU(link, mtu) +} + +// LinkSetMTU sets the mtu of the link device. +// Equivalent to: `ip link set $link mtu $mtu` +func (h *Handle) LinkSetMTU(link Link, mtu int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(mtu)) + + data := nl.NewRtAttr(unix.IFLA_MTU, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetName sets the name of the link device. +// Equivalent to: `ip link set $link name $name` +func LinkSetName(link Link, name string) error { + return pkgHandle.LinkSetName(link, name) +} + +// LinkSetName sets the name of the link device. +// Equivalent to: `ip link set $link name $name` +func (h *Handle) LinkSetName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_IFNAME, []byte(name)) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetAlias sets the alias of the link device. +// Equivalent to: `ip link set dev $link alias $name` +func LinkSetAlias(link Link, name string) error { + return pkgHandle.LinkSetAlias(link, name) +} + +// LinkSetAlias sets the alias of the link device. +// Equivalent to: `ip link set dev $link alias $name` +func (h *Handle) LinkSetAlias(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(name)) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkAddAltName adds a new alternative name for the link device. +// Equivalent to: `ip link property add $link altname $name` +func LinkAddAltName(link Link, name string) error { + return pkgHandle.LinkAddAltName(link, name) +} + +// LinkAddAltName adds a new alternative name for the link device. +// Equivalent to: `ip link property add $link altname $name` +func (h *Handle) LinkAddAltName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_NEWLINKPROP, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil) + data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name)) + + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkDelAltName delete an alternative name for the link device. 
+// Equivalent to: `ip link property del $link altname $name` +func LinkDelAltName(link Link, name string) error { + return pkgHandle.LinkDelAltName(link, name) +} + +// LinkDelAltName delete an alternative name for the link device. +// Equivalent to: `ip link property del $link altname $name` +func (h *Handle) LinkDelAltName(link Link, name string) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_DELLINKPROP, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_PROP_LIST|unix.NLA_F_NESTED, nil) + data.AddRtAttr(unix.IFLA_ALT_IFNAME, []byte(name)) + + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetHardwareAddr sets the hardware address of the link device. +// Equivalent to: `ip link set $link address $hwaddr` +func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { + return pkgHandle.LinkSetHardwareAddr(link, hwaddr) +} + +// LinkSetHardwareAddr sets the hardware address of the link device. +// Equivalent to: `ip link set $link address $hwaddr` +func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(hwaddr)) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfHardwareAddr sets the hardware address of a vf for the link. +// Equivalent to: `ip link set $link vf $vf mac $hwaddr` +func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { + return pkgHandle.LinkSetVfHardwareAddr(link, vf, hwaddr) +} + +// LinkSetVfHardwareAddr sets the hardware address of a vf for the link. +// Equivalent to: `ip link set $link vf $vf mac $hwaddr` +func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfMac{ + Vf: uint32(vf), + } + copy(vfmsg.Mac[:], []byte(hwaddr)) + info.AddRtAttr(nl.IFLA_VF_MAC, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfVlan sets the vlan of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan` +func LinkSetVfVlan(link Link, vf, vlan int) error { + return pkgHandle.LinkSetVfVlan(link, vf, vlan) +} + +// LinkSetVfVlan sets the vlan of a vf for the link. 
+// Equivalent to: `ip link set $link vf $vf vlan $vlan` +func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + } + info.AddRtAttr(nl.IFLA_VF_VLAN, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos` +func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return pkgHandle.LinkSetVfVlanQos(link, vf, vlan, qos) +} + +// LinkSetVfVlanQos sets the vlan and qos priority of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos` +func (h *Handle) LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + Qos: uint32(qos), + } + info.AddRtAttr(nl.IFLA_VF_VLAN, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto` +func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return pkgHandle.LinkSetVfVlanQosProto(link, vf, vlan, qos, proto) +} + +// LinkSetVfVlanQosProto sets the vlan, qos and protocol of a vf for the link. +// Equivalent to: `ip link set $link vf $vf vlan $vlan qos $qos proto $proto` +func (h *Handle) LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + vfInfo := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfVlanList := vfInfo.AddRtAttr(nl.IFLA_VF_VLAN_LIST, nil) + + vfmsg := nl.VfVlanInfo{ + VfVlan: nl.VfVlan{ + Vf: uint32(vf), + Vlan: uint32(vlan), + Qos: uint32(qos), + }, + VlanProto: (uint16(proto)>>8)&0xFF | (uint16(proto)&0xFF)<<8, + } + + vfVlanList.AddRtAttr(nl.IFLA_VF_VLAN_INFO, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfTxRate sets the tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf rate $rate` +func LinkSetVfTxRate(link Link, vf, rate int) error { + return pkgHandle.LinkSetVfTxRate(link, vf, rate) +} + +// LinkSetVfTxRate sets the tx rate of a vf for the link. 
+// Equivalent to: `ip link set $link vf $vf rate $rate` +func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfTxRate{ + Vf: uint32(vf), + Rate: uint32(rate), + } + info.AddRtAttr(nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfRate sets the min and max tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate` +func LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return pkgHandle.LinkSetVfRate(link, vf, minRate, maxRate) +} + +// LinkSetVfRate sets the min and max tx rate of a vf for the link. +// Equivalent to: `ip link set $link vf $vf min_tx_rate $min_rate max_tx_rate $max_rate` +func (h *Handle) LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfRate{ + Vf: uint32(vf), + MinTxRate: uint32(minRate), + MaxTxRate: uint32(maxRate), + } + info.AddRtAttr(nl.IFLA_VF_RATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfState enables/disables virtual link state on a vf. +// Equivalent to: `ip link set $link vf $vf state $state` +func LinkSetVfState(link Link, vf int, state uint32) error { + return pkgHandle.LinkSetVfState(link, vf, state) +} + +// LinkSetVfState enables/disables virtual link state on a vf. +// Equivalent to: `ip link set $link vf $vf state $state` +func (h *Handle) LinkSetVfState(link Link, vf int, state uint32) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfLinkState{ + Vf: uint32(vf), + LinkState: state, + } + info.AddRtAttr(nl.IFLA_VF_LINK_STATE, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link. +// Equivalent to: `ip link set $link vf $vf spoofchk $check` +func LinkSetVfSpoofchk(link Link, vf int, check bool) error { + return pkgHandle.LinkSetVfSpoofchk(link, vf, check) +} + +// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link. 
+// Equivalent to: `ip link set $link vf $vf spoofchk $check` +func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { + var setting uint32 + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + if check { + setting = 1 + } + vfmsg := nl.VfSpoofchk{ + Vf: uint32(vf), + Setting: setting, + } + info.AddRtAttr(nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfTrust enables/disables trust state on a vf for the link. +// Equivalent to: `ip link set $link vf $vf trust $state` +func LinkSetVfTrust(link Link, vf int, state bool) error { + return pkgHandle.LinkSetVfTrust(link, vf, state) +} + +// LinkSetVfTrust enables/disables trust state on a vf for the link. +// Equivalent to: `ip link set $link vf $vf trust $state` +func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { + var setting uint32 + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + if state { + setting = 1 + } + vfmsg := nl.VfTrust{ + Vf: uint32(vf), + Setting: setting, + } + info.AddRtAttr(nl.IFLA_VF_TRUST, vfmsg.Serialize()) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetVfNodeGUID sets the node GUID of a vf for the link. +// Equivalent to: `ip link set dev $link vf $vf node_guid $nodeguid` +func LinkSetVfNodeGUID(link Link, vf int, nodeguid net.HardwareAddr) error { + return pkgHandle.LinkSetVfGUID(link, vf, nodeguid, nl.IFLA_VF_IB_NODE_GUID) +} + +// LinkSetVfPortGUID sets the port GUID of a vf for the link. +// Equivalent to: `ip link set dev $link vf $vf port_guid $portguid` +func LinkSetVfPortGUID(link Link, vf int, portguid net.HardwareAddr) error { + return pkgHandle.LinkSetVfGUID(link, vf, portguid, nl.IFLA_VF_IB_PORT_GUID) +} + +// LinkSetVfGUID sets the node or port GUID of a vf for the link. +func (h *Handle) LinkSetVfGUID(link Link, vf int, vfGuid net.HardwareAddr, guidType int) error { + var err error + var guid uint64 + + buf := bytes.NewBuffer(vfGuid) + err = binary.Read(buf, binary.BigEndian, &guid) + if err != nil { + return err + } + + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + data := nl.NewRtAttr(unix.IFLA_VFINFO_LIST, nil) + info := data.AddRtAttr(nl.IFLA_VF_INFO, nil) + vfmsg := nl.VfGUID{ + Vf: uint32(vf), + GUID: guid, + } + info.AddRtAttr(guidType, vfmsg.Serialize()) + req.AddData(data) + + _, err = req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetMaster sets the master of the link device. +// Equivalent to: `ip link set $link master $master` +func LinkSetMaster(link Link, master Link) error { + return pkgHandle.LinkSetMaster(link, master) +} + +// LinkSetMaster sets the master of the link device. 
+// Equivalent to: `ip link set $link master $master` +func (h *Handle) LinkSetMaster(link Link, master Link) error { + index := 0 + if master != nil { + masterBase := master.Attrs() + h.ensureIndex(masterBase) + index = masterBase.Index + } + if index <= 0 { + return fmt.Errorf("Device does not exist") + } + return h.LinkSetMasterByIndex(link, index) +} + +// LinkSetNoMaster removes the master of the link device. +// Equivalent to: `ip link set $link nomaster` +func LinkSetNoMaster(link Link) error { + return pkgHandle.LinkSetNoMaster(link) +} + +// LinkSetNoMaster removes the master of the link device. +// Equivalent to: `ip link set $link nomaster` +func (h *Handle) LinkSetNoMaster(link Link) error { + return h.LinkSetMasterByIndex(link, 0) +} + +// LinkSetMasterByIndex sets the master of the link device. +// Equivalent to: `ip link set $link master $master` +func LinkSetMasterByIndex(link Link, masterIndex int) error { + return pkgHandle.LinkSetMasterByIndex(link, masterIndex) +} + +// LinkSetMasterByIndex sets the master of the link device. +// Equivalent to: `ip link set $link master $master` +func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(masterIndex)) + + data := nl.NewRtAttr(unix.IFLA_MASTER, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetNsPid puts the device into a new network namespace. The +// pid must be a pid of a running process. +// Equivalent to: `ip link set $link netns $pid` +func LinkSetNsPid(link Link, nspid int) error { + return pkgHandle.LinkSetNsPid(link, nspid) +} + +// LinkSetNsPid puts the device into a new network namespace. The +// pid must be a pid of a running process. +// Equivalent to: `ip link set $link netns $pid` +func (h *Handle) LinkSetNsPid(link Link, nspid int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(nspid)) + + data := nl.NewRtAttr(unix.IFLA_NET_NS_PID, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetNsFd puts the device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. +// Similar to: `ip link set $link netns $ns` +func LinkSetNsFd(link Link, fd int) error { + return pkgHandle.LinkSetNsFd(link, fd) +} + +// LinkSetNsFd puts the device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. +// Similar to: `ip link set $link netns $ns` +func (h *Handle) LinkSetNsFd(link Link, fd int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(fd)) + + data := nl.NewRtAttr(unix.IFLA_NET_NS_FD, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetXdpFd adds a bpf function to the driver. 
The fd must be a bpf +// program loaded with bpf(type=BPF_PROG_TYPE_XDP) +func LinkSetXdpFd(link Link, fd int) error { + return LinkSetXdpFdWithFlags(link, fd, 0) +} + +// LinkSetXdpFdWithFlags adds a bpf function to the driver with the given +// options. The fd must be a bpf program loaded with bpf(type=BPF_PROG_TYPE_XDP) +func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { + base := link.Attrs() + ensureIndex(base) + req := nl.NewNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device. +// Equivalent to: `ip link set $link gso_max_segs $maxSegs` +func LinkSetGSOMaxSegs(link Link, maxSegs int) error { + return pkgHandle.LinkSetGSOMaxSegs(link, maxSegs) +} + +// LinkSetGSOMaxSegs sets the GSO maximum segment count of the link device. +// Equivalent to: `ip link set $link gso_max_segs $maxSegs` +func (h *Handle) LinkSetGSOMaxSegs(link Link, maxSegs int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSegs)) + + data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_max_size $maxSize` +func LinkSetGSOMaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGSOMaxSize(link, maxSize) +} + +// LinkSetGSOMaxSize sets the IPv6 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_max_size $maxSize` +func (h *Handle) LinkSetGSOMaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_max_size $maxSize` +func LinkSetGROMaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGROMaxSize(link, maxSize) +} + +// LinkSetGROMaxSize sets the IPv6 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_max_size $maxSize` +func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device. 
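+// A minimal usage sketch (the device name eth0 and the value 65536 are illustrative assumptions only): +// +//	link, _ := netlink.LinkByName("eth0") +//	_ = netlink.LinkSetGSOIPv4MaxSize(link, 65536) +//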
+// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize` +func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGSOIPv4MaxSize(link, maxSize) +} + +// LinkSetGSOIPv4MaxSize sets the IPv4 GSO maximum size of the link device. +// Equivalent to: `ip link set $link gso_ipv4_max_size $maxSize` +func (h *Handle) LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize` +func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return pkgHandle.LinkSetGROIPv4MaxSize(link, maxSize) +} + +// LinkSetGROIPv4MaxSize sets the IPv4 GRO maximum size of the link device. +// Equivalent to: `ip link set $link gro_ipv4_max_size $maxSize` +func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(maxSize)) + + data := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func boolAttr(val bool) []byte { + var v uint8 + if val { + v = 1 + } + return nl.Uint8Attr(v) +} + +type vxlanPortRange struct { + Lo, Hi uint16 +} + +func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if vxlan.FlowBased { + vxlan.VxlanId = 0 + } + + data.AddRtAttr(nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId))) + + if vxlan.VtepDevIndex != 0 { + data.AddRtAttr(nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex))) + } + if vxlan.SrcAddr != nil { + ip := vxlan.SrcAddr.To4() + if ip != nil { + data.AddRtAttr(nl.IFLA_VXLAN_LOCAL, []byte(ip)) + } else { + ip = vxlan.SrcAddr.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_VXLAN_LOCAL6, []byte(ip)) + } + } + } + if vxlan.Group != nil { + group := vxlan.Group.To4() + if group != nil { + data.AddRtAttr(nl.IFLA_VXLAN_GROUP, []byte(group)) + } else { + group = vxlan.Group.To16() + if group != nil { + data.AddRtAttr(nl.IFLA_VXLAN_GROUP6, []byte(group)) + } + } + } + + data.AddRtAttr(nl.IFLA_VXLAN_TTL, nl.Uint8Attr(uint8(vxlan.TTL))) + data.AddRtAttr(nl.IFLA_VXLAN_TOS, nl.Uint8Attr(uint8(vxlan.TOS))) + data.AddRtAttr(nl.IFLA_VXLAN_LEARNING, boolAttr(vxlan.Learning)) + data.AddRtAttr(nl.IFLA_VXLAN_PROXY, boolAttr(vxlan.Proxy)) + data.AddRtAttr(nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) + data.AddRtAttr(nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) + data.AddRtAttr(nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) + data.AddRtAttr(nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) + data.AddRtAttr(nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) + + if vxlan.UDPCSum { + data.AddRtAttr(nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) + } + if vxlan.GBP { + data.AddRtAttr(nl.IFLA_VXLAN_GBP, []byte{}) + } + if vxlan.FlowBased { + data.AddRtAttr(nl.IFLA_VXLAN_FLOWBASED, 
boolAttr(vxlan.FlowBased)) + } + if vxlan.NoAge { + data.AddRtAttr(nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0)) + } else if vxlan.Age > 0 { + data.AddRtAttr(nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age))) + } + if vxlan.Limit > 0 { + data.AddRtAttr(nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit))) + } + if vxlan.Port > 0 { + data.AddRtAttr(nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port))) + } + if vxlan.PortLow > 0 || vxlan.PortHigh > 0 { + pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)} + + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, &pr) + + data.AddRtAttr(nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes()) + } +} + +func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if bond.Mode >= 0 { + data.AddRtAttr(nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode))) + } + if bond.ActiveSlave >= 0 { + data.AddRtAttr(nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave))) + } + if bond.Miimon >= 0 { + data.AddRtAttr(nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon))) + } + if bond.UpDelay >= 0 { + data.AddRtAttr(nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay))) + } + if bond.DownDelay >= 0 { + data.AddRtAttr(nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay))) + } + if bond.UseCarrier >= 0 { + data.AddRtAttr(nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier))) + } + if bond.ArpInterval >= 0 { + data.AddRtAttr(nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval))) + } + if bond.ArpIpTargets != nil { + msg := data.AddRtAttr(nl.IFLA_BOND_ARP_IP_TARGET, nil) + for i := range bond.ArpIpTargets { + ip := bond.ArpIpTargets[i].To4() + if ip != nil { + msg.AddRtAttr(i, []byte(ip)) + continue + } + ip = bond.ArpIpTargets[i].To16() + if ip != nil { + msg.AddRtAttr(i, []byte(ip)) + } + } + } + if bond.ArpValidate >= 0 { + data.AddRtAttr(nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate))) + } + if bond.ArpAllTargets >= 0 { + data.AddRtAttr(nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets))) + } + if bond.Primary >= 0 { + data.AddRtAttr(nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary))) + } + if bond.PrimaryReselect >= 0 { + data.AddRtAttr(nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect))) + } + if bond.FailOverMac >= 0 { + data.AddRtAttr(nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac))) + } + if bond.XmitHashPolicy >= 0 { + data.AddRtAttr(nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy))) + } + if bond.ResendIgmp >= 0 { + data.AddRtAttr(nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp))) + } + if bond.NumPeerNotif >= 0 { + data.AddRtAttr(nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif))) + } + if bond.AllSlavesActive >= 0 { + data.AddRtAttr(nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive))) + } + if bond.MinLinks >= 0 { + data.AddRtAttr(nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks))) + } + if bond.LpInterval >= 0 { + data.AddRtAttr(nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval))) + } + if bond.PacketsPerSlave >= 0 { + data.AddRtAttr(nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PacketsPerSlave))) + } + if bond.LacpRate >= 0 { + data.AddRtAttr(nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate))) + } + if bond.AdSelect >= 0 { + data.AddRtAttr(nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect))) + } + if bond.AdActorSysPrio >= 0 { + 
data.AddRtAttr(nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio))) + } + if bond.AdUserPortKey >= 0 { + data.AddRtAttr(nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey))) + } + if bond.AdActorSystem != nil { + data.AddRtAttr(nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem)) + } + if bond.TlbDynamicLb >= 0 { + data.AddRtAttr(nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb))) + } +} + +func cleanupFds(fds []*os.File) { + for _, f := range fds { + f.Close() + } +} + +// LinkAdd adds a new link device. The type and features of the device +// are taken from the parameters in the link object. +// Equivalent to: `ip link add $link` +func LinkAdd(link Link) error { + return pkgHandle.LinkAdd(link) +} + +// LinkAdd adds a new link device. The type and features of the device +// are taken from the parameters in the link object. +// Equivalent to: `ip link add $link` +func (h *Handle) LinkAdd(link Link) error { + return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) +} + +func LinkModify(link Link) error { + return pkgHandle.LinkModify(link) +} + +func (h *Handle) LinkModify(link Link) error { + return h.linkModify(link, unix.NLM_F_REQUEST|unix.NLM_F_ACK) +} + +func (h *Handle) linkModify(link Link, flags int) error { + // TODO: support extra data for macvlan + base := link.Attrs() + + // if tuntap, then the name can be empty; the OS will provide a name + tuntap, isTuntap := link.(*Tuntap) + + if base.Name == "" && !isTuntap { + return fmt.Errorf("LinkAttrs.Name cannot be empty") + } + + if isTuntap { + if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { + return fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode) + } + + queues := tuntap.Queues + + var fds []*os.File + var req ifReq + copy(req.Name[:15], base.Name) + + req.Flags = uint16(tuntap.Flags) + + if queues == 0 { //Legacy compatibility + queues = 1 + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_DEFAULTS) + } + } else { + // For best performance set Flags to TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR + // when a) KVM has support for this ABI and + // b) the value of the flag is queryable using the TUNGETIFF ioctl + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_MULTI_QUEUE_DEFAULTS) + } + } + + req.Flags |= uint16(tuntap.Mode) + const TUN = "/dev/net/tun" + for i := 0; i < queues; i++ { + localReq := req + fd, err := unix.Open(TUN, os.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + cleanupFds(fds) + return err + } + + _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) + if errno != 0 { + // close the new fd + unix.Close(fd) + // and the already opened ones + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) + } + + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETOWNER, uintptr(tuntap.Owner)) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETOWNER failed [%d], errno %v", i, errno) + } + + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), syscall.TUNSETGROUP, uintptr(tuntap.Group)) + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETGROUP failed [%d], errno %v", i, errno) + } + + // Set the tun device to non-blocking before use. 
The comment below is + // taken from: + // + // https://github.com/mistsys/tuntap/commit/161418c25003bbee77d085a34af64d189df62bea + // + // Note there is a complication because in go, if a device node is + // opened, go sets it to use nonblocking I/O. However a /dev/net/tun + // doesn't work with epoll until after the TUNSETIFF ioctl has been + // done. So we open the unix fd directly, do the ioctl, then put the + // fd in nonblocking mode, and then finally wrap it in an os.File, + // which will see the nonblocking mode and add the fd to the + // pollable set, so that later on, when we Read() from it, we do not + // block the calling thread in the kernel. + // + // See + // https://github.com/golang/go/issues/30426 + // which got exposed in go 1.13 by the fix to + // https://github.com/golang/go/issues/30624 + err = unix.SetNonblock(fd, true) + if err != nil { + cleanupFds(fds) + return fmt.Errorf("Tuntap set to non-blocking failed [%d], err %v", i, err) + } + + // create the file from the file descriptor and store it + file := os.NewFile(uintptr(fd), TUN) + fds = append(fds, file) + + // 1) we only care about the name of the first tap in the multi queue set + // 2) if the original name was empty, the localReq now has the actual name + // + // In addition: + // This ensures that the link name is always identical to what the kernel returns. + // Not only in case of an empty name, but also when using name templates. + // e.g. when the provided name is "tap%d", the kernel replaces %d with the next available number. + if i == 0 { + link.Attrs().Name = strings.Trim(string(localReq.Name[:]), "\x00") + } + + } + + control := func(file *os.File, f func(fd uintptr)) error { + name := file.Name() + conn, err := file.SyscallConn() + if err != nil { + return fmt.Errorf("SyscallConn() failed on %s: %v", name, err) + } + if err := conn.Control(f); err != nil { + return fmt.Errorf("Failed to get file descriptor for %s: %v", name, err) + } + return nil + } + + // only persist interface if NonPersist is NOT set + if !tuntap.NonPersist { + var errno syscall.Errno + if err := control(fds[0], func(fd uintptr) { + _, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TUNSETPERSIST), 1) + }); err != nil { + return err + } + if errno != 0 { + cleanupFds(fds) + return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) + } + } + + h.ensureIndex(base) + + // can't set master during create, so set it afterwards + if base.MasterIndex != 0 { + // TODO: verify MasterIndex is actually a bridge? + err := h.LinkSetMasterByIndex(link, base.MasterIndex) + if err != nil { + // un-persist (i.e. 
allow the interface to be removed) the tuntap; + // this should not hurt if it was not set prior, and the condition might not be needed + if !tuntap.NonPersist { + // ignore error + _ = control(fds[0], func(fd uintptr) { + _, _, _ = unix.Syscall(unix.SYS_IOCTL, fd, uintptr(unix.TUNSETPERSIST), 0) + }) + } + cleanupFds(fds) + return err + } + } + + if tuntap.Queues == 0 { + cleanupFds(fds) + } else { + tuntap.Fds = fds + } + + return nil + } + + req := h.newNetlinkRequest(unix.RTM_NEWLINK, flags) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + // TODO: make it shorter + if base.Flags&net.FlagUp != 0 { + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP + } + if base.Flags&net.FlagBroadcast != 0 { + msg.Change |= unix.IFF_BROADCAST + msg.Flags |= unix.IFF_BROADCAST + } + if base.Flags&net.FlagLoopback != 0 { + msg.Change |= unix.IFF_LOOPBACK + msg.Flags |= unix.IFF_LOOPBACK + } + if base.Flags&net.FlagPointToPoint != 0 { + msg.Change |= unix.IFF_POINTOPOINT + msg.Flags |= unix.IFF_POINTOPOINT + } + if base.Flags&net.FlagMulticast != 0 { + msg.Change |= unix.IFF_MULTICAST + msg.Flags |= unix.IFF_MULTICAST + } + if base.Index != 0 { + msg.Index = int32(base.Index) + } + + req.AddData(msg) + + if base.ParentIndex != 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(base.ParentIndex)) + data := nl.NewRtAttr(unix.IFLA_LINK, b) + req.AddData(data) + } else if link.Type() == "ipvlan" || link.Type() == "ipoib" { + return fmt.Errorf("Can't create %s link without ParentIndex", link.Type()) + } + + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + req.AddData(nameData) + + if base.Alias != "" { + alias := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(base.Alias)) + req.AddData(alias) + } + + if base.MTU > 0 { + mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + req.AddData(mtu) + } + + if base.TxQLen >= 0 { + qlen := nl.NewRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + req.AddData(qlen) + } + + if base.HardwareAddr != nil { + hwaddr := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(base.HardwareAddr)) + req.AddData(hwaddr) + } + + if base.NumTxQueues > 0 { + txqueues := nl.NewRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) + req.AddData(txqueues) + } + + if base.NumRxQueues > 0 { + rxqueues := nl.NewRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) + req.AddData(rxqueues) + } + + if base.GSOMaxSegs > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(base.GSOMaxSegs)) + req.AddData(gsoAttr) + } + + if base.GSOMaxSize > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_MAX_SIZE, nl.Uint32Attr(base.GSOMaxSize)) + req.AddData(gsoAttr) + } + + if base.GROMaxSize > 0 { + groAttr := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, nl.Uint32Attr(base.GROMaxSize)) + req.AddData(groAttr) + } + + if base.GSOIPv4MaxSize > 0 { + gsoAttr := nl.NewRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GSOIPv4MaxSize)) + req.AddData(gsoAttr) + } + + if base.GROIPv4MaxSize > 0 { + groAttr := nl.NewRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(base.GROIPv4MaxSize)) + req.AddData(groAttr) + } + + if base.Group > 0 { + groupAttr := nl.NewRtAttr(unix.IFLA_GROUP, nl.Uint32Attr(base.Group)) + req.AddData(groupAttr) + } + + if base.Namespace != nil { + var attr *nl.RtAttr + switch ns := base.Namespace.(type) { + case NsPid: + val := nl.Uint32Attr(uint32(ns)) + attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) + case NsFd: + val := nl.Uint32Attr(uint32(ns)) + attr = nl.NewRtAttr(unix.IFLA_NET_NS_FD, val) + } + + req.AddData(attr) + } + + if base.Xdp 
!= nil { + addXdpAttrs(base.Xdp, req) + } + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + + switch link := link.(type) { + case *Vlan: + b := make([]byte, 2) + native.PutUint16(b, uint16(link.VlanId)) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_VLAN_ID, b) + + if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { + data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) + } + case *Netkit: + if err := addNetkitAttrs(link, linkInfo, flags); err != nil { + return err + } + case *Veth: + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) + nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) + peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) + if base.TxQLen >= 0 { + peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + } + if base.NumTxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) + } + if base.NumRxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) + } + if base.MTU > 0 { + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + } + if link.PeerHardwareAddr != nil { + peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(link.PeerHardwareAddr)) + } + if link.PeerNamespace != nil { + switch ns := link.PeerNamespace.(type) { + case NsPid: + val := nl.Uint32Attr(uint32(ns)) + peer.AddRtAttr(unix.IFLA_NET_NS_PID, val) + case NsFd: + val := nl.Uint32Attr(uint32(ns)) + peer.AddRtAttr(unix.IFLA_NET_NS_FD, val) + } + } + case *Vxlan: + addVxlanAttrs(link, linkInfo) + case *Bond: + addBondAttrs(link, linkInfo) + case *IPVlan: + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) + case *IPVtap: + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + data.AddRtAttr(nl.IFLA_IPVLAN_FLAG, nl.Uint16Attr(uint16(link.Flag))) + case *Macvlan: + addMacvlanAttrs(link, linkInfo) + case *Macvtap: + addMacvtapAttrs(link, linkInfo) + case *Geneve: + addGeneveAttrs(link, linkInfo) + case *Gretap: + addGretapAttrs(link, linkInfo) + case *Iptun: + addIptunAttrs(link, linkInfo) + case *Ip6tnl: + addIp6tnlAttrs(link, linkInfo) + case *Sittun: + addSittunAttrs(link, linkInfo) + case *Gretun: + addGretunAttrs(link, linkInfo) + case *Vti: + addVtiAttrs(link, linkInfo) + case *Vrf: + addVrfAttrs(link, linkInfo) + case *Bridge: + addBridgeAttrs(link, linkInfo) + case *GTP: + addGTPAttrs(link, linkInfo) + case *Xfrmi: + addXfrmiAttrs(link, linkInfo) + case *IPoIB: + addIPoIBAttrs(link, linkInfo) + case *BareUDP: + addBareUDPAttrs(link, linkInfo) + } + + req.AddData(linkInfo) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + if err != nil { + return err + } + + h.ensureIndex(base) + + // can't set master during create, so set it afterwards + if base.MasterIndex != 0 { + // TODO: verify MasterIndex is actually a bridge? + return h.LinkSetMasterByIndex(link, base.MasterIndex) + } + return nil +} + +// LinkDel deletes link device. Either Index or Name must be set in +// the link object for it to be deleted. The other values are ignored. +// Equivalent to: `ip link del $link` +func LinkDel(link Link) error { + return pkgHandle.LinkDel(link) +} + +// LinkDel deletes link device. 
Either Index or Name must be set in +// the link object for it to be deleted. The other values are ignored. +// Equivalent to: `ip link del $link` +func (h *Handle) LinkDel(link Link) error { + base := link.Attrs() + + h.ensureIndex(base) + + req := h.newNetlinkRequest(unix.RTM_DELLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func (h *Handle) linkByNameDump(name string) (Link, error) { + links, err := h.LinkList() + if err != nil { + return nil, err + } + + for _, link := range links { + if link.Attrs().Name == name { + return link, nil + } + + // support finding interfaces also via altnames + for _, altName := range link.Attrs().AltNames { + if altName == name { + return link, nil + } + } + } + return nil, LinkNotFoundError{fmt.Errorf("Link %s not found", name)} +} + +func (h *Handle) linkByAliasDump(alias string) (Link, error) { + links, err := h.LinkList() + if err != nil { + return nil, err + } + + for _, link := range links { + if link.Attrs().Alias == alias { + return link, nil + } + } + return nil, LinkNotFoundError{fmt.Errorf("Link alias %s not found", alias)} +} + +// LinkByName finds a link by name and returns a pointer to the object. +func LinkByName(name string) (Link, error) { + return pkgHandle.LinkByName(name) +} + +// LinkByName finds a link by name and returns a pointer to the object. +func (h *Handle) LinkByName(name string) (Link, error) { + if h.lookupByDump { + return h.linkByNameDump(name) + } + + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(msg) + + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) + + nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) + if len(name) > 15 { + nameData = nl.NewRtAttr(unix.IFLA_ALT_IFNAME, nl.ZeroTerminated(name)) + } + req.AddData(nameData) + + link, err := execGetLink(req) + if err == unix.EINVAL { + // older kernels don't support looking up via IFLA_IFNAME + // so fall back to dumping all links + h.lookupByDump = true + return h.linkByNameDump(name) + } + + return link, err +} + +// LinkByAlias finds a link by its alias and returns a pointer to the object. +// If there are multiple links with the alias it returns the first one +func LinkByAlias(alias string) (Link, error) { + return pkgHandle.LinkByAlias(alias) +} + +// LinkByAlias finds a link by its alias and returns a pointer to the object. +// If there are multiple links with the alias it returns the first one +func (h *Handle) LinkByAlias(alias string) (Link, error) { + if h.lookupByDump { + return h.linkByAliasDump(alias) + } + + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(msg) + + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) + + nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + req.AddData(nameData) + + link, err := execGetLink(req) + if err == unix.EINVAL { + // older kernels don't support looking up via IFLA_IFALIAS + // so fall back to dumping all links + h.lookupByDump = true + return h.linkByAliasDump(alias) + } + + return link, err +} + +// LinkByIndex finds a link by index and returns a pointer to the object. 
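+// +// A minimal sketch (index 1 is usually the loopback device, but that is an assumption, not a guarantee): +// +//	link, err := netlink.LinkByIndex(1) +//	if err != nil { +//		// missing devices surface as LinkNotFoundError +//		return err +//	} +//	fmt.Println(link.Attrs().Name, link.Type())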
+func LinkByIndex(index int) (Link, error) { + return pkgHandle.LinkByIndex(index) +} + +// LinkByIndex finds a link by index and returns a pointer to the object. +func (h *Handle) LinkByIndex(index int) (Link, error) { + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(index) + req.AddData(msg) + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) + + return execGetLink(req) +} + +func execGetLink(req *nl.NetlinkRequest) (Link, error) { + msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + if errno == unix.ENODEV { + return nil, LinkNotFoundError{fmt.Errorf("Link not found")} + } + } + return nil, err + } + + switch { + case len(msgs) == 0: + return nil, LinkNotFoundError{fmt.Errorf("Link not found")} + + case len(msgs) == 1: + return LinkDeserialize(nil, msgs[0]) + + default: + return nil, fmt.Errorf("More than one link found") + } +} + +// LinkDeserialize deserializes a raw message received from netlink into +// a link object. +func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { + msg := nl.DeserializeIfInfomsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := NewLinkAttrs() + base.Index = int(msg.Index) + base.RawFlags = msg.Flags + base.Flags = linkFlags(msg.Flags) + base.EncapType = msg.EncapType() + base.NetNsID = -1 + if msg.Flags&unix.IFF_ALLMULTI != 0 { + base.Allmulti = 1 + } + if msg.Flags&unix.IFF_MULTICAST != 0 { + base.Multi = 1 + } + + var ( + link Link + stats32 *LinkStatistics32 + stats64 *LinkStatistics64 + linkType string + linkSlave LinkSlave + slaveType string + ) + for _, attr := range attrs { + switch attr.Attr.Type { + case unix.IFLA_LINKINFO: + infos, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + for _, info := range infos { + switch info.Attr.Type { + case nl.IFLA_INFO_KIND: + linkType = string(info.Value[:len(info.Value)-1]) + switch linkType { + case "dummy": + link = &Dummy{} + case "ifb": + link = &Ifb{} + case "bridge": + link = &Bridge{} + case "vlan": + link = &Vlan{} + case "netkit": + link = &Netkit{} + case "veth": + link = &Veth{} + case "wireguard": + link = &Wireguard{} + case "vxlan": + link = &Vxlan{} + case "bond": + link = &Bond{} + case "ipvlan": + link = &IPVlan{} + case "ipvtap": + link = &IPVtap{} + case "macvlan": + link = &Macvlan{} + case "macvtap": + link = &Macvtap{} + case "geneve": + link = &Geneve{} + case "gretap": + link = &Gretap{} + case "ip6gretap": + link = &Gretap{} + case "ipip": + link = &Iptun{} + case "ip6tnl": + link = &Ip6tnl{} + case "sit": + link = &Sittun{} + case "gre": + link = &Gretun{} + case "ip6gre": + link = &Gretun{} + case "vti", "vti6": + link = &Vti{} + case "vrf": + link = &Vrf{} + case "gtp": + link = >P{} + case "xfrm": + link = &Xfrmi{} + case "tun": + link = &Tuntap{} + case "ipoib": + link = &IPoIB{} + case "can": + link = &Can{} + case "bareudp": + link = &BareUDP{} + default: + link = &GenericLink{LinkType: linkType} + } + case nl.IFLA_INFO_DATA: + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + switch linkType { + case "netkit": + parseNetkitData(link, data) + case "vlan": + parseVlanData(link, data) + case "vxlan": + parseVxlanData(link, data) + case "bond": + parseBondData(link, data) + case "ipvlan": + parseIPVlanData(link, data) + case "ipvtap": + parseIPVtapData(link, data) + case 
"macvlan": + parseMacvlanData(link, data) + case "macvtap": + parseMacvtapData(link, data) + case "geneve": + parseGeneveData(link, data) + case "gretap": + parseGretapData(link, data) + case "ip6gretap": + parseGretapData(link, data) + case "ipip": + parseIptunData(link, data) + case "ip6tnl": + parseIp6tnlData(link, data) + case "sit": + parseSittunData(link, data) + case "gre": + parseGretunData(link, data) + case "ip6gre": + parseGretunData(link, data) + case "vti", "vti6": + parseVtiData(link, data) + case "vrf": + parseVrfData(link, data) + case "bridge": + parseBridgeData(link, data) + case "gtp": + parseGTPData(link, data) + case "xfrm": + parseXfrmiData(link, data) + case "tun": + parseTuntapData(link, data) + case "ipoib": + parseIPoIBData(link, data) + case "can": + parseCanData(link, data) + case "bareudp": + parseBareUDPData(link, data) + } + + case nl.IFLA_INFO_SLAVE_KIND: + slaveType = string(info.Value[:len(info.Value)-1]) + switch slaveType { + case "bond": + linkSlave = &BondSlave{} + case "vrf": + linkSlave = &VrfSlave{} + } + + case nl.IFLA_INFO_SLAVE_DATA: + switch slaveType { + case "bond": + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + parseBondSlaveData(linkSlave, data) + case "vrf": + data, err := nl.ParseRouteAttr(info.Value) + if err != nil { + return nil, err + } + parseVrfSlaveData(linkSlave, data) + } + } + } + case unix.IFLA_ADDRESS: + var nonzero bool + for _, b := range attr.Value { + if b != 0 { + nonzero = true + } + } + if nonzero { + base.HardwareAddr = attr.Value[:] + } + case unix.IFLA_IFNAME: + base.Name = string(attr.Value[:len(attr.Value)-1]) + case unix.IFLA_MTU: + base.MTU = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_PROMISCUITY: + base.Promisc = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_LINK: + base.ParentIndex = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_MASTER: + base.MasterIndex = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_TXQLEN: + base.TxQLen = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_IFALIAS: + base.Alias = string(attr.Value[:len(attr.Value)-1]) + case unix.IFLA_STATS: + stats32 = new(LinkStatistics32) + if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats32); err != nil { + return nil, err + } + case unix.IFLA_STATS64: + stats64 = new(LinkStatistics64) + if err := binary.Read(bytes.NewBuffer(attr.Value[:]), nl.NativeEndian(), stats64); err != nil { + return nil, err + } + case unix.IFLA_XDP: + xdp, err := parseLinkXdp(attr.Value[:]) + if err != nil { + return nil, err + } + base.Xdp = xdp + case unix.IFLA_PROTINFO | unix.NLA_F_NESTED: + if hdr != nil && hdr.Type == unix.RTM_NEWLINK && + msg.Family == unix.AF_BRIDGE { + attrs, err := nl.ParseRouteAttr(attr.Value[:]) + if err != nil { + return nil, err + } + protinfo := parseProtinfo(attrs) + base.Protinfo = &protinfo + } + case unix.IFLA_PROP_LIST | unix.NLA_F_NESTED: + attrs, err := nl.ParseRouteAttr(attr.Value[:]) + if err != nil { + return nil, err + } + + base.AltNames = []string{} + for _, attr := range attrs { + if attr.Attr.Type == unix.IFLA_ALT_IFNAME { + base.AltNames = append(base.AltNames, nl.BytesToString(attr.Value)) + } + } + case unix.IFLA_OPERSTATE: + base.OperState = LinkOperState(uint8(attr.Value[0])) + case unix.IFLA_PHYS_SWITCH_ID: + base.PhysSwitchID = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_LINK_NETNSID: + base.NetNsID = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_TSO_MAX_SEGS: + base.TSOMaxSegs = native.Uint32(attr.Value[0:4]) + 
case unix.IFLA_TSO_MAX_SIZE: + base.TSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_MAX_SEGS: + base.GSOMaxSegs = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_MAX_SIZE: + base.GSOMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GRO_MAX_SIZE: + base.GROMaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GSO_IPV4_MAX_SIZE: + base.GSOIPv4MaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_GRO_IPV4_MAX_SIZE: + base.GROIPv4MaxSize = native.Uint32(attr.Value[0:4]) + case unix.IFLA_VFINFO_LIST: + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + vfs, err := parseVfInfoList(data) + if err != nil { + return nil, err + } + base.Vfs = vfs + case unix.IFLA_NUM_TX_QUEUES: + base.NumTxQueues = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_NUM_RX_QUEUES: + base.NumRxQueues = int(native.Uint32(attr.Value[0:4])) + case unix.IFLA_GROUP: + base.Group = native.Uint32(attr.Value[0:4]) + case unix.IFLA_PERM_ADDRESS: + for _, b := range attr.Value { + if b != 0 { + base.PermHWAddr = attr.Value[:] + break + } + } + } + } + + if stats64 != nil { + base.Statistics = (*LinkStatistics)(stats64) + } else if stats32 != nil { + base.Statistics = (*LinkStatistics)(stats32.to64()) + } + + // Links that don't have IFLA_INFO_KIND are hardware devices + if link == nil { + link = &Device{} + } + *link.Attrs() = base + link.Attrs().Slave = linkSlave + + // If the tuntap attributes are not updated by netlink due to + // an older driver, use sysfs + if link != nil && linkType == "tun" { + tuntap := link.(*Tuntap) + + if tuntap.Mode == 0 { + ifname := tuntap.Attrs().Name + if flags, err := readSysPropAsInt64(ifname, "tun_flags"); err == nil { + + if flags&unix.IFF_TUN != 0 { + tuntap.Mode = unix.IFF_TUN + } else if flags&unix.IFF_TAP != 0 { + tuntap.Mode = unix.IFF_TAP + } + + tuntap.NonPersist = false + if flags&unix.IFF_PERSIST == 0 { + tuntap.NonPersist = true + } + } + + // The sysfs interface for owner/group returns -1 for root user, instead of returning 0. + // So explicitly check for negative value, before assigning the owner uid/gid. + if owner, err := readSysPropAsInt64(ifname, "owner"); err == nil && owner > 0 { + tuntap.Owner = uint32(owner) + } + + if group, err := readSysPropAsInt64(ifname, "group"); err == nil && group > 0 { + tuntap.Group = uint32(group) + } + } + } + + return link, nil +} + +func readSysPropAsInt64(ifname, prop string) (int64, error) { + fname := fmt.Sprintf("/sys/class/net/%s/%s", ifname, prop) + contents, err := ioutil.ReadFile(fname) + if err != nil { + return 0, err + } + + num, err := strconv.ParseInt(strings.TrimSpace(string(contents)), 0, 64) + if err == nil { + return num, nil + } + + return 0, err +} + +// LinkList gets a list of link devices. +// Equivalent to: `ip link show` +func LinkList() ([]Link, error) { + return pkgHandle.LinkList() +} + +// LinkList gets a list of link devices. +// Equivalent to: `ip link show` +func (h *Handle) LinkList() ([]Link, error) { + // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need + // to get the message ourselves to parse link type. 
+ req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(msg) + attr := nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(nl.RTEXT_FILTER_VF)) + req.AddData(attr) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if err != nil { + return nil, err + } + + var res []Link + for _, m := range msgs { + link, err := LinkDeserialize(nil, m) + if err != nil { + return nil, err + } + res = append(res, link) + } + + return res, nil +} + +// LinkUpdate is used to pass information back from LinkSubscribe() +type LinkUpdate struct { + nl.IfInfomsg + Header unix.NlMsghdr + Link +} + +// LinkSubscribe takes a chan down which notifications will be sent +// when links change. Close the 'done' chan to stop subscription. +func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { + return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) +} + +// LinkSubscribeAt works like LinkSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). +func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { + return linkSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) +} + +// LinkSubscribeOptions contains a set of options to use with +// LinkSubscribeWithOptions. +type LinkSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval +} + +// LinkSubscribeWithOptions works like LinkSubscribe but enables the caller to +// provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback. +func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) +} + +func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) + if err != nil { + return err + } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + if listExisting { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETLINK, + unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + req.AddData(msg) + if err := s.Send(req); err != nil { + return err + } + } + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + if cberr != nil { + cberr(fmt.Errorf("Receive failed: %v", + err)) + } + return + } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } + for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + continue + } + if cberr != nil { + cberr(fmt.Errorf("error 
message: %v", + syscall.Errno(-error))) + } + continue + } + ifmsg := nl.DeserializeIfInfomsg(m.Data) + header := unix.NlMsghdr(m.Header) + link, err := LinkDeserialize(&header, m.Data) + if err != nil { + if cberr != nil { + cberr(err) + } + continue + } + ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} + } + } + }() + + return nil +} + +func LinkSetHairpin(link Link, mode bool) error { + return pkgHandle.LinkSetHairpin(link, mode) +} + +func (h *Handle) LinkSetHairpin(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE) +} + +func LinkSetGuard(link Link, mode bool) error { + return pkgHandle.LinkSetGuard(link, mode) +} + +func (h *Handle) LinkSetGuard(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD) +} + +// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface +func LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error { + return pkgHandle.LinkSetBRSlaveGroupFwdMask(link, mask) +} + +// LinkSetBRSlaveGroupFwdMask set the group_fwd_mask of a bridge slave interface +func (h *Handle) LinkSetBRSlaveGroupFwdMask(link Link, mask uint16) error { + return h.setProtinfoAttrRawVal(link, nl.Uint16Attr(mask), nl.IFLA_BRPORT_GROUP_FWD_MASK) +} + +func LinkSetFastLeave(link Link, mode bool) error { + return pkgHandle.LinkSetFastLeave(link, mode) +} + +func (h *Handle) LinkSetFastLeave(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE) +} + +func LinkSetLearning(link Link, mode bool) error { + return pkgHandle.LinkSetLearning(link, mode) +} + +func (h *Handle) LinkSetLearning(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING) +} + +func LinkSetRootBlock(link Link, mode bool) error { + return pkgHandle.LinkSetRootBlock(link, mode) +} + +func (h *Handle) LinkSetRootBlock(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT) +} + +func LinkSetFlood(link Link, mode bool) error { + return pkgHandle.LinkSetFlood(link, mode) +} + +func (h *Handle) LinkSetFlood(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD) +} + +func LinkSetIsolated(link Link, mode bool) error { + return pkgHandle.LinkSetIsolated(link, mode) +} + +func (h *Handle) LinkSetIsolated(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_ISOLATED) +} + +func LinkSetBrProxyArp(link Link, mode bool) error { + return pkgHandle.LinkSetBrProxyArp(link, mode) +} + +func (h *Handle) LinkSetBrProxyArp(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP) +} + +func LinkSetBrProxyArpWiFi(link Link, mode bool) error { + return pkgHandle.LinkSetBrProxyArpWiFi(link, mode) +} + +func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP_WIFI) +} + +func LinkSetBrNeighSuppress(link Link, mode bool) error { + return pkgHandle.LinkSetBrNeighSuppress(link, mode) +} + +func (h *Handle) LinkSetBrNeighSuppress(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_NEIGH_SUPPRESS) +} + +func (h *Handle) setProtinfoAttrRawVal(link Link, val []byte, attr int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + msg.Index = int32(base.Index) + req.AddData(msg) + + br := 
nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) + br.AddRtAttr(attr, val) + req.AddData(br) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + if err != nil { + return err + } + return nil +} +func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { + return h.setProtinfoAttrRawVal(link, boolToByte(mode), attr) +} + +// LinkSetTxQLen sets the transmit queue length for the link. +// Equivalent to: `ip link set $link txqlen $qlen` +func LinkSetTxQLen(link Link, qlen int) error { + return pkgHandle.LinkSetTxQLen(link, qlen) +} + +// LinkSetTxQLen sets the transmit queue length for the link. +// Equivalent to: `ip link set $link txqlen $qlen` +func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(qlen)) + + data := nl.NewRtAttr(unix.IFLA_TXQLEN, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetGroup sets the link group id, which can be used to perform mass actions +// with iproute2, as well as a reference in nft filters. +// Equivalent to: `ip link set $link group $id` +func LinkSetGroup(link Link, group int) error { + return pkgHandle.LinkSetGroup(link, group) +} + +// LinkSetGroup sets the link group id, which can be used to perform mass actions +// with iproute2, as well as a reference in nft filters. +// Equivalent to: `ip link set $link group $id` +func (h *Handle) LinkSetGroup(link Link, group int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 4) + native.PutUint32(b, uint32(group)) + + data := nl.NewRtAttr(unix.IFLA_GROUP, b) + req.AddData(data) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { + if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil { + return fmt.Errorf("netkit doesn't support setting Ethernet addresses") + } + + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + // The kernel will return an error if trying to change the mode of an existing netkit device + data.AddRtAttr(nl.IFLA_NETKIT_MODE, nl.Uint32Attr(uint32(nk.Mode))) + data.AddRtAttr(nl.IFLA_NETKIT_POLICY, nl.Uint32Attr(uint32(nk.Policy))) + data.AddRtAttr(nl.IFLA_NETKIT_PEER_POLICY, nl.Uint32Attr(uint32(nk.PeerPolicy))) + + if (flag & unix.NLM_F_EXCL) == 0 { + // Modifying peer link attributes will not take effect + return nil + } + + peer := data.AddRtAttr(nl.IFLA_NETKIT_PEER_INFO, nil) + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + if nk.peerLinkAttrs.Flags&net.FlagUp != 0 { + msg.Change = unix.IFF_UP + msg.Flags = unix.IFF_UP + } + if nk.peerLinkAttrs.Index != 0 { + msg.Index = int32(nk.peerLinkAttrs.Index) + } + peer.AddChild(msg) + if nk.peerLinkAttrs.Name != "" { + peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(nk.peerLinkAttrs.Name)) + } + if nk.peerLinkAttrs.MTU > 0 { + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(nk.peerLinkAttrs.MTU))) + } + if nk.peerLinkAttrs.GSOMaxSegs > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SEGS, nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSegs)) + } + if nk.peerLinkAttrs.GSOMaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_MAX_SIZE, 
nl.Uint32Attr(nk.peerLinkAttrs.GSOMaxSize)) + } + if nk.peerLinkAttrs.GSOIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GSO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GSOIPv4MaxSize)) + } + if nk.peerLinkAttrs.GROIPv4MaxSize > 0 { + peer.AddRtAttr(unix.IFLA_GRO_IPV4_MAX_SIZE, nl.Uint32Attr(nk.peerLinkAttrs.GROIPv4MaxSize)) + } + if nk.peerLinkAttrs.Namespace != nil { + switch ns := nk.peerLinkAttrs.Namespace.(type) { + case NsPid: + peer.AddRtAttr(unix.IFLA_NET_NS_PID, nl.Uint32Attr(uint32(ns))) + case NsFd: + peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns))) + } + } + return nil +} + +func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) { + netkit := link.(*Netkit) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_NETKIT_PRIMARY: + isPrimary := datum.Value[0:1][0] + if isPrimary != 0 { + netkit.isPrimary = true + } + case nl.IFLA_NETKIT_MODE: + netkit.Mode = NetkitMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_POLICY: + netkit.Policy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + case nl.IFLA_NETKIT_PEER_POLICY: + netkit.PeerPolicy = NetkitPolicy(native.Uint32(datum.Value[0:4])) + } + } +} + +func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { + vlan := link.(*Vlan) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_VLAN_ID: + vlan.VlanId = int(native.Uint16(datum.Value[0:2])) + case nl.IFLA_VLAN_PROTOCOL: + vlan.VlanProtocol = VlanProtocol(int(ntohs(datum.Value[0:2]))) + } + } +} + +func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { + vxlan := link.(*Vxlan) + for _, datum := range data { + // NOTE(vish): Apparently some messages can be sent with no value. + // We special case GBP here to not change existing + // functionality. It appears that GBP sends a datum.Value + // of null. 
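+ // A zero-length value is only valid for flag-style attributes such as + // IFLA_VXLAN_GBP; for any other attribute an empty value would make the + // fixed-offset reads below (for example datum.Value[0:4]) panic, so skip it.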
+ if len(datum.Value) == 0 && datum.Attr.Type != nl.IFLA_VXLAN_GBP { + continue + } + switch datum.Attr.Type { + case nl.IFLA_VXLAN_ID: + vxlan.VxlanId = int(native.Uint32(datum.Value[0:4])) + case nl.IFLA_VXLAN_LINK: + vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4])) + case nl.IFLA_VXLAN_LOCAL: + vxlan.SrcAddr = net.IP(datum.Value[0:4]) + case nl.IFLA_VXLAN_LOCAL6: + vxlan.SrcAddr = net.IP(datum.Value[0:16]) + case nl.IFLA_VXLAN_GROUP: + vxlan.Group = net.IP(datum.Value[0:4]) + case nl.IFLA_VXLAN_GROUP6: + vxlan.Group = net.IP(datum.Value[0:16]) + case nl.IFLA_VXLAN_TTL: + vxlan.TTL = int(datum.Value[0]) + case nl.IFLA_VXLAN_TOS: + vxlan.TOS = int(datum.Value[0]) + case nl.IFLA_VXLAN_LEARNING: + vxlan.Learning = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_PROXY: + vxlan.Proxy = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_RSC: + vxlan.RSC = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_L2MISS: + vxlan.L2miss = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_L3MISS: + vxlan.L3miss = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_CSUM: + vxlan.UDPCSum = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX: + vxlan.UDP6ZeroCSumTx = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX: + vxlan.UDP6ZeroCSumRx = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_GBP: + vxlan.GBP = true + case nl.IFLA_VXLAN_FLOWBASED: + vxlan.FlowBased = int8(datum.Value[0]) != 0 + case nl.IFLA_VXLAN_AGEING: + vxlan.Age = int(native.Uint32(datum.Value[0:4])) + vxlan.NoAge = vxlan.Age == 0 + case nl.IFLA_VXLAN_LIMIT: + vxlan.Limit = int(native.Uint32(datum.Value[0:4])) + case nl.IFLA_VXLAN_PORT: + vxlan.Port = int(ntohs(datum.Value[0:2])) + case nl.IFLA_VXLAN_PORT_RANGE: + buf := bytes.NewBuffer(datum.Value[0:4]) + var pr vxlanPortRange + if binary.Read(buf, binary.BigEndian, &pr) == nil { + vxlan.PortLow = int(pr.Lo) + vxlan.PortHigh = int(pr.Hi) + } + } + } +} + +func parseBondData(link Link, data []syscall.NetlinkRouteAttr) { + bond := link.(*Bond) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_MODE: + bond.Mode = BondMode(data[i].Value[0]) + case nl.IFLA_BOND_ACTIVE_SLAVE: + bond.ActiveSlave = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_MIIMON: + bond.Miimon = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_UPDELAY: + bond.UpDelay = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_DOWNDELAY: + bond.DownDelay = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_USE_CARRIER: + bond.UseCarrier = int(data[i].Value[0]) + case nl.IFLA_BOND_ARP_INTERVAL: + bond.ArpInterval = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_ARP_IP_TARGET: + bond.ArpIpTargets = parseBondArpIpTargets(data[i].Value) + case nl.IFLA_BOND_ARP_VALIDATE: + bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_ARP_ALL_TARGETS: + bond.ArpAllTargets = BondArpAllTargets(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PRIMARY: + bond.Primary = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PRIMARY_RESELECT: + bond.PrimaryReselect = BondPrimaryReselect(data[i].Value[0]) + case nl.IFLA_BOND_FAIL_OVER_MAC: + bond.FailOverMac = BondFailOverMac(data[i].Value[0]) + case nl.IFLA_BOND_XMIT_HASH_POLICY: + bond.XmitHashPolicy = BondXmitHashPolicy(data[i].Value[0]) + case nl.IFLA_BOND_RESEND_IGMP: + bond.ResendIgmp = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_NUM_PEER_NOTIF: + bond.NumPeerNotif = int(data[i].Value[0]) + case nl.IFLA_BOND_ALL_SLAVES_ACTIVE: + 
bond.AllSlavesActive = int(data[i].Value[0]) + case nl.IFLA_BOND_MIN_LINKS: + bond.MinLinks = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_LP_INTERVAL: + bond.LpInterval = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PACKETS_PER_SLAVE: + bond.PacketsPerSlave = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_AD_LACP_RATE: + bond.LacpRate = BondLacpRate(data[i].Value[0]) + case nl.IFLA_BOND_AD_SELECT: + bond.AdSelect = BondAdSelect(data[i].Value[0]) + case nl.IFLA_BOND_AD_INFO: + // TODO: implement + case nl.IFLA_BOND_AD_ACTOR_SYS_PRIO: + bond.AdActorSysPrio = int(native.Uint16(data[i].Value[0:2])) + case nl.IFLA_BOND_AD_USER_PORT_KEY: + bond.AdUserPortKey = int(native.Uint16(data[i].Value[0:2])) + case nl.IFLA_BOND_AD_ACTOR_SYSTEM: + bond.AdActorSystem = net.HardwareAddr(data[i].Value[0:6]) + case nl.IFLA_BOND_TLB_DYNAMIC_LB: + bond.TlbDynamicLb = int(data[i].Value[0]) + } + } +} + +func parseBondArpIpTargets(value []byte) []net.IP { + data, err := nl.ParseRouteAttr(value) + if err != nil { + return nil + } + + targets := []net.IP{} + for i := range data { + target := net.IP(data[i].Value) + if ip := target.To4(); ip != nil { + targets = append(targets, ip) + continue + } + if ip := target.To16(); ip != nil { + targets = append(targets, ip) + } + } + + return targets +} + +func addBondSlaveAttrs(bondSlave *BondSlave, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil) + + data.AddRtAttr(nl.IFLA_BOND_SLAVE_STATE, nl.Uint8Attr(uint8(bondSlave.State))) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_MII_STATUS, nl.Uint8Attr(uint8(bondSlave.MiiStatus))) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT, nl.Uint32Attr(bondSlave.LinkFailureCount)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(bondSlave.QueueId)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, nl.Uint16Attr(bondSlave.AggregatorId)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, nl.Uint8Attr(bondSlave.AdActorOperPortState)) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, nl.Uint16Attr(bondSlave.AdPartnerOperPortState)) + + if mac := bondSlave.PermHardwareAddr; mac != nil { + data.AddRtAttr(nl.IFLA_BOND_SLAVE_PERM_HWADDR, []byte(mac)) + } +} + +func parseBondSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { + bondSlave := slave.(*BondSlave) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_SLAVE_STATE: + bondSlave.State = BondSlaveState(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_MII_STATUS: + bondSlave.MiiStatus = BondSlaveMiiStatus(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_LINK_FAILURE_COUNT: + bondSlave.LinkFailureCount = native.Uint32(data[i].Value[0:4]) + case nl.IFLA_BOND_SLAVE_PERM_HWADDR: + bondSlave.PermHardwareAddr = net.HardwareAddr(data[i].Value[0:6]) + case nl.IFLA_BOND_SLAVE_QUEUE_ID: + bondSlave.QueueId = native.Uint16(data[i].Value[0:2]) + case nl.IFLA_BOND_SLAVE_AD_AGGREGATOR_ID: + bondSlave.AggregatorId = native.Uint16(data[i].Value[0:2]) + case nl.IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE: + bondSlave.AdActorOperPortState = uint8(data[i].Value[0]) + case nl.IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE: + bondSlave.AdPartnerOperPortState = native.Uint16(data[i].Value[0:2]) + } + } +} + +func parseVrfSlaveData(slave LinkSlave, data []syscall.NetlinkRouteAttr) { + vrfSlave := slave.(*VrfSlave) + for i := range data { + switch data[i].Attr.Type { + case nl.IFLA_BOND_SLAVE_STATE: + vrfSlave.Table = native.Uint32(data[i].Value[0:4]) + } + } +} + +func parseIPVlanData(link 
Link, data []syscall.NetlinkRouteAttr) { + ipv := link.(*IPVlan) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPVLAN_MODE: + ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_IPVLAN_FLAG: + ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4])) + } + } +} + +func parseIPVtapData(link Link, data []syscall.NetlinkRouteAttr) { + ipv := link.(*IPVtap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPVLAN_MODE: + ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) + case nl.IFLA_IPVLAN_FLAG: + ipv.Flag = IPVlanFlag(native.Uint32(datum.Value[0:4])) + } + } +} + +func addMacvtapAttrs(macvtap *Macvtap, linkInfo *nl.RtAttr) { + addMacvlanAttrs(&macvtap.Macvlan, linkInfo) +} + +func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { + macv := link.(*Macvtap) + parseMacvlanData(&macv.Macvlan, data) +} + +func addMacvlanAttrs(macvlan *Macvlan, linkInfo *nl.RtAttr) { + var data *nl.RtAttr + + if macvlan.Mode != MACVLAN_MODE_DEFAULT || macvlan.BCQueueLen > 0 { + data = linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + } + + if macvlan.Mode != MACVLAN_MODE_DEFAULT { + data.AddRtAttr(nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macvlan.Mode])) + } + if macvlan.BCQueueLen > 0 { + data.AddRtAttr(nl.IFLA_MACVLAN_BC_QUEUE_LEN, nl.Uint32Attr(macvlan.BCQueueLen)) + } +} + +func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { + macv := link.(*Macvlan) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_MACVLAN_MODE: + switch native.Uint32(datum.Value[0:4]) { + case nl.MACVLAN_MODE_PRIVATE: + macv.Mode = MACVLAN_MODE_PRIVATE + case nl.MACVLAN_MODE_VEPA: + macv.Mode = MACVLAN_MODE_VEPA + case nl.MACVLAN_MODE_BRIDGE: + macv.Mode = MACVLAN_MODE_BRIDGE + case nl.MACVLAN_MODE_PASSTHRU: + macv.Mode = MACVLAN_MODE_PASSTHRU + case nl.MACVLAN_MODE_SOURCE: + macv.Mode = MACVLAN_MODE_SOURCE + } + case nl.IFLA_MACVLAN_MACADDR_COUNT: + macv.MACAddrs = make([]net.HardwareAddr, 0, int(native.Uint32(datum.Value[0:4]))) + case nl.IFLA_MACVLAN_MACADDR_DATA: + macs, err := nl.ParseRouteAttr(datum.Value[:]) + if err != nil { + panic(fmt.Sprintf("failed to ParseRouteAttr for IFLA_MACVLAN_MACADDR_DATA: %v", err)) + } + for _, macDatum := range macs { + macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) + } + case nl.IFLA_MACVLAN_BC_QUEUE_LEN: + macv.BCQueueLen = native.Uint32(datum.Value[0:4]) + case nl.IFLA_MACVLAN_BC_QUEUE_LEN_USED: + macv.UsedBCQueueLen = native.Uint32(datum.Value[0:4]) + } + } +} + +// copied from pkg/net_linux.go +func linkFlags(rawFlags uint32) net.Flags { + var f net.Flags + if rawFlags&unix.IFF_UP != 0 { + f |= net.FlagUp + } + if rawFlags&unix.IFF_BROADCAST != 0 { + f |= net.FlagBroadcast + } + if rawFlags&unix.IFF_LOOPBACK != 0 { + f |= net.FlagLoopback + } + if rawFlags&unix.IFF_POINTOPOINT != 0 { + f |= net.FlagPointToPoint + } + if rawFlags&unix.IFF_MULTICAST != 0 { + f |= net.FlagMulticast + } + return f +} + +func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if geneve.InnerProtoInherit { + data.AddRtAttr(nl.IFLA_GENEVE_INNER_PROTO_INHERIT, []byte{}) + } + + if geneve.FlowBased { + geneve.ID = 0 + data.AddRtAttr(nl.IFLA_GENEVE_COLLECT_METADATA, []byte{}) + } + + if ip := geneve.Remote; ip != nil { + if ip4 := ip.To4(); ip4 != nil { + data.AddRtAttr(nl.IFLA_GENEVE_REMOTE, ip.To4()) + } else { + data.AddRtAttr(nl.IFLA_GENEVE_REMOTE6, []byte(ip)) + } + } + + if geneve.ID != 0 { + 
data.AddRtAttr(nl.IFLA_GENEVE_ID, nl.Uint32Attr(geneve.ID)) + } + + if geneve.Dport != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_PORT, htons(geneve.Dport)) + } + + if geneve.Ttl != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_TTL, nl.Uint8Attr(geneve.Ttl)) + } + + if geneve.Tos != 0 { + data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos)) + } + + data.AddRtAttr(nl.IFLA_GENEVE_DF, nl.Uint8Attr(uint8(geneve.Df))) +} + +func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { + geneve := link.(*Geneve) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GENEVE_ID: + geneve.ID = native.Uint32(datum.Value[0:4]) + case nl.IFLA_GENEVE_REMOTE, nl.IFLA_GENEVE_REMOTE6: + geneve.Remote = datum.Value + case nl.IFLA_GENEVE_PORT: + geneve.Dport = ntohs(datum.Value[0:2]) + case nl.IFLA_GENEVE_TTL: + geneve.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GENEVE_TOS: + geneve.Tos = uint8(datum.Value[0]) + case nl.IFLA_GENEVE_COLLECT_METADATA: + geneve.FlowBased = true + case nl.IFLA_GENEVE_INNER_PROTO_INHERIT: + geneve.InnerProtoInherit = true + } + } +} + +func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if gretap.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) + return + } + + if ip := gretap.Local; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } + data.AddRtAttr(nl.IFLA_GRE_LOCAL, []byte(ip)) + } + + if ip := gretap.Remote; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } + data.AddRtAttr(nl.IFLA_GRE_REMOTE, []byte(ip)) + } + + if gretap.IKey != 0 { + data.AddRtAttr(nl.IFLA_GRE_IKEY, htonl(gretap.IKey)) + gretap.IFlags |= uint16(nl.GRE_KEY) + } + + if gretap.OKey != 0 { + data.AddRtAttr(nl.IFLA_GRE_OKEY, htonl(gretap.OKey)) + gretap.OFlags |= uint16(nl.GRE_KEY) + } + + data.AddRtAttr(nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags)) + data.AddRtAttr(nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags)) + + if gretap.Link != 0 { + data.AddRtAttr(nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link)) + } + + data.AddRtAttr(nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc)) + data.AddRtAttr(nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl)) + data.AddRtAttr(nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport)) +} + +func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { + gre := link.(*Gretap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GRE_OKEY: + gre.IKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_IKEY: + gre.OKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_LOCAL: + gre.Local = net.IP(datum.Value) + case nl.IFLA_GRE_REMOTE: + gre.Remote = net.IP(datum.Value) + case nl.IFLA_GRE_ENCAP_SPORT: + gre.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_DPORT: + gre.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_IFLAGS: + gre.IFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_OFLAGS: + gre.OFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_TTL: + gre.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GRE_TOS: + gre.Tos = uint8(datum.Value[0]) + case nl.IFLA_GRE_PMTUDISC: + gre.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_GRE_ENCAP_TYPE: + gre.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_FLAGS: + gre.EncapFlags 
= native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_COLLECT_METADATA: + gre.FlowBased = true + } + } +} + +func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if gre.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) + return + } + + if ip := gre.Local; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } + data.AddRtAttr(nl.IFLA_GRE_LOCAL, []byte(ip)) + } + + if ip := gre.Remote; ip != nil { + if ip.To4() != nil { + ip = ip.To4() + } + data.AddRtAttr(nl.IFLA_GRE_REMOTE, []byte(ip)) + } + + if gre.IKey != 0 { + data.AddRtAttr(nl.IFLA_GRE_IKEY, htonl(gre.IKey)) + gre.IFlags |= uint16(nl.GRE_KEY) + } + + if gre.OKey != 0 { + data.AddRtAttr(nl.IFLA_GRE_OKEY, htonl(gre.OKey)) + gre.OFlags |= uint16(nl.GRE_KEY) + } + + data.AddRtAttr(nl.IFLA_GRE_IFLAGS, htons(gre.IFlags)) + data.AddRtAttr(nl.IFLA_GRE_OFLAGS, htons(gre.OFlags)) + + if gre.Link != 0 { + data.AddRtAttr(nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link)) + } + + data.AddRtAttr(nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) + data.AddRtAttr(nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) + data.AddRtAttr(nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) + data.AddRtAttr(nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) +} + +func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { + gre := link.(*Gretun) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GRE_IKEY: + gre.IKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_OKEY: + gre.OKey = ntohl(datum.Value[0:4]) + case nl.IFLA_GRE_LOCAL: + gre.Local = net.IP(datum.Value) + case nl.IFLA_GRE_REMOTE: + gre.Remote = net.IP(datum.Value) + case nl.IFLA_GRE_IFLAGS: + gre.IFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_OFLAGS: + gre.OFlags = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_TTL: + gre.Ttl = uint8(datum.Value[0]) + case nl.IFLA_GRE_TOS: + gre.Tos = uint8(datum.Value[0]) + case nl.IFLA_GRE_PMTUDISC: + gre.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_GRE_ENCAP_TYPE: + gre.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_FLAGS: + gre.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_SPORT: + gre.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_ENCAP_DPORT: + gre.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_COLLECT_METADATA: + gre.FlowBased = true + } + } +} + +func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { + attrs := nl.NewRtAttr(unix.IFLA_XDP|unix.NLA_F_NESTED, nil) + b := make([]byte, 4) + native.PutUint32(b, uint32(xdp.Fd)) + attrs.AddRtAttr(nl.IFLA_XDP_FD, b) + if xdp.Flags != 0 { + b := make([]byte, 4) + native.PutUint32(b, xdp.Flags) + attrs.AddRtAttr(nl.IFLA_XDP_FLAGS, b) + } + req.AddData(attrs) +} + +func parseLinkXdp(data []byte) (*LinkXdp, error) { + attrs, err := nl.ParseRouteAttr(data) + if err != nil { + return nil, err + } + xdp := &LinkXdp{} + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.IFLA_XDP_FD: + xdp.Fd = int(native.Uint32(attr.Value[0:4])) + case nl.IFLA_XDP_ATTACHED: + xdp.AttachMode = uint32(attr.Value[0]) + xdp.Attached = xdp.AttachMode != 0 + case nl.IFLA_XDP_FLAGS: + xdp.Flags = native.Uint32(attr.Value[0:4]) + case nl.IFLA_XDP_PROG_ID: + xdp.ProgId = native.Uint32(attr.Value[0:4]) + } + } + return xdp, nil 
+} + +func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if iptun.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{}) + return + } + + ip := iptun.Local.To4() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = iptun.Remote.To4() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip)) + } + + if iptun.Link != 0 { + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link)) + } + data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(iptun.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(iptun.Proto)) +} + +func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { + iptun := link.(*Iptun) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + iptun.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_REMOTE: + iptun.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_TTL: + iptun.Ttl = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_TOS: + iptun.Tos = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_PMTUDISC: + iptun.PMtuDisc = uint8(datum.Value[0]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + iptun.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + iptun.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_TYPE: + iptun.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + iptun.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_COLLECT_METADATA: + iptun.FlowBased = true + case nl.IFLA_IPTUN_PROTO: + iptun.Proto = datum.Value[0] + } + } +} + +func addIp6tnlAttrs(ip6tnl *Ip6tnl, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if ip6tnl.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_IPTUN_COLLECT_METADATA, []byte{}) + return + } + + if ip6tnl.Link != 0 { + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(ip6tnl.Link)) + } + + ip := ip6tnl.Local.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = ip6tnl.Remote.To16() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip)) + } + + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(ip6tnl.Ttl)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(ip6tnl.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_FLAGS, nl.Uint32Attr(ip6tnl.Flags)) + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(ip6tnl.Proto)) + data.AddRtAttr(nl.IFLA_IPTUN_FLOWINFO, nl.Uint32Attr(ip6tnl.FlowInfo)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(ip6tnl.EncapLimit)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(ip6tnl.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(ip6tnl.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(ip6tnl.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(ip6tnl.EncapDport)) +} + +func parseIp6tnlData(link Link, data []syscall.NetlinkRouteAttr) { + ip6tnl := link.(*Ip6tnl) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + 
ip6tnl.Local = net.IP(datum.Value[:16]) + case nl.IFLA_IPTUN_REMOTE: + ip6tnl.Remote = net.IP(datum.Value[:16]) + case nl.IFLA_IPTUN_TTL: + ip6tnl.Ttl = datum.Value[0] + case nl.IFLA_IPTUN_TOS: + ip6tnl.Tos = datum.Value[0] + case nl.IFLA_IPTUN_FLAGS: + ip6tnl.Flags = native.Uint32(datum.Value[:4]) + case nl.IFLA_IPTUN_PROTO: + ip6tnl.Proto = datum.Value[0] + case nl.IFLA_IPTUN_FLOWINFO: + ip6tnl.FlowInfo = native.Uint32(datum.Value[:4]) + case nl.IFLA_IPTUN_ENCAP_LIMIT: + ip6tnl.EncapLimit = datum.Value[0] + case nl.IFLA_IPTUN_ENCAP_TYPE: + ip6tnl.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + ip6tnl.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + ip6tnl.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + ip6tnl.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_COLLECT_METADATA: + ip6tnl.FlowBased = true + } + } +} + +func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + if sittun.Link != 0 { + data.AddRtAttr(nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) + } + + ip := sittun.Local.To4() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_LOCAL, []byte(ip)) + } + + ip = sittun.Remote.To4() + if ip != nil { + data.AddRtAttr(nl.IFLA_IPTUN_REMOTE, []byte(ip)) + } + + if sittun.Ttl > 0 { + // Would otherwise fail on 3.10 kernel + data.AddRtAttr(nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) + } + + data.AddRtAttr(nl.IFLA_IPTUN_PROTO, nl.Uint8Attr(sittun.Proto)) + data.AddRtAttr(nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) + data.AddRtAttr(nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_LIMIT, nl.Uint8Attr(sittun.EncapLimit)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) + data.AddRtAttr(nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) +} + +func parseSittunData(link Link, data []syscall.NetlinkRouteAttr) { + sittun := link.(*Sittun) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPTUN_LOCAL: + sittun.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_REMOTE: + sittun.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_IPTUN_TTL: + sittun.Ttl = datum.Value[0] + case nl.IFLA_IPTUN_TOS: + sittun.Tos = datum.Value[0] + case nl.IFLA_IPTUN_PMTUDISC: + sittun.PMtuDisc = datum.Value[0] + case nl.IFLA_IPTUN_PROTO: + sittun.Proto = datum.Value[0] + case nl.IFLA_IPTUN_ENCAP_TYPE: + sittun.EncapType = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_FLAGS: + sittun.EncapFlags = native.Uint16(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_SPORT: + sittun.EncapSport = ntohs(datum.Value[0:2]) + case nl.IFLA_IPTUN_ENCAP_DPORT: + sittun.EncapDport = ntohs(datum.Value[0:2]) + } + } +} + +func addVtiAttrs(vti *Vti, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + family := FAMILY_V4 + if vti.Local.To4() == nil { + family = FAMILY_V6 + } + + var ip net.IP + + if family == FAMILY_V4 { + ip = vti.Local.To4() + } else { + ip = vti.Local + } + if ip != nil { + data.AddRtAttr(nl.IFLA_VTI_LOCAL, []byte(ip)) + } + + if family == FAMILY_V4 { + ip = vti.Remote.To4() + } else { + ip = vti.Remote + } + if ip != nil { + data.AddRtAttr(nl.IFLA_VTI_REMOTE, []byte(ip)) + } + + if vti.Link != 0 { + data.AddRtAttr(nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link)) + } + + 
data.AddRtAttr(nl.IFLA_VTI_IKEY, htonl(vti.IKey)) + data.AddRtAttr(nl.IFLA_VTI_OKEY, htonl(vti.OKey)) +} + +func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) { + vti := link.(*Vti) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_VTI_LOCAL: + vti.Local = net.IP(datum.Value) + case nl.IFLA_VTI_REMOTE: + vti.Remote = net.IP(datum.Value) + case nl.IFLA_VTI_IKEY: + vti.IKey = ntohl(datum.Value[0:4]) + case nl.IFLA_VTI_OKEY: + vti.OKey = ntohl(datum.Value[0:4]) + } + } +} + +func addVrfAttrs(vrf *Vrf, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + b := make([]byte, 4) + native.PutUint32(b, uint32(vrf.Table)) + data.AddRtAttr(nl.IFLA_VRF_TABLE, b) +} + +func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) { + vrf := link.(*Vrf) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_VRF_TABLE: + vrf.Table = native.Uint32(datum.Value[0:4]) + } + } +} + +func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if bridge.MulticastSnooping != nil { + data.AddRtAttr(nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping)) + } + if bridge.AgeingTime != nil { + data.AddRtAttr(nl.IFLA_BR_AGEING_TIME, nl.Uint32Attr(*bridge.AgeingTime)) + } + if bridge.HelloTime != nil { + data.AddRtAttr(nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime)) + } + if bridge.VlanFiltering != nil { + data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering)) + } + if bridge.VlanDefaultPVID != nil { + data.AddRtAttr(nl.IFLA_BR_VLAN_DEFAULT_PVID, nl.Uint16Attr(*bridge.VlanDefaultPVID)) + } + if bridge.GroupFwdMask != nil { + data.AddRtAttr(nl.IFLA_BR_GROUP_FWD_MASK, nl.Uint16Attr(*bridge.GroupFwdMask)) + } +} + +func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { + br := bridge.(*Bridge) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_BR_AGEING_TIME: + ageingTime := native.Uint32(datum.Value[0:4]) + br.AgeingTime = &ageingTime + case nl.IFLA_BR_HELLO_TIME: + helloTime := native.Uint32(datum.Value[0:4]) + br.HelloTime = &helloTime + case nl.IFLA_BR_MCAST_SNOOPING: + mcastSnooping := datum.Value[0] == 1 + br.MulticastSnooping = &mcastSnooping + case nl.IFLA_BR_VLAN_FILTERING: + vlanFiltering := datum.Value[0] == 1 + br.VlanFiltering = &vlanFiltering + case nl.IFLA_BR_VLAN_DEFAULT_PVID: + vlanDefaultPVID := native.Uint16(datum.Value[0:2]) + br.VlanDefaultPVID = &vlanDefaultPVID + case nl.IFLA_BR_GROUP_FWD_MASK: + mask := native.Uint16(datum.Value[0:2]) + br.GroupFwdMask = &mask + } + } +} + +func addGTPAttrs(gtp *GTP, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0))) + data.AddRtAttr(nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1))) + data.AddRtAttr(nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072)) + if gtp.Role != nl.GTP_ROLE_GGSN { + data.AddRtAttr(nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role))) + } +} + +func parseGTPData(link Link, data []syscall.NetlinkRouteAttr) { + gtp := link.(*GTP) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GTP_FD0: + gtp.FD0 = int(native.Uint32(datum.Value)) + case nl.IFLA_GTP_FD1: + gtp.FD1 = int(native.Uint32(datum.Value)) + case nl.IFLA_GTP_PDP_HASHSIZE: + gtp.PDPHashsize = int(native.Uint32(datum.Value)) + case nl.IFLA_GTP_ROLE: + gtp.Role = int(native.Uint32(datum.Value)) + } + } +} + +func parseVfInfoList(data []syscall.NetlinkRouteAttr) ([]VfInfo, error) { + 
var vfs []VfInfo + + for i, element := range data { + if element.Attr.Type != nl.IFLA_VF_INFO { + return nil, fmt.Errorf("Incorrect element type in vf info list: %d", element.Attr.Type) + } + vfAttrs, err := nl.ParseRouteAttr(element.Value) + if err != nil { + return nil, err + } + + vf, err := parseVfInfo(vfAttrs, i) + if err != nil { + return nil, err + } + vfs = append(vfs, vf) + } + return vfs, nil +} + +func parseVfInfo(data []syscall.NetlinkRouteAttr, id int) (VfInfo, error) { + vf := VfInfo{ID: id} + for _, element := range data { + switch element.Attr.Type { + case nl.IFLA_VF_MAC: + mac := nl.DeserializeVfMac(element.Value[:]) + vf.Mac = mac.Mac[:6] + case nl.IFLA_VF_VLAN: + vl := nl.DeserializeVfVlan(element.Value[:]) + vf.Vlan = int(vl.Vlan) + vf.Qos = int(vl.Qos) + case nl.IFLA_VF_VLAN_LIST: + vfVlanInfoList, err := nl.DeserializeVfVlanList(element.Value[:]) + if err != nil { + return vf, err + } + vf.VlanProto = int(vfVlanInfoList[0].VlanProto) + case nl.IFLA_VF_TX_RATE: + txr := nl.DeserializeVfTxRate(element.Value[:]) + vf.TxRate = int(txr.Rate) + case nl.IFLA_VF_SPOOFCHK: + sp := nl.DeserializeVfSpoofchk(element.Value[:]) + vf.Spoofchk = sp.Setting != 0 + case nl.IFLA_VF_LINK_STATE: + ls := nl.DeserializeVfLinkState(element.Value[:]) + vf.LinkState = ls.LinkState + case nl.IFLA_VF_RATE: + vfr := nl.DeserializeVfRate(element.Value[:]) + vf.MaxTxRate = vfr.MaxTxRate + vf.MinTxRate = vfr.MinTxRate + case nl.IFLA_VF_STATS: + vfstats := nl.DeserializeVfStats(element.Value[:]) + vf.RxPackets = vfstats.RxPackets + vf.TxPackets = vfstats.TxPackets + vf.RxBytes = vfstats.RxBytes + vf.TxBytes = vfstats.TxBytes + vf.Multicast = vfstats.Multicast + vf.Broadcast = vfstats.Broadcast + vf.RxDropped = vfstats.RxDropped + vf.TxDropped = vfstats.TxDropped + + case nl.IFLA_VF_RSS_QUERY_EN: + result := nl.DeserializeVfRssQueryEn(element.Value) + vf.RssQuery = result.Setting + + case nl.IFLA_VF_TRUST: + result := nl.DeserializeVfTrust(element.Value) + vf.Trust = result.Setting + } + } + return vf, nil +} + +func addXfrmiAttrs(xfrmi *Xfrmi, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_XFRM_LINK, nl.Uint32Attr(uint32(xfrmi.ParentIndex))) + if xfrmi.Ifid != 0 { + data.AddRtAttr(nl.IFLA_XFRM_IF_ID, nl.Uint32Attr(xfrmi.Ifid)) + } +} + +func parseXfrmiData(link Link, data []syscall.NetlinkRouteAttr) { + xfrmi := link.(*Xfrmi) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_XFRM_LINK: + xfrmi.ParentIndex = int(native.Uint32(datum.Value)) + case nl.IFLA_XFRM_IF_ID: + xfrmi.Ifid = native.Uint32(datum.Value) + } + } +} + +func ioctlBondSlave(cmd uintptr, link Link, master *Bond) error { + fd, err := getSocketUDP() + if err != nil { + return err + } + defer syscall.Close(fd) + + ifreq := newIocltSlaveReq(link.Attrs().Name, master.Attrs().Name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), cmd, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return fmt.Errorf("errno=%v", errno) + } + return nil +} + +// LinkSetBondSlaveActive sets the specified slave to ACTIVE in an `active-backup` bond link via the ioctl interface. +// +// Multiple calls keep the status unchanged (as shown in the unit test).
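+// +// A minimal usage sketch (illustrative only, not part of the upstream API; the interface names are hypothetical and error handling is elided): +// +// l, _ := netlink.LinkByName("bond0") +// bond := l.(*netlink.Bond) // assumes bond0 is an existing active-backup bond +// eth0, _ := netlink.LinkByName("eth0") +// _ = netlink.LinkSetBondSlave(eth0, bond) // enslave eth0 +// _ = netlink.LinkSetBondSlaveActive(eth0, bond) // then mark it the active slave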
+func LinkSetBondSlaveActive(link Link, master *Bond) error { + err := ioctlBondSlave(unix.SIOCBONDCHANGEACTIVE, link, master) + if err != nil { + return fmt.Errorf("Failed to set slave %q active in %q, %v", link.Attrs().Name, master.Attrs().Name, err) + } + return nil +} + +// LinkSetBondSlave adds a slave to a bond link via the ioctl interface. +func LinkSetBondSlave(link Link, master *Bond) error { + err := ioctlBondSlave(unix.SIOCBONDENSLAVE, link, master) + if err != nil { + return fmt.Errorf("Failed to enslave %q to %q, %v", link.Attrs().Name, master.Attrs().Name, err) + } + return nil +} + +// LinkDelBondSlave removes the specified slave from a bond link via the ioctl interface. +func LinkDelBondSlave(link Link, master *Bond) error { + err := ioctlBondSlave(unix.SIOCBONDRELEASE, link, master) + if err != nil { + return fmt.Errorf("Failed to del slave %q from %q, %v", link.Attrs().Name, master.Attrs().Name, err) + } + return nil +} + +// LinkSetBondSlaveQueueId modifies the bond slave queue-id. +func (h *Handle) LinkSetBondSlaveQueueId(link Link, queueId uint16) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + data := linkInfo.AddRtAttr(nl.IFLA_INFO_SLAVE_DATA, nil) + data.AddRtAttr(nl.IFLA_BOND_SLAVE_QUEUE_ID, nl.Uint16Attr(queueId)) + + req.AddData(linkInfo) + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// LinkSetBondSlaveQueueId modifies the bond slave queue-id. +func LinkSetBondSlaveQueueId(link Link, queueId uint16) error { + return pkgHandle.LinkSetBondSlaveQueueId(link, queueId) +} + +func vethStatsSerialize(stats ethtoolStats) ([]byte, error) { + statsSize := int(unsafe.Sizeof(stats)) + int(stats.nStats)*int(unsafe.Sizeof(uint64(0))) + b := make([]byte, 0, statsSize) + buf := bytes.NewBuffer(b) + err := binary.Write(buf, nl.NativeEndian(), stats) + return buf.Bytes()[:statsSize], err +} + +type vethEthtoolStats struct { + Cmd uint32 + NStats uint32 + Peer uint64 + // Newer kernels have XDP stats in here, but we only care + // to extract the peer ifindex here. +} + +func vethStatsDeserialize(b []byte) (vethEthtoolStats, error) { + var stats = vethEthtoolStats{} + err := binary.Read(bytes.NewReader(b), nl.NativeEndian(), &stats) + return stats, err +} + +// VethPeerIndex gets the veth peer index.
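+// +// Sketch of resolving a peer by name (illustrative, not part of the upstream API; "veth0" is a hypothetical device): +// +// l, _ := netlink.LinkByName("veth0") +// peerIdx, err := netlink.VethPeerIndex(l.(*netlink.Veth)) +// if err == nil { +// peer, _ := netlink.LinkByIndex(peerIdx) +// fmt.Println(peer.Attrs().Name) +// }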
+func VethPeerIndex(link *Veth) (int, error) { + fd, err := getSocketUDP() + if err != nil { + return -1, err + } + defer syscall.Close(fd) + + ifreq, sSet := newIocltStringSetReq(link.Name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + } + + stats := ethtoolStats{ + cmd: ETHTOOL_GSTATS, + nStats: sSet.data[0], + } + + buffer, err := vethStatsSerialize(stats) + if err != nil { + return -1, err + } + + ifreq.Data = uintptr(unsafe.Pointer(&buffer[0])) + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), SIOCETHTOOL, uintptr(unsafe.Pointer(ifreq))) + if errno != 0 { + return -1, fmt.Errorf("SIOCETHTOOL request for %q failed, errno=%v", link.Attrs().Name, errno) + } + + vstats, err := vethStatsDeserialize(buffer) + if err != nil { + return -1, err + } + + return int(vstats.Peer), nil +} + +func parseTuntapData(link Link, data []syscall.NetlinkRouteAttr) { + tuntap := link.(*Tuntap) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_TUN_OWNER: + tuntap.Owner = native.Uint32(datum.Value) + case nl.IFLA_TUN_GROUP: + tuntap.Group = native.Uint32(datum.Value) + case nl.IFLA_TUN_TYPE: + tuntap.Mode = TuntapMode(uint8(datum.Value[0])) + case nl.IFLA_TUN_PERSIST: + tuntap.NonPersist = false + if uint8(datum.Value[0]) == 0 { + tuntap.NonPersist = true + } + } + } +} + +func parseIPoIBData(link Link, data []syscall.NetlinkRouteAttr) { + ipoib := link.(*IPoIB) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_IPOIB_PKEY: + ipoib.Pkey = uint16(native.Uint16(datum.Value)) + case nl.IFLA_IPOIB_MODE: + ipoib.Mode = IPoIBMode(native.Uint16(datum.Value)) + case nl.IFLA_IPOIB_UMCAST: + ipoib.Umcast = uint16(native.Uint16(datum.Value)) + } + } +} + +func parseCanData(link Link, data []syscall.NetlinkRouteAttr) { + can := link.(*Can) + for _, datum := range data { + + switch datum.Attr.Type { + case nl.IFLA_CAN_BITTIMING: + can.BitRate = native.Uint32(datum.Value) + can.SamplePoint = native.Uint32(datum.Value[4:]) + can.TimeQuanta = native.Uint32(datum.Value[8:]) + can.PropagationSegment = native.Uint32(datum.Value[12:]) + can.PhaseSegment1 = native.Uint32(datum.Value[16:]) + can.PhaseSegment2 = native.Uint32(datum.Value[20:]) + can.SyncJumpWidth = native.Uint32(datum.Value[24:]) + can.BitRatePreScaler = native.Uint32(datum.Value[28:]) + case nl.IFLA_CAN_BITTIMING_CONST: + can.Name = string(datum.Value[:16]) + can.TimeSegment1Min = native.Uint32(datum.Value[16:]) + can.TimeSegment1Max = native.Uint32(datum.Value[20:]) + can.TimeSegment2Min = native.Uint32(datum.Value[24:]) + can.TimeSegment2Max = native.Uint32(datum.Value[28:]) + can.SyncJumpWidthMax = native.Uint32(datum.Value[32:]) + can.BitRatePreScalerMin = native.Uint32(datum.Value[36:]) + can.BitRatePreScalerMax = native.Uint32(datum.Value[40:]) + can.BitRatePreScalerInc = native.Uint32(datum.Value[44:]) + case nl.IFLA_CAN_CLOCK: + can.ClockFrequency = native.Uint32(datum.Value) + case nl.IFLA_CAN_STATE: + can.State = native.Uint32(datum.Value) + case nl.IFLA_CAN_CTRLMODE: + can.Mask = native.Uint32(datum.Value) + can.Flags = native.Uint32(datum.Value[4:]) + case nl.IFLA_CAN_BERR_COUNTER: + can.TxError = native.Uint16(datum.Value) + can.RxError = native.Uint16(datum.Value[2:]) + case nl.IFLA_CAN_RESTART_MS: + can.RestartMs = native.Uint32(datum.Value) + case nl.IFLA_CAN_DATA_BITTIMING_CONST: + case nl.IFLA_CAN_RESTART: + 
case nl.IFLA_CAN_DATA_BITTIMING: + case nl.IFLA_CAN_TERMINATION: + case nl.IFLA_CAN_TERMINATION_CONST: + case nl.IFLA_CAN_BITRATE_CONST: + case nl.IFLA_CAN_DATA_BITRATE_CONST: + case nl.IFLA_CAN_BITRATE_MAX: + } + } +} + +func addIPoIBAttrs(ipoib *IPoIB, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + data.AddRtAttr(nl.IFLA_IPOIB_PKEY, nl.Uint16Attr(uint16(ipoib.Pkey))) + data.AddRtAttr(nl.IFLA_IPOIB_MODE, nl.Uint16Attr(uint16(ipoib.Mode))) + data.AddRtAttr(nl.IFLA_IPOIB_UMCAST, nl.Uint16Attr(uint16(ipoib.Umcast))) +} + +func addBareUDPAttrs(bareudp *BareUDP, linkInfo *nl.RtAttr) { + data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + + data.AddRtAttr(nl.IFLA_BAREUDP_PORT, nl.Uint16Attr(nl.Swap16(bareudp.Port))) + data.AddRtAttr(nl.IFLA_BAREUDP_ETHERTYPE, nl.Uint16Attr(nl.Swap16(bareudp.EtherType))) + if bareudp.SrcPortMin != 0 { + data.AddRtAttr(nl.IFLA_BAREUDP_SRCPORT_MIN, nl.Uint16Attr(bareudp.SrcPortMin)) + } + if bareudp.MultiProto { + data.AddRtAttr(nl.IFLA_BAREUDP_MULTIPROTO_MODE, []byte{}) + } +} + +func parseBareUDPData(link Link, data []syscall.NetlinkRouteAttr) { + bareudp := link.(*BareUDP) + for _, attr := range data { + switch attr.Attr.Type { + case nl.IFLA_BAREUDP_PORT: + bareudp.Port = binary.BigEndian.Uint16(attr.Value) + case nl.IFLA_BAREUDP_ETHERTYPE: + bareudp.EtherType = binary.BigEndian.Uint16(attr.Value) + case nl.IFLA_BAREUDP_SRCPORT_MIN: + bareudp.SrcPortMin = native.Uint16(attr.Value) + case nl.IFLA_BAREUDP_MULTIPROTO_MODE: + bareudp.MultiProto = true + } + } +} diff --git a/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go new file mode 100644 index 000000000..310bd33d8 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go @@ -0,0 +1,14 @@ +package netlink + +// ideally golang.org/x/sys/unix would define IfReq but it only has +// IFNAMSIZ, hence this minimalistic implementation +const ( + SizeOfIfReq = 40 + IFNAMSIZ = 16 +) + +type ifReq struct { + Name [IFNAMSIZ]byte + Flags uint16 + pad [SizeOfIfReq - IFNAMSIZ - 2]byte +} diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go new file mode 100644 index 000000000..32d722e88 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -0,0 +1,33 @@ +package netlink + +import ( + "fmt" + "net" +) + +// Neigh represents a link layer neighbor from netlink. +type Neigh struct { + LinkIndex int + Family int + State int + Type int + Flags int + FlagsExt int + IP net.IP + HardwareAddr net.HardwareAddr + LLIPAddr net.IP //Used in the case of NHRP + Vlan int + VNI int + MasterIndex int +} + +// String returns $ip/$hwaddr $label +func (neigh *Neigh) String() string { + return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr) +} + +// NeighUpdate is sent when a neighbor changes - type is RTM_NEWNEIGH or RTM_DELNEIGH. 
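+// +// A consumer can tell additions from deletions by the message type, for example (sketch, assuming updates arrive on a channel ch): +// +// for update := range ch { +// switch update.Type { +// case unix.RTM_NEWNEIGH: // neighbor added or changed +// case unix.RTM_DELNEIGH: // neighbor removed +// } +// }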
+type NeighUpdate struct { + Type uint16 + Neigh +} diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go new file mode 100644 index 000000000..2d93044a6 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -0,0 +1,469 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" + "unsafe" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const ( + NDA_UNSPEC = iota + NDA_DST + NDA_LLADDR + NDA_CACHEINFO + NDA_PROBES + NDA_VLAN + NDA_PORT + NDA_VNI + NDA_IFINDEX + NDA_MASTER + NDA_LINK_NETNSID + NDA_SRC_VNI + NDA_PROTOCOL + NDA_NH_ID + NDA_FDB_EXT_ATTRS + NDA_FLAGS_EXT + NDA_MAX = NDA_FLAGS_EXT +) + +// Neighbor Cache Entry States. +const ( + NUD_NONE = 0x00 + NUD_INCOMPLETE = 0x01 + NUD_REACHABLE = 0x02 + NUD_STALE = 0x04 + NUD_DELAY = 0x08 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 +) + +// Neighbor Flags +const ( + NTF_USE = 0x01 + NTF_SELF = 0x02 + NTF_MASTER = 0x04 + NTF_PROXY = 0x08 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_STICKY = 0x40 + NTF_ROUTER = 0x80 +) + +// Extended Neighbor Flags +const ( + NTF_EXT_MANAGED = 0x00000001 +) + +// Ndmsg is for adding, removing or receiving information about a neighbor table entry +type Ndmsg struct { + Family uint8 + Index uint32 + State uint16 + Flags uint8 + Type uint8 +} + +func deserializeNdmsg(b []byte) *Ndmsg { + var dummy Ndmsg + return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0])) +} + +func (msg *Ndmsg) Serialize() []byte { + return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *Ndmsg) Len() int { + return int(unsafe.Sizeof(*msg)) +} + +// NeighAdd will add an IP to MAC mapping to the ARP table +// Equivalent to: `ip neigh add ....` +func NeighAdd(neigh *Neigh) error { + return pkgHandle.NeighAdd(neigh) +} + +// NeighAdd will add an IP to MAC mapping to the ARP table +// Equivalent to: `ip neigh add ....` +func (h *Handle) NeighAdd(neigh *Neigh) error { + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_EXCL) +} + +// NeighSet will add or replace an IP to MAC mapping to the ARP table +// Equivalent to: `ip neigh replace....` +func NeighSet(neigh *Neigh) error { + return pkgHandle.NeighSet(neigh) +} + +// NeighSet will add or replace an IP to MAC mapping to the ARP table +// Equivalent to: `ip neigh replace....` +func (h *Handle) NeighSet(neigh *Neigh) error { + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_REPLACE) +} + +// NeighAppend will append an entry to FDB +// Equivalent to: `bridge fdb append...` +func NeighAppend(neigh *Neigh) error { + return pkgHandle.NeighAppend(neigh) +} + +// NeighAppend will append an entry to FDB +// Equivalent to: `bridge fdb append...` +func (h *Handle) NeighAppend(neigh *Neigh) error { + return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_APPEND) +} + +// neighAdd adds a neighbor entry via the package handle, using the given +// netlink request flags (mode). It backs NeighAdd, NeighSet and NeighAppend. +func neighAdd(neigh *Neigh, mode int) error { + return pkgHandle.neighAdd(neigh, mode) +} + +// neighAdd sends an RTM_NEWNEIGH request for the given neighbor entry, with +// the given netlink request flags (mode). +func (h *Handle) neighAdd(neigh *Neigh, mode int) error { + req := h.newNetlinkRequest(unix.RTM_NEWNEIGH, mode|unix.NLM_F_ACK) + return neighHandle(neigh, req) +} + +// NeighDel will delete an IP-MAC mapping from the ARP table +// Equivalent to: `ip neigh del ....` +func NeighDel(neigh *Neigh) error { + return pkgHandle.NeighDel(neigh) +} + +// NeighDel will delete an IP-MAC mapping from the ARP table +// Equivalent to: `ip neigh del ....` +func (h *Handle) NeighDel(neigh *Neigh) error { + req := h.newNetlinkRequest(unix.RTM_DELNEIGH, unix.NLM_F_ACK) + return neighHandle(neigh, req) +} + +func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { + var family int + + if neigh.Family > 0 { + family = neigh.Family + } else { + family = nl.GetIPFamily(neigh.IP) + } + + msg := Ndmsg{ + Family: uint8(family), + Index: uint32(neigh.LinkIndex), + State: uint16(neigh.State), + Type: uint8(neigh.Type), + Flags: uint8(neigh.Flags), + } + req.AddData(&msg) + + ipData := neigh.IP.To4() + if ipData == nil { + ipData = neigh.IP.To16() + } + + dstData := nl.NewRtAttr(NDA_DST, ipData) + req.AddData(dstData) + + if neigh.LLIPAddr != nil { + llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4()) + req.AddData(llIPData) + } else if neigh.HardwareAddr != nil { + hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr)) + req.AddData(hwData) + } + + if neigh.FlagsExt != 0 { + flagsExtData := nl.NewRtAttr(NDA_FLAGS_EXT, nl.Uint32Attr(uint32(neigh.FlagsExt))) + req.AddData(flagsExtData) + } + + if neigh.Vlan != 0 { + vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan))) + req.AddData(vlanData) + } + + if neigh.VNI != 0 { + vniData := nl.NewRtAttr(NDA_VNI, nl.Uint32Attr(uint32(neigh.VNI))) + req.AddData(vniData) + } + + if neigh.MasterIndex != 0 { + masterData := nl.NewRtAttr(NDA_MASTER, nl.Uint32Attr(uint32(neigh.MasterIndex))) + req.AddData(masterData) + } + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// NeighList returns a list of IP-MAC mappings in the system (ARP table). +// Equivalent to: `ip neighbor show`. +// The list can be filtered by link and ip family. +func NeighList(linkIndex, family int) ([]Neigh, error) { + return pkgHandle.NeighList(linkIndex, family) +} + +// NeighProxyList returns a list of neighbor proxies in the system. +// Equivalent to: `ip neighbor show proxy`. +// The list can be filtered by link and ip family. +func NeighProxyList(linkIndex, family int) ([]Neigh, error) { + return pkgHandle.NeighProxyList(linkIndex, family) +} + +// NeighList returns a list of IP-MAC mappings in the system (ARP table). +// Equivalent to: `ip neighbor show`. +// The list can be filtered by link and ip family. +func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) { + return h.NeighListExecute(Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + }) +} + +// NeighProxyList returns a list of neighbor proxies in the system. +// Equivalent to: `ip neighbor show proxy`. +// The list can be filtered by link and ip family. +func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { + return h.NeighListExecute(Ndmsg{ + Family: uint8(family), + Index: uint32(linkIndex), + Flags: NTF_PROXY, + }) +} + +// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state. +func NeighListExecute(msg Ndmsg) ([]Neigh, error) { + return pkgHandle.NeighListExecute(msg) +} + +// NeighListExecute returns a list of neighbour entries filtered by link, ip family, flag and state.
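+// +// For example (sketch; the link index and filter values are illustrative), listing only STALE IPv4 entries on link index 2: +// +// neighs, err := netlink.NeighListExecute(netlink.Ndmsg{ +// Family: unix.AF_INET, +// Index: 2, +// State: netlink.NUD_STALE, +// })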
+func (h *Handle) NeighListExecute(msg Ndmsg) ([]Neigh, error) { + req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) + req.AddData(&msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) + if err != nil { + return nil, err + } + + var res []Neigh + for _, m := range msgs { + ndm := deserializeNdmsg(m) + if msg.Index != 0 && ndm.Index != msg.Index { + // Ignore messages from other interfaces + continue + } + if msg.Family != 0 && ndm.Family != msg.Family { + continue + } + if msg.State != 0 && ndm.State != msg.State { + continue + } + if msg.Type != 0 && ndm.Type != msg.Type { + continue + } + if msg.Flags != 0 && ndm.Flags != msg.Flags { + continue + } + + neigh, err := NeighDeserialize(m) + if err != nil { + continue + } + + res = append(res, *neigh) + } + + return res, nil +} + +func NeighDeserialize(m []byte) (*Neigh, error) { + msg := deserializeNdmsg(m) + + neigh := Neigh{ + LinkIndex: int(msg.Index), + Family: int(msg.Family), + State: int(msg.State), + Type: int(msg.Type), + Flags: int(msg.Flags), + } + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case NDA_DST: + neigh.IP = net.IP(attr.Value) + case NDA_LLADDR: + // BUG: Is this a bug in the netlink library? + // #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) + // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) + attrLen := attr.Attr.Len - unix.SizeofRtAttr + if attrLen == 4 { + neigh.LLIPAddr = net.IP(attr.Value) + } else if attrLen == 16 { + // Can be IPv6 or FireWire HWAddr + link, err := LinkByIndex(neigh.LinkIndex) + if err == nil && link.Attrs().EncapType == "tunnel6" { + neigh.IP = net.IP(attr.Value) + } else { + neigh.HardwareAddr = net.HardwareAddr(attr.Value) + } + } else { + neigh.HardwareAddr = net.HardwareAddr(attr.Value) + } + case NDA_FLAGS_EXT: + neigh.FlagsExt = int(native.Uint32(attr.Value[0:4])) + case NDA_VLAN: + neigh.Vlan = int(native.Uint16(attr.Value[0:2])) + case NDA_VNI: + neigh.VNI = int(native.Uint32(attr.Value[0:4])) + case NDA_MASTER: + neigh.MasterIndex = int(native.Uint32(attr.Value[0:4])) + } + } + + return &neigh, nil +} + +// NeighSubscribe takes a chan down which notifications will be sent +// when neighbors are added or deleted. Close the 'done' chan to stop subscription. +func NeighSubscribe(ch chan<- NeighUpdate, done <-chan struct{}) error { + return neighSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) +} + +// NeighSubscribeAt works like NeighSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). +func NeighSubscribeAt(ns netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}) error { + return neighSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) +} + +// NeighSubscribeOptions contains a set of options to use with +// NeighSubscribeWithOptions. +type NeighSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) + ListExisting bool + + // max size is based on value of /proc/sys/net/core/rmem_max + ReceiveBufferSize int + ReceiveBufferForceSize bool + ReceiveTimeout *unix.Timeval +} + +// NeighSubscribeWithOptions works like NeighSubscribe but enables the caller +// to provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback.
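+// +// Sketch (illustrative only; the options and logging are assumptions): +// +// ch := make(chan netlink.NeighUpdate) +// done := make(chan struct{}) +// opts := netlink.NeighSubscribeOptions{ +// ListExisting: true, +// ErrorCallback: func(err error) { log.Println("neigh subscribe:", err) }, +// } +// if err := netlink.NeighSubscribeWithOptions(ch, done, opts); err != nil { +// log.Fatal(err) +// } +// for u := range ch { +// log.Printf("type=%d %s", u.Type, u.Neigh.String()) +// }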
+func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, options NeighSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return neighSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting, + options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize) +} + +func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool, + rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error { + s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH) + makeRequest := func(family int) error { + req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) + ndmsg := &Ndmsg{Family: uint8(family)} + req.AddData(ndmsg) + if err := s.Send(req); err != nil { + return err + } + return nil + } + if err != nil { + return err + } + if rcvTimeout != nil { + if err := s.SetReceiveTimeout(rcvTimeout); err != nil { + return err + } + } + if rcvbuf != 0 { + err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce) + if err != nil { + return err + } + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + if listExisting { + if err := makeRequest(unix.AF_UNSPEC); err != nil { + return err + } + // We have to wait for NLMSG_DONE before making AF_BRIDGE request + } + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + if cberr != nil { + cberr(err) + } + return + } + if from.Pid != nl.PidKernel { + if cberr != nil { + cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel)) + } + continue + } + for _, m := range msgs { + if m.Header.Type == unix.NLMSG_DONE { + if listExisting { + // This will be called after handling AF_UNSPEC + // list request, we have to wait for NLMSG_DONE + // before making another request + if err := makeRequest(unix.AF_BRIDGE); err != nil { + if cberr != nil { + cberr(err) + } + return + } + listExisting = false + } + continue + } + if m.Header.Type == unix.NLMSG_ERROR { + nError := int32(native.Uint32(m.Data[0:4])) + if nError == 0 { + continue + } + if cberr != nil { + cberr(syscall.Errno(-nError)) + } + return + } + neigh, err := NeighDeserialize(m.Data) + if err != nil { + if cberr != nil { + cberr(err) + } + return + } + ch <- NeighUpdate{Type: m.Header.Type, Neigh: *neigh} + } + } + }() + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/netlink.go b/vendor/github.com/vishvananda/netlink/netlink.go new file mode 100644 index 000000000..9cb685dc8 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netlink.go @@ -0,0 +1,40 @@ +// Package netlink provides a simple library for netlink. Netlink is +// the interface a user-space program in linux uses to communicate with +// the kernel. It can be used to add and remove interfaces, set up ip +// addresses and routes, and configure ipsec. Netlink communication +// requires elevated privileges, so in most cases this code needs to +// be run as root. The low level primitives for netlink are contained +// in the nl subpackage. This package attempts to provide a high-level +// interface that is loosely modeled on the iproute2 cli. +package netlink + +import ( + "errors" + "net" +) + +var ( + // ErrNotImplemented is returned when a requested feature is not implemented. + ErrNotImplemented = errors.New("not implemented") +) + +// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
+// This is valuable because addresses in netlink are often IPNets and +// ParseCIDR returns an IPNet with the IP part set to the base IP of the +// range. +func ParseIPNet(s string) (*net.IPNet, error) { + ip, ipNet, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + ipNet.IP = ip + return ipNet, nil +} + +// NewIPNet generates an IPNet from an ip address using a netmask of 32 or 128. +func NewIPNet(ip net.IP) *net.IPNet { + if ip.To4() != nil { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)} + } + return &net.IPNet{IP: ip, Mask: net.CIDRMask(128, 128)} +} diff --git a/vendor/github.com/vishvananda/netlink/netlink_linux.go b/vendor/github.com/vishvananda/netlink/netlink_linux.go new file mode 100644 index 000000000..a20d293d8 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netlink_linux.go @@ -0,0 +1,11 @@ +package netlink + +import "github.com/vishvananda/netlink/nl" + +// Family type definitions +const ( + FAMILY_ALL = nl.FAMILY_ALL + FAMILY_V4 = nl.FAMILY_V4 + FAMILY_V6 = nl.FAMILY_V6 + FAMILY_MPLS = nl.FAMILY_MPLS +) diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go new file mode 100644 index 000000000..da12c42a5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -0,0 +1,289 @@ +// +build !linux + +package netlink + +import "net" + +func LinkSetUp(link Link) error { + return ErrNotImplemented +} + +func LinkSetDown(link Link) error { + return ErrNotImplemented +} + +func LinkSetMTU(link Link, mtu int) error { + return ErrNotImplemented +} + +func LinkSetMaster(link Link, master Link) error { + return ErrNotImplemented +} + +func LinkSetNsPid(link Link, nspid int) error { + return ErrNotImplemented +} + +func LinkSetNsFd(link Link, fd int) error { + return ErrNotImplemented +} + +func LinkSetName(link Link, name string) error { + return ErrNotImplemented +} + +func LinkSetAlias(link Link, name string) error { + return ErrNotImplemented +} + +func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { + return ErrNotImplemented +} + +func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { + return ErrNotImplemented +} + +func LinkSetVfVlan(link Link, vf, vlan int) error { + return ErrNotImplemented +} + +func LinkSetVfVlanQos(link Link, vf, vlan, qos int) error { + return ErrNotImplemented +} + +func LinkSetVfVlanQosProto(link Link, vf, vlan, qos, proto int) error { + return ErrNotImplemented +} + +func LinkSetVfTxRate(link Link, vf, rate int) error { + return ErrNotImplemented +} + +func LinkSetVfRate(link Link, vf, minRate, maxRate int) error { + return ErrNotImplemented +} + +func LinkSetNoMaster(link Link) error { + return ErrNotImplemented +} + +func LinkSetMasterByIndex(link Link, masterIndex int) error { + return ErrNotImplemented +} + +func LinkSetXdpFd(link Link, fd int) error { + return ErrNotImplemented +} + +func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { + return ErrNotImplemented +} + +func LinkSetARPOff(link Link) error { + return ErrNotImplemented +} + +func LinkSetARPOn(link Link) error { + return ErrNotImplemented +} + +func LinkByName(name string) (Link, error) { + return nil, ErrNotImplemented +} + +func LinkByAlias(alias string) (Link, error) { + return nil, ErrNotImplemented +} + +func LinkByIndex(index int) (Link, error) { + return nil, ErrNotImplemented +} + +func LinkSetHairpin(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetGuard(link Link, 
mode bool) error { + return ErrNotImplemented +} + +func LinkSetFastLeave(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetLearning(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetRootBlock(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetFlood(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetTxQLen(link Link, qlen int) error { + return ErrNotImplemented +} + +func LinkSetGSOMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGROMaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGSOIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { + return ErrNotImplemented +} + +func LinkAdd(link Link) error { + return ErrNotImplemented +} + +func LinkDel(link Link) error { + return ErrNotImplemented +} + +func SetHairpin(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetGuard(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetFastLeave(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetLearning(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetRootBlock(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetFlood(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkList() ([]Link, error) { + return nil, ErrNotImplemented +} + +func AddrAdd(link Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrReplace(link Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrDel(link Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrList(link Link, family int) ([]Addr, error) { + return nil, ErrNotImplemented +} + +func RouteAdd(route *Route) error { + return ErrNotImplemented +} + +func RouteAppend(route *Route) error { + return ErrNotImplemented +} + +func RouteChange(route *Route) error { + return ErrNotImplemented +} + +func RouteDel(route *Route) error { + return ErrNotImplemented +} + +func RouteGet(destination net.IP) ([]Route, error) { + return nil, ErrNotImplemented +} + +func RouteList(link Link, family int) ([]Route, error) { + return nil, ErrNotImplemented +} + +func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + return nil, ErrNotImplemented +} + +func RouteReplace(route *Route) error { + return ErrNotImplemented +} + +func XfrmPolicyAdd(policy *XfrmPolicy) error { + return ErrNotImplemented +} + +func XfrmPolicyDel(policy *XfrmPolicy) error { + return ErrNotImplemented +} + +func XfrmPolicyList(family int) ([]XfrmPolicy, error) { + return nil, ErrNotImplemented +} + +func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return nil, ErrNotImplemented +} + +func XfrmStateAdd(policy *XfrmState) error { + return ErrNotImplemented +} + +func XfrmStateDel(policy *XfrmState) error { + return ErrNotImplemented +} + +func XfrmStateList(family int) ([]XfrmState, error) { + return nil, ErrNotImplemented +} + +func NeighAdd(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighSet(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighAppend(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighDel(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighList(linkIndex, family int) ([]Neigh, error) { + return nil, ErrNotImplemented +} + +func NeighDeserialize(m []byte) (*Neigh, error) { + return nil, 
ErrNotImplemented +} + +func SocketGet(local, remote net.Addr) (*Socket, error) { + return nil, ErrNotImplemented +} + +func SocketDestroy(local, remote net.Addr) (*Socket, error) { + return nil, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/netns_linux.go b/vendor/github.com/vishvananda/netlink/netns_linux.go new file mode 100644 index 000000000..2eb29c7ce --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netns_linux.go @@ -0,0 +1,141 @@ +package netlink + +// Network namespace ID functions +// +// The kernel has a weird concept called the network namespace ID. +// This is different from the file reference in proc (and any bind-mounted +// namespaces, etc.) +// +// Instead, namespaces can be assigned a numeric ID at any time. Once set, +// the ID is fixed. The ID can either be set manually by the user, or +// automatically, triggered by certain kernel actions. The most common kernel +// action that triggers namespace ID creation is moving one end of a veth pair +// into that namespace. + +import ( + "fmt" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// These can be replaced by the values from sys/unix when it is next released. +const ( + _ = iota + NETNSA_NSID + NETNSA_PID + NETNSA_FD +) + +// GetNetNsIdByPid looks up the network namespace ID for a given pid (really thread id). +// Returns -1 if the namespace does not have an ID set. +func (h *Handle) GetNetNsIdByPid(pid int) (int, error) { + return h.getNetNsId(NETNSA_PID, uint32(pid)) +} + +// GetNetNsIdByPid looks up the network namespace ID for a given pid (really thread id). +// Returns -1 if the namespace does not have an ID set. +func GetNetNsIdByPid(pid int) (int, error) { + return pkgHandle.GetNetNsIdByPid(pid) +} + +// SetNetNsIdByPid sets the ID of the network namespace for a given pid (really thread id). +// The ID can only be set for namespaces without an ID already set. +func (h *Handle) SetNetNsIdByPid(pid, nsid int) error { + return h.setNetNsId(NETNSA_PID, uint32(pid), uint32(nsid)) +} + +// SetNetNsIdByPid sets the ID of the network namespace for a given pid (really thread id). +// The ID can only be set for namespaces without an ID already set. +func SetNetNsIdByPid(pid, nsid int) error { + return pkgHandle.SetNetNsIdByPid(pid, nsid) +} + +// GetNetNsIdByFd looks up the network namespace ID for a given fd. +// fd must be an open file descriptor to a namespace file. +// Returns -1 if the namespace does not have an ID set. +func (h *Handle) GetNetNsIdByFd(fd int) (int, error) { + return h.getNetNsId(NETNSA_FD, uint32(fd)) +} + +// GetNetNsIdByFd looks up the network namespace ID for a given fd. +// fd must be an open file descriptor to a namespace file. +// Returns -1 if the namespace does not have an ID set. +func GetNetNsIdByFd(fd int) (int, error) { + return pkgHandle.GetNetNsIdByFd(fd) +} + +// SetNetNsIdByFd sets the ID of the network namespace for a given fd. +// fd must be an open file descriptor to a namespace file. +// The ID can only be set for namespaces without an ID already set. +func (h *Handle) SetNetNsIdByFd(fd, nsid int) error { + return h.setNetNsId(NETNSA_FD, uint32(fd), uint32(nsid)) +} + +// SetNetNsIdByFd sets the ID of the network namespace for a given fd. +// fd must be an open file descriptor to a namespace file. +// The ID can only be set for namespaces without an ID already set.
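+// +// Sketch (the namespace path and ID are illustrative): +// +// fd, _ := unix.Open("/var/run/netns/blue", unix.O_RDONLY, 0) +// defer unix.Close(fd) +// _ = netlink.SetNetNsIdByFd(fd, 100) // fails if an ID is already set +// id, _ := netlink.GetNetNsIdByFd(fd) // returns -1 if unset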
+func SetNetNsIdByFd(fd, nsid int) error { + return pkgHandle.SetNetNsIdByFd(fd, nsid) +} + +// getNetNsId requests the netnsid for a given type-val pair +// type should be either NETNSA_PID or NETNSA_FD +func (h *Handle) getNetNsId(attrType int, val uint32) (int, error) { + req := h.newNetlinkRequest(unix.RTM_GETNSID, unix.NLM_F_REQUEST) + + rtgen := nl.NewRtGenMsg() + req.AddData(rtgen) + + b := make([]byte, 4) + native.PutUint32(b, val) + attr := nl.NewRtAttr(attrType, b) + req.AddData(attr) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNSID) + + if err != nil { + return 0, err + } + + for _, m := range msgs { + msg := nl.DeserializeRtGenMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return 0, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case NETNSA_NSID: + return int(int32(native.Uint32(attr.Value))), nil + } + } + } + + return 0, fmt.Errorf("unexpected empty result") +} + +// setNetNsId sets the netnsid for a given type-val pair +// type should be either NETNSA_PID or NETNSA_FD +// The ID can only be set for namespaces without an ID already set +func (h *Handle) setNetNsId(attrType int, val uint32, newnsid uint32) error { + req := h.newNetlinkRequest(unix.RTM_NEWNSID, unix.NLM_F_REQUEST|unix.NLM_F_ACK) + + rtgen := nl.NewRtGenMsg() + req.AddData(rtgen) + + b := make([]byte, 4) + native.PutUint32(b, val) + attr := nl.NewRtAttr(attrType, b) + req.AddData(attr) + + b1 := make([]byte, 4) + native.PutUint32(b1, newnsid) + attr1 := nl.NewRtAttr(NETNSA_NSID, b1) + req.AddData(attr1) + + _, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNSID) + return err +} diff --git a/vendor/github.com/vishvananda/netlink/netns_unspecified.go b/vendor/github.com/vishvananda/netlink/netns_unspecified.go new file mode 100644 index 000000000..5c5899e36 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netns_unspecified.go @@ -0,0 +1,19 @@ +// +build !linux + +package netlink + +func GetNetNsIdByPid(pid int) (int, error) { + return 0, ErrNotImplemented +} + +func SetNetNsIdByPid(pid, nsid int) error { + return ErrNotImplemented +} + +func GetNetNsIdByFd(fd int) (int, error) { + return 0, ErrNotImplemented +} + +func SetNetNsIdByFd(fd, nsid int) error { + return ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go new file mode 100644 index 000000000..6bea4ed02 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -0,0 +1,71 @@ +package nl + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +type IfAddrmsg struct { + unix.IfAddrmsg +} + +func NewIfAddrmsg(family int) *IfAddrmsg { + return &IfAddrmsg{ + IfAddrmsg: unix.IfAddrmsg{ + Family: uint8(family), + }, + } +} + +// struct ifaddrmsg { +// __u8 ifa_family; +// __u8 ifa_prefixlen; /* The prefix length */ +// __u8 ifa_flags; /* Flags */ +// __u8 ifa_scope; /* Address scope */ +// __u32 ifa_index; /* Link index */ +// }; + +// type IfAddrmsg struct { +// Family uint8 +// Prefixlen uint8 +// Flags uint8 +// Scope uint8 +// Index uint32 +// } +// SizeofIfAddrmsg = 0x8 + +func DeserializeIfAddrmsg(b []byte) *IfAddrmsg { + return (*IfAddrmsg)(unsafe.Pointer(&b[0:unix.SizeofIfAddrmsg][0])) +} + +func (msg *IfAddrmsg) Serialize() []byte { + return (*(*[unix.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *IfAddrmsg) Len() int { + return unix.SizeofIfAddrmsg +} + +// struct ifa_cacheinfo { +// __u32 ifa_prefered; +// __u32 ifa_valid; +// __u32 cstamp; 
/* created timestamp, hundredths of seconds */ +// __u32 tstamp; /* updated timestamp, hundredths of seconds */ +// }; + +type IfaCacheInfo struct { + unix.IfaCacheinfo +} + +func (msg *IfaCacheInfo) Len() int { + return unix.SizeofIfaCacheinfo +} + +func DeserializeIfaCacheInfo(b []byte) *IfaCacheInfo { + return (*IfaCacheInfo)(unsafe.Pointer(&b[0:unix.SizeofIfaCacheinfo][0])) +} + +func (msg *IfaCacheInfo) Serialize() []byte { + return (*(*[unix.SizeofIfaCacheinfo]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go new file mode 100644 index 000000000..34e78ba8d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go @@ -0,0 +1,74 @@ +package nl + +import ( + "fmt" + "unsafe" +) + +const ( + SizeofBridgeVlanInfo = 0x04 +) + +/* Bridge Flags */ +const ( + BRIDGE_FLAGS_MASTER = iota + 1 /* Bridge command to/from master */ + BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */ +) + +/* Bridge management nested attributes + * [IFLA_AF_SPEC] = { + * [IFLA_BRIDGE_FLAGS] + * [IFLA_BRIDGE_MODE] + * [IFLA_BRIDGE_VLAN_INFO] + * } + */ +const ( + IFLA_BRIDGE_FLAGS = iota + IFLA_BRIDGE_MODE + IFLA_BRIDGE_VLAN_INFO +) + +const ( + BRIDGE_VLAN_INFO_MASTER = 1 << iota + BRIDGE_VLAN_INFO_PVID + BRIDGE_VLAN_INFO_UNTAGGED + BRIDGE_VLAN_INFO_RANGE_BEGIN + BRIDGE_VLAN_INFO_RANGE_END +) + +// struct bridge_vlan_info { +// __u16 flags; +// __u16 vid; +// }; + +type BridgeVlanInfo struct { + Flags uint16 + Vid uint16 +} + +func (b *BridgeVlanInfo) Serialize() []byte { + return (*(*[SizeofBridgeVlanInfo]byte)(unsafe.Pointer(b)))[:] +} + +func DeserializeBridgeVlanInfo(b []byte) *BridgeVlanInfo { + return (*BridgeVlanInfo)(unsafe.Pointer(&b[0:SizeofBridgeVlanInfo][0])) +} + +func (b *BridgeVlanInfo) PortVID() bool { + return b.Flags&BRIDGE_VLAN_INFO_PVID > 0 +} + +func (b *BridgeVlanInfo) EngressUntag() bool { + return b.Flags&BRIDGE_VLAN_INFO_UNTAGGED > 0 +} + +func (b *BridgeVlanInfo) String() string { + return fmt.Sprintf("%+v", *b) +} + +/* New extended info filters for IFLA_EXT_MASK */ +const ( + RTEXT_FILTER_VF = 1 << iota + RTEXT_FILTER_BRVLAN + RTEXT_FILTER_BRVLAN_COMPRESSED +) diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go new file mode 100644 index 000000000..6989d1edc --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -0,0 +1,258 @@ +package nl + +import "unsafe" + +// Track the message sizes for the correct serialization/deserialization +const ( + SizeofNfgenmsg = 4 + SizeofNfattr = 4 + SizeofNfConntrack = 376 + SizeofNfctTupleHead = 52 +) + +var L4ProtoMap = map[uint8]string{ + 6: "tcp", + 17: "udp", +} + +// From https://git.netfilter.org/libnetfilter_conntrack/tree/include/libnetfilter_conntrack/libnetfilter_conntrack_tcp.h +// enum tcp_state { +// TCP_CONNTRACK_NONE, +// TCP_CONNTRACK_SYN_SENT, +// TCP_CONNTRACK_SYN_RECV, +// TCP_CONNTRACK_ESTABLISHED, +// TCP_CONNTRACK_FIN_WAIT, +// TCP_CONNTRACK_CLOSE_WAIT, +// TCP_CONNTRACK_LAST_ACK, +// TCP_CONNTRACK_TIME_WAIT, +// TCP_CONNTRACK_CLOSE, +// TCP_CONNTRACK_LISTEN, /* obsolete */ +// #define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN +// TCP_CONNTRACK_MAX, +// TCP_CONNTRACK_IGNORE +// }; +const ( + TCP_CONNTRACK_NONE = 0 + TCP_CONNTRACK_SYN_SENT = 1 + TCP_CONNTRACK_SYN_RECV = 2 + TCP_CONNTRACK_ESTABLISHED = 3 + TCP_CONNTRACK_FIN_WAIT = 4 + TCP_CONNTRACK_CLOSE_WAIT = 5 + TCP_CONNTRACK_LAST_ACK = 
6 + TCP_CONNTRACK_TIME_WAIT = 7 + TCP_CONNTRACK_CLOSE = 8 + TCP_CONNTRACK_LISTEN = 9 + TCP_CONNTRACK_SYN_SENT2 = 9 + TCP_CONNTRACK_MAX = 10 + TCP_CONNTRACK_IGNORE = 11 +) + +// All the following constants are coming from: +// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h + +// enum cntl_msg_types { +// IPCTNL_MSG_CT_NEW, +// IPCTNL_MSG_CT_GET, +// IPCTNL_MSG_CT_DELETE, +// IPCTNL_MSG_CT_GET_CTRZERO, +// IPCTNL_MSG_CT_GET_STATS_CPU, +// IPCTNL_MSG_CT_GET_STATS, +// IPCTNL_MSG_CT_GET_DYING, +// IPCTNL_MSG_CT_GET_UNCONFIRMED, +// +// IPCTNL_MSG_MAX +// }; +const ( + IPCTNL_MSG_CT_NEW = 0 + IPCTNL_MSG_CT_GET = 1 + IPCTNL_MSG_CT_DELETE = 2 +) + +// #define NFNETLINK_V0 0 +const ( + NFNETLINK_V0 = 0 +) + +const ( + NLA_F_NESTED uint16 = (1 << 15) // #define NLA_F_NESTED (1 << 15) + NLA_F_NET_BYTEORDER uint16 = (1 << 14) // #define NLA_F_NESTED (1 << 14) + NLA_TYPE_MASK = ^(NLA_F_NESTED | NLA_F_NET_BYTEORDER) + NLA_ALIGNTO uint16 = 4 // #define NLA_ALIGNTO 4 +) + +// enum ctattr_type { +// CTA_UNSPEC, +// CTA_TUPLE_ORIG, +// CTA_TUPLE_REPLY, +// CTA_STATUS, +// CTA_PROTOINFO, +// CTA_HELP, +// CTA_NAT_SRC, +// #define CTA_NAT CTA_NAT_SRC /* backwards compatibility */ +// CTA_TIMEOUT, +// CTA_MARK, +// CTA_COUNTERS_ORIG, +// CTA_COUNTERS_REPLY, +// CTA_USE, +// CTA_ID, +// CTA_NAT_DST, +// CTA_TUPLE_MASTER, +// CTA_SEQ_ADJ_ORIG, +// CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG, +// CTA_SEQ_ADJ_REPLY, +// CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY, +// CTA_SECMARK, /* obsolete */ +// CTA_ZONE, +// CTA_SECCTX, +// CTA_TIMESTAMP, +// CTA_MARK_MASK, +// CTA_LABELS, +// CTA_LABELS_MASK, +// __CTA_MAX +// }; +const ( + CTA_TUPLE_ORIG = 1 + CTA_TUPLE_REPLY = 2 + CTA_STATUS = 3 + CTA_PROTOINFO = 4 + CTA_TIMEOUT = 7 + CTA_MARK = 8 + CTA_COUNTERS_ORIG = 9 + CTA_COUNTERS_REPLY = 10 + CTA_USE = 11 + CTA_ID = 12 + CTA_ZONE = 18 + CTA_TIMESTAMP = 20 + CTA_LABELS = 22 + CTA_LABELS_MASK = 23 +) + +// enum ctattr_tuple { +// CTA_TUPLE_UNSPEC, +// CTA_TUPLE_IP, +// CTA_TUPLE_PROTO, +// CTA_TUPLE_ZONE, +// __CTA_TUPLE_MAX +// }; +// #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1) +const ( + CTA_TUPLE_IP = 1 + CTA_TUPLE_PROTO = 2 +) + +// enum ctattr_ip { +// CTA_IP_UNSPEC, +// CTA_IP_V4_SRC, +// CTA_IP_V4_DST, +// CTA_IP_V6_SRC, +// CTA_IP_V6_DST, +// __CTA_IP_MAX +// }; +// #define CTA_IP_MAX (__CTA_IP_MAX - 1) +const ( + CTA_IP_V4_SRC = 1 + CTA_IP_V4_DST = 2 + CTA_IP_V6_SRC = 3 + CTA_IP_V6_DST = 4 +) + +// enum ctattr_l4proto { +// CTA_PROTO_UNSPEC, +// CTA_PROTO_NUM, +// CTA_PROTO_SRC_PORT, +// CTA_PROTO_DST_PORT, +// CTA_PROTO_ICMP_ID, +// CTA_PROTO_ICMP_TYPE, +// CTA_PROTO_ICMP_CODE, +// CTA_PROTO_ICMPV6_ID, +// CTA_PROTO_ICMPV6_TYPE, +// CTA_PROTO_ICMPV6_CODE, +// __CTA_PROTO_MAX +// }; +// #define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1) +const ( + CTA_PROTO_NUM = 1 + CTA_PROTO_SRC_PORT = 2 + CTA_PROTO_DST_PORT = 3 +) + +// enum ctattr_protoinfo { +// CTA_PROTOINFO_UNSPEC, +// CTA_PROTOINFO_TCP, +// CTA_PROTOINFO_DCCP, +// CTA_PROTOINFO_SCTP, +// __CTA_PROTOINFO_MAX +// }; +// #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) +const ( + CTA_PROTOINFO_UNSPEC = 0 + CTA_PROTOINFO_TCP = 1 + CTA_PROTOINFO_DCCP = 2 + CTA_PROTOINFO_SCTP = 3 +) + +// enum ctattr_protoinfo_tcp { +// CTA_PROTOINFO_TCP_UNSPEC, +// CTA_PROTOINFO_TCP_STATE, +// CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, +// CTA_PROTOINFO_TCP_WSCALE_REPLY, +// CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, +// CTA_PROTOINFO_TCP_FLAGS_REPLY, +// __CTA_PROTOINFO_TCP_MAX +// }; +// #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) 
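// Parsing sketch (illustrative only): the CTA_* constants above index a
// nested TLV hierarchy, so extracting the original tuple means descending
// CTA_TUPLE_ORIG -> CTA_TUPLE_IP -> CTA_IP_V4_SRC/DST with ParseRouteAttr
// (defined in nl_linux.go). NLA_TYPE_MASK strips the NLA_F_NESTED bit first.
// Error handling elided; `payload` stands for a conntrack message body after
// the Nfgenmsg header and is an assumption of this sketch.
//
//	attrs, _ := nl.ParseRouteAttr(payload)
//	for _, a := range attrs {
//		if a.Attr.Type&nl.NLA_TYPE_MASK == nl.CTA_TUPLE_ORIG {
//			tuple, _ := nl.ParseRouteAttr(a.Value) // then CTA_TUPLE_IP, ...
//		}
//	}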
+const ( + CTA_PROTOINFO_TCP_STATE = 1 + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2 + CTA_PROTOINFO_TCP_WSCALE_REPLY = 3 + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4 + CTA_PROTOINFO_TCP_FLAGS_REPLY = 5 +) + +// enum ctattr_counters { +// CTA_COUNTERS_UNSPEC, +// CTA_COUNTERS_PACKETS, /* 64bit counters */ +// CTA_COUNTERS_BYTES, /* 64bit counters */ +// CTA_COUNTERS32_PACKETS, /* old 32bit counters, unused */ +// CTA_COUNTERS32_BYTES, /* old 32bit counters, unused */ +// CTA_COUNTERS_PAD, +// __CTA_COUNTERS_M +// }; +// #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) +const ( + CTA_COUNTERS_PACKETS = 1 + CTA_COUNTERS_BYTES = 2 +) + +// enum CTA TIMESTAMP TLVs +// CTA_TIMESTAMP_START /* 64bit value */ +// CTA_TIMESTAMP_STOP /* 64bit value */ +const ( + CTA_TIMESTAMP_START = 1 + CTA_TIMESTAMP_STOP = 2 +) + +// /* General form of address family dependent message. +// */ +// struct nfgenmsg { +// __u8 nfgen_family; /* AF_xxx */ +// __u8 version; /* nfnetlink version */ +// __be16 res_id; /* resource id */ +// }; +type Nfgenmsg struct { + NfgenFamily uint8 + Version uint8 + ResId uint16 // big endian +} + +func (msg *Nfgenmsg) Len() int { + return SizeofNfgenmsg +} + +func DeserializeNfgenmsg(b []byte) *Nfgenmsg { + return (*Nfgenmsg)(unsafe.Pointer(&b[0:SizeofNfgenmsg][0])) +} + +func (msg *Nfgenmsg) Serialize() []byte { + return (*(*[SizeofNfgenmsg]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go new file mode 100644 index 000000000..956367b29 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/devlink_linux.go @@ -0,0 +1,142 @@ +package nl + +// All the following constants are coming from: +// https://github.com/torvalds/linux/blob/master/include/uapi/linux/devlink.h + +const ( + GENL_DEVLINK_VERSION = 1 + GENL_DEVLINK_NAME = "devlink" +) + +const ( + DEVLINK_CMD_GET = 1 + DEVLINK_CMD_PORT_GET = 5 + DEVLINK_CMD_PORT_SET = 6 + DEVLINK_CMD_PORT_NEW = 7 + DEVLINK_CMD_PORT_DEL = 8 + DEVLINK_CMD_ESWITCH_GET = 29 + DEVLINK_CMD_ESWITCH_SET = 30 + DEVLINK_CMD_RESOURCE_DUMP = 36 + DEVLINK_CMD_PARAM_GET = 38 + DEVLINK_CMD_PARAM_SET = 39 + DEVLINK_CMD_INFO_GET = 51 +) + +const ( + DEVLINK_ATTR_BUS_NAME = 1 + DEVLINK_ATTR_DEV_NAME = 2 + DEVLINK_ATTR_PORT_INDEX = 3 + DEVLINK_ATTR_PORT_TYPE = 4 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 8 + DEVLINK_ATTR_ESWITCH_MODE = 25 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26 + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62 + DEVLINK_ATTR_RESOURCE_LIST = 63 /* nested */ + DEVLINK_ATTR_RESOURCE = 64 /* nested */ + DEVLINK_ATTR_RESOURCE_NAME = 65 /* string */ + DEVLINK_ATTR_RESOURCE_ID = 66 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE = 67 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69 /* u8 */ + DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71 /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72 /* u64 */ + DEVLINK_ATTR_RESOURCE_UNIT = 73 /* u8 */ + DEVLINK_ATTR_RESOURCE_OCC = 74 /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75 /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76 /* u64 */ + DEVLINK_ATTR_PORT_FLAVOUR = 77 + DEVLINK_ATTR_INFO_DRIVER_NAME = 98 + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 99 + DEVLINK_ATTR_INFO_VERSION_FIXED = 100 + DEVLINK_ATTR_INFO_VERSION_RUNNING = 101 + DEVLINK_ATTR_INFO_VERSION_STORED = 102 + DEVLINK_ATTR_INFO_VERSION_NAME = 103 + DEVLINK_ATTR_INFO_VERSION_VALUE = 104 + 
DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 127 + DEVLINK_ATTR_PORT_FUNCTION = 145 + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 150 + DEVLINK_ATTR_PORT_PCI_SF_NUMBER = 164 +) + +const ( + DEVLINK_ESWITCH_MODE_LEGACY = 0 + DEVLINK_ESWITCH_MODE_SWITCHDEV = 1 +) + +const ( + DEVLINK_ESWITCH_INLINE_MODE_NONE = 0 + DEVLINK_ESWITCH_INLINE_MODE_LINK = 1 + DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 2 + DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 3 +) + +const ( + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0 + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 1 +) + +const ( + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0 + DEVLINK_PORT_FLAVOUR_CPU = 1 + DEVLINK_PORT_FLAVOUR_DSA = 2 + DEVLINK_PORT_FLAVOUR_PCI_PF = 3 + DEVLINK_PORT_FLAVOUR_PCI_VF = 4 + DEVLINK_PORT_FLAVOUR_VIRTUAL = 5 + DEVLINK_PORT_FLAVOUR_UNUSED = 6 + DEVLINK_PORT_FLAVOUR_PCI_SF = 7 +) + +const ( + DEVLINK_PORT_TYPE_NOTSET = 0 + DEVLINK_PORT_TYPE_AUTO = 1 + DEVLINK_PORT_TYPE_ETH = 2 + DEVLINK_PORT_TYPE_IB = 3 +) + +const ( + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 1 + DEVLINK_PORT_FN_ATTR_STATE = 2 + DEVLINK_PORT_FN_ATTR_OPSTATE = 3 +) + +const ( + DEVLINK_PORT_FN_STATE_INACTIVE = 0 + DEVLINK_PORT_FN_STATE_ACTIVE = 1 +) + +const ( + DEVLINK_PORT_FN_OPSTATE_DETACHED = 0 + DEVLINK_PORT_FN_OPSTATE_ATTACHED = 1 +) + +const ( + DEVLINK_RESOURCE_UNIT_ENTRY uint8 = 0 +) + +const ( + DEVLINK_ATTR_PARAM = iota + 80 /* nested */ + DEVLINK_ATTR_PARAM_NAME /* string */ + DEVLINK_ATTR_PARAM_GENERIC /* flag */ + DEVLINK_ATTR_PARAM_TYPE /* u8 */ + DEVLINK_ATTR_PARAM_VALUES_LIST /* nested */ + DEVLINK_ATTR_PARAM_VALUE /* nested */ + DEVLINK_ATTR_PARAM_VALUE_DATA /* dynamic */ + DEVLINK_ATTR_PARAM_VALUE_CMODE /* u8 */ +) + +const ( + DEVLINK_PARAM_TYPE_U8 = 1 + DEVLINK_PARAM_TYPE_U16 = 2 + DEVLINK_PARAM_TYPE_U32 = 3 + DEVLINK_PARAM_TYPE_STRING = 5 + DEVLINK_PARAM_TYPE_BOOL = 6 +) + +const ( + DEVLINK_PARAM_CMODE_RUNTIME = iota + DEVLINK_PARAM_CMODE_DRIVERINIT + DEVLINK_PARAM_CMODE_PERMANENT +) diff --git a/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go new file mode 100644 index 000000000..81b46f2c7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go @@ -0,0 +1,89 @@ +package nl + +import ( + "unsafe" +) + +const SizeofGenlmsg = 4 + +const ( + GENL_ID_CTRL = 0x10 + GENL_CTRL_VERSION = 2 + GENL_CTRL_NAME = "nlctrl" +) + +const ( + GENL_CTRL_CMD_GETFAMILY = 3 +) + +const ( + GENL_CTRL_ATTR_UNSPEC = iota + GENL_CTRL_ATTR_FAMILY_ID + GENL_CTRL_ATTR_FAMILY_NAME + GENL_CTRL_ATTR_VERSION + GENL_CTRL_ATTR_HDRSIZE + GENL_CTRL_ATTR_MAXATTR + GENL_CTRL_ATTR_OPS + GENL_CTRL_ATTR_MCAST_GROUPS +) + +const ( + GENL_CTRL_ATTR_OP_UNSPEC = iota + GENL_CTRL_ATTR_OP_ID + GENL_CTRL_ATTR_OP_FLAGS +) + +const ( + GENL_ADMIN_PERM = 1 << iota + GENL_CMD_CAP_DO + GENL_CMD_CAP_DUMP + GENL_CMD_CAP_HASPOL +) + +const ( + GENL_CTRL_ATTR_MCAST_GRP_UNSPEC = iota + GENL_CTRL_ATTR_MCAST_GRP_NAME + GENL_CTRL_ATTR_MCAST_GRP_ID +) + +const ( + GENL_GTP_VERSION = 0 + GENL_GTP_NAME = "gtp" +) + +const ( + GENL_GTP_CMD_NEWPDP = iota + GENL_GTP_CMD_DELPDP + GENL_GTP_CMD_GETPDP +) + +const ( + GENL_GTP_ATTR_UNSPEC = iota + GENL_GTP_ATTR_LINK + GENL_GTP_ATTR_VERSION + GENL_GTP_ATTR_TID + GENL_GTP_ATTR_PEER_ADDRESS + GENL_GTP_ATTR_MS_ADDRESS + GENL_GTP_ATTR_FLOW + GENL_GTP_ATTR_NET_NS_FD + GENL_GTP_ATTR_I_TEI + GENL_GTP_ATTR_O_TEI + GENL_GTP_ATTR_PAD +) + +type Genlmsg struct { + Command uint8 + Version uint8 +} + +func (msg *Genlmsg) Len() int { + return SizeofGenlmsg +} + +func DeserializeGenlmsg(b []byte) *Genlmsg { + return 
(*Genlmsg)(unsafe.Pointer(&b[0:SizeofGenlmsg][0])) +} + +func (msg *Genlmsg) Serialize() []byte { + return (*(*[SizeofGenlmsg]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go new file mode 100644 index 000000000..d5dd69e0c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go @@ -0,0 +1,21 @@ +package nl + +// id's of route attribute from https://elixir.bootlin.com/linux/v5.17.3/source/include/uapi/linux/lwtunnel.h#L38 +// the value's size are specified in https://elixir.bootlin.com/linux/v5.17.3/source/net/ipv4/ip_tunnel_core.c#L928 + +const ( + LWTUNNEL_IP6_UNSPEC = iota + LWTUNNEL_IP6_ID + LWTUNNEL_IP6_DST + LWTUNNEL_IP6_SRC + LWTUNNEL_IP6_HOPLIMIT + LWTUNNEL_IP6_TC + LWTUNNEL_IP6_FLAGS + LWTUNNEL_IP6_PAD // not implemented + LWTUNNEL_IP6_OPTS // not implemented + __LWTUNNEL_IP6_MAX +) + + + + diff --git a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go new file mode 100644 index 000000000..89dd009df --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go @@ -0,0 +1,227 @@ +package nl + +import ( + "strconv" + + "golang.org/x/sys/unix" +) + +const ( + /* The protocol version */ + IPSET_PROTOCOL = 6 + + /* The max length of strings including NUL: set and type identifiers */ + IPSET_MAXNAMELEN = 32 + + /* The maximum permissible comment length we will accept over netlink */ + IPSET_MAX_COMMENT_SIZE = 255 +) + +const ( + _ = iota + IPSET_CMD_PROTOCOL /* 1: Return protocol version */ + IPSET_CMD_CREATE /* 2: Create a new (empty) set */ + IPSET_CMD_DESTROY /* 3: Destroy a (empty) set */ + IPSET_CMD_FLUSH /* 4: Remove all elements from a set */ + IPSET_CMD_RENAME /* 5: Rename a set */ + IPSET_CMD_SWAP /* 6: Swap two sets */ + IPSET_CMD_LIST /* 7: List sets */ + IPSET_CMD_SAVE /* 8: Save sets */ + IPSET_CMD_ADD /* 9: Add an element to a set */ + IPSET_CMD_DEL /* 10: Delete an element from a set */ + IPSET_CMD_TEST /* 11: Test an element in a set */ + IPSET_CMD_HEADER /* 12: Get set header data only */ + IPSET_CMD_TYPE /* 13: Get set type */ +) + +/* Attributes at command level */ +const ( + _ = iota + IPSET_ATTR_PROTOCOL /* 1: Protocol version */ + IPSET_ATTR_SETNAME /* 2: Name of the set */ + IPSET_ATTR_TYPENAME /* 3: Typename */ + IPSET_ATTR_REVISION /* 4: Settype revision */ + IPSET_ATTR_FAMILY /* 5: Settype family */ + IPSET_ATTR_FLAGS /* 6: Flags at command level */ + IPSET_ATTR_DATA /* 7: Nested attributes */ + IPSET_ATTR_ADT /* 8: Multiple data containers */ + IPSET_ATTR_LINENO /* 9: Restore lineno */ + IPSET_ATTR_PROTOCOL_MIN /* 10: Minimal supported version number */ + + IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME /* Setname at rename/swap */ + IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN /* type rev min */ +) + +/* CADT specific attributes */ +const ( + IPSET_ATTR_IP = 1 + IPSET_ATTR_IP_FROM = 1 + IPSET_ATTR_IP_TO = 2 + IPSET_ATTR_CIDR = 3 + IPSET_ATTR_PORT = 4 + IPSET_ATTR_PORT_FROM = 4 + IPSET_ATTR_PORT_TO = 5 + IPSET_ATTR_TIMEOUT = 6 + IPSET_ATTR_PROTO = 7 + IPSET_ATTR_CADT_FLAGS = 8 + IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO /* 9 */ + IPSET_ATTR_MARK = 10 + IPSET_ATTR_MARKMASK = 11 + + /* Reserve empty slots */ + IPSET_ATTR_CADT_MAX = 16 + + /* Create-only specific attributes */ + IPSET_ATTR_GC = 3 + iota + IPSET_ATTR_HASHSIZE + IPSET_ATTR_MAXELEM + IPSET_ATTR_NETMASK + IPSET_ATTR_PROBES + IPSET_ATTR_RESIZE + IPSET_ATTR_SIZE + + /* Kernel-only */ + 
IPSET_ATTR_ELEMENTS + IPSET_ATTR_REFERENCES + IPSET_ATTR_MEMSIZE + + SET_ATTR_CREATE_MAX +) + +const ( + IPSET_ATTR_IPADDR_IPV4 = 1 + IPSET_ATTR_IPADDR_IPV6 = 2 +) + +/* ADT specific attributes */ +const ( + IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + iota + 1 + IPSET_ATTR_NAME + IPSET_ATTR_NAMEREF + IPSET_ATTR_IP2 + IPSET_ATTR_CIDR2 + IPSET_ATTR_IP2_TO + IPSET_ATTR_IFACE + IPSET_ATTR_BYTES + IPSET_ATTR_PACKETS + IPSET_ATTR_COMMENT + IPSET_ATTR_SKBMARK + IPSET_ATTR_SKBPRIO + IPSET_ATTR_SKBQUEUE +) + +/* Flags at CADT attribute level, upper half of cmdattrs */ +const ( + IPSET_FLAG_BIT_BEFORE = 0 + IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE) + IPSET_FLAG_BIT_PHYSDEV = 1 + IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV) + IPSET_FLAG_BIT_NOMATCH = 2 + IPSET_FLAG_NOMATCH = (1 << IPSET_FLAG_BIT_NOMATCH) + IPSET_FLAG_BIT_WITH_COUNTERS = 3 + IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS) + IPSET_FLAG_BIT_WITH_COMMENT = 4 + IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT) + IPSET_FLAG_BIT_WITH_FORCEADD = 5 + IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD) + IPSET_FLAG_BIT_WITH_SKBINFO = 6 + IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO) + IPSET_FLAG_CADT_MAX = 15 +) + +const ( + IPSET_ERR_PRIVATE = 4096 + iota + IPSET_ERR_PROTOCOL + IPSET_ERR_FIND_TYPE + IPSET_ERR_MAX_SETS + IPSET_ERR_BUSY + IPSET_ERR_EXIST_SETNAME2 + IPSET_ERR_TYPE_MISMATCH + IPSET_ERR_EXIST + IPSET_ERR_INVALID_CIDR + IPSET_ERR_INVALID_NETMASK + IPSET_ERR_INVALID_FAMILY + IPSET_ERR_TIMEOUT + IPSET_ERR_REFERENCED + IPSET_ERR_IPADDR_IPV4 + IPSET_ERR_IPADDR_IPV6 + IPSET_ERR_COUNTER + IPSET_ERR_COMMENT + IPSET_ERR_INVALID_MARKMASK + IPSET_ERR_SKBINFO + + /* Type specific error codes */ + IPSET_ERR_TYPE_SPECIFIC = 4352 +) + +type IPSetError uintptr + +func (e IPSetError) Error() string { + switch int(e) { + case IPSET_ERR_PRIVATE: + return "private" + case IPSET_ERR_PROTOCOL: + return "invalid protocol" + case IPSET_ERR_FIND_TYPE: + return "invalid type" + case IPSET_ERR_MAX_SETS: + return "max sets reached" + case IPSET_ERR_BUSY: + return "busy" + case IPSET_ERR_EXIST_SETNAME2: + return "exist_setname2" + case IPSET_ERR_TYPE_MISMATCH: + return "type mismatch" + case IPSET_ERR_EXIST: + return "exist" + case IPSET_ERR_INVALID_CIDR: + return "invalid cidr" + case IPSET_ERR_INVALID_NETMASK: + return "invalid netmask" + case IPSET_ERR_INVALID_FAMILY: + return "invalid family" + case IPSET_ERR_TIMEOUT: + return "timeout" + case IPSET_ERR_REFERENCED: + return "referenced" + case IPSET_ERR_IPADDR_IPV4: + return "invalid ipv4 address" + case IPSET_ERR_IPADDR_IPV6: + return "invalid ipv6 address" + case IPSET_ERR_COUNTER: + return "invalid counter" + case IPSET_ERR_COMMENT: + return "invalid comment" + case IPSET_ERR_INVALID_MARKMASK: + return "invalid markmask" + case IPSET_ERR_SKBINFO: + return "skbinfo" + default: + return "errno " + strconv.Itoa(int(e)) + } +} + +func GetIpsetFlags(cmd int) int { + switch cmd { + case IPSET_CMD_CREATE: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK | unix.NLM_F_CREATE + case IPSET_CMD_DESTROY, + IPSET_CMD_FLUSH, + IPSET_CMD_RENAME, + IPSET_CMD_SWAP, + IPSET_CMD_TEST: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK + case IPSET_CMD_LIST, + IPSET_CMD_SAVE: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK | unix.NLM_F_ROOT | unix.NLM_F_MATCH | unix.NLM_F_DUMP + case IPSET_CMD_ADD, + IPSET_CMD_DEL: + return unix.NLM_F_REQUEST | unix.NLM_F_ACK + case IPSET_CMD_HEADER, + IPSET_CMD_TYPE, + IPSET_CMD_PROTOCOL: + return unix.NLM_F_REQUEST + default: + return 0 + 
} +} diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go new file mode 100644 index 000000000..0b5be470c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -0,0 +1,818 @@ +package nl + +import ( + "bytes" + "encoding/binary" + "fmt" + "unsafe" +) + +const ( + DEFAULT_CHANGE = 0xFFFFFFFF +) + +const ( + IFLA_INFO_UNSPEC = iota + IFLA_INFO_KIND + IFLA_INFO_DATA + IFLA_INFO_XSTATS + IFLA_INFO_SLAVE_KIND + IFLA_INFO_SLAVE_DATA + IFLA_INFO_MAX = IFLA_INFO_SLAVE_DATA +) + +const ( + IFLA_VLAN_UNSPEC = iota + IFLA_VLAN_ID + IFLA_VLAN_FLAGS + IFLA_VLAN_EGRESS_QOS + IFLA_VLAN_INGRESS_QOS + IFLA_VLAN_PROTOCOL + IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL +) + +const ( + IFLA_NETKIT_UNSPEC = iota + IFLA_NETKIT_PEER_INFO + IFLA_NETKIT_PRIMARY + IFLA_NETKIT_POLICY + IFLA_NETKIT_PEER_POLICY + IFLA_NETKIT_MODE + IFLA_NETKIT_MAX = IFLA_NETKIT_MODE +) + +const ( + VETH_INFO_UNSPEC = iota + VETH_INFO_PEER + VETH_INFO_MAX = VETH_INFO_PEER +) + +const ( + IFLA_VXLAN_UNSPEC = iota + IFLA_VXLAN_ID + IFLA_VXLAN_GROUP + IFLA_VXLAN_LINK + IFLA_VXLAN_LOCAL + IFLA_VXLAN_TTL + IFLA_VXLAN_TOS + IFLA_VXLAN_LEARNING + IFLA_VXLAN_AGEING + IFLA_VXLAN_LIMIT + IFLA_VXLAN_PORT_RANGE + IFLA_VXLAN_PROXY + IFLA_VXLAN_RSC + IFLA_VXLAN_L2MISS + IFLA_VXLAN_L3MISS + IFLA_VXLAN_PORT + IFLA_VXLAN_GROUP6 + IFLA_VXLAN_LOCAL6 + IFLA_VXLAN_UDP_CSUM + IFLA_VXLAN_UDP_ZERO_CSUM6_TX + IFLA_VXLAN_UDP_ZERO_CSUM6_RX + IFLA_VXLAN_REMCSUM_TX + IFLA_VXLAN_REMCSUM_RX + IFLA_VXLAN_GBP + IFLA_VXLAN_REMCSUM_NOPARTIAL + IFLA_VXLAN_FLOWBASED + IFLA_VXLAN_MAX = IFLA_VXLAN_FLOWBASED +) + +const ( + BRIDGE_MODE_UNSPEC = iota + BRIDGE_MODE_HAIRPIN +) + +const ( + IFLA_BRPORT_UNSPEC = iota + IFLA_BRPORT_STATE + IFLA_BRPORT_PRIORITY + IFLA_BRPORT_COST + IFLA_BRPORT_MODE + IFLA_BRPORT_GUARD + IFLA_BRPORT_PROTECT + IFLA_BRPORT_FAST_LEAVE + IFLA_BRPORT_LEARNING + IFLA_BRPORT_UNICAST_FLOOD + IFLA_BRPORT_PROXYARP + IFLA_BRPORT_LEARNING_SYNC + IFLA_BRPORT_PROXYARP_WIFI + IFLA_BRPORT_ROOT_ID + IFLA_BRPORT_BRIDGE_ID + IFLA_BRPORT_DESIGNATED_PORT + IFLA_BRPORT_DESIGNATED_COST + IFLA_BRPORT_ID + IFLA_BRPORT_NO + IFLA_BRPORT_TOPOLOGY_CHANGE_ACK + IFLA_BRPORT_CONFIG_PENDING + IFLA_BRPORT_MESSAGE_AGE_TIMER + IFLA_BRPORT_FORWARD_DELAY_TIMER + IFLA_BRPORT_HOLD_TIMER + IFLA_BRPORT_FLUSH + IFLA_BRPORT_MULTICAST_ROUTER + IFLA_BRPORT_PAD + IFLA_BRPORT_MCAST_FLOOD + IFLA_BRPORT_MCAST_TO_UCAST + IFLA_BRPORT_VLAN_TUNNEL + IFLA_BRPORT_BCAST_FLOOD + IFLA_BRPORT_GROUP_FWD_MASK + IFLA_BRPORT_NEIGH_SUPPRESS + IFLA_BRPORT_ISOLATED + IFLA_BRPORT_BACKUP_PORT + IFLA_BRPORT_MRP_RING_OPEN + IFLA_BRPORT_MRP_IN_OPEN + IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT + IFLA_BRPORT_MCAST_EHT_HOSTS_CNT + IFLA_BRPORT_LOCKED + IFLA_BRPORT_MAB + IFLA_BRPORT_MCAST_N_GROUPS + IFLA_BRPORT_MCAST_MAX_GROUPS + IFLA_BRPORT_MAX = IFLA_BRPORT_MCAST_MAX_GROUPS +) + +const ( + IFLA_IPVLAN_UNSPEC = iota + IFLA_IPVLAN_MODE + IFLA_IPVLAN_FLAG + IFLA_IPVLAN_MAX = IFLA_IPVLAN_FLAG +) + +const ( + IFLA_MACVLAN_UNSPEC = iota + IFLA_MACVLAN_MODE + IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_MACADDR_MODE + IFLA_MACVLAN_MACADDR + IFLA_MACVLAN_MACADDR_DATA + IFLA_MACVLAN_MACADDR_COUNT + IFLA_MACVLAN_BC_QUEUE_LEN + IFLA_MACVLAN_BC_QUEUE_LEN_USED + IFLA_MACVLAN_MAX = IFLA_MACVLAN_BC_QUEUE_LEN_USED +) + +const ( + MACVLAN_MODE_PRIVATE = 1 + MACVLAN_MODE_VEPA = 2 + MACVLAN_MODE_BRIDGE = 4 + MACVLAN_MODE_PASSTHRU = 8 + MACVLAN_MODE_SOURCE = 16 +) + +const ( + MACVLAN_MACADDR_ADD = iota + MACVLAN_MACADDR_DEL + MACVLAN_MACADDR_FLUSH + MACVLAN_MACADDR_SET +) + 
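// Construction sketch (illustrative only): the IFLA_* groups above are type
// codes for nested link attributes. Building the "linkinfo" payload of an
// RTM_NEWLINK request for a vlan, using the RtAttr helpers from nl_linux.go,
// looks roughly like this (`req`, a *nl.NetlinkRequest, is assumed here):
//
//	linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil)
//	linkInfo.AddRtAttr(nl.IFLA_INFO_KIND, nl.NonZeroTerminated("vlan"))
//	data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil)
//	data.AddRtAttr(nl.IFLA_VLAN_ID, nl.Uint16Attr(100))
//	req.AddData(linkInfo)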
+const ( + IFLA_BOND_UNSPEC = iota + IFLA_BOND_MODE + IFLA_BOND_ACTIVE_SLAVE + IFLA_BOND_MIIMON + IFLA_BOND_UPDELAY + IFLA_BOND_DOWNDELAY + IFLA_BOND_USE_CARRIER + IFLA_BOND_ARP_INTERVAL + IFLA_BOND_ARP_IP_TARGET + IFLA_BOND_ARP_VALIDATE + IFLA_BOND_ARP_ALL_TARGETS + IFLA_BOND_PRIMARY + IFLA_BOND_PRIMARY_RESELECT + IFLA_BOND_FAIL_OVER_MAC + IFLA_BOND_XMIT_HASH_POLICY + IFLA_BOND_RESEND_IGMP + IFLA_BOND_NUM_PEER_NOTIF + IFLA_BOND_ALL_SLAVES_ACTIVE + IFLA_BOND_MIN_LINKS + IFLA_BOND_LP_INTERVAL + IFLA_BOND_PACKETS_PER_SLAVE + IFLA_BOND_AD_LACP_RATE + IFLA_BOND_AD_SELECT + IFLA_BOND_AD_INFO + IFLA_BOND_AD_ACTOR_SYS_PRIO + IFLA_BOND_AD_USER_PORT_KEY + IFLA_BOND_AD_ACTOR_SYSTEM + IFLA_BOND_TLB_DYNAMIC_LB +) + +const ( + IFLA_BOND_AD_INFO_UNSPEC = iota + IFLA_BOND_AD_INFO_AGGREGATOR + IFLA_BOND_AD_INFO_NUM_PORTS + IFLA_BOND_AD_INFO_ACTOR_KEY + IFLA_BOND_AD_INFO_PARTNER_KEY + IFLA_BOND_AD_INFO_PARTNER_MAC +) + +const ( + IFLA_BOND_SLAVE_UNSPEC = iota + IFLA_BOND_SLAVE_STATE + IFLA_BOND_SLAVE_MII_STATUS + IFLA_BOND_SLAVE_LINK_FAILURE_COUNT + IFLA_BOND_SLAVE_PERM_HWADDR + IFLA_BOND_SLAVE_QUEUE_ID + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID + IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE + IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE +) + +const ( + IFLA_GENEVE_UNSPEC = iota + IFLA_GENEVE_ID // vni + IFLA_GENEVE_REMOTE + IFLA_GENEVE_TTL + IFLA_GENEVE_TOS + IFLA_GENEVE_PORT // destination port + IFLA_GENEVE_COLLECT_METADATA + IFLA_GENEVE_REMOTE6 + IFLA_GENEVE_UDP_CSUM + IFLA_GENEVE_UDP_ZERO_CSUM6_TX + IFLA_GENEVE_UDP_ZERO_CSUM6_RX + IFLA_GENEVE_LABEL + IFLA_GENEVE_TTL_INHERIT + IFLA_GENEVE_DF + IFLA_GENEVE_INNER_PROTO_INHERIT + IFLA_GENEVE_MAX = IFLA_GENEVE_INNER_PROTO_INHERIT +) + +const ( + IFLA_GRE_UNSPEC = iota + IFLA_GRE_LINK + IFLA_GRE_IFLAGS + IFLA_GRE_OFLAGS + IFLA_GRE_IKEY + IFLA_GRE_OKEY + IFLA_GRE_LOCAL + IFLA_GRE_REMOTE + IFLA_GRE_TTL + IFLA_GRE_TOS + IFLA_GRE_PMTUDISC + IFLA_GRE_ENCAP_LIMIT + IFLA_GRE_FLOWINFO + IFLA_GRE_FLAGS + IFLA_GRE_ENCAP_TYPE + IFLA_GRE_ENCAP_FLAGS + IFLA_GRE_ENCAP_SPORT + IFLA_GRE_ENCAP_DPORT + IFLA_GRE_COLLECT_METADATA + IFLA_GRE_MAX = IFLA_GRE_COLLECT_METADATA +) + +const ( + GRE_CSUM = 0x8000 + GRE_ROUTING = 0x4000 + GRE_KEY = 0x2000 + GRE_SEQ = 0x1000 + GRE_STRICT = 0x0800 + GRE_REC = 0x0700 + GRE_FLAGS = 0x00F8 + GRE_VERSION = 0x0007 +) + +const ( + IFLA_VF_INFO_UNSPEC = iota + IFLA_VF_INFO + IFLA_VF_INFO_MAX = IFLA_VF_INFO +) + +const ( + IFLA_VF_UNSPEC = iota + IFLA_VF_MAC /* Hardware queue specific attributes */ + IFLA_VF_VLAN + IFLA_VF_TX_RATE /* Max TX Bandwidth Allocation */ + IFLA_VF_SPOOFCHK /* Spoof Checking on/off switch */ + IFLA_VF_LINK_STATE /* link state enable/disable/auto switch */ + IFLA_VF_RATE /* Min and Max TX Bandwidth Allocation */ + IFLA_VF_RSS_QUERY_EN /* RSS Redirection Table and Hash Key query + * on/off switch + */ + IFLA_VF_STATS /* network device statistics */ + IFLA_VF_TRUST /* Trust state of VF */ + IFLA_VF_IB_NODE_GUID /* VF Infiniband node GUID */ + IFLA_VF_IB_PORT_GUID /* VF Infiniband port GUID */ + IFLA_VF_VLAN_LIST /* nested list of vlans, option for QinQ */ + + IFLA_VF_MAX = IFLA_VF_IB_PORT_GUID +) + +const ( + IFLA_VF_VLAN_INFO_UNSPEC = iota + IFLA_VF_VLAN_INFO /* VLAN ID, QoS and VLAN protocol */ + __IFLA_VF_VLAN_INFO_MAX +) + +const ( + IFLA_VF_LINK_STATE_AUTO = iota /* link state of the uplink */ + IFLA_VF_LINK_STATE_ENABLE /* link always up */ + IFLA_VF_LINK_STATE_DISABLE /* link always down */ + IFLA_VF_LINK_STATE_MAX = IFLA_VF_LINK_STATE_DISABLE +) + +const ( + IFLA_VF_STATS_RX_PACKETS = iota + IFLA_VF_STATS_TX_PACKETS + 
IFLA_VF_STATS_RX_BYTES + IFLA_VF_STATS_TX_BYTES + IFLA_VF_STATS_BROADCAST + IFLA_VF_STATS_MULTICAST + IFLA_VF_STATS_RX_DROPPED + IFLA_VF_STATS_TX_DROPPED + IFLA_VF_STATS_MAX = IFLA_VF_STATS_TX_DROPPED +) + +const ( + SizeofVfMac = 0x24 + SizeofVfVlan = 0x0c + SizeofVfVlanInfo = 0x10 + SizeofVfTxRate = 0x08 + SizeofVfRate = 0x0c + SizeofVfSpoofchk = 0x08 + SizeofVfLinkState = 0x08 + SizeofVfRssQueryEn = 0x08 + SizeofVfTrust = 0x08 + SizeofVfGUID = 0x10 +) + +// struct ifla_vf_mac { +// __u32 vf; +// __u8 mac[32]; /* MAX_ADDR_LEN */ +// }; + +type VfMac struct { + Vf uint32 + Mac [32]byte +} + +func (msg *VfMac) Len() int { + return SizeofVfMac +} + +func DeserializeVfMac(b []byte) *VfMac { + return (*VfMac)(unsafe.Pointer(&b[0:SizeofVfMac][0])) +} + +func (msg *VfMac) Serialize() []byte { + return (*(*[SizeofVfMac]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_vlan { +// __u32 vf; +// __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ +// __u32 qos; +// }; + +type VfVlan struct { + Vf uint32 + Vlan uint32 + Qos uint32 +} + +func (msg *VfVlan) Len() int { + return SizeofVfVlan +} + +func DeserializeVfVlan(b []byte) *VfVlan { + return (*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0])) +} + +func (msg *VfVlan) Serialize() []byte { + return (*(*[SizeofVfVlan]byte)(unsafe.Pointer(msg)))[:] +} + +func DeserializeVfVlanList(b []byte) ([]*VfVlanInfo, error) { + var vfVlanInfoList []*VfVlanInfo + attrs, err := ParseRouteAttr(b) + if err != nil { + return nil, err + } + + for _, element := range attrs { + if element.Attr.Type == IFLA_VF_VLAN_INFO { + vfVlanInfoList = append(vfVlanInfoList, DeserializeVfVlanInfo(element.Value)) + } + } + + if len(vfVlanInfoList) == 0 { + return nil, fmt.Errorf("VF vlan list is defined but no vf vlan info elements were found") + } + + return vfVlanInfoList, nil +} + +// struct ifla_vf_vlan_info { +// __u32 vf; +// __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ +// __u32 qos; +// __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */ +// }; + +type VfVlanInfo struct { + VfVlan + VlanProto uint16 +} + +func DeserializeVfVlanInfo(b []byte) *VfVlanInfo { + return &VfVlanInfo{ + *(*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0])), + binary.BigEndian.Uint16(b[SizeofVfVlan:SizeofVfVlanInfo]), + } +} + +func (msg *VfVlanInfo) Serialize() []byte { + return (*(*[SizeofVfVlanInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_tx_rate { +// __u32 vf; +// __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ +// }; + +type VfTxRate struct { + Vf uint32 + Rate uint32 +} + +func (msg *VfTxRate) Len() int { + return SizeofVfTxRate +} + +func DeserializeVfTxRate(b []byte) *VfTxRate { + return (*VfTxRate)(unsafe.Pointer(&b[0:SizeofVfTxRate][0])) +} + +func (msg *VfTxRate) Serialize() []byte { + return (*(*[SizeofVfTxRate]byte)(unsafe.Pointer(msg)))[:] +} + +//struct ifla_vf_stats { +// __u64 rx_packets; +// __u64 tx_packets; +// __u64 rx_bytes; +// __u64 tx_bytes; +// __u64 broadcast; +// __u64 multicast; +//}; + +type VfStats struct { + RxPackets uint64 + TxPackets uint64 + RxBytes uint64 + TxBytes uint64 + Multicast uint64 + Broadcast uint64 + RxDropped uint64 + TxDropped uint64 +} + +func DeserializeVfStats(b []byte) VfStats { + var vfstat VfStats + stats, err := ParseRouteAttr(b) + if err != nil { + return vfstat + } + var valueVar uint64 + for _, stat := range stats { + if err := binary.Read(bytes.NewBuffer(stat.Value), NativeEndian(), &valueVar); err != nil { + break + } + switch stat.Attr.Type { + case IFLA_VF_STATS_RX_PACKETS: + 
vfstat.RxPackets = valueVar + case IFLA_VF_STATS_TX_PACKETS: + vfstat.TxPackets = valueVar + case IFLA_VF_STATS_RX_BYTES: + vfstat.RxBytes = valueVar + case IFLA_VF_STATS_TX_BYTES: + vfstat.TxBytes = valueVar + case IFLA_VF_STATS_MULTICAST: + vfstat.Multicast = valueVar + case IFLA_VF_STATS_BROADCAST: + vfstat.Broadcast = valueVar + case IFLA_VF_STATS_RX_DROPPED: + vfstat.RxDropped = valueVar + case IFLA_VF_STATS_TX_DROPPED: + vfstat.TxDropped = valueVar + } + } + return vfstat +} + +// struct ifla_vf_rate { +// __u32 vf; +// __u32 min_tx_rate; /* Min Bandwidth in Mbps */ +// __u32 max_tx_rate; /* Max Bandwidth in Mbps */ +// }; + +type VfRate struct { + Vf uint32 + MinTxRate uint32 + MaxTxRate uint32 +} + +func (msg *VfRate) Len() int { + return SizeofVfRate +} + +func DeserializeVfRate(b []byte) *VfRate { + return (*VfRate)(unsafe.Pointer(&b[0:SizeofVfRate][0])) +} + +func (msg *VfRate) Serialize() []byte { + return (*(*[SizeofVfRate]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_spoofchk { +// __u32 vf; +// __u32 setting; +// }; + +type VfSpoofchk struct { + Vf uint32 + Setting uint32 +} + +func (msg *VfSpoofchk) Len() int { + return SizeofVfSpoofchk +} + +func DeserializeVfSpoofchk(b []byte) *VfSpoofchk { + return (*VfSpoofchk)(unsafe.Pointer(&b[0:SizeofVfSpoofchk][0])) +} + +func (msg *VfSpoofchk) Serialize() []byte { + return (*(*[SizeofVfSpoofchk]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_link_state { +// __u32 vf; +// __u32 link_state; +// }; + +type VfLinkState struct { + Vf uint32 + LinkState uint32 +} + +func (msg *VfLinkState) Len() int { + return SizeofVfLinkState +} + +func DeserializeVfLinkState(b []byte) *VfLinkState { + return (*VfLinkState)(unsafe.Pointer(&b[0:SizeofVfLinkState][0])) +} + +func (msg *VfLinkState) Serialize() []byte { + return (*(*[SizeofVfLinkState]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_rss_query_en { +// __u32 vf; +// __u32 setting; +// }; + +type VfRssQueryEn struct { + Vf uint32 + Setting uint32 +} + +func (msg *VfRssQueryEn) Len() int { + return SizeofVfRssQueryEn +} + +func DeserializeVfRssQueryEn(b []byte) *VfRssQueryEn { + return (*VfRssQueryEn)(unsafe.Pointer(&b[0:SizeofVfRssQueryEn][0])) +} + +func (msg *VfRssQueryEn) Serialize() []byte { + return (*(*[SizeofVfRssQueryEn]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_trust { +// __u32 vf; +// __u32 setting; +// }; + +type VfTrust struct { + Vf uint32 + Setting uint32 +} + +func (msg *VfTrust) Len() int { + return SizeofVfTrust +} + +func DeserializeVfTrust(b []byte) *VfTrust { + return (*VfTrust)(unsafe.Pointer(&b[0:SizeofVfTrust][0])) +} + +func (msg *VfTrust) Serialize() []byte { + return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_guid { +// __u32 vf; +// __u32 rsvd; +// __u64 guid; +// }; + +type VfGUID struct { + Vf uint32 + Rsvd uint32 + GUID uint64 +} + +func (msg *VfGUID) Len() int { + return SizeofVfGUID +} + +func DeserializeVfGUID(b []byte) *VfGUID { + return (*VfGUID)(unsafe.Pointer(&b[0:SizeofVfGUID][0])) +} + +func (msg *VfGUID) Serialize() []byte { + return (*(*[SizeofVfGUID]byte)(unsafe.Pointer(msg)))[:] +} + +const ( + XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota + XDP_FLAGS_SKB_MODE + XDP_FLAGS_DRV_MODE + XDP_FLAGS_MASK = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE +) + +const ( + IFLA_XDP_UNSPEC = iota + IFLA_XDP_FD /* fd of xdp program to attach, or -1 to remove */ + IFLA_XDP_ATTACHED /* read-only bool indicating if prog is attached */ + IFLA_XDP_FLAGS /* xdp prog related flags */ + 
IFLA_XDP_PROG_ID /* xdp prog id */ + IFLA_XDP_MAX = IFLA_XDP_PROG_ID +) + +// XDP program attach mode (used as dump value for IFLA_XDP_ATTACHED) +const ( + XDP_ATTACHED_NONE = iota + XDP_ATTACHED_DRV + XDP_ATTACHED_SKB + XDP_ATTACHED_HW +) + +const ( + IFLA_IPTUN_UNSPEC = iota + IFLA_IPTUN_LINK + IFLA_IPTUN_LOCAL + IFLA_IPTUN_REMOTE + IFLA_IPTUN_TTL + IFLA_IPTUN_TOS + IFLA_IPTUN_ENCAP_LIMIT + IFLA_IPTUN_FLOWINFO + IFLA_IPTUN_FLAGS + IFLA_IPTUN_PROTO + IFLA_IPTUN_PMTUDISC + IFLA_IPTUN_6RD_PREFIX + IFLA_IPTUN_6RD_RELAY_PREFIX + IFLA_IPTUN_6RD_PREFIXLEN + IFLA_IPTUN_6RD_RELAY_PREFIXLEN + IFLA_IPTUN_ENCAP_TYPE + IFLA_IPTUN_ENCAP_FLAGS + IFLA_IPTUN_ENCAP_SPORT + IFLA_IPTUN_ENCAP_DPORT + IFLA_IPTUN_COLLECT_METADATA + IFLA_IPTUN_MAX = IFLA_IPTUN_COLLECT_METADATA +) + +const ( + IFLA_VTI_UNSPEC = iota + IFLA_VTI_LINK + IFLA_VTI_IKEY + IFLA_VTI_OKEY + IFLA_VTI_LOCAL + IFLA_VTI_REMOTE + IFLA_VTI_MAX = IFLA_VTI_REMOTE +) + +const ( + IFLA_VRF_UNSPEC = iota + IFLA_VRF_TABLE +) + +const ( + IFLA_BR_UNSPEC = iota + IFLA_BR_FORWARD_DELAY + IFLA_BR_HELLO_TIME + IFLA_BR_MAX_AGE + IFLA_BR_AGEING_TIME + IFLA_BR_STP_STATE + IFLA_BR_PRIORITY + IFLA_BR_VLAN_FILTERING + IFLA_BR_VLAN_PROTOCOL + IFLA_BR_GROUP_FWD_MASK + IFLA_BR_ROOT_ID + IFLA_BR_BRIDGE_ID + IFLA_BR_ROOT_PORT + IFLA_BR_ROOT_PATH_COST + IFLA_BR_TOPOLOGY_CHANGE + IFLA_BR_TOPOLOGY_CHANGE_DETECTED + IFLA_BR_HELLO_TIMER + IFLA_BR_TCN_TIMER + IFLA_BR_TOPOLOGY_CHANGE_TIMER + IFLA_BR_GC_TIMER + IFLA_BR_GROUP_ADDR + IFLA_BR_FDB_FLUSH + IFLA_BR_MCAST_ROUTER + IFLA_BR_MCAST_SNOOPING + IFLA_BR_MCAST_QUERY_USE_IFADDR + IFLA_BR_MCAST_QUERIER + IFLA_BR_MCAST_HASH_ELASTICITY + IFLA_BR_MCAST_HASH_MAX + IFLA_BR_MCAST_LAST_MEMBER_CNT + IFLA_BR_MCAST_STARTUP_QUERY_CNT + IFLA_BR_MCAST_LAST_MEMBER_INTVL + IFLA_BR_MCAST_MEMBERSHIP_INTVL + IFLA_BR_MCAST_QUERIER_INTVL + IFLA_BR_MCAST_QUERY_INTVL + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL + IFLA_BR_MCAST_STARTUP_QUERY_INTVL + IFLA_BR_NF_CALL_IPTABLES + IFLA_BR_NF_CALL_IP6TABLES + IFLA_BR_NF_CALL_ARPTABLES + IFLA_BR_VLAN_DEFAULT_PVID + IFLA_BR_PAD + IFLA_BR_VLAN_STATS_ENABLED + IFLA_BR_MCAST_STATS_ENABLED + IFLA_BR_MCAST_IGMP_VERSION + IFLA_BR_MCAST_MLD_VERSION + IFLA_BR_MAX = IFLA_BR_MCAST_MLD_VERSION +) + +const ( + IFLA_GTP_UNSPEC = iota + IFLA_GTP_FD0 + IFLA_GTP_FD1 + IFLA_GTP_PDP_HASHSIZE + IFLA_GTP_ROLE +) + +const ( + GTP_ROLE_GGSN = iota + GTP_ROLE_SGSN +) + +const ( + IFLA_XFRM_UNSPEC = iota + IFLA_XFRM_LINK + IFLA_XFRM_IF_ID + + IFLA_XFRM_MAX = iota - 1 +) + +const ( + IFLA_TUN_UNSPEC = iota + IFLA_TUN_OWNER + IFLA_TUN_GROUP + IFLA_TUN_TYPE + IFLA_TUN_PI + IFLA_TUN_VNET_HDR + IFLA_TUN_PERSIST + IFLA_TUN_MULTI_QUEUE + IFLA_TUN_NUM_QUEUES + IFLA_TUN_NUM_DISABLED_QUEUES + IFLA_TUN_MAX = IFLA_TUN_NUM_DISABLED_QUEUES +) + +const ( + IFLA_IPOIB_UNSPEC = iota + IFLA_IPOIB_PKEY + IFLA_IPOIB_MODE + IFLA_IPOIB_UMCAST + IFLA_IPOIB_MAX = IFLA_IPOIB_UMCAST +) + +const ( + IFLA_CAN_UNSPEC = iota + IFLA_CAN_BITTIMING + IFLA_CAN_BITTIMING_CONST + IFLA_CAN_CLOCK + IFLA_CAN_STATE + IFLA_CAN_CTRLMODE + IFLA_CAN_RESTART_MS + IFLA_CAN_RESTART + IFLA_CAN_BERR_COUNTER + IFLA_CAN_DATA_BITTIMING + IFLA_CAN_DATA_BITTIMING_CONST + IFLA_CAN_TERMINATION + IFLA_CAN_TERMINATION_CONST + IFLA_CAN_BITRATE_CONST + IFLA_CAN_DATA_BITRATE_CONST + IFLA_CAN_BITRATE_MAX + IFLA_CAN_MAX = IFLA_CAN_BITRATE_MAX +) + +const ( + IFLA_BAREUDP_UNSPEC = iota + IFLA_BAREUDP_PORT + IFLA_BAREUDP_ETHERTYPE + IFLA_BAREUDP_SRCPORT_MIN + IFLA_BAREUDP_MULTIPROTO_MODE + IFLA_BAREUDP_MAX = IFLA_BAREUDP_MULTIPROTO_MODE +) diff --git 
a/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go b/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go new file mode 100644 index 000000000..bafd593c4 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/lwt_linux.go @@ -0,0 +1,29 @@ +package nl + +const ( + LWT_BPF_PROG_UNSPEC = iota + LWT_BPF_PROG_FD + LWT_BPF_PROG_NAME + __LWT_BPF_PROG_MAX +) + +const ( + LWT_BPF_PROG_MAX = __LWT_BPF_PROG_MAX - 1 +) + +const ( + LWT_BPF_UNSPEC = iota + LWT_BPF_IN + LWT_BPF_OUT + LWT_BPF_XMIT + LWT_BPF_XMIT_HEADROOM + __LWT_BPF_MAX +) + +const ( + LWT_BPF_MAX = __LWT_BPF_MAX - 1 +) + +const ( + LWT_BPF_MAX_HEADROOM = 256 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go b/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go new file mode 100644 index 000000000..3915b7eec --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go @@ -0,0 +1,36 @@ +package nl + +import "encoding/binary" + +const ( + MPLS_LS_LABEL_SHIFT = 12 + MPLS_LS_S_SHIFT = 8 +) + +func EncodeMPLSStack(labels ...int) []byte { + b := make([]byte, 4*len(labels)) + for idx, label := range labels { + l := label << MPLS_LS_LABEL_SHIFT + if idx == len(labels)-1 { + l |= 1 << MPLS_LS_S_SHIFT + } + binary.BigEndian.PutUint32(b[idx*4:], uint32(l)) + } + return b +} + +func DecodeMPLSStack(buf []byte) []int { + if len(buf)%4 != 0 { + return nil + } + stack := make([]int, 0, len(buf)/4) + for len(buf) > 0 { + l := binary.BigEndian.Uint32(buf[:4]) + buf = buf[4:] + stack = append(stack, int(l)>>MPLS_LS_LABEL_SHIFT) + if (l>>MPLS_LS_S_SHIFT)&1 > 0 { + break + } + } + return stack +} diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go new file mode 100644 index 000000000..6cecc4517 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -0,0 +1,1008 @@ +// Package nl has low level primitives for making Netlink calls. +package nl + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "os" + "runtime" + "sync" + "sync/atomic" + "syscall" + "unsafe" + + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const ( + // Family type definitions + FAMILY_ALL = unix.AF_UNSPEC + FAMILY_V4 = unix.AF_INET + FAMILY_V6 = unix.AF_INET6 + FAMILY_MPLS = unix.AF_MPLS + // Arbitrary set value (greater than default 4k) to allow receiving + // from kernel more verbose messages e.g. for statistics, + // tc rules or filters, or other more memory requiring data. + RECEIVE_BUFFER_SIZE = 65536 + // Kernel netlink pid + PidKernel uint32 = 0 + SizeofCnMsgOp = 0x18 +) + +// SupportedNlFamilies contains the list of netlink families this netlink package supports +var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETLINK_NETFILTER} + +var nextSeqNr uint32 + +// Default netlink socket timeout, 60s +var SocketTimeoutTv = unix.Timeval{Sec: 60, Usec: 0} + +// ErrorMessageReporting is the default error message reporting configuration for the new netlink sockets +var EnableErrorMessageReporting bool = false + +// GetIPFamily returns the family type of a net.IP. 
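// Behavior sketch (illustrative only): both 4-byte addresses and IPv4-mapped
// 16-byte addresses report FAMILY_V4; note that a nil or truncated IP also
// falls into the first length-based branch. For example:
//
//	nl.GetIPFamily(net.ParseIP("10.0.0.1"))    // FAMILY_V4 (To4() != nil)
//	nl.GetIPFamily(net.ParseIP("2001:db8::1")) // FAMILY_V6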
+func GetIPFamily(ip net.IP) int { + if len(ip) <= net.IPv4len { + return FAMILY_V4 + } + if ip.To4() != nil { + return FAMILY_V4 + } + return FAMILY_V6 +} + +var nativeEndian binary.ByteOrder + +// NativeEndian gets native endianness for the system +func NativeEndian() binary.ByteOrder { + if nativeEndian == nil { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + nativeEndian = binary.BigEndian + } else { + nativeEndian = binary.LittleEndian + } + } + return nativeEndian +} + +// Byte swap a 16 bit value if we aren't big endian +func Swap16(i uint16) uint16 { + if NativeEndian() == binary.BigEndian { + return i + } + return (i&0xff00)>>8 | (i&0xff)<<8 +} + +// Byte swap a 32 bit value if aren't big endian +func Swap32(i uint32) uint32 { + if NativeEndian() == binary.BigEndian { + return i + } + return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24 +} + +const ( + NLMSGERR_ATTR_UNUSED = 0 + NLMSGERR_ATTR_MSG = 1 + NLMSGERR_ATTR_OFFS = 2 + NLMSGERR_ATTR_COOKIE = 3 + NLMSGERR_ATTR_POLICY = 4 +) + +type NetlinkRequestData interface { + Len() int + Serialize() []byte +} + +const ( + PROC_CN_MCAST_LISTEN = 1 + PROC_CN_MCAST_IGNORE +) + +type CbID struct { + Idx uint32 + Val uint32 +} + +type CnMsg struct { + ID CbID + Seq uint32 + Ack uint32 + Length uint16 + Flags uint16 +} + +type CnMsgOp struct { + CnMsg + // here we differ from the C header + Op uint32 +} + +func NewCnMsg(idx, val, op uint32) *CnMsgOp { + var cm CnMsgOp + + cm.ID.Idx = idx + cm.ID.Val = val + + cm.Ack = 0 + cm.Seq = 1 + cm.Length = uint16(binary.Size(op)) + cm.Op = op + + return &cm +} + +func (msg *CnMsgOp) Serialize() []byte { + return (*(*[SizeofCnMsgOp]byte)(unsafe.Pointer(msg)))[:] +} + +func DeserializeCnMsgOp(b []byte) *CnMsgOp { + return (*CnMsgOp)(unsafe.Pointer(&b[0:SizeofCnMsgOp][0])) +} + +func (msg *CnMsgOp) Len() int { + return SizeofCnMsgOp +} + +// IfInfomsg is related to links, but it is used for list requests as well +type IfInfomsg struct { + unix.IfInfomsg +} + +// Create an IfInfomsg with family specified +func NewIfInfomsg(family int) *IfInfomsg { + return &IfInfomsg{ + IfInfomsg: unix.IfInfomsg{ + Family: uint8(family), + }, + } +} + +func DeserializeIfInfomsg(b []byte) *IfInfomsg { + return (*IfInfomsg)(unsafe.Pointer(&b[0:unix.SizeofIfInfomsg][0])) +} + +func (msg *IfInfomsg) Serialize() []byte { + return (*(*[unix.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *IfInfomsg) Len() int { + return unix.SizeofIfInfomsg +} + +func (msg *IfInfomsg) EncapType() string { + switch msg.Type { + case 0: + return "generic" + case unix.ARPHRD_ETHER: + return "ether" + case unix.ARPHRD_EETHER: + return "eether" + case unix.ARPHRD_AX25: + return "ax25" + case unix.ARPHRD_PRONET: + return "pronet" + case unix.ARPHRD_CHAOS: + return "chaos" + case unix.ARPHRD_IEEE802: + return "ieee802" + case unix.ARPHRD_ARCNET: + return "arcnet" + case unix.ARPHRD_APPLETLK: + return "atalk" + case unix.ARPHRD_DLCI: + return "dlci" + case unix.ARPHRD_ATM: + return "atm" + case unix.ARPHRD_METRICOM: + return "metricom" + case unix.ARPHRD_IEEE1394: + return "ieee1394" + case unix.ARPHRD_INFINIBAND: + return "infiniband" + case unix.ARPHRD_SLIP: + return "slip" + case unix.ARPHRD_CSLIP: + return "cslip" + case unix.ARPHRD_SLIP6: + return "slip6" + case unix.ARPHRD_CSLIP6: + return "cslip6" + case unix.ARPHRD_RSRVD: + return "rsrvd" + case unix.ARPHRD_ADAPT: + return "adapt" + case unix.ARPHRD_ROSE: + return "rose" + case unix.ARPHRD_X25: + return "x25" + case 
unix.ARPHRD_HWX25: + return "hwx25" + case unix.ARPHRD_PPP: + return "ppp" + case unix.ARPHRD_HDLC: + return "hdlc" + case unix.ARPHRD_LAPB: + return "lapb" + case unix.ARPHRD_DDCMP: + return "ddcmp" + case unix.ARPHRD_RAWHDLC: + return "rawhdlc" + case unix.ARPHRD_TUNNEL: + return "ipip" + case unix.ARPHRD_TUNNEL6: + return "tunnel6" + case unix.ARPHRD_FRAD: + return "frad" + case unix.ARPHRD_SKIP: + return "skip" + case unix.ARPHRD_LOOPBACK: + return "loopback" + case unix.ARPHRD_LOCALTLK: + return "ltalk" + case unix.ARPHRD_FDDI: + return "fddi" + case unix.ARPHRD_BIF: + return "bif" + case unix.ARPHRD_SIT: + return "sit" + case unix.ARPHRD_IPDDP: + return "ip/ddp" + case unix.ARPHRD_IPGRE: + return "gre" + case unix.ARPHRD_PIMREG: + return "pimreg" + case unix.ARPHRD_HIPPI: + return "hippi" + case unix.ARPHRD_ASH: + return "ash" + case unix.ARPHRD_ECONET: + return "econet" + case unix.ARPHRD_IRDA: + return "irda" + case unix.ARPHRD_FCPP: + return "fcpp" + case unix.ARPHRD_FCAL: + return "fcal" + case unix.ARPHRD_FCPL: + return "fcpl" + case unix.ARPHRD_FCFABRIC: + return "fcfb0" + case unix.ARPHRD_FCFABRIC + 1: + return "fcfb1" + case unix.ARPHRD_FCFABRIC + 2: + return "fcfb2" + case unix.ARPHRD_FCFABRIC + 3: + return "fcfb3" + case unix.ARPHRD_FCFABRIC + 4: + return "fcfb4" + case unix.ARPHRD_FCFABRIC + 5: + return "fcfb5" + case unix.ARPHRD_FCFABRIC + 6: + return "fcfb6" + case unix.ARPHRD_FCFABRIC + 7: + return "fcfb7" + case unix.ARPHRD_FCFABRIC + 8: + return "fcfb8" + case unix.ARPHRD_FCFABRIC + 9: + return "fcfb9" + case unix.ARPHRD_FCFABRIC + 10: + return "fcfb10" + case unix.ARPHRD_FCFABRIC + 11: + return "fcfb11" + case unix.ARPHRD_FCFABRIC + 12: + return "fcfb12" + case unix.ARPHRD_IEEE802_TR: + return "tr" + case unix.ARPHRD_IEEE80211: + return "ieee802.11" + case unix.ARPHRD_IEEE80211_PRISM: + return "ieee802.11/prism" + case unix.ARPHRD_IEEE80211_RADIOTAP: + return "ieee802.11/radiotap" + case unix.ARPHRD_IEEE802154: + return "ieee802.15.4" + + case 65534: + return "none" + case 65535: + return "void" + } + return fmt.Sprintf("unknown%d", msg.Type) +} + +// Round the length of a netlink message up to align it properly. +// Taken from syscall/netlink_linux.go by The Go Authors under BSD-style license. 
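// Worked example (illustrative only): with NLMSG_ALIGNTO == 4 this is the
// usual round-up-to-a-power-of-two trick, e.g.
//
//	nlmAlignOf(17) // (17+3) &^ 3 == 20
//	nlmAlignOf(20) // already aligned, stays 20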
+func nlmAlignOf(msglen int) int { + return (msglen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1) +} + +func rtaAlignOf(attrlen int) int { + return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) +} + +func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := NewIfInfomsg(family) + parent.children = append(parent.children, msg) + return msg +} + +type Uint32Bitfield struct { + Value uint32 + Selector uint32 +} + +func (a *Uint32Bitfield) Serialize() []byte { + return (*(*[SizeofUint32Bitfield]byte)(unsafe.Pointer(a)))[:] +} + +func DeserializeUint32Bitfield(data []byte) *Uint32Bitfield { + return (*Uint32Bitfield)(unsafe.Pointer(&data[0:SizeofUint32Bitfield][0])) +} + +type Uint32Attribute struct { + Type uint16 + Value uint32 +} + +func (a *Uint32Attribute) Serialize() []byte { + native := NativeEndian() + buf := make([]byte, rtaAlignOf(8)) + native.PutUint16(buf[0:2], 8) + native.PutUint16(buf[2:4], a.Type) + + if a.Type&NLA_F_NET_BYTEORDER != 0 { + binary.BigEndian.PutUint32(buf[4:], a.Value) + } else { + native.PutUint32(buf[4:], a.Value) + } + return buf +} + +func (a *Uint32Attribute) Len() int { + return 8 +} + +// Extend RtAttr to handle data and children +type RtAttr struct { + unix.RtAttr + Data []byte + children []NetlinkRequestData +} + +// Create a new Extended RtAttr object +func NewRtAttr(attrType int, data []byte) *RtAttr { + return &RtAttr{ + RtAttr: unix.RtAttr{ + Type: uint16(attrType), + }, + children: []NetlinkRequestData{}, + Data: data, + } +} + +// NewRtAttrChild adds an RtAttr as a child to the parent and returns the new attribute +// +// Deprecated: Use AddRtAttr() on the parent object +func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { + return parent.AddRtAttr(attrType, data) +} + +// AddRtAttr adds an RtAttr as a child and returns the new attribute +func (a *RtAttr) AddRtAttr(attrType int, data []byte) *RtAttr { + attr := NewRtAttr(attrType, data) + a.children = append(a.children, attr) + return attr +} + +// AddChild adds an existing NetlinkRequestData as a child. +func (a *RtAttr) AddChild(attr NetlinkRequestData) { + a.children = append(a.children, attr) +} + +func (a *RtAttr) Len() int { + if len(a.children) == 0 { + return (unix.SizeofRtAttr + len(a.Data)) + } + + l := 0 + for _, child := range a.children { + l += rtaAlignOf(child.Len()) + } + l += unix.SizeofRtAttr + return rtaAlignOf(l + len(a.Data)) +} + +// Serialize the RtAttr into a byte array +// This can't just unsafe.cast because it must iterate through children. 
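// Layout sketch (illustrative only): the output is a standard rtattr TLV:
// 2-byte length, 2-byte type, then payload and children each padded to a
// 4-byte boundary. A 6-byte payload therefore serializes to 12 bytes while
// the length field still reads 10:
//
//	attr := nl.NewRtAttr(1, []byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01})
//	len(attr.Serialize()) // 12 (4 header + 6 data + 2 pad)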
+func (a *RtAttr) Serialize() []byte { + native := NativeEndian() + + length := a.Len() + buf := make([]byte, rtaAlignOf(length)) + + next := 4 + if a.Data != nil { + copy(buf[next:], a.Data) + next += rtaAlignOf(len(a.Data)) + } + if len(a.children) > 0 { + for _, child := range a.children { + childBuf := child.Serialize() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + + if l := uint16(length); l != 0 { + native.PutUint16(buf[0:2], l) + } + native.PutUint16(buf[2:4], a.Type) + return buf +} + +type NetlinkRequest struct { + unix.NlMsghdr + Data []NetlinkRequestData + RawData []byte + Sockets map[int]*SocketHandle +} + +// Serialize the Netlink Request into a byte array +func (req *NetlinkRequest) Serialize() []byte { + length := unix.SizeofNlMsghdr + dataBytes := make([][]byte, len(req.Data)) + for i, data := range req.Data { + dataBytes[i] = data.Serialize() + length = length + len(dataBytes[i]) + } + length += len(req.RawData) + + req.Len = uint32(length) + b := make([]byte, length) + hdr := (*(*[unix.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] + next := unix.SizeofNlMsghdr + copy(b[0:next], hdr) + for _, data := range dataBytes { + for _, dataByte := range data { + b[next] = dataByte + next = next + 1 + } + } + // Add the raw data if any + if len(req.RawData) > 0 { + copy(b[next:length], req.RawData) + } + return b +} + +func (req *NetlinkRequest) AddData(data NetlinkRequestData) { + req.Data = append(req.Data, data) +} + +// AddRawData adds raw bytes to the end of the NetlinkRequest object during serialization +func (req *NetlinkRequest) AddRawData(data []byte) { + req.RawData = append(req.RawData, data...) +} + +// Execute the request against the given sockType. +// Returns a list of netlink messages in serialized format, optionally filtered +// by resType. +func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) { + var res [][]byte + err := req.ExecuteIter(sockType, resType, func(msg []byte) bool { + res = append(res, msg) + return true + }) + if err != nil { + return nil, err + } + return res, nil +} + +// ExecuteIter executes the request against the given sockType. +// Calls the provided callback func once for each netlink message. +// If the callback returns false, it is not called again, but +// the remaining messages are consumed/discarded. +// +// Thread safety: ExecuteIter holds a lock on the socket until +// it finishes iteration so the callback must not call back into +// the netlink API. 
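// Usage sketch (illustrative only): dumping links and stopping at the first
// match; `wantedIndex` is a placeholder, not part of this package. Returning
// false makes ExecuteIter drain the remaining multipart messages without
// invoking the callback again.
//
//	req := nl.NewNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP)
//	req.AddData(nl.NewIfInfomsg(unix.AF_UNSPEC))
//	err := req.ExecuteIter(unix.NETLINK_ROUTE, unix.RTM_NEWLINK, func(msg []byte) bool {
//		return nl.DeserializeIfInfomsg(msg).Index != wantedIndex // false => stop
//	})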
+func (req *NetlinkRequest) ExecuteIter(sockType int, resType uint16, f func(msg []byte) bool) error { + var ( + s *NetlinkSocket + err error + ) + + if req.Sockets != nil { + if sh, ok := req.Sockets[sockType]; ok { + s = sh.Socket + req.Seq = atomic.AddUint32(&sh.Seq, 1) + } + } + sharedSocket := s != nil + + if s == nil { + s, err = getNetlinkSocket(sockType) + if err != nil { + return err + } + + if err := s.SetSendTimeout(&SocketTimeoutTv); err != nil { + return err + } + if err := s.SetReceiveTimeout(&SocketTimeoutTv); err != nil { + return err + } + if EnableErrorMessageReporting { + if err := s.SetExtAck(true); err != nil { + return err + } + } + + defer s.Close() + } else { + s.Lock() + defer s.Unlock() + } + + if err := s.Send(req); err != nil { + return err + } + + pid, err := s.GetPid() + if err != nil { + return err + } + +done: + for { + msgs, from, err := s.Receive() + if err != nil { + return err + } + if from.Pid != PidKernel { + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, PidKernel) + } + for _, m := range msgs { + if m.Header.Seq != req.Seq { + if sharedSocket { + continue + } + return fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq) + } + if m.Header.Pid != pid { + continue + } + + if m.Header.Flags&unix.NLM_F_DUMP_INTR != 0 { + return syscall.Errno(unix.EINTR) + } + + if m.Header.Type == unix.NLMSG_DONE || m.Header.Type == unix.NLMSG_ERROR { + // NLMSG_DONE might have no payload, if so assume no error. + if m.Header.Type == unix.NLMSG_DONE && len(m.Data) == 0 { + break done + } + + native := NativeEndian() + errno := int32(native.Uint32(m.Data[0:4])) + if errno == 0 { + break done + } + var err error + err = syscall.Errno(-errno) + + unreadData := m.Data[4:] + if m.Header.Flags&unix.NLM_F_ACK_TLVS != 0 && len(unreadData) > syscall.SizeofNlMsghdr { + // Skip the echoed request message. + echoReqH := (*syscall.NlMsghdr)(unsafe.Pointer(&unreadData[0])) + unreadData = unreadData[nlmAlignOf(int(echoReqH.Len)):] + + // Annotate `err` using nlmsgerr attributes. + for len(unreadData) >= syscall.SizeofRtAttr { + attr := (*syscall.RtAttr)(unsafe.Pointer(&unreadData[0])) + attrData := unreadData[syscall.SizeofRtAttr:attr.Len] + + switch attr.Type { + case NLMSGERR_ATTR_MSG: + err = fmt.Errorf("%w: %s", err, unix.ByteSliceToString(attrData)) + default: + // TODO: handle other NLMSGERR_ATTR types + } + + unreadData = unreadData[rtaAlignOf(int(attr.Len)):] + } + } + + return err + } + if resType != 0 && m.Header.Type != resType { + continue + } + if cont := f(m.Data); !cont { + // Drain the rest of the messages from the kernel but don't + // pass them to the iterator func. 
+ f = dummyMsgIterFunc + } + if m.Header.Flags&unix.NLM_F_MULTI == 0 { + break done + } + } + } + return nil +} + +func dummyMsgIterFunc(msg []byte) bool { + return true +} + +// Create a new netlink request from proto and flags +// Note the Len value will be inaccurate once data is added until +// the message is serialized +func NewNetlinkRequest(proto, flags int) *NetlinkRequest { + return &NetlinkRequest{ + NlMsghdr: unix.NlMsghdr{ + Len: uint32(unix.SizeofNlMsghdr), + Type: uint16(proto), + Flags: unix.NLM_F_REQUEST | uint16(flags), + Seq: atomic.AddUint32(&nextSeqNr, 1), + }, + } +} + +type NetlinkSocket struct { + fd int32 + file *os.File + lsa unix.SockaddrNetlink + sync.Mutex +} + +func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) + if err != nil { + return nil, err + } + err = unix.SetNonblock(fd, true) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: int32(fd), + file: os.NewFile(uintptr(fd), "netlink"), + } + s.lsa.Family = unix.AF_NETLINK + if err := unix.Bind(fd, &s.lsa); err != nil { + unix.Close(fd) + return nil, err + } + + return s, nil +} + +// GetNetlinkSocketAt opens a netlink socket in the network namespace newNs +// and positions the thread back into the network namespace specified by curNs, +// when done. If curNs is close, the function derives the current namespace and +// moves back into it when done. If newNs is close, the socket will be opened +// in the current network namespace. +func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSocket, error) { + c, err := executeInNetns(newNs, curNs) + if err != nil { + return nil, err + } + defer c() + return getNetlinkSocket(protocol) +} + +// executeInNetns sets execution of the code following this call to the +// network namespace newNs, then moves the thread back to curNs if open, +// otherwise to the current netns at the time the function was invoked +// In case of success, the caller is expected to execute the returned function +// at the end of the code that needs to be executed in the network namespace. +// Example: +// +// func jobAt(...) error { +// d, err := executeInNetns(...) +// if err != nil { return err} +// defer d() +// < code which needs to be executed in specific netns> +// } +// +// TODO: his function probably belongs to netns pkg. +func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { + var ( + err error + moveBack func(netns.NsHandle) error + closeNs func() error + unlockThd func() + ) + restore := func() { + // order matters + if moveBack != nil { + moveBack(curNs) + } + if closeNs != nil { + closeNs() + } + if unlockThd != nil { + unlockThd() + } + } + if newNs.IsOpen() { + runtime.LockOSThread() + unlockThd = runtime.UnlockOSThread + if !curNs.IsOpen() { + if curNs, err = netns.Get(); err != nil { + restore() + return nil, fmt.Errorf("could not get current namespace while creating netlink socket: %v", err) + } + closeNs = curNs.Close + } + if err := netns.Set(newNs); err != nil { + restore() + return nil, fmt.Errorf("failed to set into network namespace %d while creating netlink socket: %v", newNs, err) + } + moveBack = netns.Set + } + return restore, nil +} + +// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE) +// and subscribe it to multicast groups passed in variable argument list. +// Returns the netlink socket on which Receive() method can be called +// to retrieve the messages from the kernel. 
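// Usage sketch (illustrative only): subscribing to link notifications. Group
// numbers are 1-based (the RTNLGRP_* values), matching the (1 << (g - 1))
// shift in the implementation below.
//
//	s, err := nl.Subscribe(unix.NETLINK_ROUTE, unix.RTNLGRP_LINK)
//	if err != nil { /* handle */ }
//	defer s.Close()
//	msgs, _, err := s.Receive()
//	for _, m := range msgs {
//		if m.Header.Type == unix.RTM_NEWLINK { /* link changed */ }
//	}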
+ +// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE) +// and subscribe it to the multicast groups passed in the variable argument list. +// Returns the netlink socket on which the Receive() method can be called +// to retrieve the messages from the kernel. +func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) + if err != nil { + return nil, err + } + err = unix.SetNonblock(fd, true) + if err != nil { + return nil, err + } + s := &NetlinkSocket{ + fd: int32(fd), + file: os.NewFile(uintptr(fd), "netlink"), + } + s.lsa.Family = unix.AF_NETLINK + + for _, g := range groups { + s.lsa.Groups |= (1 << (g - 1)) + } + + if err := unix.Bind(fd, &s.lsa); err != nil { + unix.Close(fd) + return nil, err + } + + return s, nil +} + +// SubscribeAt works like Subscribe and lets the caller choose the network +// namespace in which the socket would be opened (newNs). Then control goes back +// to curNs if open, otherwise to the netns at the time this function was called. +func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*NetlinkSocket, error) { + c, err := executeInNetns(newNs, curNs) + if err != nil { + return nil, err + } + defer c() + return Subscribe(protocol, groups...) +} + +func (s *NetlinkSocket) Close() { + s.file.Close() +} + +func (s *NetlinkSocket) GetFd() int { + return int(s.fd) +} + +func (s *NetlinkSocket) Send(request *NetlinkRequest) error { + return unix.Sendto(int(s.fd), request.Serialize(), 0, &s.lsa) +} + +func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, *unix.SockaddrNetlink, error) { + rawConn, err := s.file.SyscallConn() + if err != nil { + return nil, nil, err + } + var ( + fromAddr *unix.SockaddrNetlink + rb [RECEIVE_BUFFER_SIZE]byte + nr int + from unix.Sockaddr + innerErr error + ) + err = rawConn.Read(func(fd uintptr) (done bool) { + nr, from, innerErr = unix.Recvfrom(int(fd), rb[:], 0) + return innerErr != unix.EWOULDBLOCK + }) + if innerErr != nil { + err = innerErr + } + if err != nil { + return nil, nil, err + } + fromAddr, ok := from.(*unix.SockaddrNetlink) + if !ok { + return nil, nil, fmt.Errorf("Error converting to netlink sockaddr") + } + if nr < unix.NLMSG_HDRLEN { + return nil, nil, fmt.Errorf("Got short response from netlink") + } + msgLen := nlmAlignOf(nr) + rb2 := make([]byte, msgLen) + copy(rb2, rb[:msgLen]) + nl, err := syscall.ParseNetlinkMessage(rb2) + if err != nil { + return nil, nil, err + } + return nl, fromAddr, nil +} + +// SetSendTimeout allows setting a send timeout on the socket +func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { + // Set a send timeout of SOCKET_SEND_TIMEOUT so that Send periodically unblocks and a routine + // does not remain stuck on a send on a closed fd + return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) +} + +// SetReceiveTimeout allows setting a receive timeout on the socket +func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { + // Set a read timeout of SOCKET_READ_TIMEOUT so that Read periodically unblocks and a routine + // does not remain stuck on a recvmsg on a closed fd + return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) +} + +// SetReceiveBufferSize allows setting a receive buffer size on the socket +func (s *NetlinkSocket) SetReceiveBufferSize(size int, force bool) error { + opt := unix.SO_RCVBUF + if force { + opt = unix.SO_RCVBUFFORCE + } + return unix.SetsockoptInt(int(s.fd), unix.SOL_SOCKET, opt, size) +}
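The group numbers passed to Subscribe are 1-based multicast group IDs (each is ORed into the bind mask as 1 << (g-1)), so a caller picks rtnetlink notification groups such as RTNLGRP_LINK and then loops on Receive. Below is a minimal sketch, again an editor's illustration assuming it lives inside the nl package, not part of the vendored patch:

```go
package nl

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// watchLinkEventsExample prints rtnetlink link notifications as they arrive.
func watchLinkEventsExample() error {
	// RTNLGRP_LINK is group 1; Subscribe shifts it into the sockaddr bitmask.
	s, err := Subscribe(unix.NETLINK_ROUTE, unix.RTNLGRP_LINK)
	if err != nil {
		return err
	}
	defer s.Close()

	for {
		msgs, from, err := s.Receive()
		if err != nil {
			return err
		}
		if from.Pid != PidKernel {
			continue // ignore messages that did not originate in the kernel
		}
		for _, m := range msgs {
			fmt.Printf("link event: type=%d len=%d\n", m.Header.Type, len(m.Data))
		}
	}
}
```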
+ +// SetExtAck requests error messages to be reported on the socket +func (s *NetlinkSocket) SetExtAck(enable bool) error { + var enableN int + if enable { + enableN = 1 + } + + return unix.SetsockoptInt(int(s.fd), unix.SOL_NETLINK, unix.NETLINK_EXT_ACK, enableN) +} + +func (s *NetlinkSocket) GetPid() (uint32, error) { + lsa, err := unix.Getsockname(int(s.fd)) + if err != nil { + return 0, err + } + switch v := lsa.(type) { + case *unix.SockaddrNetlink: + return v.Pid, nil + } + return 0, fmt.Errorf("Wrong socket type") +} + +func ZeroTerminated(s string) []byte { + bytes := make([]byte, len(s)+1) + for i := 0; i < len(s); i++ { + bytes[i] = s[i] + } + bytes[len(s)] = 0 + return bytes +} + +func NonZeroTerminated(s string) []byte { + bytes := make([]byte, len(s)) + for i := 0; i < len(s); i++ { + bytes[i] = s[i] + } + return bytes +} + +func BytesToString(b []byte) string { + n := bytes.Index(b, []byte{0}) + return string(b[:n]) +} + +func Uint8Attr(v uint8) []byte { + return []byte{byte(v)} +} + +func Uint16Attr(v uint16) []byte { + native := NativeEndian() + bytes := make([]byte, 2) + native.PutUint16(bytes, v) + return bytes +} + +func BEUint16Attr(v uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, v) + return bytes +} + +func Uint32Attr(v uint32) []byte { + native := NativeEndian() + bytes := make([]byte, 4) + native.PutUint32(bytes, v) + return bytes +} + +func BEUint32Attr(v uint32) []byte { + bytes := make([]byte, 4) + binary.BigEndian.PutUint32(bytes, v) + return bytes +} + +func Uint64Attr(v uint64) []byte { + native := NativeEndian() + bytes := make([]byte, 8) + native.PutUint64(bytes, v) + return bytes +} + +func BEUint64Attr(v uint64) []byte { + bytes := make([]byte, 8) + binary.BigEndian.PutUint64(bytes, v) + return bytes +} + +func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { + var attrs []syscall.NetlinkRouteAttr + for len(b) >= unix.SizeofRtAttr { + a, vbuf, alen, err := netlinkRouteAttrAndValue(b) + if err != nil { + return nil, err + } + ra := syscall.NetlinkRouteAttr{Attr: syscall.RtAttr(*a), Value: vbuf[:int(a.Len)-unix.SizeofRtAttr]} + attrs = append(attrs, ra) + b = b[alen:] + } + return attrs, nil +} + +// ParseRouteAttrAsMap parses the provided buffer, which contains raw RtAttrs, and returns a map of parsed +// attributes indexed by attribute type, or an error if one occurred.
+func ParseRouteAttrAsMap(b []byte) (map[uint16]syscall.NetlinkRouteAttr, error) { + attrMap := make(map[uint16]syscall.NetlinkRouteAttr) + + attrs, err := ParseRouteAttr(b) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + attrMap[attr.Attr.Type] = attr + } + return attrMap, nil +} + +func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { + a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) + if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { + return nil, nil, 0, unix.EINVAL + } + return a, b[unix.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil +} + +// SocketHandle contains the netlink socket and the associated +// sequence counter for a specific netlink family +type SocketHandle struct { + Seq uint32 + Socket *NetlinkSocket +} + +// Close closes the netlink socket +func (sh *SocketHandle) Close() { + if sh.Socket != nil { + sh.Socket.Close() + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go b/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go new file mode 100644 index 000000000..dfc0be660 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go @@ -0,0 +1,11 @@ +// +build !linux + +package nl + +import "encoding/binary" + +var SupportedNlFamilies = []int{} + +func NativeEndian() binary.ByteOrder { + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go b/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go new file mode 100644 index 000000000..7f49125cf --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go @@ -0,0 +1,79 @@ +package nl + +import ( + "encoding/binary" + "fmt" + "log" +) + +type Attribute struct { + Type uint16 + Value []byte +} + +func ParseAttributes(data []byte) <-chan Attribute { + native := NativeEndian() + result := make(chan Attribute) + + go func() { + i := 0 + for i+4 < len(data) { + length := int(native.Uint16(data[i : i+2])) + attrType := native.Uint16(data[i+2 : i+4]) + + if length < 4 { + log.Printf("attribute 0x%02x has invalid length of %d bytes", attrType, length) + break + } + + if len(data) < i+length { + log.Printf("attribute 0x%02x of length %d is truncated, only %d bytes remaining", attrType, length, len(data)-i) + break + } + + result <- Attribute{ + Type: attrType, + Value: data[i+4 : i+length], + } + i += rtaAlignOf(length) + } + close(result) + }() + + return result +} + +func PrintAttributes(data []byte) { + printAttributes(data, 0) +} + +func printAttributes(data []byte, level int) { + for attr := range ParseAttributes(data) { + for i := 0; i < level; i++ { + print("> ") + } + nested := attr.Type&NLA_F_NESTED != 0 + fmt.Printf("type=%d nested=%v len=%v %v\n", attr.Type&NLA_TYPE_MASK, nested, len(attr.Value), attr.Value) + if nested { + printAttributes(attr.Value, level+1) + } + } +} + +// Uint32 returns the uint32 value respecting the NET_BYTEORDER flag +func (attr *Attribute) Uint32() uint32 { + if attr.Type&NLA_F_NET_BYTEORDER != 0 { + return binary.BigEndian.Uint32(attr.Value) + } else { + return NativeEndian().Uint32(attr.Value) + } +} + +// Uint64 returns the uint64 value respecting the NET_BYTEORDER flag +func (attr *Attribute) Uint64() uint64 { + if attr.Type&NLA_F_NET_BYTEORDER != 0 { + return binary.BigEndian.Uint64(attr.Value) + } else { + return NativeEndian().Uint64(attr.Value) + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go new file mode 100644 index 000000000..ce43ee155 
--- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go @@ -0,0 +1,39 @@ +package nl + +const ( + RDMA_NL_GET_CLIENT_SHIFT = 10 +) + +const ( + RDMA_NL_NLDEV = 5 +) + +const ( + RDMA_NLDEV_CMD_GET = 1 + RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_NEWLINK = 3 + RDMA_NLDEV_CMD_DELLINK = 4 + RDMA_NLDEV_CMD_SYS_GET = 6 + RDMA_NLDEV_CMD_SYS_SET = 7 +) + +const ( + RDMA_NLDEV_ATTR_DEV_INDEX = 1 + RDMA_NLDEV_ATTR_DEV_NAME = 2 + RDMA_NLDEV_ATTR_PORT_INDEX = 3 + RDMA_NLDEV_ATTR_CAP_FLAGS = 4 + RDMA_NLDEV_ATTR_FW_VERSION = 5 + RDMA_NLDEV_ATTR_NODE_GUID = 6 + RDMA_NLDEV_ATTR_SYS_IMAGE_GUID = 7 + RDMA_NLDEV_ATTR_SUBNET_PREFIX = 8 + RDMA_NLDEV_ATTR_LID = 9 + RDMA_NLDEV_ATTR_SM_LID = 10 + RDMA_NLDEV_ATTR_LMC = 11 + RDMA_NLDEV_ATTR_PORT_STATE = 12 + RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 + RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 + RDMA_NLDEV_ATTR_NDEV_NAME = 51 + RDMA_NLDEV_ATTR_LINK_TYPE = 65 + RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 + RDMA_NLDEV_NET_NS_FD = 68 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go new file mode 100644 index 000000000..c26f3bf91 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -0,0 +1,109 @@ +package nl + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +type RtMsg struct { + unix.RtMsg +} + +func NewRtMsg() *RtMsg { + return &RtMsg{ + RtMsg: unix.RtMsg{ + Table: unix.RT_TABLE_MAIN, + Scope: unix.RT_SCOPE_UNIVERSE, + Protocol: unix.RTPROT_BOOT, + Type: unix.RTN_UNICAST, + }, + } +} + +func NewRtDelMsg() *RtMsg { + return &RtMsg{ + RtMsg: unix.RtMsg{ + Table: unix.RT_TABLE_MAIN, + Scope: unix.RT_SCOPE_NOWHERE, + }, + } +} + +func (msg *RtMsg) Len() int { + return unix.SizeofRtMsg +} + +func DeserializeRtMsg(b []byte) *RtMsg { + return (*RtMsg)(unsafe.Pointer(&b[0:unix.SizeofRtMsg][0])) +} + +func (msg *RtMsg) Serialize() []byte { + return (*(*[unix.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] +} + +type RtNexthop struct { + unix.RtNexthop + Children []NetlinkRequestData +} + +func DeserializeRtNexthop(b []byte) *RtNexthop { + return &RtNexthop{ + RtNexthop: *((*unix.RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0]))), + } +} + +func (msg *RtNexthop) Len() int { + if len(msg.Children) == 0 { + return unix.SizeofRtNexthop + } + + l := 0 + for _, child := range msg.Children { + l += rtaAlignOf(child.Len()) + } + l += unix.SizeofRtNexthop + return rtaAlignOf(l) +} + +func (msg *RtNexthop) Serialize() []byte { + length := msg.Len() + msg.RtNexthop.Len = uint16(length) + buf := make([]byte, length) + copy(buf, (*(*[unix.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) + next := rtaAlignOf(unix.SizeofRtNexthop) + if len(msg.Children) > 0 { + for _, child := range msg.Children { + childBuf := child.Serialize() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + return buf +} + +type RtGenMsg struct { + unix.RtGenmsg +} + +func NewRtGenMsg() *RtGenMsg { + return &RtGenMsg{ + RtGenmsg: unix.RtGenmsg{ + Family: unix.AF_UNSPEC, + }, + } +} + +func (msg *RtGenMsg) Len() int { + return rtaAlignOf(unix.SizeofRtGenmsg) +} + +func DeserializeRtGenMsg(b []byte) *RtGenMsg { + return &RtGenMsg{RtGenmsg: unix.RtGenmsg{Family: b[0]}} +} + +func (msg *RtGenMsg) Serialize() []byte { + out := make([]byte, msg.Len()) + out[0] = msg.Family + return out +} diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go new file mode 100644 index 000000000..fe88285f2 --- /dev/null +++ 
b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go @@ -0,0 +1,154 @@ +package nl + +import ( + "errors" + "fmt" + "net" +) + +type IPv6SrHdr struct { + nextHdr uint8 + hdrLen uint8 + routingType uint8 + segmentsLeft uint8 + firstSegment uint8 + flags uint8 + reserved uint16 + + Segments []net.IP +} + +func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { + if len(s1.Segments) != len(s2.Segments) { + return false + } + for i := range s1.Segments { + if !s1.Segments[i].Equal(s2.Segments[i]) { + return false + } + } + return s1.nextHdr == s2.nextHdr && + s1.hdrLen == s2.hdrLen && + s1.routingType == s2.routingType && + s1.segmentsLeft == s2.segmentsLeft && + s1.firstSegment == s2.firstSegment && + s1.flags == s2.flags + // reserved doesn't need to be identical. +} + +// seg6 encap mode +const ( + SEG6_IPTUN_MODE_INLINE = iota + SEG6_IPTUN_MODE_ENCAP +) + +// number of nested RTATTR +// from include/uapi/linux/seg6_iptunnel.h +const ( + SEG6_IPTUNNEL_UNSPEC = iota + SEG6_IPTUNNEL_SRH + __SEG6_IPTUNNEL_MAX +) +const ( + SEG6_IPTUNNEL_MAX = __SEG6_IPTUNNEL_MAX - 1 +) + +func EncodeSEG6Encap(mode int, segments []net.IP) ([]byte, error) { + nsegs := len(segments) // nsegs: number of segments + if nsegs == 0 { + return nil, errors.New("EncodeSEG6Encap: No Segment in srh") + } + b := make([]byte, 12, 12+len(segments)*16) + native := NativeEndian() + native.PutUint32(b, uint32(mode)) + b[4] = 0 // srh.nextHdr (0 when calling netlink) + b[5] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) + b[6] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) + b[7] = uint8(nsegs - 1) // srh.segmentsLeft + b[8] = uint8(nsegs - 1) // srh.firstSegment + b[9] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) + // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 + native.PutUint16(b[10:], 0) // srh.reserved + for _, netIP := range segments { + b = append(b, netIP...) 
// srh.Segments + } + return b, nil +} + +func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { + native := NativeEndian() + mode := int(native.Uint32(buf)) + srh := IPv6SrHdr{ + nextHdr: buf[4], + hdrLen: buf[5], + routingType: buf[6], + segmentsLeft: buf[7], + firstSegment: buf[8], + flags: buf[9], + reserved: native.Uint16(buf[10:12]), + } + buf = buf[12:] + if len(buf)%16 != 0 { + err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)", len(buf)) + return mode, nil, err + } + for len(buf) > 0 { + srh.Segments = append(srh.Segments, net.IP(buf[:16])) + buf = buf[16:] + } + return mode, srh.Segments, nil +} + +func DecodeSEG6Srh(buf []byte) ([]net.IP, error) { + native := NativeEndian() + srh := IPv6SrHdr{ + nextHdr: buf[0], + hdrLen: buf[1], + routingType: buf[2], + segmentsLeft: buf[3], + firstSegment: buf[4], + flags: buf[5], + reserved: native.Uint16(buf[6:8]), + } + buf = buf[8:] + if len(buf)%16 != 0 { + err := fmt.Errorf("DecodeSEG6Srh: error parsing Segment List (buf len: %d)", len(buf)) + return nil, err + } + for len(buf) > 0 { + srh.Segments = append(srh.Segments, net.IP(buf[:16])) + buf = buf[16:] + } + return srh.Segments, nil +} +func EncodeSEG6Srh(segments []net.IP) ([]byte, error) { + nsegs := len(segments) // nsegs: number of segments + if nsegs == 0 { + return nil, errors.New("EncodeSEG6Srh: No Segments") + } + b := make([]byte, 8, 8+len(segments)*16) + native := NativeEndian() + b[0] = 0 // srh.nextHdr (0 when calling netlink) + b[1] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) + b[2] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) + b[3] = uint8(nsegs - 1) // srh.segmentsLeft + b[4] = uint8(nsegs - 1) // srh.firstSegment + b[5] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) + // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 + native.PutUint16(b[6:], 0) // srh.reserved + for _, netIP := range segments { + b = append(b, netIP...) 
// srh.Segments + } + return b, nil +} + +// Helper functions +func SEG6EncapModeString(mode int) string { + switch mode { + case SEG6_IPTUN_MODE_INLINE: + return "inline" + case SEG6_IPTUN_MODE_ENCAP: + return "encap" + } + return "unknown" +} diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go new file mode 100644 index 000000000..8172b8471 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go @@ -0,0 +1,80 @@ +package nl + +import () + +// seg6local parameters +const ( + SEG6_LOCAL_UNSPEC = iota + SEG6_LOCAL_ACTION + SEG6_LOCAL_SRH + SEG6_LOCAL_TABLE + SEG6_LOCAL_NH4 + SEG6_LOCAL_NH6 + SEG6_LOCAL_IIF + SEG6_LOCAL_OIF + SEG6_LOCAL_BPF + __SEG6_LOCAL_MAX +) +const ( + SEG6_LOCAL_MAX = __SEG6_LOCAL_MAX +) + +// seg6local actions +const ( + SEG6_LOCAL_ACTION_END = iota + 1 // 1 + SEG6_LOCAL_ACTION_END_X // 2 + SEG6_LOCAL_ACTION_END_T // 3 + SEG6_LOCAL_ACTION_END_DX2 // 4 + SEG6_LOCAL_ACTION_END_DX6 // 5 + SEG6_LOCAL_ACTION_END_DX4 // 6 + SEG6_LOCAL_ACTION_END_DT6 // 7 + SEG6_LOCAL_ACTION_END_DT4 // 8 + SEG6_LOCAL_ACTION_END_B6 // 9 + SEG6_LOCAL_ACTION_END_B6_ENCAPS // 10 + SEG6_LOCAL_ACTION_END_BM // 11 + SEG6_LOCAL_ACTION_END_S // 12 + SEG6_LOCAL_ACTION_END_AS // 13 + SEG6_LOCAL_ACTION_END_AM // 14 + SEG6_LOCAL_ACTION_END_BPF // 15 + __SEG6_LOCAL_ACTION_MAX +) +const ( + SEG6_LOCAL_ACTION_MAX = __SEG6_LOCAL_ACTION_MAX - 1 +) + +// Helper functions +func SEG6LocalActionString(action int) string { + switch action { + case SEG6_LOCAL_ACTION_END: + return "End" + case SEG6_LOCAL_ACTION_END_X: + return "End.X" + case SEG6_LOCAL_ACTION_END_T: + return "End.T" + case SEG6_LOCAL_ACTION_END_DX2: + return "End.DX2" + case SEG6_LOCAL_ACTION_END_DX6: + return "End.DX6" + case SEG6_LOCAL_ACTION_END_DX4: + return "End.DX4" + case SEG6_LOCAL_ACTION_END_DT6: + return "End.DT6" + case SEG6_LOCAL_ACTION_END_DT4: + return "End.DT4" + case SEG6_LOCAL_ACTION_END_B6: + return "End.B6" + case SEG6_LOCAL_ACTION_END_B6_ENCAPS: + return "End.B6.Encaps" + case SEG6_LOCAL_ACTION_END_BM: + return "End.BM" + case SEG6_LOCAL_ACTION_END_S: + return "End.S" + case SEG6_LOCAL_ACTION_END_AS: + return "End.AS" + case SEG6_LOCAL_ACTION_END_AM: + return "End.AM" + case SEG6_LOCAL_ACTION_END_BPF: + return "End.BPF" + } + return "unknown" +} diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go new file mode 100644 index 000000000..b5ba039ac --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -0,0 +1,77 @@ +package nl + +// The syscall package lacks the rule attribute types, +// so they are defined below. +const ( + FRA_UNSPEC = iota + FRA_DST /* destination address */ + FRA_SRC /* source address */ + FRA_IIFNAME /* interface name */ + FRA_GOTO /* target to jump to (FR_ACT_GOTO) */ + FRA_UNUSED2 + FRA_PRIORITY /* priority/preference */ + FRA_UNUSED3 + FRA_UNUSED4 + FRA_UNUSED5 + FRA_FWMARK /* mark */ + FRA_FLOW /* flow/class id */ + FRA_TUN_ID + FRA_SUPPRESS_IFGROUP + FRA_SUPPRESS_PREFIXLEN + FRA_TABLE /* Extended table id */ + FRA_FWMASK /* mask for netfilter mark */ + FRA_OIFNAME + FRA_PAD + FRA_L3MDEV /* iif or oif is l3mdev goto its table */ + FRA_UID_RANGE /* UID range */ + FRA_PROTOCOL /* Originator of the rule */ + FRA_IP_PROTO /* ip proto */ + FRA_SPORT_RANGE /* sport */ + FRA_DPORT_RANGE /* dport */ +) + +// ip rule actions +const ( + FR_ACT_UNSPEC = iota + FR_ACT_TO_TBL /* Pass to fixed table */ + FR_ACT_GOTO /* Jump to another rule */ + FR_ACT_NOP /* No operation */ + FR_ACT_RES3 + FR_ACT_RES4 + FR_ACT_BLACKHOLE /* Drop without notification */ + FR_ACT_UNREACHABLE /* Drop with ENETUNREACH */ + FR_ACT_PROHIBIT /* Drop with EACCES */ +) + +// socket diags related +const ( + SOCK_DIAG_BY_FAMILY = 20 /* linux.sock_diag.h */ + SOCK_DESTROY = 21 + TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h */ +) + +// RTA_ENCAP subtype +const ( + MPLS_IPTUNNEL_UNSPEC = iota + MPLS_IPTUNNEL_DST +) + +// light weight tunnel encap types +const ( + LWTUNNEL_ENCAP_NONE = iota + LWTUNNEL_ENCAP_MPLS + LWTUNNEL_ENCAP_IP + LWTUNNEL_ENCAP_ILA + LWTUNNEL_ENCAP_IP6 + LWTUNNEL_ENCAP_SEG6 + LWTUNNEL_ENCAP_BPF + LWTUNNEL_ENCAP_SEG6_LOCAL +) + +// routing header types +const ( + IPV6_SRCRT_STRICT = 0x01 // Deprecated; will be removed + IPV6_SRCRT_TYPE_0 = 0 // Deprecated; will be removed + IPV6_SRCRT_TYPE_2 = 2 // IPv6 type 2 Routing Header + IPV6_SRCRT_TYPE_4 = 4 // Segment Routing with IPv6 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go new file mode 100644 index 000000000..0720729a9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -0,0 +1,1611 @@ +package nl + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LinkLayer +const ( + LINKLAYER_UNSPEC = iota + LINKLAYER_ETHERNET + LINKLAYER_ATM +) + +// ATM +const ( + ATM_CELL_PAYLOAD = 48 + ATM_CELL_SIZE = 53 +) + +const TC_LINKLAYER_MASK = 0x0F + +// Police +const ( + TCA_POLICE_UNSPEC = iota + TCA_POLICE_TBF + TCA_POLICE_RATE + TCA_POLICE_PEAKRATE + TCA_POLICE_AVRATE + TCA_POLICE_RESULT + TCA_POLICE_MAX = TCA_POLICE_RESULT +) + +// Message types +const ( + TCA_UNSPEC = iota + TCA_KIND + TCA_OPTIONS + TCA_STATS + TCA_XSTATS + TCA_RATE + TCA_FCNT + TCA_STATS2 + TCA_STAB + TCA_PAD + TCA_DUMP_INVISIBLE + TCA_CHAIN + TCA_HW_OFFLOAD + TCA_INGRESS_BLOCK + TCA_EGRESS_BLOCK + TCA_DUMP_FLAGS + TCA_MAX = TCA_DUMP_FLAGS +) + +const ( + TCA_ACT_TAB = 1 + TCAA_MAX = 1 +) + +const ( + TCA_ACT_UNSPEC = iota + TCA_ACT_KIND + TCA_ACT_OPTIONS + TCA_ACT_INDEX + TCA_ACT_STATS + TCA_ACT_PAD + TCA_ACT_COOKIE + TCA_ACT_FLAGS + TCA_ACT_HW_STATS + TCA_ACT_USED_HW_STATS + TCA_ACT_IN_HW_COUNT + TCA_ACT_MAX +) + +const ( + TCA_PRIO_UNSPEC = iota + TCA_PRIO_MQ + TCA_PRIO_MAX = TCA_PRIO_MQ +) + +const ( + TCA_STATS_UNSPEC = iota + TCA_STATS_BASIC + TCA_STATS_RATE_EST + TCA_STATS_QUEUE + TCA_STATS_APP + TCA_STATS_RATE_EST64 + TCA_STATS_PAD + TCA_STATS_BASIC_HW + TCA_STATS_PKT64 + TCA_STATS_MAX = TCA_STATS_PKT64 +) + +const ( + SizeofTcMsg = 0x14 + SizeofTcActionMsg =
0x04 + SizeofTcPrioMap = 0x14 + SizeofTcRateSpec = 0x0c + SizeofTcNetemQopt = 0x18 + SizeofTcNetemCorr = 0x0c + SizeofTcNetemReorder = 0x08 + SizeofTcNetemCorrupt = 0x08 + SizeOfTcNetemRate = 0x10 + SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c + SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14 + SizeofTcHtbGlob = 0x14 + SizeofTcU32Key = 0x10 + SizeofTcU32Sel = 0x10 // without keys + SizeofTcGen = 0x16 + SizeofTcConnmark = SizeofTcGen + 0x04 + SizeofTcCsum = SizeofTcGen + 0x04 + SizeofTcMirred = SizeofTcGen + 0x08 + SizeofTcTunnelKey = SizeofTcGen + 0x04 + SizeofTcSkbEdit = SizeofTcGen + SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 + SizeofTcSfqQopt = 0x0b + SizeofTcSfqRedStats = 0x18 + SizeofTcSfqQoptV1 = SizeofTcSfqQopt + SizeofTcSfqRedStats + 0x1c + SizeofUint32Bitfield = 0x8 +) + +// struct tcmsg { +// unsigned char tcm_family; +// unsigned char tcm__pad1; +// unsigned short tcm__pad2; +// int tcm_ifindex; +// __u32 tcm_handle; +// __u32 tcm_parent; +// __u32 tcm_info; +// }; + +type TcMsg struct { + Family uint8 + Pad [3]byte + Ifindex int32 + Handle uint32 + Parent uint32 + Info uint32 +} + +func (msg *TcMsg) Len() int { + return SizeofTcMsg +} + +func DeserializeTcMsg(b []byte) *TcMsg { + return (*TcMsg)(unsafe.Pointer(&b[0:SizeofTcMsg][0])) +} + +func (x *TcMsg) Serialize() []byte { + return (*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:] +} + +type Tcf struct { + Install uint64 + LastUse uint64 + Expires uint64 + FirstUse uint64 +} + +func DeserializeTcf(b []byte) *Tcf { + const size = int(unsafe.Sizeof(Tcf{})) + return (*Tcf)(unsafe.Pointer(&b[0:size][0])) +} + +// struct tcamsg { +// unsigned char tca_family; +// unsigned char tca__pad1; +// unsigned short tca__pad2; +// }; + +type TcActionMsg struct { + Family uint8 + Pad [3]byte +} + +func (msg *TcActionMsg) Len() int { + return SizeofTcActionMsg +} + +func DeserializeTcActionMsg(b []byte) *TcActionMsg { + return (*TcActionMsg)(unsafe.Pointer(&b[0:SizeofTcActionMsg][0])) +} + +func (x *TcActionMsg) Serialize() []byte { + return (*(*[SizeofTcActionMsg]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TC_PRIO_MAX = 15 +) + +// struct tc_prio_qopt { +// int bands; /* Number of bands */ +// __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ +// }; + +type TcPrioMap struct { + Bands int32 + Priomap [TC_PRIO_MAX + 1]uint8 +} + +func (msg *TcPrioMap) Len() int { + return SizeofTcPrioMap +} + +func DeserializeTcPrioMap(b []byte) *TcPrioMap { + return (*TcPrioMap)(unsafe.Pointer(&b[0:SizeofTcPrioMap][0])) +} + +func (x *TcPrioMap) Serialize() []byte { + return (*(*[SizeofTcPrioMap]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_TBF_UNSPEC = iota + TCA_TBF_PARMS + TCA_TBF_RTAB + TCA_TBF_PTAB + TCA_TBF_RATE64 + TCA_TBF_PRATE64 + TCA_TBF_BURST + TCA_TBF_PBURST + TCA_TBF_MAX = TCA_TBF_PBURST +) + +// struct tc_ratespec { +// unsigned char cell_log; +// __u8 linklayer; /* lower 4 bits */ +// unsigned short overhead; +// short cell_align; +// unsigned short mpu; +// __u32 rate; +// }; + +type TcRateSpec struct { + CellLog uint8 + Linklayer uint8 + Overhead uint16 + CellAlign int16 + Mpu uint16 + Rate uint32 +} + +func (msg *TcRateSpec) Len() int { + return SizeofTcRateSpec +} + +func DeserializeTcRateSpec(b []byte) *TcRateSpec { + return (*TcRateSpec)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0])) +} + +func (x *TcRateSpec) Serialize() []byte { + return (*(*[SizeofTcRateSpec]byte)(unsafe.Pointer(x)))[:] +} + +/** +* NETEM + */ + +const ( + TCA_NETEM_UNSPEC = iota + TCA_NETEM_CORR + TCA_NETEM_DELAY_DIST + TCA_NETEM_REORDER + TCA_NETEM_CORRUPT 
+ TCA_NETEM_LOSS + TCA_NETEM_RATE + TCA_NETEM_ECN + TCA_NETEM_RATE64 + TCA_NETEM_MAX = TCA_NETEM_RATE64 +) + +// struct tc_netem_qopt { +// __u32 latency; /* added delay (us) */ +// __u32 limit; /* fifo limit (packets) */ +// __u32 loss; /* random packet loss (0=none ~0=100%) */ +// __u32 gap; /* re-ordering gap (0 for none) */ +// __u32 duplicate; /* random packet dup (0=none ~0=100%) */ +// __u32 jitter; /* random jitter in latency (us) */ +// }; + +type TcNetemQopt struct { + Latency uint32 + Limit uint32 + Loss uint32 + Gap uint32 + Duplicate uint32 + Jitter uint32 +} + +func (msg *TcNetemQopt) Len() int { + return SizeofTcNetemQopt +} + +func DeserializeTcNetemQopt(b []byte) *TcNetemQopt { + return (*TcNetemQopt)(unsafe.Pointer(&b[0:SizeofTcNetemQopt][0])) +} + +func (x *TcNetemQopt) Serialize() []byte { + return (*(*[SizeofTcNetemQopt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_corr { +// __u32 delay_corr; /* delay correlation */ +// __u32 loss_corr; /* packet loss correlation */ +// __u32 dup_corr; /* duplicate correlation */ +// }; + +type TcNetemCorr struct { + DelayCorr uint32 + LossCorr uint32 + DupCorr uint32 +} + +func (msg *TcNetemCorr) Len() int { + return SizeofTcNetemCorr +} + +func DeserializeTcNetemCorr(b []byte) *TcNetemCorr { + return (*TcNetemCorr)(unsafe.Pointer(&b[0:SizeofTcNetemCorr][0])) +} + +func (x *TcNetemCorr) Serialize() []byte { + return (*(*[SizeofTcNetemCorr]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_reorder { +// __u32 probability; +// __u32 correlation; +// }; + +type TcNetemReorder struct { + Probability uint32 + Correlation uint32 +} + +func (msg *TcNetemReorder) Len() int { + return SizeofTcNetemReorder +} + +func DeserializeTcNetemReorder(b []byte) *TcNetemReorder { + return (*TcNetemReorder)(unsafe.Pointer(&b[0:SizeofTcNetemReorder][0])) +} + +func (x *TcNetemReorder) Serialize() []byte { + return (*(*[SizeofTcNetemReorder]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_corrupt { +// __u32 probability; +// __u32 correlation; +// }; + +type TcNetemCorrupt struct { + Probability uint32 + Correlation uint32 +} + +func (msg *TcNetemCorrupt) Len() int { + return SizeofTcNetemCorrupt +} + +func DeserializeTcNetemCorrupt(b []byte) *TcNetemCorrupt { + return (*TcNetemCorrupt)(unsafe.Pointer(&b[0:SizeofTcNetemCorrupt][0])) +} + +func (x *TcNetemCorrupt) Serialize() []byte { + return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:] +} + +// TcNetemRate is a struct that represents the rate of a netem qdisc +type TcNetemRate struct { + Rate uint32 + PacketOverhead int32 + CellSize uint32 + CellOverhead int32 +} + +func (msg *TcNetemRate) Len() int { + return SizeofTcRateSpec +} + +func DeserializeTcNetemRate(b []byte) *TcNetemRate { + return (*TcNetemRate)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0])) +} + +func (msg *TcNetemRate) Serialize() []byte { + return (*(*[SizeOfTcNetemRate]byte)(unsafe.Pointer(msg)))[:] +} + +// struct tc_tbf_qopt { +// struct tc_ratespec rate; +// struct tc_ratespec peakrate; +// __u32 limit; +// __u32 buffer; +// __u32 mtu; +// }; + +type TcTbfQopt struct { + Rate TcRateSpec + Peakrate TcRateSpec + Limit uint32 + Buffer uint32 + Mtu uint32 +} + +func (msg *TcTbfQopt) Len() int { + return SizeofTcTbfQopt +} + +func DeserializeTcTbfQopt(b []byte) *TcTbfQopt { + return (*TcTbfQopt)(unsafe.Pointer(&b[0:SizeofTcTbfQopt][0])) +} + +func (x *TcTbfQopt) Serialize() []byte { + return (*(*[SizeofTcTbfQopt]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_HTB_UNSPEC = iota + TCA_HTB_PARMS + TCA_HTB_INIT + 
TCA_HTB_CTAB + TCA_HTB_RTAB + TCA_HTB_DIRECT_QLEN + TCA_HTB_RATE64 + TCA_HTB_CEIL64 + TCA_HTB_MAX = TCA_HTB_CEIL64 +) + +//struct tc_htb_opt { +// struct tc_ratespec rate; +// struct tc_ratespec ceil; +// __u32 buffer; +// __u32 cbuffer; +// __u32 quantum; +// __u32 level; /* out only */ +// __u32 prio; +//}; + +type TcHtbCopt struct { + Rate TcRateSpec + Ceil TcRateSpec + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func (msg *TcHtbCopt) Len() int { + return SizeofTcHtbCopt +} + +func DeserializeTcHtbCopt(b []byte) *TcHtbCopt { + return (*TcHtbCopt)(unsafe.Pointer(&b[0:SizeofTcHtbCopt][0])) +} + +func (x *TcHtbCopt) Serialize() []byte { + return (*(*[SizeofTcHtbCopt]byte)(unsafe.Pointer(x)))[:] +} + +type TcHtbGlob struct { + Version uint32 + Rate2Quantum uint32 + Defcls uint32 + Debug uint32 + DirectPkts uint32 +} + +func (msg *TcHtbGlob) Len() int { + return SizeofTcHtbGlob +} + +func DeserializeTcHtbGlob(b []byte) *TcHtbGlob { + return (*TcHtbGlob)(unsafe.Pointer(&b[0:SizeofTcHtbGlob][0])) +} + +func (x *TcHtbGlob) Serialize() []byte { + return (*(*[SizeofTcHtbGlob]byte)(unsafe.Pointer(x)))[:] +} + +// HFSC + +type Curve struct { + m1 uint32 + d uint32 + m2 uint32 +} + +type HfscCopt struct { + Rsc Curve + Fsc Curve + Usc Curve +} + +func (c *Curve) Attrs() (uint32, uint32, uint32) { + return c.m1, c.d, c.m2 +} + +func (c *Curve) Set(m1 uint32, d uint32, m2 uint32) { + c.m1 = m1 + c.d = d + c.m2 = m2 +} + +func DeserializeHfscCurve(b []byte) *Curve { + return &Curve{ + m1: binary.LittleEndian.Uint32(b[0:4]), + d: binary.LittleEndian.Uint32(b[4:8]), + m2: binary.LittleEndian.Uint32(b[8:12]), + } +} + +func SerializeHfscCurve(c *Curve) (b []byte) { + t := make([]byte, binary.MaxVarintLen32) + binary.LittleEndian.PutUint32(t, c.m1) + b = append(b, t[:4]...) + binary.LittleEndian.PutUint32(t, c.d) + b = append(b, t[:4]...) + binary.LittleEndian.PutUint32(t, c.m2) + b = append(b, t[:4]...) 
+ return b +} + +type TcHfscOpt struct { + Defcls uint16 +} + +func (x *TcHfscOpt) Serialize() []byte { + return (*(*[2]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_U32_UNSPEC = iota + TCA_U32_CLASSID + TCA_U32_HASH + TCA_U32_LINK + TCA_U32_DIVISOR + TCA_U32_SEL + TCA_U32_POLICE + TCA_U32_ACT + TCA_U32_INDEV + TCA_U32_PCNT + TCA_U32_MARK + TCA_U32_MAX = TCA_U32_MARK +) + +// struct tc_u32_key { +// __be32 mask; +// __be32 val; +// int off; +// int offmask; +// }; + +type TcU32Key struct { + Mask uint32 // big endian + Val uint32 // big endian + Off int32 + OffMask int32 +} + +func (msg *TcU32Key) Len() int { + return SizeofTcU32Key +} + +func DeserializeTcU32Key(b []byte) *TcU32Key { + return (*TcU32Key)(unsafe.Pointer(&b[0:SizeofTcU32Key][0])) +} + +func (x *TcU32Key) Serialize() []byte { + return (*(*[SizeofTcU32Key]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_u32_sel { +// unsigned char flags; +// unsigned char offshift; +// unsigned char nkeys; +// +// __be16 offmask; +// __u16 off; +// short offoff; +// +// short hoff; +// __be32 hmask; +// struct tc_u32_key keys[0]; +// }; + +const ( + TC_U32_TERMINAL = 1 << iota + TC_U32_OFFSET = 1 << iota + TC_U32_VAROFFSET = 1 << iota + TC_U32_EAT = 1 << iota +) + +type TcU32Sel struct { + Flags uint8 + Offshift uint8 + Nkeys uint8 + Pad uint8 + Offmask uint16 // big endian + Off uint16 + Offoff int16 + Hoff int16 + Hmask uint32 // big endian + Keys []TcU32Key +} + +func (msg *TcU32Sel) Len() int { + return SizeofTcU32Sel + int(msg.Nkeys)*SizeofTcU32Key +} + +func DeserializeTcU32Sel(b []byte) *TcU32Sel { + x := &TcU32Sel{} + copy((*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:], b) + next := SizeofTcU32Sel + var i uint8 + for i = 0; i < x.Nkeys; i++ { + x.Keys = append(x.Keys, *DeserializeTcU32Key(b[next:])) + next += SizeofTcU32Key + } + return x +} + +func (x *TcU32Sel) Serialize() []byte { + // This can't just unsafe.cast because it must iterate through keys. 
+ buf := make([]byte, x.Len()) + copy(buf, (*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:]) + next := SizeofTcU32Sel + for _, key := range x.Keys { + keyBuf := key.Serialize() + copy(buf[next:], keyBuf) + next += SizeofTcU32Key + } + return buf +} + +type TcGen struct { + Index uint32 + Capab uint32 + Action int32 + Refcnt int32 + Bindcnt int32 +} + +func (msg *TcGen) Len() int { + return SizeofTcGen +} + +func DeserializeTcGen(b []byte) *TcGen { + return (*TcGen)(unsafe.Pointer(&b[0:SizeofTcGen][0])) +} + +func (x *TcGen) Serialize() []byte { + return (*(*[SizeofTcGen]byte)(unsafe.Pointer(x)))[:] +} + +// #define tc_gen \ +// __u32 index; \ +// __u32 capab; \ +// int action; \ +// int refcnt; \ +// int bindcnt + +const ( + TCA_ACT_GACT = 5 +) + +const ( + TCA_GACT_UNSPEC = iota + TCA_GACT_TM + TCA_GACT_PARMS + TCA_GACT_PROB + TCA_GACT_MAX = TCA_GACT_PROB +) + +type TcGact TcGen + +const ( + TCA_ACT_BPF = 13 +) + +const ( + TCA_ACT_BPF_UNSPEC = iota + TCA_ACT_BPF_TM + TCA_ACT_BPF_PARMS + TCA_ACT_BPF_OPS_LEN + TCA_ACT_BPF_OPS + TCA_ACT_BPF_FD + TCA_ACT_BPF_NAME + TCA_ACT_BPF_MAX = TCA_ACT_BPF_NAME +) + +const ( + TCA_BPF_FLAG_ACT_DIRECT uint32 = 1 << iota +) + +const ( + TCA_BPF_UNSPEC = iota + TCA_BPF_ACT + TCA_BPF_POLICE + TCA_BPF_CLASSID + TCA_BPF_OPS_LEN + TCA_BPF_OPS + TCA_BPF_FD + TCA_BPF_NAME + TCA_BPF_FLAGS + TCA_BPF_FLAGS_GEN + TCA_BPF_TAG + TCA_BPF_ID + TCA_BPF_MAX = TCA_BPF_ID +) + +type TcBpf TcGen + +const ( + TCA_ACT_CONNMARK = 14 +) + +const ( + TCA_CONNMARK_UNSPEC = iota + TCA_CONNMARK_PARMS + TCA_CONNMARK_TM + TCA_CONNMARK_MAX = TCA_CONNMARK_TM +) + +// struct tc_connmark { +// tc_gen; +// __u16 zone; +// }; + +type TcConnmark struct { + TcGen + Zone uint16 +} + +func (msg *TcConnmark) Len() int { + return SizeofTcConnmark +} + +func DeserializeTcConnmark(b []byte) *TcConnmark { + return (*TcConnmark)(unsafe.Pointer(&b[0:SizeofTcConnmark][0])) +} + +func (x *TcConnmark) Serialize() []byte { + return (*(*[SizeofTcConnmark]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_CSUM_UNSPEC = iota + TCA_CSUM_PARMS + TCA_CSUM_TM + TCA_CSUM_PAD + TCA_CSUM_MAX = TCA_CSUM_PAD +) + +// struct tc_csum { +// tc_gen; +// __u32 update_flags; +// } + +type TcCsum struct { + TcGen + UpdateFlags uint32 +} + +func (msg *TcCsum) Len() int { + return SizeofTcCsum +} + +func DeserializeTcCsum(b []byte) *TcCsum { + return (*TcCsum)(unsafe.Pointer(&b[0:SizeofTcCsum][0])) +} + +func (x *TcCsum) Serialize() []byte { + return (*(*[SizeofTcCsum]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_ACT_MIRRED = 8 +) + +const ( + TCA_MIRRED_UNSPEC = iota + TCA_MIRRED_TM + TCA_MIRRED_PARMS + TCA_MIRRED_MAX = TCA_MIRRED_PARMS +) + +// struct tc_mirred { +// tc_gen; +// int eaction; /* one of IN/EGRESS_MIRROR/REDIR */ +// __u32 ifindex; /* ifindex of egress port */ +// }; + +type TcMirred struct { + TcGen + Eaction int32 + Ifindex uint32 +} + +func (msg *TcMirred) Len() int { + return SizeofTcMirred +} + +func DeserializeTcMirred(b []byte) *TcMirred { + return (*TcMirred)(unsafe.Pointer(&b[0:SizeofTcMirred][0])) +} + +func (x *TcMirred) Serialize() []byte { + return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_TUNNEL_KEY_UNSPEC = iota + TCA_TUNNEL_KEY_TM + TCA_TUNNEL_KEY_PARMS + TCA_TUNNEL_KEY_ENC_IPV4_SRC + TCA_TUNNEL_KEY_ENC_IPV4_DST + TCA_TUNNEL_KEY_ENC_IPV6_SRC + TCA_TUNNEL_KEY_ENC_IPV6_DST + TCA_TUNNEL_KEY_ENC_KEY_ID + TCA_TUNNEL_KEY_PAD + TCA_TUNNEL_KEY_ENC_DST_PORT + TCA_TUNNEL_KEY_NO_CSUM + TCA_TUNNEL_KEY_ENC_OPTS + TCA_TUNNEL_KEY_ENC_TOS + TCA_TUNNEL_KEY_ENC_TTL + 
TCA_TUNNEL_KEY_MAX +) + +type TcTunnelKey struct { + TcGen + Action int32 +} + +func (x *TcTunnelKey) Len() int { + return SizeofTcTunnelKey +} + +func DeserializeTunnelKey(b []byte) *TcTunnelKey { + return (*TcTunnelKey)(unsafe.Pointer(&b[0:SizeofTcTunnelKey][0])) +} + +func (x *TcTunnelKey) Serialize() []byte { + return (*(*[SizeofTcTunnelKey]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_SKBEDIT_UNSPEC = iota + TCA_SKBEDIT_TM + TCA_SKBEDIT_PARMS + TCA_SKBEDIT_PRIORITY + TCA_SKBEDIT_QUEUE_MAPPING + TCA_SKBEDIT_MARK + TCA_SKBEDIT_PAD + TCA_SKBEDIT_PTYPE + TCA_SKBEDIT_MASK + TCA_SKBEDIT_MAX +) + +type TcSkbEdit struct { + TcGen +} + +func (x *TcSkbEdit) Len() int { + return SizeofTcSkbEdit +} + +func DeserializeSkbEdit(b []byte) *TcSkbEdit { + return (*TcSkbEdit)(unsafe.Pointer(&b[0:SizeofTcSkbEdit][0])) +} + +func (x *TcSkbEdit) Serialize() []byte { + return (*(*[SizeofTcSkbEdit]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_police { +// __u32 index; +// int action; +// __u32 limit; +// __u32 burst; +// __u32 mtu; +// struct tc_ratespec rate; +// struct tc_ratespec peakrate; +// int refcnt; +// int bindcnt; +// __u32 capab; +// }; + +type TcPolice struct { + Index uint32 + Action int32 + Limit uint32 + Burst uint32 + Mtu uint32 + Rate TcRateSpec + PeakRate TcRateSpec + Refcnt int32 + Bindcnt int32 + Capab uint32 +} + +func (msg *TcPolice) Len() int { + return SizeofTcPolice +} + +func DeserializeTcPolice(b []byte) *TcPolice { + return (*TcPolice)(unsafe.Pointer(&b[0:SizeofTcPolice][0])) +} + +func (x *TcPolice) Serialize() []byte { + return (*(*[SizeofTcPolice]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_FW_UNSPEC = iota + TCA_FW_CLASSID + TCA_FW_POLICE + TCA_FW_INDEV + TCA_FW_ACT + TCA_FW_MASK + TCA_FW_MAX = TCA_FW_MASK +) + +const ( + TCA_MATCHALL_UNSPEC = iota + TCA_MATCHALL_CLASSID + TCA_MATCHALL_ACT + TCA_MATCHALL_FLAGS +) + +const ( + TCA_FQ_UNSPEC = iota + TCA_FQ_PLIMIT // limit of total number of packets in queue + TCA_FQ_FLOW_PLIMIT // limit of packets per flow + TCA_FQ_QUANTUM // RR quantum + TCA_FQ_INITIAL_QUANTUM // RR quantum for new flow + TCA_FQ_RATE_ENABLE // enable/disable rate limiting + TCA_FQ_FLOW_DEFAULT_RATE // obsolete do not use + TCA_FQ_FLOW_MAX_RATE // per flow max rate + TCA_FQ_BUCKETS_LOG // log2(number of buckets) + TCA_FQ_FLOW_REFILL_DELAY // flow credit refill delay in usec + TCA_FQ_ORPHAN_MASK // mask applied to orphaned skb hashes + TCA_FQ_LOW_RATE_THRESHOLD // per packet delay under this rate + TCA_FQ_CE_THRESHOLD // DCTCP-like CE-marking threshold + TCA_FQ_TIMER_SLACK // timer slack + TCA_FQ_HORIZON // time horizon in us + TCA_FQ_HORIZON_DROP // drop packets beyond horizon, or cap their EDT +) + +const ( + TCA_FQ_CODEL_UNSPEC = iota + TCA_FQ_CODEL_TARGET + TCA_FQ_CODEL_LIMIT + TCA_FQ_CODEL_INTERVAL + TCA_FQ_CODEL_ECN + TCA_FQ_CODEL_FLOWS + TCA_FQ_CODEL_QUANTUM + TCA_FQ_CODEL_CE_THRESHOLD + TCA_FQ_CODEL_DROP_BATCH_SIZE + TCA_FQ_CODEL_MEMORY_LIMIT +) + +const ( + TCA_HFSC_UNSPEC = iota + TCA_HFSC_RSC + TCA_HFSC_FSC + TCA_HFSC_USC +) + +const ( + TCA_FLOWER_UNSPEC = iota + TCA_FLOWER_CLASSID + TCA_FLOWER_INDEV + TCA_FLOWER_ACT + TCA_FLOWER_KEY_ETH_DST /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_DST_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_SRC /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_SRC_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ETH_TYPE /* be16 */ + TCA_FLOWER_KEY_IP_PROTO /* u8 */ + TCA_FLOWER_KEY_IPV4_SRC /* be32 */ + TCA_FLOWER_KEY_IPV4_SRC_MASK /* be32 */ + TCA_FLOWER_KEY_IPV4_DST /* be32 */ + TCA_FLOWER_KEY_IPV4_DST_MASK /* be32 */ + TCA_FLOWER_KEY_IPV6_SRC /* 
struct in6_addr */ + TCA_FLOWER_KEY_IPV6_SRC_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_DST /* struct in6_addr */ + TCA_FLOWER_KEY_IPV6_DST_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_TCP_SRC /* be16 */ + TCA_FLOWER_KEY_TCP_DST /* be16 */ + TCA_FLOWER_KEY_UDP_SRC /* be16 */ + TCA_FLOWER_KEY_UDP_DST /* be16 */ + + TCA_FLOWER_FLAGS + TCA_FLOWER_KEY_VLAN_ID /* be16 */ + TCA_FLOWER_KEY_VLAN_PRIO /* u8 */ + TCA_FLOWER_KEY_VLAN_ETH_TYPE /* be16 */ + + TCA_FLOWER_KEY_ENC_KEY_ID /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_SRC /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_DST /* be32 */ + TCA_FLOWER_KEY_ENC_IPV4_DST_MASK /* be32 */ + TCA_FLOWER_KEY_ENC_IPV6_SRC /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_DST /* struct in6_addr */ + TCA_FLOWER_KEY_ENC_IPV6_DST_MASK /* struct in6_addr */ + + TCA_FLOWER_KEY_TCP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_TCP_DST_MASK /* be16 */ + TCA_FLOWER_KEY_UDP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_UDP_DST_MASK /* be16 */ + TCA_FLOWER_KEY_SCTP_SRC_MASK /* be16 */ + TCA_FLOWER_KEY_SCTP_DST_MASK /* be16 */ + + TCA_FLOWER_KEY_SCTP_SRC /* be16 */ + TCA_FLOWER_KEY_SCTP_DST /* be16 */ + + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_DST_PORT /* be16 */ + TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK /* be16 */ + + TCA_FLOWER_KEY_FLAGS /* be32 */ + TCA_FLOWER_KEY_FLAGS_MASK /* be32 */ + + TCA_FLOWER_KEY_ICMPV4_CODE /* u8 */ + TCA_FLOWER_KEY_ICMPV4_CODE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV4_TYPE /* u8 */ + TCA_FLOWER_KEY_ICMPV4_TYPE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV6_CODE /* u8 */ + TCA_FLOWER_KEY_ICMPV6_CODE_MASK /* u8 */ + TCA_FLOWER_KEY_ICMPV6_TYPE /* u8 */ + TCA_FLOWER_KEY_ICMPV6_TYPE_MASK /* u8 */ + + TCA_FLOWER_KEY_ARP_SIP /* be32 */ + TCA_FLOWER_KEY_ARP_SIP_MASK /* be32 */ + TCA_FLOWER_KEY_ARP_TIP /* be32 */ + TCA_FLOWER_KEY_ARP_TIP_MASK /* be32 */ + TCA_FLOWER_KEY_ARP_OP /* u8 */ + TCA_FLOWER_KEY_ARP_OP_MASK /* u8 */ + TCA_FLOWER_KEY_ARP_SHA /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_SHA_MASK /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA /* ETH_ALEN */ + TCA_FLOWER_KEY_ARP_THA_MASK /* ETH_ALEN */ + + TCA_FLOWER_KEY_MPLS_TTL /* u8 - 8 bits */ + TCA_FLOWER_KEY_MPLS_BOS /* u8 - 1 bit */ + TCA_FLOWER_KEY_MPLS_TC /* u8 - 3 bits */ + TCA_FLOWER_KEY_MPLS_LABEL /* be32 - 20 bits */ + + TCA_FLOWER_KEY_TCP_FLAGS /* be16 */ + TCA_FLOWER_KEY_TCP_FLAGS_MASK /* be16 */ + + TCA_FLOWER_KEY_IP_TOS /* u8 */ + TCA_FLOWER_KEY_IP_TOS_MASK /* u8 */ + TCA_FLOWER_KEY_IP_TTL /* u8 */ + TCA_FLOWER_KEY_IP_TTL_MASK /* u8 */ + + TCA_FLOWER_KEY_CVLAN_ID /* be16 */ + TCA_FLOWER_KEY_CVLAN_PRIO /* u8 */ + TCA_FLOWER_KEY_CVLAN_ETH_TYPE /* be16 */ + + TCA_FLOWER_KEY_ENC_IP_TOS /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TOS_MASK /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL /* u8 */ + TCA_FLOWER_KEY_ENC_IP_TTL_MASK /* u8 */ + + TCA_FLOWER_KEY_ENC_OPTS + TCA_FLOWER_KEY_ENC_OPTS_MASK + + __TCA_FLOWER_MAX +) + +const TCA_CLS_FLAGS_SKIP_HW = 1 << 0 /* don't offload filter to HW */ +const TCA_CLS_FLAGS_SKIP_SW = 1 << 1 /* don't use filter in SW */ + +// struct tc_sfq_qopt { +// unsigned quantum; /* Bytes per round allocated to flow */ +// int perturb_period; /* Period of hash perturbation */ +// __u32 limit; /* Maximal packets in queue */ +// unsigned divisor; /* Hash divisor */ +// unsigned flows; /* Maximal number of flows */ +// }; + +type TcSfqQopt struct { + Quantum uint8 + Perturb int32 + Limit uint32 + Divisor uint8 + Flows uint8 +} + +func (x *TcSfqQopt) Len() int { + return 
SizeofTcSfqQopt +} + +func DeserializeTcSfqQopt(b []byte) *TcSfqQopt { + return (*TcSfqQopt)(unsafe.Pointer(&b[0:SizeofTcSfqQopt][0])) +} + +func (x *TcSfqQopt) Serialize() []byte { + return (*(*[SizeofTcSfqQopt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_sfqred_stats { +// __u32 prob_drop; /* Early drops, below max threshold */ +// __u32 forced_drop; /* Early drops, after max threshold */ +// __u32 prob_mark; /* Marked packets, below max threshold */ +// __u32 forced_mark; /* Marked packets, after max threshold */ +// __u32 prob_mark_head; /* Marked packets, below max threshold */ +// __u32 forced_mark_head;/* Marked packets, after max threshold */ +// }; +type TcSfqRedStats struct { + ProbDrop uint32 + ForcedDrop uint32 + ProbMark uint32 + ForcedMark uint32 + ProbMarkHead uint32 + ForcedMarkHead uint32 +} + +func (x *TcSfqRedStats) Len() int { + return SizeofTcSfqRedStats +} + +func DeserializeTcSfqRedStats(b []byte) *TcSfqRedStats { + return (*TcSfqRedStats)(unsafe.Pointer(&b[0:SizeofTcSfqRedStats][0])) +} + +func (x *TcSfqRedStats) Serialize() []byte { + return (*(*[SizeofTcSfqRedStats]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_sfq_qopt_v1 { +// struct tc_sfq_qopt v0; +// unsigned int depth; /* max number of packets per flow */ +// unsigned int headdrop; +// +// /* SFQRED parameters */ +// +// __u32 limit; /* HARD maximal flow queue length (bytes) */ +// __u32 qth_min; /* Min average length threshold (bytes) */ +// __u32 qth_max; /* Max average length threshold (bytes) */ +// unsigned char Wlog; /* log(W) */ +// unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */ +// unsigned char Scell_log; /* cell size for idle damping */ +// unsigned char flags; +// __u32 max_P; /* probability, high resolution */ +// +// /* SFQRED stats */ +// +// struct tc_sfqred_stats stats; +// }; +type TcSfqQoptV1 struct { + TcSfqQopt + Depth uint32 + HeadDrop uint32 + Limit uint32 + QthMin uint32 + QthMax uint32 + Wlog byte + Plog byte + ScellLog byte + Flags byte + MaxP uint32 + TcSfqRedStats +} + +func (x *TcSfqQoptV1) Len() int { + return SizeofTcSfqQoptV1 +} + +func DeserializeTcSfqQoptV1(b []byte) *TcSfqQoptV1 { + return (*TcSfqQoptV1)(unsafe.Pointer(&b[0:SizeofTcSfqQoptV1][0])) +} + +func (x *TcSfqQoptV1) Serialize() []byte { + return (*(*[SizeofTcSfqQoptV1]byte)(unsafe.Pointer(x)))[:] +} + +// IPProto represents Flower ip_proto attribute +type IPProto uint8 + +const ( + IPPROTO_TCP IPProto = unix.IPPROTO_TCP + IPPROTO_UDP IPProto = unix.IPPROTO_UDP + IPPROTO_SCTP IPProto = unix.IPPROTO_SCTP + IPPROTO_ICMP IPProto = unix.IPPROTO_ICMP + IPPROTO_ICMPV6 IPProto = unix.IPPROTO_ICMPV6 +) + +func (i IPProto) Serialize() []byte { + arr := make([]byte, 1) + arr[0] = byte(i) + return arr +} + +func (i IPProto) String() string { + switch i { + case IPPROTO_TCP: + return "tcp" + case IPPROTO_UDP: + return "udp" + case IPPROTO_SCTP: + return "sctp" + case IPPROTO_ICMP: + return "icmp" + case IPPROTO_ICMPV6: + return "icmpv6" + } + return fmt.Sprintf("%d", i) +} + +const ( + MaxOffs = 128 + SizeOfPeditSel = 24 + SizeOfPeditKey = 24 + + TCA_PEDIT_KEY_EX_HTYPE = 1 + TCA_PEDIT_KEY_EX_CMD = 2 +) + +const ( + TCA_PEDIT_UNSPEC = iota + TCA_PEDIT_TM + TCA_PEDIT_PARMS + TCA_PEDIT_PAD + TCA_PEDIT_PARMS_EX + TCA_PEDIT_KEYS_EX + TCA_PEDIT_KEY_EX +) + +// /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is a special case for legacy users.
It +// * means no specific header type - offset is relative to the network layer +// */ +type PeditHeaderType uint16 + +const ( + TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = iota + TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + __PEDIT_HDR_TYPE_MAX +) + +type PeditCmd uint16 + +const ( + TCA_PEDIT_KEY_EX_CMD_SET = 0 + TCA_PEDIT_KEY_EX_CMD_ADD = 1 +) + +type TcPeditSel struct { + TcGen + NKeys uint8 + Flags uint8 +} + +func DeserializeTcPeditKey(b []byte) *TcPeditKey { + return (*TcPeditKey)(unsafe.Pointer(&b[0:SizeOfPeditKey][0])) +} + +func DeserializeTcPedit(b []byte) (*TcPeditSel, []TcPeditKey) { + x := &TcPeditSel{} + copy((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(x)))[:SizeOfPeditSel], b) + + var keys []TcPeditKey + + next := SizeOfPeditKey + var i uint8 + for i = 0; i < x.NKeys; i++ { + keys = append(keys, *DeserializeTcPeditKey(b[next:])) + next += SizeOfPeditKey + } + + return x, keys +} + +type TcPeditKey struct { + Mask uint32 + Val uint32 + Off uint32 + At uint32 + OffMask uint32 + Shift uint32 +} + +type TcPeditKeyEx struct { + HeaderType PeditHeaderType + Cmd PeditCmd +} + +type TcPedit struct { + Sel TcPeditSel + Keys []TcPeditKey + KeysEx []TcPeditKeyEx + Extend uint8 +} + +func (p *TcPedit) Encode(parent *RtAttr) { + parent.AddRtAttr(TCA_ACT_KIND, ZeroTerminated("pedit")) + actOpts := parent.AddRtAttr(TCA_ACT_OPTIONS, nil) + + bbuf := bytes.NewBuffer(make([]byte, 0, int(unsafe.Sizeof(p.Sel)+unsafe.Sizeof(p.Keys)))) + + bbuf.Write((*(*[SizeOfPeditSel]byte)(unsafe.Pointer(&p.Sel)))[:]) + + for i := uint8(0); i < p.Sel.NKeys; i++ { + bbuf.Write((*(*[SizeOfPeditKey]byte)(unsafe.Pointer(&p.Keys[i])))[:]) + } + actOpts.AddRtAttr(TCA_PEDIT_PARMS_EX, bbuf.Bytes()) + + exAttrs := actOpts.AddRtAttr(int(TCA_PEDIT_KEYS_EX|NLA_F_NESTED), nil) + for i := uint8(0); i < p.Sel.NKeys; i++ { + keyAttr := exAttrs.AddRtAttr(int(TCA_PEDIT_KEY_EX|NLA_F_NESTED), nil) + + htypeBuf := make([]byte, 2) + cmdBuf := make([]byte, 2) + + NativeEndian().PutUint16(htypeBuf, uint16(p.KeysEx[i].HeaderType)) + NativeEndian().PutUint16(cmdBuf, uint16(p.KeysEx[i].Cmd)) + + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_HTYPE, htypeBuf) + keyAttr.AddRtAttr(TCA_PEDIT_KEY_EX_CMD, cmdBuf) + } +} + +func (p *TcPedit) SetEthDst(mac net.HardwareAddr) { + u32 := NativeEndian().Uint32(mac) + u16 := NativeEndian().Uint16(mac[4:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = uint32(u16) + tKey.Mask = 0xffff0000 + tKey.Off = 4 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetEthSrc(mac net.HardwareAddr) { + u16 := NativeEndian().Uint16(mac) + u32 := NativeEndian().Uint32(mac[2:]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = uint32(u16) << 16 + tKey.Mask = 0x0000ffff + tKey.Off = 4 + + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Mask = 0 + tKey.Off = 8 + + tKeyEx.HeaderType = 
TCA_PEDIT_KEY_EX_HDR_TYPE_ETH + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv6Src(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 8 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 20 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetDstIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Dst(ip) + } else { + p.SetIPv6Dst(ip) + } +} + +func (p *TcPedit) SetSrcIP(ip net.IP) { + if ip.To4() != nil { + p.SetIPv4Src(ip) + } else { + p.SetIPv6Src(ip) + } +} + +func (p *TcPedit) SetIPv6Dst(ip6 net.IP) { + u32 := NativeEndian().Uint32(ip6[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 24 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[4:8]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 28 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[8:12]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 32 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ + + u32 = NativeEndian().Uint32(ip6[12:16]) + tKey = TcPeditKey{} + tKeyEx = TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 36 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Src(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 12 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +func (p *TcPedit) SetIPv4Dst(ip net.IP) { + u32 := NativeEndian().Uint32(ip[:4]) + + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + tKey.Val = u32 + tKey.Off = 16 + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 + 
tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetDstPort sets the destination port; only TCP and UDP are supported +func (p *TcPedit) SetDstPort(dstPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(dstPort)) << 16 + tKey.Mask = 0x0000ffff + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} + +// SetSrcPort sets the source port; only TCP and UDP are supported +func (p *TcPedit) SetSrcPort(srcPort uint16, protocol uint8) { + tKey := TcPeditKey{} + tKeyEx := TcPeditKeyEx{} + + switch protocol { + case unix.IPPROTO_TCP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_TCP + case unix.IPPROTO_UDP: + tKeyEx.HeaderType = TCA_PEDIT_KEY_EX_HDR_TYPE_UDP + default: + return + } + + tKeyEx.Cmd = TCA_PEDIT_KEY_EX_CMD_SET + + tKey.Val = uint32(Swap16(srcPort)) + tKey.Mask = 0xffff0000 + p.Keys = append(p.Keys, tKey) + p.KeysEx = append(p.KeysEx, tKeyEx) + p.Sel.NKeys++ +} diff --git a/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go new file mode 100644 index 000000000..f209125df --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go @@ -0,0 +1,41 @@ +package nl + +const ( + VDPA_GENL_NAME = "vdpa" + VDPA_GENL_VERSION = 0x1 +) + +const ( + VDPA_CMD_UNSPEC = iota + VDPA_CMD_MGMTDEV_NEW + VDPA_CMD_MGMTDEV_GET /* can dump */ + VDPA_CMD_DEV_NEW + VDPA_CMD_DEV_DEL + VDPA_CMD_DEV_GET /* can dump */ + VDPA_CMD_DEV_CONFIG_GET /* can dump */ + VDPA_CMD_DEV_VSTATS_GET +) + +const ( + VDPA_ATTR_UNSPEC = iota + VDPA_ATTR_MGMTDEV_BUS_NAME + VDPA_ATTR_MGMTDEV_DEV_NAME + VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES + VDPA_ATTR_DEV_NAME + VDPA_ATTR_DEV_ID + VDPA_ATTR_DEV_VENDOR_ID + VDPA_ATTR_DEV_MAX_VQS + VDPA_ATTR_DEV_MAX_VQ_SIZE + VDPA_ATTR_DEV_MIN_VQ_SIZE + VDPA_ATTR_DEV_NET_CFG_MACADDR + VDPA_ATTR_DEV_NET_STATUS + VDPA_ATTR_DEV_NET_CFG_MAX_VQP + VDPA_ATTR_DEV_NET_CFG_MTU + VDPA_ATTR_DEV_NEGOTIATED_FEATURES + VDPA_ATTR_DEV_MGMTDEV_MAX_VQS + VDPA_ATTR_DEV_SUPPORTED_FEATURES + VDPA_ATTR_DEV_QUEUE_INDEX + VDPA_ATTR_DEV_VENDOR_ATTR_NAME + VDPA_ATTR_DEV_VENDOR_ATTR_VALUE + VDPA_ATTR_DEV_FEATURES +) diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go new file mode 100644 index 000000000..cdb318ba5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -0,0 +1,314 @@ +package nl + +import ( + "bytes" + "net" + "unsafe" +) + +// Infinity for packet and byte counts +const ( + XFRM_INF = ^uint64(0) +) + +type XfrmMsgType uint8 + +type XfrmMsg interface { + Type() XfrmMsgType +} + +// Message Types +const ( + XFRM_MSG_BASE XfrmMsgType = 0x10 + XFRM_MSG_NEWSA = 0x10 + XFRM_MSG_DELSA = 0x11 + XFRM_MSG_GETSA = 0x12 + XFRM_MSG_NEWPOLICY = 0x13 + XFRM_MSG_DELPOLICY = 0x14 + XFRM_MSG_GETPOLICY = 0x15 + XFRM_MSG_ALLOCSPI = 0x16 + XFRM_MSG_ACQUIRE = 0x17 + XFRM_MSG_EXPIRE = 0x18 + XFRM_MSG_UPDPOLICY = 0x19 + XFRM_MSG_UPDSA = 0x1a + XFRM_MSG_POLEXPIRE = 0x1b + XFRM_MSG_FLUSHSA = 0x1c + XFRM_MSG_FLUSHPOLICY = 0x1d + XFRM_MSG_NEWAE = 0x1e + XFRM_MSG_GETAE = 0x1f + XFRM_MSG_REPORT = 0x20 + XFRM_MSG_MIGRATE = 0x21 + XFRM_MSG_NEWSADINFO = 0x22 + XFRM_MSG_GETSADINFO = 0x23 +
XFRM_MSG_NEWSPDINFO = 0x24 + XFRM_MSG_GETSPDINFO = 0x25 + XFRM_MSG_MAPPING = 0x26 + XFRM_MSG_MAX = 0x26 + XFRM_NR_MSGTYPES = 0x17 +) + +// Attribute types +const ( + /* Netlink message attributes. */ + XFRMA_UNSPEC = iota + XFRMA_ALG_AUTH /* struct xfrm_algo */ + XFRMA_ALG_CRYPT /* struct xfrm_algo */ + XFRMA_ALG_COMP /* struct xfrm_algo */ + XFRMA_ENCAP /* struct xfrm_algo + struct xfrm_encap_tmpl */ + XFRMA_TMPL /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA /* struct xfrm_usersa_info */ + XFRMA_POLICY /* struct xfrm_userpolicy_info */ + XFRMA_SEC_CTX /* struct xfrm_sec_ctx */ + XFRMA_LTIME_VAL + XFRMA_REPLAY_VAL + XFRMA_REPLAY_THRESH + XFRMA_ETIMER_THRESH + XFRMA_SRCADDR /* xfrm_address_t */ + XFRMA_COADDR /* xfrm_address_t */ + XFRMA_LASTUSED /* unsigned long */ + XFRMA_POLICY_TYPE /* struct xfrm_userpolicy_type */ + XFRMA_MIGRATE + XFRMA_ALG_AEAD /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS /* struct xfrm_user_kmaddress */ + XFRMA_ALG_AUTH_TRUNC /* struct xfrm_algo_auth */ + XFRMA_MARK /* struct xfrm_mark */ + XFRMA_TFCPAD /* __u32 */ + XFRMA_REPLAY_ESN_VAL /* struct xfrm_replay_esn */ + XFRMA_SA_EXTRA_FLAGS /* __u32 */ + XFRMA_PROTO /* __u8 */ + XFRMA_ADDRESS_FILTER /* struct xfrm_address_filter */ + XFRMA_PAD + XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ + XFRMA_SET_MARK /* __u32 */ + XFRMA_SET_MARK_MASK /* __u32 */ + XFRMA_IF_ID /* __u32 */ + + XFRMA_MAX = iota - 1 +) + +const XFRMA_OUTPUT_MARK = XFRMA_SET_MARK + +const ( + SizeofXfrmAddress = 0x10 + SizeofXfrmSelector = 0x38 + SizeofXfrmLifetimeCfg = 0x40 + SizeofXfrmLifetimeCur = 0x20 + SizeofXfrmId = 0x18 + SizeofXfrmMark = 0x08 +) + +// Netlink groups +const ( + XFRMNLGRP_NONE = 0x0 + XFRMNLGRP_ACQUIRE = 0x1 + XFRMNLGRP_EXPIRE = 0x2 + XFRMNLGRP_SA = 0x3 + XFRMNLGRP_POLICY = 0x4 + XFRMNLGRP_AEVENTS = 0x5 + XFRMNLGRP_REPORT = 0x6 + XFRMNLGRP_MIGRATE = 0x7 + XFRMNLGRP_MAPPING = 0x8 + __XFRMNLGRP_MAX = 0x9 +) + +// typedef union { +// __be32 a4; +// __be32 a6[4]; +// } xfrm_address_t; + +type XfrmAddress [SizeofXfrmAddress]byte + +func (x *XfrmAddress) ToIP() net.IP { + var empty = [12]byte{} + ip := make(net.IP, net.IPv6len) + if bytes.Equal(x[4:16], empty[:]) { + ip[10] = 0xff + ip[11] = 0xff + copy(ip[12:16], x[0:4]) + } else { + copy(ip[:], x[:]) + } + return ip +} + +// family is only used when x and prefixlen are both 0 +func (x *XfrmAddress) ToIPNet(prefixlen uint8, family uint16) *net.IPNet { + empty := [SizeofXfrmAddress]byte{} + if bytes.Equal(x[:], empty[:]) && prefixlen == 0 { + if family == FAMILY_V6 { + return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(int(prefixlen), 128)} + } + return &net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(int(prefixlen), 32)} + } + ip := x.ToIP() + if GetIPFamily(ip) == FAMILY_V4 { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)} + } + return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)} +} + +func (x *XfrmAddress) FromIP(ip net.IP) { + var empty = [16]byte{} + if len(ip) < net.IPv4len { + copy(x[4:16], empty[:]) + } else if GetIPFamily(ip) == FAMILY_V4 { + copy(x[0:4], ip.To4()[0:4]) + copy(x[4:16], empty[:12]) + } else { + copy(x[0:16], ip.To16()[0:16]) + } +} + +func DeserializeXfrmAddress(b []byte) *XfrmAddress { + return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0])) +} + +func (x *XfrmAddress) Serialize() []byte { + return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(x)))[:] +} + +// struct xfrm_selector { +// xfrm_address_t daddr; +// xfrm_address_t saddr; +// __be16 dport; +// __be16 dport_mask; +// __be16 
sport; +// __be16 sport_mask; +// __u16 family; +// __u8 prefixlen_d; +// __u8 prefixlen_s; +// __u8 proto; +// int ifindex; +// __kernel_uid32_t user; +// }; + +type XfrmSelector struct { + Daddr XfrmAddress + Saddr XfrmAddress + Dport uint16 // big endian + DportMask uint16 // big endian + Sport uint16 // big endian + SportMask uint16 // big endian + Family uint16 + PrefixlenD uint8 + PrefixlenS uint8 + Proto uint8 + Pad [3]byte + Ifindex int32 + User uint32 +} + +func (msg *XfrmSelector) Len() int { + return SizeofXfrmSelector +} + +func DeserializeXfrmSelector(b []byte) *XfrmSelector { + return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0])) +} + +func (msg *XfrmSelector) Serialize() []byte { + return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_lifetime_cfg { +// __u64 soft_byte_limit; +// __u64 hard_byte_limit; +// __u64 soft_packet_limit; +// __u64 hard_packet_limit; +// __u64 soft_add_expires_seconds; +// __u64 hard_add_expires_seconds; +// __u64 soft_use_expires_seconds; +// __u64 hard_use_expires_seconds; +// }; +// + +type XfrmLifetimeCfg struct { + SoftByteLimit uint64 + HardByteLimit uint64 + SoftPacketLimit uint64 + HardPacketLimit uint64 + SoftAddExpiresSeconds uint64 + HardAddExpiresSeconds uint64 + SoftUseExpiresSeconds uint64 + HardUseExpiresSeconds uint64 +} + +func (msg *XfrmLifetimeCfg) Len() int { + return SizeofXfrmLifetimeCfg +} + +func DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg { + return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0])) +} + +func (msg *XfrmLifetimeCfg) Serialize() []byte { + return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_lifetime_cur { +// __u64 bytes; +// __u64 packets; +// __u64 add_time; +// __u64 use_time; +// }; + +type XfrmLifetimeCur struct { + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + +func (msg *XfrmLifetimeCur) Len() int { + return SizeofXfrmLifetimeCur +} + +func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur { + return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0])) +} + +func (msg *XfrmLifetimeCur) Serialize() []byte { + return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_id { +// xfrm_address_t daddr; +// __be32 spi; +// __u8 proto; +// }; + +type XfrmId struct { + Daddr XfrmAddress + Spi uint32 // big endian + Proto uint8 + Pad [3]byte +} + +func (msg *XfrmId) Len() int { + return SizeofXfrmId +} + +func DeserializeXfrmId(b []byte) *XfrmId { + return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0])) +} + +func (msg *XfrmId) Serialize() []byte { + return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:] +} + +type XfrmMark struct { + Value uint32 + Mask uint32 +} + +func (msg *XfrmMark) Len() int { + return SizeofXfrmMark +} + +func DeserializeXfrmMark(b []byte) *XfrmMark { + return (*XfrmMark)(unsafe.Pointer(&b[0:SizeofXfrmMark][0])) +} + +func (msg *XfrmMark) Serialize() []byte { + return (*(*[SizeofXfrmMark]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go new file mode 100644 index 000000000..715df4cc5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go @@ -0,0 +1,32 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUserExpire = 0xe8 +) + +// struct xfrm_user_expire { +// struct xfrm_usersa_info state; +// __u8 hard; +// }; + +type XfrmUserExpire struct { + 
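+	// XfrmUsersaInfo is the expiring SA itself; Hard is non-zero for a hard
+	// expiry (the SA is dead) and zero for a soft one (rekeying should begin).
+	// The 7 trailing pad bytes bring the struct to SizeofXfrmUserExpire (0xe8).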
XfrmUsersaInfo XfrmUsersaInfo + Hard uint8 + Pad [7]byte +} + +func (msg *XfrmUserExpire) Len() int { + return SizeofXfrmUserExpire +} + +func DeserializeXfrmUserExpire(b []byte) *XfrmUserExpire { + return (*XfrmUserExpire)(unsafe.Pointer(&b[0:SizeofXfrmUserExpire][0])) +} + +func (msg *XfrmUserExpire) Serialize() []byte { + return (*(*[SizeofXfrmUserExpire]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go new file mode 100644 index 000000000..66f7e03d2 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go @@ -0,0 +1,119 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUserpolicyId = 0x40 + SizeofXfrmUserpolicyInfo = 0xa8 + SizeofXfrmUserTmpl = 0x40 +) + +// struct xfrm_userpolicy_id { +// struct xfrm_selector sel; +// __u32 index; +// __u8 dir; +// }; +// + +type XfrmUserpolicyId struct { + Sel XfrmSelector + Index uint32 + Dir uint8 + Pad [3]byte +} + +func (msg *XfrmUserpolicyId) Len() int { + return SizeofXfrmUserpolicyId +} + +func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId { + return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0])) +} + +func (msg *XfrmUserpolicyId) Serialize() []byte { + return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_userpolicy_info { +// struct xfrm_selector sel; +// struct xfrm_lifetime_cfg lft; +// struct xfrm_lifetime_cur curlft; +// __u32 priority; +// __u32 index; +// __u8 dir; +// __u8 action; +// #define XFRM_POLICY_ALLOW 0 +// #define XFRM_POLICY_BLOCK 1 +// __u8 flags; +// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */ +// /* Automatically expand selector to include matching ICMP payloads. 
*/ +// #define XFRM_POLICY_ICMP 2 +// __u8 share; +// }; + +type XfrmUserpolicyInfo struct { + Sel XfrmSelector + Lft XfrmLifetimeCfg + Curlft XfrmLifetimeCur + Priority uint32 + Index uint32 + Dir uint8 + Action uint8 + Flags uint8 + Share uint8 + Pad [4]byte +} + +func (msg *XfrmUserpolicyInfo) Len() int { + return SizeofXfrmUserpolicyInfo +} + +func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo { + return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0])) +} + +func (msg *XfrmUserpolicyInfo) Serialize() []byte { + return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_user_tmpl { +// struct xfrm_id id; +// __u16 family; +// xfrm_address_t saddr; +// __u32 reqid; +// __u8 mode; +// __u8 share; +// __u8 optional; +// __u32 aalgos; +// __u32 ealgos; +// __u32 calgos; +// } + +type XfrmUserTmpl struct { + XfrmId XfrmId + Family uint16 + Pad1 [2]byte + Saddr XfrmAddress + Reqid uint32 + Mode uint8 + Share uint8 + Optional uint8 + Pad2 byte + Aalgos uint32 + Ealgos uint32 + Calgos uint32 +} + +func (msg *XfrmUserTmpl) Len() int { + return SizeofXfrmUserTmpl +} + +func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl { + return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0])) +} + +func (msg *XfrmUserTmpl) Serialize() []byte { + return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go new file mode 100644 index 000000000..e8920b9a6 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go @@ -0,0 +1,361 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUsersaId = 0x18 + SizeofXfrmStats = 0x0c + SizeofXfrmUsersaInfo = 0xe0 + SizeofXfrmUserSpiInfo = 0xe8 + SizeofXfrmAlgo = 0x44 + SizeofXfrmAlgoAuth = 0x48 + SizeofXfrmAlgoAEAD = 0x48 + SizeofXfrmEncapTmpl = 0x18 + SizeofXfrmUsersaFlush = 0x1 + SizeofXfrmReplayStateEsn = 0x18 + SizeofXfrmReplayState = 0x0c +) + +const ( + XFRM_STATE_NOECN = 1 + XFRM_STATE_DECAP_DSCP = 2 + XFRM_STATE_NOPMTUDISC = 4 + XFRM_STATE_WILDRECV = 8 + XFRM_STATE_ICMP = 16 + XFRM_STATE_AF_UNSPEC = 32 + XFRM_STATE_ALIGN4 = 64 + XFRM_STATE_ESN = 128 +) + +const ( + XFRM_SA_XFLAG_DONT_ENCAP_DSCP = 1 + XFRM_SA_XFLAG_OSEQ_MAY_WRAP = 2 +) + +// struct xfrm_usersa_id { +// xfrm_address_t daddr; +// __be32 spi; +// __u16 family; +// __u8 proto; +// }; + +type XfrmUsersaId struct { + Daddr XfrmAddress + Spi uint32 // big endian + Family uint16 + Proto uint8 + Pad byte +} + +func (msg *XfrmUsersaId) Len() int { + return SizeofXfrmUsersaId +} + +func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId { + return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0])) +} + +func (msg *XfrmUsersaId) Serialize() []byte { + return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_stats { +// __u32 replay_window; +// __u32 replay; +// __u32 integrity_failed; +// }; + +type XfrmStats struct { + ReplayWindow uint32 + Replay uint32 + IntegrityFailed uint32 +} + +func (msg *XfrmStats) Len() int { + return SizeofXfrmStats +} + +func DeserializeXfrmStats(b []byte) *XfrmStats { + return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0])) +} + +func (msg *XfrmStats) Serialize() []byte { + return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_usersa_info { +// struct xfrm_selector sel; +// struct xfrm_id id; +// xfrm_address_t saddr; +// struct xfrm_lifetime_cfg lft; +// struct 
xfrm_lifetime_cur curlft; +// struct xfrm_stats stats; +// __u32 seq; +// __u32 reqid; +// __u16 family; +// __u8 mode; /* XFRM_MODE_xxx */ +// __u8 replay_window; +// __u8 flags; +// #define XFRM_STATE_NOECN 1 +// #define XFRM_STATE_DECAP_DSCP 2 +// #define XFRM_STATE_NOPMTUDISC 4 +// #define XFRM_STATE_WILDRECV 8 +// #define XFRM_STATE_ICMP 16 +// #define XFRM_STATE_AF_UNSPEC 32 +// #define XFRM_STATE_ALIGN4 64 +// #define XFRM_STATE_ESN 128 +// }; +// +// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +// #define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 +// + +type XfrmUsersaInfo struct { + Sel XfrmSelector + Id XfrmId + Saddr XfrmAddress + Lft XfrmLifetimeCfg + Curlft XfrmLifetimeCur + Stats XfrmStats + Seq uint32 + Reqid uint32 + Family uint16 + Mode uint8 + ReplayWindow uint8 + Flags uint8 + Pad [7]byte +} + +func (msg *XfrmUsersaInfo) Len() int { + return SizeofXfrmUsersaInfo +} + +func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo { + return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0])) +} + +func (msg *XfrmUsersaInfo) Serialize() []byte { + return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_userspi_info { +// struct xfrm_usersa_info info; +// __u32 min; +// __u32 max; +// }; + +type XfrmUserSpiInfo struct { + XfrmUsersaInfo XfrmUsersaInfo + Min uint32 + Max uint32 +} + +func (msg *XfrmUserSpiInfo) Len() int { + return SizeofXfrmUserSpiInfo +} + +func DeserializeXfrmUserSpiInfo(b []byte) *XfrmUserSpiInfo { + return (*XfrmUserSpiInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserSpiInfo][0])) +} + +func (msg *XfrmUserSpiInfo) Serialize() []byte { + return (*(*[SizeofXfrmUserSpiInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_algo { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// char alg_key[0]; +// }; + +type XfrmAlgo struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgo) Len() int { + return SizeofXfrmAlgo + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgo(b []byte) *XfrmAlgo { + ret := XfrmAlgo{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgKey = b[68:ret.Len()] + return &ret +} + +func (msg *XfrmAlgo) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_algo_auth { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// unsigned int alg_trunc_len; /* in bits */ +// char alg_key[0]; +// }; + +type XfrmAlgoAuth struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgTruncLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgoAuth) Len() int { + return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth { + ret := XfrmAlgoAuth{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68])) + ret.AlgKey = b[72:ret.Len()] + return &ret +} + +func (msg *XfrmAlgoAuth) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:]) + copy(b[72:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_algo_aead { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// unsigned int alg_icv_len; /* in bits */ +// char alg_key[0]; +// } + +type XfrmAlgoAEAD struct { + AlgName 
[64]byte + AlgKeyLen uint32 + AlgICVLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgoAEAD) Len() int { + return SizeofXfrmAlgoAEAD + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgoAEAD(b []byte) *XfrmAlgoAEAD { + ret := XfrmAlgoAEAD{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgICVLen = *(*uint32)(unsafe.Pointer(&b[68])) + ret.AlgKey = b[72:ret.Len()] + return &ret +} + +func (msg *XfrmAlgoAEAD) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgICVLen)))[:]) + copy(b[72:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_encap_tmpl { +// __u16 encap_type; +// __be16 encap_sport; +// __be16 encap_dport; +// xfrm_address_t encap_oa; +// }; + +type XfrmEncapTmpl struct { + EncapType uint16 + EncapSport uint16 // big endian + EncapDport uint16 // big endian + Pad [2]byte + EncapOa XfrmAddress +} + +func (msg *XfrmEncapTmpl) Len() int { + return SizeofXfrmEncapTmpl +} + +func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl { + return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0])) +} + +func (msg *XfrmEncapTmpl) Serialize() []byte { + return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_usersa_flush { +// __u8 proto; +// }; + +type XfrmUsersaFlush struct { + Proto uint8 +} + +func (msg *XfrmUsersaFlush) Len() int { + return SizeofXfrmUsersaFlush +} + +func DeserializeXfrmUsersaFlush(b []byte) *XfrmUsersaFlush { + return (*XfrmUsersaFlush)(unsafe.Pointer(&b[0:SizeofXfrmUsersaFlush][0])) +} + +func (msg *XfrmUsersaFlush) Serialize() []byte { + return (*(*[SizeofXfrmUsersaFlush]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_replay_state_esn { +// unsigned int bmp_len; +// __u32 oseq; +// __u32 seq; +// __u32 oseq_hi; +// __u32 seq_hi; +// __u32 replay_window; +// __u32 bmp[0]; +// }; + +type XfrmReplayStateEsn struct { + BmpLen uint32 + OSeq uint32 + Seq uint32 + OSeqHi uint32 + SeqHi uint32 + ReplayWindow uint32 + Bmp []uint32 +} + +func (msg *XfrmReplayStateEsn) Serialize() []byte { + // We deliberately do not pass Bmp, as it gets set by the kernel. 
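+	// SizeofXfrmReplayStateEsn (0x18) covers only the six fixed uint32 fields,
+	// so the Bmp slice header is never copied out; the kernel allocates the
+	// BmpLen-sized bitmap itself when the state is installed.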
+ return (*(*[SizeofXfrmReplayStateEsn]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_replay_state { +// __u32 oseq; +// __u32 seq; +// __u32 bitmap; +// }; + +type XfrmReplayState struct { + OSeq uint32 + Seq uint32 + BitMap uint32 +} + +func DeserializeXfrmReplayState(b []byte) *XfrmReplayState { + return (*XfrmReplayState)(unsafe.Pointer(&b[0:SizeofXfrmReplayState][0])) +} + +func (msg *XfrmReplayState) Serialize() []byte { + return (*(*[SizeofXfrmReplayState]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/order.go b/vendor/github.com/vishvananda/netlink/order.go new file mode 100644 index 000000000..e28e153a1 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/order.go @@ -0,0 +1,32 @@ +package netlink + +import ( + "encoding/binary" + + "github.com/vishvananda/netlink/nl" +) + +var ( + native = nl.NativeEndian() + networkOrder = binary.BigEndian +) + +func htonl(val uint32) []byte { + bytes := make([]byte, 4) + binary.BigEndian.PutUint32(bytes, val) + return bytes +} + +func htons(val uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, val) + return bytes +} + +func ntohl(buf []byte) uint32 { + return binary.BigEndian.Uint32(buf) +} + +func ntohs(buf []byte) uint16 { + return binary.BigEndian.Uint16(buf) +} diff --git a/vendor/github.com/vishvananda/netlink/proc_event_linux.go b/vendor/github.com/vishvananda/netlink/proc_event_linux.go new file mode 100644 index 000000000..ac8762bd8 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/proc_event_linux.go @@ -0,0 +1,208 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +const CN_IDX_PROC = 0x1 + +const ( + PROC_EVENT_NONE = 0x00000000 + PROC_EVENT_FORK = 0x00000001 + PROC_EVENT_EXEC = 0x00000002 + PROC_EVENT_UID = 0x00000004 + PROC_EVENT_GID = 0x00000040 + PROC_EVENT_SID = 0x00000080 + PROC_EVENT_PTRACE = 0x00000100 + PROC_EVENT_COMM = 0x00000200 + PROC_EVENT_COREDUMP = 0x40000000 + PROC_EVENT_EXIT = 0x80000000 +) + +const ( + CN_VAL_PROC = 0x1 + PROC_CN_MCAST_LISTEN = 0x1 +) + +type ProcEventMsg interface { + Pid() uint32 + Tgid() uint32 +} + +type ProcEventHeader struct { + What uint32 + CPU uint32 + Timestamp uint64 +} + +type ProcEvent struct { + ProcEventHeader + Msg ProcEventMsg +} + +func (pe *ProcEvent) setHeader(h ProcEventHeader) { + pe.What = h.What + pe.CPU = h.CPU + pe.Timestamp = h.Timestamp +} + +type ExitProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + ExitCode uint32 + ExitSignal uint32 + ParentPid uint32 + ParentTgid uint32 +} + +func (e *ExitProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExitProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ExecProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 +} + +func (e *ExecProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *ExecProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +type ForkProcEvent struct { + ParentPid uint32 + ParentTgid uint32 + ChildPid uint32 + ChildTgid uint32 +} + +func (e *ForkProcEvent) Pid() uint32 { + return e.ParentPid +} + +func (e *ForkProcEvent) Tgid() uint32 { + return e.ParentTgid +} + +type CommProcEvent struct { + ProcessPid uint32 + ProcessTgid uint32 + Comm [16]byte +} + +func (e *CommProcEvent) Pid() uint32 { + return e.ProcessPid +} + +func (e *CommProcEvent) Tgid() uint32 { + return e.ProcessTgid +} + +func ProcEventMonitor(ch chan<- ProcEvent, done <-chan 
struct{}, errorChan chan<- error) error { + h, err := NewHandle() + if err != nil { + return err + } + defer h.Delete() + + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_CONNECTOR, CN_IDX_PROC) + if err != nil { + return err + } + + var nlmsg nl.NetlinkRequest + + nlmsg.Pid = uint32(os.Getpid()) + nlmsg.Type = unix.NLMSG_DONE + nlmsg.Len = uint32(unix.SizeofNlMsghdr) + + cm := nl.NewCnMsg(CN_IDX_PROC, CN_VAL_PROC, PROC_CN_MCAST_LISTEN) + nlmsg.AddData(cm) + + s.Send(&nlmsg) + + if done != nil { + go func() { + <-done + s.Close() + }() + } + + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + errorChan <- err + return + } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } + + for _, m := range msgs { + e := parseNetlinkMessage(m) + if e != nil { + ch <- *e + } + } + + } + }() + + return nil +} + +func parseNetlinkMessage(m syscall.NetlinkMessage) *ProcEvent { + if m.Header.Type == unix.NLMSG_DONE { + buf := bytes.NewBuffer(m.Data) + msg := &nl.CnMsg{} + hdr := &ProcEventHeader{} + binary.Read(buf, nl.NativeEndian(), msg) + binary.Read(buf, nl.NativeEndian(), hdr) + + pe := &ProcEvent{} + pe.setHeader(*hdr) + switch hdr.What { + case PROC_EVENT_EXIT: + event := &ExitProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_FORK: + event := &ForkProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_EXEC: + event := &ExecProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + case PROC_EVENT_COMM: + event := &CommProcEvent{} + binary.Read(buf, nl.NativeEndian(), event) + pe.Msg = event + return pe + } + return nil + } + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go new file mode 100644 index 000000000..0163cba3a --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/protinfo.go @@ -0,0 +1,70 @@ +package netlink + +import ( + "strings" +) + +// Protinfo represents bridge flags from netlink. 
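+// A minimal sketch of consuming ProcEventMonitor from proc_event_linux.go
+// above. Illustrative only: it assumes root privileges (NETLINK_CONNECTOR
+// process events are privileged) and imports "log" and "time".
+//
+//	events := make(chan netlink.ProcEvent)
+//	errs := make(chan error)
+//	done := make(chan struct{})
+//	if err := netlink.ProcEventMonitor(events, done, errs); err != nil {
+//		log.Fatal(err)
+//	}
+//	go func() {
+//		for e := range events {
+//			if e.What == netlink.PROC_EVENT_EXEC {
+//				log.Printf("exec: pid=%d tgid=%d", e.Msg.Pid(), e.Msg.Tgid())
+//			}
+//		}
+//	}()
+//	time.Sleep(10 * time.Second)
+//	close(done) // closes the netlink socket, ending the event stream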
+type Protinfo struct { + Hairpin bool + Guard bool + FastLeave bool + RootBlock bool + Learning bool + Flood bool + ProxyArp bool + ProxyArpWiFi bool + Isolated bool + NeighSuppress bool +} + +// String returns a list of enabled flags +func (prot *Protinfo) String() string { + if prot == nil { + return "" + } + + var boolStrings []string + if prot.Hairpin { + boolStrings = append(boolStrings, "Hairpin") + } + if prot.Guard { + boolStrings = append(boolStrings, "Guard") + } + if prot.FastLeave { + boolStrings = append(boolStrings, "FastLeave") + } + if prot.RootBlock { + boolStrings = append(boolStrings, "RootBlock") + } + if prot.Learning { + boolStrings = append(boolStrings, "Learning") + } + if prot.Flood { + boolStrings = append(boolStrings, "Flood") + } + if prot.ProxyArp { + boolStrings = append(boolStrings, "ProxyArp") + } + if prot.ProxyArpWiFi { + boolStrings = append(boolStrings, "ProxyArpWiFi") + } + if prot.Isolated { + boolStrings = append(boolStrings, "Isolated") + } + if prot.NeighSuppress { + boolStrings = append(boolStrings, "NeighSuppress") + } + return strings.Join(boolStrings, " ") +} + +func boolToByte(x bool) []byte { + if x { + return []byte{1} + } + return []byte{0} +} + +func byteToBool(x byte) bool { + return uint8(x) != 0 +} diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go new file mode 100644 index 000000000..1ba25d3cd --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -0,0 +1,78 @@ +package netlink + +import ( + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +func LinkGetProtinfo(link Link) (Protinfo, error) { + return pkgHandle.LinkGetProtinfo(link) +} + +func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { + base := link.Attrs() + h.ensureIndex(base) + var pi Protinfo + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req.AddData(msg) + msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) + if err != nil { + return pi, err + } + + for _, m := range msgs { + ans := nl.DeserializeIfInfomsg(m) + if int(ans.Index) != base.Index { + continue + } + attrs, err := nl.ParseRouteAttr(m[ans.Len():]) + if err != nil { + return pi, err + } + for _, attr := range attrs { + if attr.Attr.Type != unix.IFLA_PROTINFO|unix.NLA_F_NESTED { + continue + } + infos, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return pi, err + } + pi = parseProtinfo(infos) + + return pi, nil + } + } + return pi, fmt.Errorf("Device with index %d not found", base.Index) +} + +func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) { + for _, info := range infos { + switch info.Attr.Type { + case nl.IFLA_BRPORT_MODE: + pi.Hairpin = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_GUARD: + pi.Guard = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_FAST_LEAVE: + pi.FastLeave = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROTECT: + pi.RootBlock = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_LEARNING: + pi.Learning = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_UNICAST_FLOOD: + pi.Flood = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROXYARP: + pi.ProxyArp = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROXYARP_WIFI: + pi.ProxyArpWiFi = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_ISOLATED: + pi.Isolated = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_NEIGH_SUPPRESS: + pi.NeighSuppress = byteToBool(info.Value[0]) + } + } + return +} diff --git 
a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go new file mode 100644 index 000000000..067743d39 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -0,0 +1,396 @@ +package netlink + +import ( + "fmt" + "math" +) + +const ( + HANDLE_NONE = 0 + HANDLE_INGRESS = 0xFFFFFFF1 + HANDLE_CLSACT = HANDLE_INGRESS + HANDLE_ROOT = 0xFFFFFFFF + PRIORITY_MAP_LEN = 16 +) +const ( + HANDLE_MIN_INGRESS = 0xFFFFFFF2 + HANDLE_MIN_EGRESS = 0xFFFFFFF3 +) + +const ( + HORIZON_DROP_POLICY_CAP = 0 + HORIZON_DROP_POLICY_DROP = 1 + HORIZON_DROP_POLICY_DEFAULT = 255 +) + +type Qdisc interface { + Attrs() *QdiscAttrs + Type() string +} + +type QdiscStatistics ClassStatistics + +// QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link, +// has a handle, a parent and a refcnt. The root qdisc of a device should +// have parent == HANDLE_ROOT. +type QdiscAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Refcnt uint32 // read only + IngressBlock *uint32 + Statistics *QdiscStatistics +} + +func (q QdiscAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt) +} + +func MakeHandle(major, minor uint16) uint32 { + return (uint32(major) << 16) | uint32(minor) +} + +func MajorMinor(handle uint32) (uint16, uint16) { + return uint16((handle & 0xFFFF0000) >> 16), uint16(handle & 0x0000FFFFF) +} + +func HandleStr(handle uint32) string { + switch handle { + case HANDLE_NONE: + return "none" + case HANDLE_INGRESS: + return "ingress" + case HANDLE_ROOT: + return "root" + default: + major, minor := MajorMinor(handle) + return fmt.Sprintf("%x:%x", major, minor) + } +} + +func Percentage2u32(percentage float32) uint32 { + // FIXME this is most likely not the best way to convert from % to uint32 + if percentage == 100 { + return math.MaxUint32 + } + return uint32(math.MaxUint32 * (percentage / 100)) +} + +// PfifoFast is the default qdisc created by the kernel if one has not +// been defined for the interface +type PfifoFast struct { + QdiscAttrs + Bands uint8 + PriorityMap [PRIORITY_MAP_LEN]uint8 +} + +func (qdisc *PfifoFast) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *PfifoFast) Type() string { + return "pfifo_fast" +} + +// Prio is a basic qdisc that works just like PfifoFast +type Prio struct { + QdiscAttrs + Bands uint8 + PriorityMap [PRIORITY_MAP_LEN]uint8 +} + +func NewPrio(attrs QdiscAttrs) *Prio { + return &Prio{ + QdiscAttrs: attrs, + Bands: 3, + PriorityMap: [PRIORITY_MAP_LEN]uint8{1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, + } +} + +func (qdisc *Prio) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Prio) Type() string { + return "prio" +} + +// Htb is a classful qdisc that rate limits based on tokens +type Htb struct { + QdiscAttrs + Version uint32 + Rate2Quantum uint32 + Defcls uint32 + Debug uint32 + DirectPkts uint32 + DirectQlen *uint32 +} + +func NewHtb(attrs QdiscAttrs) *Htb { + return &Htb{ + QdiscAttrs: attrs, + Version: 3, + Defcls: 0, + Rate2Quantum: 10, + Debug: 0, + DirectPkts: 0, + DirectQlen: nil, + } +} + +func (qdisc *Htb) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Htb) Type() string { + return "htb" +} + +// Netem is a classless qdisc that rate limits based on tokens + +type NetemQdiscAttrs struct { + Latency uint32 // in us + DelayCorr float32 // in % + Limit uint32 + Loss float32 // in % + LossCorr float32 // in % + Gap uint32 + 
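+	// The float32 fields tagged "in %" take values from 0 to 100; NewNetem in
+	// qdisc_linux.go maps them onto the kernel's 0..2^32-1 probability scale
+	// with Percentage2u32 before building the Netem qdisc.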
Duplicate float32 // in % + DuplicateCorr float32 // in % + Jitter uint32 // in us + ReorderProb float32 // in % + ReorderCorr float32 // in % + CorruptProb float32 // in % + CorruptCorr float32 // in % + Rate64 uint64 +} + +func (q NetemQdiscAttrs) String() string { + return fmt.Sprintf( + "{Latency: %d, Limit: %d, Loss: %f, Gap: %d, Duplicate: %f, Jitter: %d}", + q.Latency, q.Limit, q.Loss, q.Gap, q.Duplicate, q.Jitter, + ) +} + +type Netem struct { + QdiscAttrs + Latency uint32 + DelayCorr uint32 + Limit uint32 + Loss uint32 + LossCorr uint32 + Gap uint32 + Duplicate uint32 + DuplicateCorr uint32 + Jitter uint32 + ReorderProb uint32 + ReorderCorr uint32 + CorruptProb uint32 + CorruptCorr uint32 + Rate64 uint64 +} + +func (netem *Netem) String() string { + return fmt.Sprintf( + "{Latency: %v, Limit: %v, Loss: %v, Gap: %v, Duplicate: %v, Jitter: %v}", + netem.Latency, netem.Limit, netem.Loss, netem.Gap, netem.Duplicate, netem.Jitter, + ) +} + +func (qdisc *Netem) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Netem) Type() string { + return "netem" +} + +// Tbf is a classless qdisc that rate limits based on tokens +type Tbf struct { + QdiscAttrs + Rate uint64 + Limit uint32 + Buffer uint32 + Peakrate uint64 + Minburst uint32 + // TODO: handle other settings +} + +func (qdisc *Tbf) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Tbf) Type() string { + return "tbf" +} + +// Clsact is a qdisc for adding filters +type Clsact struct { + QdiscAttrs +} + +func (qdisc *Clsact) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Clsact) Type() string { + return "clsact" +} + +// Ingress is a qdisc for adding ingress filters +type Ingress struct { + QdiscAttrs +} + +func (qdisc *Ingress) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Ingress) Type() string { + return "ingress" +} + +// GenericQdisc qdiscs represent types that are not currently understood +// by this netlink library. +type GenericQdisc struct { + QdiscAttrs + QdiscType string +} + +func (qdisc *GenericQdisc) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *GenericQdisc) Type() string { + return qdisc.QdiscType +} + +type Hfsc struct { + QdiscAttrs + Defcls uint16 +} + +func NewHfsc(attrs QdiscAttrs) *Hfsc { + return &Hfsc{ + QdiscAttrs: attrs, + Defcls: 1, + } +} + +func (hfsc *Hfsc) Attrs() *QdiscAttrs { + return &hfsc.QdiscAttrs +} + +func (hfsc *Hfsc) Type() string { + return "hfsc" +} + +func (hfsc *Hfsc) String() string { + return fmt.Sprintf( + "{%v -- default: %d}", + hfsc.Attrs(), hfsc.Defcls, + ) +} + +// Fq is a classless packet scheduler meant to be mostly used for locally generated traffic. 
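+// A minimal sketch of installing an fq root qdisc with QdiscAdd from
+// qdisc_linux.go; "eth0" is a hypothetical interface and LinkByName comes
+// from elsewhere in this package.
+//
+//	link, err := netlink.LinkByName("eth0")
+//	if err != nil {
+//		return err
+//	}
+//	fq := netlink.NewFq(netlink.QdiscAttrs{
+//		LinkIndex: link.Attrs().Index,
+//		Handle:    netlink.MakeHandle(1, 0),
+//		Parent:    netlink.HANDLE_ROOT,
+//	})
+//	return netlink.QdiscAdd(fq)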
+type Fq struct { + QdiscAttrs + PacketLimit uint32 + FlowPacketLimit uint32 + // In bytes + Quantum uint32 + InitialQuantum uint32 + // called RateEnable under the hood + Pacing uint32 + FlowDefaultRate uint32 + FlowMaxRate uint32 + // called BucketsLog under the hood + Buckets uint32 + FlowRefillDelay uint32 + LowRateThreshold uint32 + Horizon uint32 + HorizonDropPolicy uint8 +} + +func (fq *Fq) String() string { + return fmt.Sprintf( + "{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v, Horizon: %v, HorizonDropPolicy: %v}", + fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold, fq.Horizon, fq.HorizonDropPolicy, + ) +} + +func NewFq(attrs QdiscAttrs) *Fq { + return &Fq{ + QdiscAttrs: attrs, + Pacing: 1, + HorizonDropPolicy: HORIZON_DROP_POLICY_DEFAULT, + } +} + +func (qdisc *Fq) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Fq) Type() string { + return "fq" +} + +// FQ_Codel (Fair Queuing Controlled Delay) is queuing discipline that combines Fair Queuing with the CoDel AQM scheme. +type FqCodel struct { + QdiscAttrs + Target uint32 + Limit uint32 + Interval uint32 + ECN uint32 + Flows uint32 + Quantum uint32 + CEThreshold uint32 + DropBatchSize uint32 + MemoryLimit uint32 +} + +func (fqcodel *FqCodel) String() string { + return fmt.Sprintf( + "{%v -- Target: %v, Limit: %v, Interval: %v, ECM: %v, Flows: %v, Quantum: %v}", + fqcodel.Attrs(), fqcodel.Target, fqcodel.Limit, fqcodel.Interval, fqcodel.ECN, fqcodel.Flows, fqcodel.Quantum, + ) +} + +func NewFqCodel(attrs QdiscAttrs) *FqCodel { + return &FqCodel{ + QdiscAttrs: attrs, + ECN: 1, + } +} + +func (qdisc *FqCodel) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *FqCodel) Type() string { + return "fq_codel" +} + +type Sfq struct { + QdiscAttrs + // TODO: Only the simplified options for SFQ are handled here. Support for the extended one can be added later. 
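+	// Quantum is in bytes (0 lets the kernel default to the interface MTU) and
+	// Perturb is the hash-perturbation interval in seconds; qdiscPayload in
+	// qdisc_linux.go packs both into TcSfqQoptV1.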
+ Quantum uint8 + Perturb uint8 + Limit uint32 + Divisor uint8 +} + +func (sfq *Sfq) String() string { + return fmt.Sprintf( + "{%v -- Quantum: %v, Perturb: %v, Limit: %v, Divisor: %v}", + sfq.Attrs(), sfq.Quantum, sfq.Perturb, sfq.Limit, sfq.Divisor, + ) +} + +func (qdisc *Sfq) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Sfq) Type() string { + return "sfq" +} diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go new file mode 100644 index 000000000..e732ae3bd --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -0,0 +1,788 @@ +package netlink + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// NOTE function is here because it uses other linux functions +func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { + var limit uint32 = 1000 + var lossCorr, delayCorr, duplicateCorr uint32 + var reorderProb, reorderCorr uint32 + var corruptProb, corruptCorr uint32 + var rate64 uint64 + + latency := nattrs.Latency + loss := Percentage2u32(nattrs.Loss) + gap := nattrs.Gap + duplicate := Percentage2u32(nattrs.Duplicate) + jitter := nattrs.Jitter + + // Correlation + if latency > 0 && jitter > 0 { + delayCorr = Percentage2u32(nattrs.DelayCorr) + } + if loss > 0 { + lossCorr = Percentage2u32(nattrs.LossCorr) + } + if duplicate > 0 { + duplicateCorr = Percentage2u32(nattrs.DuplicateCorr) + } + // FIXME should validate values(like loss/duplicate are percentages...) + latency = time2Tick(latency) + + if nattrs.Limit != 0 { + limit = nattrs.Limit + } + // Jitter is only value if latency is > 0 + if latency > 0 { + jitter = time2Tick(jitter) + } + + reorderProb = Percentage2u32(nattrs.ReorderProb) + reorderCorr = Percentage2u32(nattrs.ReorderCorr) + + if reorderProb > 0 { + // ERROR if lantency == 0 + if gap == 0 { + gap = 1 + } + } + + corruptProb = Percentage2u32(nattrs.CorruptProb) + corruptCorr = Percentage2u32(nattrs.CorruptCorr) + rate64 = nattrs.Rate64 + + return &Netem{ + QdiscAttrs: attrs, + Latency: latency, + DelayCorr: delayCorr, + Limit: limit, + Loss: loss, + LossCorr: lossCorr, + Gap: gap, + Duplicate: duplicate, + DuplicateCorr: duplicateCorr, + Jitter: jitter, + ReorderProb: reorderProb, + ReorderCorr: reorderCorr, + CorruptProb: corruptProb, + CorruptCorr: corruptCorr, + Rate64: rate64, + } +} + +// QdiscDel will delete a qdisc from the system. +// Equivalent to: `tc qdisc del $qdisc` +func QdiscDel(qdisc Qdisc) error { + return pkgHandle.QdiscDel(qdisc) +} + +// QdiscDel will delete a qdisc from the system. +// Equivalent to: `tc qdisc del $qdisc` +func (h *Handle) QdiscDel(qdisc Qdisc) error { + return h.qdiscModify(unix.RTM_DELQDISC, 0, qdisc) +} + +// QdiscChange will change a qdisc in place +// Equivalent to: `tc qdisc change $qdisc` +// The parent and handle MUST NOT be changed. +func QdiscChange(qdisc Qdisc) error { + return pkgHandle.QdiscChange(qdisc) +} + +// QdiscChange will change a qdisc in place +// Equivalent to: `tc qdisc change $qdisc` +// The parent and handle MUST NOT be changed. +func (h *Handle) QdiscChange(qdisc Qdisc) error { + return h.qdiscModify(unix.RTM_NEWQDISC, 0, qdisc) +} + +// QdiscReplace will replace a qdisc to the system. +// Equivalent to: `tc qdisc replace $qdisc` +// The handle MUST change. +func QdiscReplace(qdisc Qdisc) error { + return pkgHandle.QdiscReplace(qdisc) +} + +// QdiscReplace will replace a qdisc to the system. 
+// Equivalent to: `tc qdisc replace $qdisc` +// The handle MUST change. +func (h *Handle) QdiscReplace(qdisc Qdisc) error { + return h.qdiscModify( + unix.RTM_NEWQDISC, + unix.NLM_F_CREATE|unix.NLM_F_REPLACE, + qdisc) +} + +// QdiscAdd will add a qdisc to the system. +// Equivalent to: `tc qdisc add $qdisc` +func QdiscAdd(qdisc Qdisc) error { + return pkgHandle.QdiscAdd(qdisc) +} + +// QdiscAdd will add a qdisc to the system. +// Equivalent to: `tc qdisc add $qdisc` +func (h *Handle) QdiscAdd(qdisc Qdisc) error { + return h.qdiscModify( + unix.RTM_NEWQDISC, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, + qdisc) +} + +func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + base := qdisc.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + } + req.AddData(msg) + + // When deleting don't bother building the rest of the netlink payload + if cmd != unix.RTM_DELQDISC { + if err := qdiscPayload(req, qdisc); err != nil { + return err + } + } + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { + + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) + if qdisc.Attrs().IngressBlock != nil { + req.AddData(nl.NewRtAttr(nl.TCA_INGRESS_BLOCK, nl.Uint32Attr(*qdisc.Attrs().IngressBlock))) + } + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + + switch qdisc := qdisc.(type) { + case *Prio: + tcmap := nl.TcPrioMap{ + Bands: int32(qdisc.Bands), + Priomap: qdisc.PriorityMap, + } + options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize()) + case *Tbf: + opt := nl.TcTbfQopt{} + opt.Rate.Rate = uint32(qdisc.Rate) + opt.Peakrate.Rate = uint32(qdisc.Peakrate) + opt.Limit = qdisc.Limit + opt.Buffer = qdisc.Buffer + options.AddRtAttr(nl.TCA_TBF_PARMS, opt.Serialize()) + if qdisc.Rate >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate)) + } + if qdisc.Peakrate >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate)) + } + if qdisc.Peakrate > 0 { + options.AddRtAttr(nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst)) + } + case *Htb: + opt := nl.TcHtbGlob{} + opt.Version = qdisc.Version + opt.Rate2Quantum = qdisc.Rate2Quantum + opt.Defcls = qdisc.Defcls + // TODO: Handle Debug properly. 
For now default to 0 + opt.Debug = qdisc.Debug + opt.DirectPkts = qdisc.DirectPkts + options.AddRtAttr(nl.TCA_HTB_INIT, opt.Serialize()) + if qdisc.DirectQlen != nil { + options.AddRtAttr(nl.TCA_HTB_DIRECT_QLEN, nl.Uint32Attr(*qdisc.DirectQlen)) + } + case *Hfsc: + opt := nl.TcHfscOpt{} + opt.Defcls = qdisc.Defcls + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) + case *Netem: + opt := nl.TcNetemQopt{} + opt.Latency = qdisc.Latency + opt.Limit = qdisc.Limit + opt.Loss = qdisc.Loss + opt.Gap = qdisc.Gap + opt.Duplicate = qdisc.Duplicate + opt.Jitter = qdisc.Jitter + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) + // Correlation + corr := nl.TcNetemCorr{} + corr.DelayCorr = qdisc.DelayCorr + corr.LossCorr = qdisc.LossCorr + corr.DupCorr = qdisc.DuplicateCorr + + if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 { + options.AddRtAttr(nl.TCA_NETEM_CORR, corr.Serialize()) + } + // Corruption + corruption := nl.TcNetemCorrupt{} + corruption.Probability = qdisc.CorruptProb + corruption.Correlation = qdisc.CorruptCorr + if corruption.Probability > 0 { + options.AddRtAttr(nl.TCA_NETEM_CORRUPT, corruption.Serialize()) + } + // Reorder + reorder := nl.TcNetemReorder{} + reorder.Probability = qdisc.ReorderProb + reorder.Correlation = qdisc.ReorderCorr + if reorder.Probability > 0 { + options.AddRtAttr(nl.TCA_NETEM_REORDER, reorder.Serialize()) + } + // Rate + if qdisc.Rate64 > 0 { + rate := nl.TcNetemRate{} + if qdisc.Rate64 >= uint64(1<<32) { + options.AddRtAttr(nl.TCA_NETEM_RATE64, nl.Uint64Attr(qdisc.Rate64)) + rate.Rate = ^uint32(0) + } else { + rate.Rate = uint32(qdisc.Rate64) + } + options.AddRtAttr(nl.TCA_NETEM_RATE, rate.Serialize()) + } + case *Clsact: + options = nil + case *Ingress: + // ingress filters must use the proper handle + if qdisc.Attrs().Parent != HANDLE_INGRESS { + return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") + } + case *FqCodel: + options.AddRtAttr(nl.TCA_FQ_CODEL_ECN, nl.Uint32Attr((uint32(qdisc.ECN)))) + if qdisc.Limit > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_LIMIT, nl.Uint32Attr((uint32(qdisc.Limit)))) + } + if qdisc.Interval > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_INTERVAL, nl.Uint32Attr((uint32(qdisc.Interval)))) + } + if qdisc.Flows > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_FLOWS, nl.Uint32Attr((uint32(qdisc.Flows)))) + } + if qdisc.Quantum > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) + } + if qdisc.CEThreshold > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_CE_THRESHOLD, nl.Uint32Attr(qdisc.CEThreshold)) + } + if qdisc.DropBatchSize > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_DROP_BATCH_SIZE, nl.Uint32Attr(qdisc.DropBatchSize)) + } + if qdisc.MemoryLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_CODEL_MEMORY_LIMIT, nl.Uint32Attr(qdisc.MemoryLimit)) + } + case *Fq: + options.AddRtAttr(nl.TCA_FQ_RATE_ENABLE, nl.Uint32Attr((uint32(qdisc.Pacing)))) + + if qdisc.Buckets > 0 { + options.AddRtAttr(nl.TCA_FQ_BUCKETS_LOG, nl.Uint32Attr((uint32(qdisc.Buckets)))) + } + if qdisc.PacketLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_PLIMIT, nl.Uint32Attr((uint32(qdisc.PacketLimit)))) + } + if qdisc.LowRateThreshold > 0 { + options.AddRtAttr(nl.TCA_FQ_LOW_RATE_THRESHOLD, nl.Uint32Attr((uint32(qdisc.LowRateThreshold)))) + } + if qdisc.Quantum > 0 { + options.AddRtAttr(nl.TCA_FQ_QUANTUM, nl.Uint32Attr((uint32(qdisc.Quantum)))) + } + if qdisc.InitialQuantum > 0 { + options.AddRtAttr(nl.TCA_FQ_INITIAL_QUANTUM, nl.Uint32Attr((uint32(qdisc.InitialQuantum)))) + } + if qdisc.FlowRefillDelay > 0 { + 
options.AddRtAttr(nl.TCA_FQ_FLOW_REFILL_DELAY, nl.Uint32Attr((uint32(qdisc.FlowRefillDelay)))) + } + if qdisc.FlowPacketLimit > 0 { + options.AddRtAttr(nl.TCA_FQ_FLOW_PLIMIT, nl.Uint32Attr((uint32(qdisc.FlowPacketLimit)))) + } + if qdisc.FlowMaxRate > 0 { + options.AddRtAttr(nl.TCA_FQ_FLOW_MAX_RATE, nl.Uint32Attr((uint32(qdisc.FlowMaxRate)))) + } + if qdisc.FlowDefaultRate > 0 { + options.AddRtAttr(nl.TCA_FQ_FLOW_DEFAULT_RATE, nl.Uint32Attr((uint32(qdisc.FlowDefaultRate)))) + } + if qdisc.Horizon > 0 { + options.AddRtAttr(nl.TCA_FQ_HORIZON, nl.Uint32Attr(qdisc.Horizon)) + } + if qdisc.HorizonDropPolicy != HORIZON_DROP_POLICY_DEFAULT { + options.AddRtAttr(nl.TCA_FQ_HORIZON_DROP, nl.Uint8Attr(qdisc.HorizonDropPolicy)) + } + case *Sfq: + opt := nl.TcSfqQoptV1{} + opt.TcSfqQopt.Quantum = qdisc.Quantum + opt.TcSfqQopt.Perturb = int32(qdisc.Perturb) + opt.TcSfqQopt.Limit = qdisc.Limit + opt.TcSfqQopt.Divisor = qdisc.Divisor + + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) + default: + options = nil + } + + if options != nil { + req.AddData(options) + } + return nil +} + +// QdiscList gets a list of qdiscs in the system. +// Equivalent to: `tc qdisc show`. +// The list can be filtered by link. +func QdiscList(link Link) ([]Qdisc, error) { + return pkgHandle.QdiscList(link) +} + +// QdiscList gets a list of qdiscs in the system. +// Equivalent to: `tc qdisc show`. +// The list can be filtered by link. +func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { + req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + } + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) + if err != nil { + return nil, err + } + + var res []Qdisc + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip qdiscs from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + base := QdiscAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + Refcnt: msg.Info, + } + var qdisc Qdisc + qdiscType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + qdiscType = string(attr.Value[:len(attr.Value)-1]) + switch qdiscType { + case "pfifo_fast": + qdisc = &PfifoFast{} + case "prio": + qdisc = &Prio{} + case "tbf": + qdisc = &Tbf{} + case "ingress": + qdisc = &Ingress{} + case "htb": + qdisc = &Htb{} + case "fq": + qdisc = &Fq{} + case "hfsc": + qdisc = &Hfsc{} + case "fq_codel": + qdisc = &FqCodel{} + case "netem": + qdisc = &Netem{} + case "sfq": + qdisc = &Sfq{} + case "clsact": + qdisc = &Clsact{} + default: + qdisc = &GenericQdisc{QdiscType: qdiscType} + } + case nl.TCA_OPTIONS: + switch qdiscType { + case "pfifo_fast": + // pfifo returns TcPrioMap directly without wrapping it in rtattr + if err := parsePfifoFastData(qdisc, attr.Value); err != nil { + return nil, err + } + case "prio": + // prio returns TcPrioMap directly without wrapping it in rtattr + if err := parsePrioData(qdisc, attr.Value); err != nil { + return nil, err + } + case "tbf": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseTbfData(qdisc, data); err != nil { + return nil, err + } + case "hfsc": + if err := parseHfscData(qdisc, attr.Value); err != nil { + return nil, err + } + case "htb": + 
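+					// tbf, htb, fq and fq_codel nest their options as rtattrs, so the value
+					// must be walked with nl.ParseRouteAttr before decoding; pfifo_fast, prio,
+					// hfsc, netem and sfq instead return a packed struct that is deserialized
+					// directly from the raw bytes.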
data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseHtbData(qdisc, data); err != nil { + return nil, err + } + case "fq": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseFqData(qdisc, data); err != nil { + return nil, err + } + case "fq_codel": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseFqCodelData(qdisc, data); err != nil { + return nil, err + } + case "netem": + if err := parseNetemData(qdisc, attr.Value); err != nil { + return nil, err + } + case "sfq": + if err := parseSfqData(qdisc, attr.Value); err != nil { + return nil, err + } + + // no options for ingress + } + case nl.TCA_INGRESS_BLOCK: + ingressBlock := new(uint32) + *ingressBlock = native.Uint32(attr.Value) + base.IngressBlock = ingressBlock + case nl.TCA_STATS: + s, err := parseTcStats(attr.Value) + if err != nil { + return nil, err + } + base.Statistics = (*QdiscStatistics)(s) + case nl.TCA_STATS2: + s, err := parseTcStats2(attr.Value) + if err != nil { + return nil, err + } + base.Statistics = (*QdiscStatistics)(s) + } + } + *qdisc.Attrs() = base + res = append(res, qdisc) + } + + return res, nil +} + +func parsePfifoFastData(qdisc Qdisc, value []byte) error { + pfifo := qdisc.(*PfifoFast) + tcmap := nl.DeserializeTcPrioMap(value) + pfifo.PriorityMap = tcmap.Priomap + pfifo.Bands = uint8(tcmap.Bands) + return nil +} + +func parsePrioData(qdisc Qdisc, value []byte) error { + prio := qdisc.(*Prio) + tcmap := nl.DeserializeTcPrioMap(value) + prio.PriorityMap = tcmap.Priomap + prio.Bands = uint8(tcmap.Bands) + return nil +} + +func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + htb := qdisc.(*Htb) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_HTB_INIT: + opt := nl.DeserializeTcHtbGlob(datum.Value) + htb.Version = opt.Version + htb.Rate2Quantum = opt.Rate2Quantum + htb.Defcls = opt.Defcls + htb.Debug = opt.Debug + htb.DirectPkts = opt.DirectPkts + case nl.TCA_HTB_DIRECT_QLEN: + directQlen := native.Uint32(datum.Value) + htb.DirectQlen = &directQlen + } + } + return nil +} + +func parseFqCodelData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + fqCodel := qdisc.(*FqCodel) + for _, datum := range data { + + switch datum.Attr.Type { + case nl.TCA_FQ_CODEL_TARGET: + fqCodel.Target = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_LIMIT: + fqCodel.Limit = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_INTERVAL: + fqCodel.Interval = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_ECN: + fqCodel.ECN = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_FLOWS: + fqCodel.Flows = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_QUANTUM: + fqCodel.Quantum = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_CE_THRESHOLD: + fqCodel.CEThreshold = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_DROP_BATCH_SIZE: + fqCodel.DropBatchSize = native.Uint32(datum.Value) + case nl.TCA_FQ_CODEL_MEMORY_LIMIT: + fqCodel.MemoryLimit = native.Uint32(datum.Value) + } + } + return nil +} + +func parseHfscData(qdisc Qdisc, data []byte) error { + Hfsc := qdisc.(*Hfsc) + Hfsc.Defcls = native.Uint16(data) + return nil +} + +func parseFqData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + fq := qdisc.(*Fq) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_FQ_BUCKETS_LOG: + fq.Buckets = native.Uint32(datum.Value) + case nl.TCA_FQ_LOW_RATE_THRESHOLD: + fq.LowRateThreshold = native.Uint32(datum.Value) + case 
nl.TCA_FQ_QUANTUM: + fq.Quantum = native.Uint32(datum.Value) + case nl.TCA_FQ_RATE_ENABLE: + fq.Pacing = native.Uint32(datum.Value) + case nl.TCA_FQ_INITIAL_QUANTUM: + fq.InitialQuantum = native.Uint32(datum.Value) + case nl.TCA_FQ_ORPHAN_MASK: + // TODO + case nl.TCA_FQ_FLOW_REFILL_DELAY: + fq.FlowRefillDelay = native.Uint32(datum.Value) + case nl.TCA_FQ_FLOW_PLIMIT: + fq.FlowPacketLimit = native.Uint32(datum.Value) + case nl.TCA_FQ_PLIMIT: + fq.PacketLimit = native.Uint32(datum.Value) + case nl.TCA_FQ_FLOW_MAX_RATE: + fq.FlowMaxRate = native.Uint32(datum.Value) + case nl.TCA_FQ_FLOW_DEFAULT_RATE: + fq.FlowDefaultRate = native.Uint32(datum.Value) + case nl.TCA_FQ_HORIZON: + fq.Horizon = native.Uint32(datum.Value) + case nl.TCA_FQ_HORIZON_DROP: + fq.HorizonDropPolicy = datum.Value[0] + + } + } + return nil +} + +func parseNetemData(qdisc Qdisc, value []byte) error { + netem := qdisc.(*Netem) + opt := nl.DeserializeTcNetemQopt(value) + netem.Latency = opt.Latency + netem.Limit = opt.Limit + netem.Loss = opt.Loss + netem.Gap = opt.Gap + netem.Duplicate = opt.Duplicate + netem.Jitter = opt.Jitter + data, err := nl.ParseRouteAttr(value[nl.SizeofTcNetemQopt:]) + if err != nil { + return err + } + var rate *nl.TcNetemRate + var rate64 uint64 + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_NETEM_CORR: + opt := nl.DeserializeTcNetemCorr(datum.Value) + netem.DelayCorr = opt.DelayCorr + netem.LossCorr = opt.LossCorr + netem.DuplicateCorr = opt.DupCorr + case nl.TCA_NETEM_CORRUPT: + opt := nl.DeserializeTcNetemCorrupt(datum.Value) + netem.CorruptProb = opt.Probability + netem.CorruptCorr = opt.Correlation + case nl.TCA_NETEM_REORDER: + opt := nl.DeserializeTcNetemReorder(datum.Value) + netem.ReorderProb = opt.Probability + netem.ReorderCorr = opt.Correlation + case nl.TCA_NETEM_RATE: + rate = nl.DeserializeTcNetemRate(datum.Value) + case nl.TCA_NETEM_RATE64: + rate64 = native.Uint64(datum.Value) + } + } + if rate != nil { + netem.Rate64 = uint64(rate.Rate) + if rate64 > 0 { + netem.Rate64 = rate64 + } + } + + return nil +} + +func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + tbf := qdisc.(*Tbf) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_TBF_PARMS: + opt := nl.DeserializeTcTbfQopt(datum.Value) + tbf.Rate = uint64(opt.Rate.Rate) + tbf.Peakrate = uint64(opt.Peakrate.Rate) + tbf.Limit = opt.Limit + tbf.Buffer = opt.Buffer + case nl.TCA_TBF_RATE64: + tbf.Rate = native.Uint64(datum.Value[0:8]) + case nl.TCA_TBF_PRATE64: + tbf.Peakrate = native.Uint64(datum.Value[0:8]) + case nl.TCA_TBF_PBURST: + tbf.Minburst = native.Uint32(datum.Value[0:4]) + } + } + return nil +} + +func parseSfqData(qdisc Qdisc, value []byte) error { + sfq := qdisc.(*Sfq) + opt := nl.DeserializeTcSfqQoptV1(value) + sfq.Quantum = opt.TcSfqQopt.Quantum + sfq.Perturb = uint8(opt.TcSfqQopt.Perturb) + sfq.Limit = opt.TcSfqQopt.Limit + sfq.Divisor = opt.TcSfqQopt.Divisor + + return nil +} + +const ( + TIME_UNITS_PER_SEC = 1000000 +) + +var ( + tickInUsec float64 + clockFactor float64 + hz float64 + + // Without this, the go race detector may report races. 
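+	// (initClock lazily parses /proc/net/psched on first use, so concurrent
+	// first calls to TickInUsec, ClockFactor or Hz would otherwise race on the
+	// package-level tickInUsec, clockFactor and hz.)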
+ initClockMutex sync.Mutex +) + +func initClock() { + data, err := ioutil.ReadFile("/proc/net/psched") + if err != nil { + return + } + parts := strings.Split(strings.TrimSpace(string(data)), " ") + if len(parts) < 4 { + return + } + var vals [4]uint64 + for i := range vals { + val, err := strconv.ParseUint(parts[i], 16, 32) + if err != nil { + return + } + vals[i] = val + } + // compatibility + if vals[2] == 1000000000 { + vals[0] = vals[1] + } + clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC + tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor + if vals[2] == 1000000 { + // ref https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/lib/utils.c#n963 + hz = float64(vals[3]) + } else { + hz = 100 + } +} + +func TickInUsec() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() + if tickInUsec == 0.0 { + initClock() + } + return tickInUsec +} + +func ClockFactor() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() + if clockFactor == 0.0 { + initClock() + } + return clockFactor +} + +func Hz() float64 { + initClockMutex.Lock() + defer initClockMutex.Unlock() + if hz == 0.0 { + initClock() + } + return hz +} + +func time2Tick(time uint32) uint32 { + return uint32(float64(time) * TickInUsec()) +} + +func tick2Time(tick uint32) uint32 { + return uint32(float64(tick) / TickInUsec()) +} + +func time2Ktime(time uint32) uint32 { + return uint32(float64(time) * ClockFactor()) +} + +func ktime2Time(ktime uint32) uint32 { + return uint32(float64(ktime) / ClockFactor()) +} + +func burst(rate uint64, buffer uint32) uint32 { + return uint32(float64(rate) * float64(tick2Time(buffer)) / TIME_UNITS_PER_SEC) +} + +func latency(rate uint64, limit, buffer uint32) float64 { + return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer)) +} + +func Xmittime(rate uint64, size uint32) uint32 { + // https://git.kernel.org/pub/scm/network/iproute2/iproute2.git/tree/tc/tc_core.c#n62 + return time2Tick(uint32(TIME_UNITS_PER_SEC * (float64(size) / float64(rate)))) +} + +func Xmitsize(rate uint64, ticks uint32) uint32 { + return uint32((float64(rate) * float64(tick2Time(ticks))) / TIME_UNITS_PER_SEC) +} diff --git a/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go new file mode 100644 index 000000000..036399db6 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -0,0 +1,331 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// LinkAttrs represents data shared by most link types +type RdmaLinkAttrs struct { + Index uint32 + Name string + FirmwareVersion string + NodeGuid string + SysImageGuid string +} + +// Link represents a rdma device from netlink. 
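+// A minimal sketch of enumerating RDMA devices with the helpers defined
+// below; "mlx5_0" is a hypothetical device name.
+//
+//	links, err := netlink.RdmaLinkList()
+//	if err != nil {
+//		return err
+//	}
+//	for _, l := range links {
+//		fmt.Printf("%s fw=%s guid=%s\n", l.Attrs.Name, l.Attrs.FirmwareVersion, l.Attrs.NodeGuid)
+//	}
+//	if link, err := netlink.RdmaLinkByName("mlx5_0"); err == nil {
+//		err = netlink.RdmaLinkSetName(link, "rdma0")
+//	}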
+type RdmaLink struct { + Attrs RdmaLinkAttrs +} + +func getProtoField(clientType int, op int) int { + return ((clientType << nl.RDMA_NL_GET_CLIENT_SHIFT) | op) +} + +func uint64ToGuidString(guid uint64) string { + //Convert to byte array + sysGuidBytes := new(bytes.Buffer) + binary.Write(sysGuidBytes, binary.LittleEndian, guid) + + //Convert to HardwareAddr + sysGuidNet := net.HardwareAddr(sysGuidBytes.Bytes()) + + //Get the String + return sysGuidNet.String() +} + +func executeOneGetRdmaLink(data []byte) (*RdmaLink, error) { + + link := RdmaLink{} + + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_ATTR_DEV_INDEX: + var Index uint32 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &Index) + link.Attrs.Index = Index + case nl.RDMA_NLDEV_ATTR_DEV_NAME: + link.Attrs.Name = string(value[0 : len-1]) + case nl.RDMA_NLDEV_ATTR_FW_VERSION: + link.Attrs.FirmwareVersion = string(value[0 : len-1]) + case nl.RDMA_NLDEV_ATTR_NODE_GUID: + var guid uint64 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &guid) + link.Attrs.NodeGuid = uint64ToGuidString(guid) + case nl.RDMA_NLDEV_ATTR_SYS_IMAGE_GUID: + var sysGuid uint64 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &sysGuid) + link.Attrs.SysImageGuid = uint64ToGuidString(sysGuid) + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return &link, nil +} + +func execRdmaSetLink(req *nl.NetlinkRequest) error { + + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkList gets a list of RDMA link devices. +// Equivalent to: `rdma dev show` +func RdmaLinkList() ([]*RdmaLink, error) { + return pkgHandle.RdmaLinkList() +} + +// RdmaLinkList gets a list of RDMA link devices. +// Equivalent to: `rdma dev show` +func (h *Handle) RdmaLinkList() ([]*RdmaLink, error) { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return nil, err + } + + var res []*RdmaLink + for _, m := range msgs { + link, err := executeOneGetRdmaLink(m) + if err != nil { + return nil, err + } + res = append(res, link) + } + + return res, nil +} + +// RdmaLinkByName finds a link by name and returns a pointer to the object if +// found and nil error, otherwise returns error code. +func RdmaLinkByName(name string) (*RdmaLink, error) { + return pkgHandle.RdmaLinkByName(name) +} + +// RdmaLinkByName finds a link by name and returns a pointer to the object if +// found and nil error, otherwise returns error code. +func (h *Handle) RdmaLinkByName(name string) (*RdmaLink, error) { + links, err := h.RdmaLinkList() + if err != nil { + return nil, err + } + for _, link := range links { + if link.Attrs.Name == name { + return link, nil + } + } + return nil, fmt.Errorf("Rdma device %v not found", name) +} + +// RdmaLinkSetName sets the name of the rdma link device. Return nil on success +// or error otherwise. +// Equivalent to: `rdma dev set $old_devname name $name` +func RdmaLinkSetName(link *RdmaLink, name string) error { + return pkgHandle.RdmaLinkSetName(link, name) +} + +// RdmaLinkSetName sets the name of the rdma link device. Return nil on success +// or error otherwise. 
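+// For example (illustrative sketch; both device names are assumptions):
+//
+//	if link, err := netlink.RdmaLinkByName("rocep8s0f0"); err == nil {
+//		_ = netlink.RdmaLinkSetName(link, "rdma0")
+//	}
+//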
+// Equivalent to: `rdma dev set $old_devname name $name` +func (h *Handle) RdmaLinkSetName(link *RdmaLink, name string) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs.Index)) + data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b) + req.AddData(data) + + b = make([]byte, len(name)+1) + copy(b, name) + data = nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_NAME, b) + req.AddData(data) + + return execRdmaSetLink(req) +} + +func netnsModeToString(mode uint8) string { + switch mode { + case 0: + return "exclusive" + case 1: + return "shared" + default: + return "unknown" + } +} + +func executeOneGetRdmaNetnsMode(data []byte) (string, error) { + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE: + var mode uint8 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &mode) + return netnsModeToString(mode), nil + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return "", fmt.Errorf("Invalid netns mode") +} + +// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem +// Returns mode string and error status as nil on success or returns error +// otherwise. +// Equivalent to: `rdma system show netns' +func RdmaSystemGetNetnsMode() (string, error) { + return pkgHandle.RdmaSystemGetNetnsMode() +} + +// RdmaSystemGetNetnsMode gets the net namespace mode for RDMA subsystem +// Returns mode string and error status as nil on success or returns error +// otherwise. +// Equivalent to: `rdma system show netns' +func (h *Handle) RdmaSystemGetNetnsMode() (string, error) { + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return "", err + } + if len(msgs) == 0 { + return "", fmt.Errorf("No valid response from kernel") + } + return executeOneGetRdmaNetnsMode(msgs[0]) +} + +func netnsModeStringToUint8(mode string) (uint8, error) { + switch mode { + case "exclusive": + return 0, nil + case "shared": + return 1, nil + default: + return 0, fmt.Errorf("Invalid mode; %q", mode) + } +} + +// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem +// Returns nil on success or appropriate error code. +// Equivalent to: `rdma system set netns { shared | exclusive }' +func RdmaSystemSetNetnsMode(NewMode string) error { + return pkgHandle.RdmaSystemSetNetnsMode(NewMode) +} + +// RdmaSystemSetNetnsMode sets the net namespace mode for RDMA subsystem +// Returns nil on success or appropriate error code. +// Equivalent to: `rdma system set netns { shared | exclusive }' +func (h *Handle) RdmaSystemSetNetnsMode(NewMode string) error { + value, err := netnsModeStringToUint8(NewMode) + if err != nil { + return err + } + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SYS_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + data := nl.NewRtAttr(nl.RDMA_NLDEV_SYS_ATTR_NETNS_MODE, []byte{value}) + req.AddData(data) + + _, err = req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. 
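+// For example (illustrative sketch; the device name and namespace path are
+// assumptions):
+//
+//	link, _ := netlink.RdmaLinkByName("mlx5_0")
+//	ns, _ := os.Open("/var/run/netns/blue")
+//	defer ns.Close()
+//	_ = netlink.RdmaLinkSetNsFd(link, uint32(ns.Fd()))
+//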
+// Similar to: `rdma dev set $dev netns $ns` +func RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { + return pkgHandle.RdmaLinkSetNsFd(link, fd) +} + +// RdmaLinkSetNsFd puts the RDMA device into a new network namespace. The +// fd must be an open file descriptor to a network namespace. +// Similar to: `rdma dev set $dev netns $ns` +func (h *Handle) RdmaLinkSetNsFd(link *RdmaLink, fd uint32) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_SET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, + nl.Uint32Attr(link.Attrs.Index)) + req.AddData(data) + + data = nl.NewRtAttr(nl.RDMA_NLDEV_NET_NS_FD, nl.Uint32Attr(fd)) + req.AddData(data) + + return execRdmaSetLink(req) +} + +// RdmaLinkDel deletes an rdma link +// +// Similar to: rdma link delete NAME +// REF: https://man7.org/linux/man-pages/man8/rdma-link.8.html +func RdmaLinkDel(name string) error { + return pkgHandle.RdmaLinkDel(name) +} + +// RdmaLinkDel deletes an rdma link. +func (h *Handle) RdmaLinkDel(name string) error { + link, err := h.RdmaLinkByName(name) + if err != nil { + return err + } + + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_DELLINK) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + b := make([]byte, 4) + native.PutUint32(b, link.Attrs.Index) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b)) + + _, err = req.Execute(unix.NETLINK_RDMA, 0) + return err +} + +// RdmaLinkAdd adds an rdma link for the specified type to the network device. +// Similar to: rdma link add NAME type TYPE netdev NETDEV +// NAME - specifies the new name of the rdma link to add +// TYPE - specifies which rdma type to use. Link types: +// rxe - Soft RoCE driver +// siw - Soft iWARP driver +// NETDEV - specifies the network device to which the link is bound +// +// REF: https://man7.org/linux/man-pages/man8/rdma-link.8.html +func RdmaLinkAdd(linkName, linkType, netdev string) error { + return pkgHandle.RdmaLinkAdd(linkName, linkType, netdev) +} + +// RdmaLinkAdd adds an rdma link for the specified type to the network device. +func (h *Handle) RdmaLinkAdd(linkName string, linkType string, netdev string) error { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_NEWLINK) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK) + + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_NAME, nl.ZeroTerminated(linkName))) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_LINK_TYPE, nl.ZeroTerminated(linkType))) + req.AddData(nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_NDEV_NAME, nl.ZeroTerminated(netdev))) + _, err := req.Execute(unix.NETLINK_RDMA, 0) + return err +} diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go new file mode 100644 index 000000000..1b4555d5c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -0,0 +1,234 @@ +package netlink + +import ( + "fmt" + "net" + "strings" +) + +// Scope is an enum representing a route scope. 
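+// It mirrors the kernel's rtnetlink RT_SCOPE_* values; see the SCOPE_*
+// constants in route_linux.go for the mapping.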
+type Scope uint8 + +type NextHopFlag int + +const ( + RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota) + RT_FILTER_SCOPE + RT_FILTER_TYPE + RT_FILTER_TOS + RT_FILTER_IIF + RT_FILTER_OIF + RT_FILTER_DST + RT_FILTER_SRC + RT_FILTER_GW + RT_FILTER_TABLE + RT_FILTER_HOPLIMIT + RT_FILTER_PRIORITY + RT_FILTER_MARK + RT_FILTER_MASK + RT_FILTER_REALM +) + +type Destination interface { + Family() int + Decode([]byte) error + Encode() ([]byte, error) + String() string + Equal(Destination) bool +} + +type Encap interface { + Type() int + Decode([]byte) error + Encode() ([]byte, error) + String() string + Equal(Encap) bool +} + +//Protocol describe what was the originator of the route +type RouteProtocol int + +// Route represents a netlink route. +type Route struct { + LinkIndex int + ILinkIndex int + Scope Scope + Dst *net.IPNet + Src net.IP + Gw net.IP + MultiPath []*NexthopInfo + Protocol RouteProtocol + Priority int + Family int + Table int + Type int + Tos int + Flags int + MPLSDst *int + NewDst Destination + Encap Encap + Via Destination + Realm int + MTU int + Window int + Rtt int + RttVar int + Ssthresh int + Cwnd int + AdvMSS int + Reordering int + Hoplimit int + InitCwnd int + Features int + RtoMin int + InitRwnd int + QuickACK int + Congctl string + FastOpenNoCookie int +} + +func (r Route) String() string { + elems := []string{} + if len(r.MultiPath) == 0 { + elems = append(elems, fmt.Sprintf("Ifindex: %d", r.LinkIndex)) + } + if r.MPLSDst != nil { + elems = append(elems, fmt.Sprintf("Dst: %d", r.MPLSDst)) + } else { + elems = append(elems, fmt.Sprintf("Dst: %s", r.Dst)) + } + if r.NewDst != nil { + elems = append(elems, fmt.Sprintf("NewDst: %s", r.NewDst)) + } + if r.Encap != nil { + elems = append(elems, fmt.Sprintf("Encap: %s", r.Encap)) + } + if r.Via != nil { + elems = append(elems, fmt.Sprintf("Via: %s", r.Via)) + } + elems = append(elems, fmt.Sprintf("Src: %s", r.Src)) + if len(r.MultiPath) > 0 { + elems = append(elems, fmt.Sprintf("Gw: %s", r.MultiPath)) + } else { + elems = append(elems, fmt.Sprintf("Gw: %s", r.Gw)) + } + elems = append(elems, fmt.Sprintf("Flags: %s", r.ListFlags())) + elems = append(elems, fmt.Sprintf("Table: %d", r.Table)) + elems = append(elems, fmt.Sprintf("Realm: %d", r.Realm)) + return fmt.Sprintf("{%s}", strings.Join(elems, " ")) +} + +func (r Route) Equal(x Route) bool { + return r.LinkIndex == x.LinkIndex && + r.ILinkIndex == x.ILinkIndex && + r.Scope == x.Scope && + ipNetEqual(r.Dst, x.Dst) && + r.Src.Equal(x.Src) && + r.Gw.Equal(x.Gw) && + nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) && + r.Protocol == x.Protocol && + r.Priority == x.Priority && + r.Realm == x.Realm && + r.Table == x.Table && + r.Type == x.Type && + r.Tos == x.Tos && + r.Hoplimit == x.Hoplimit && + r.Flags == x.Flags && + (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) && + (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) && + (r.Via == x.Via || (r.Via != nil && r.Via.Equal(x.Via))) && + (r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap))) +} + +func (r *Route) SetFlag(flag NextHopFlag) { + r.Flags |= int(flag) +} + +func (r *Route) ClearFlag(flag NextHopFlag) { + r.Flags &^= int(flag) +} + +type flagString struct { + f NextHopFlag + s string +} + +// RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE + +// NlFlags is only non-zero for RTM_NEWROUTE, the following flags can be set: +// - unix.NLM_F_REPLACE - Replace existing matching config object with this request +// - 
unix.NLM_F_EXCL - Don't replace the config object if it already exists +// - unix.NLM_F_CREATE - Create config object if it doesn't already exist +// - unix.NLM_F_APPEND - Add to the end of the object list +type RouteUpdate struct { + Type uint16 + NlFlags uint16 + Route +} + +type NexthopInfo struct { + LinkIndex int + Hops int + Gw net.IP + Flags int + NewDst Destination + Encap Encap + Via Destination +} + +func (n *NexthopInfo) String() string { + elems := []string{} + elems = append(elems, fmt.Sprintf("Ifindex: %d", n.LinkIndex)) + if n.NewDst != nil { + elems = append(elems, fmt.Sprintf("NewDst: %s", n.NewDst)) + } + if n.Encap != nil { + elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap)) + } + if n.Via != nil { + elems = append(elems, fmt.Sprintf("Via: %s", n.Via)) + } + elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1)) + elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw)) + elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags())) + return fmt.Sprintf("{%s}", strings.Join(elems, " ")) +} + +func (n NexthopInfo) Equal(x NexthopInfo) bool { + return n.LinkIndex == x.LinkIndex && + n.Hops == x.Hops && + n.Gw.Equal(x.Gw) && + n.Flags == x.Flags && + (n.NewDst == x.NewDst || (n.NewDst != nil && n.NewDst.Equal(x.NewDst))) && + (n.Encap == x.Encap || (n.Encap != nil && n.Encap.Equal(x.Encap))) +} + +type nexthopInfoSlice []*NexthopInfo + +func (n nexthopInfoSlice) Equal(x []*NexthopInfo) bool { + if len(n) != len(x) { + return false + } + for i := range n { + if n[i] == nil || x[i] == nil { + return false + } + if !n[i].Equal(*x[i]) { + return false + } + } + return true +} + +// ipNetEqual returns true iff both IPNet are equal +func ipNetEqual(ipn1 *net.IPNet, ipn2 *net.IPNet) bool { + if ipn1 == ipn2 { + return true + } + if ipn1 == nil || ipn2 == nil { + return false + } + m1, _ := ipn1.Mask.Size() + m2, _ := ipn2.Mask.Size() + return m1 == m2 && ipn1.IP.Equal(ipn2.IP) +} diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go new file mode 100644 index 000000000..0cd4f8363 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -0,0 +1,1849 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +// RtAttr is shared so it is in netlink_linux.go + +const ( + SCOPE_UNIVERSE Scope = unix.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = unix.RT_SCOPE_SITE + SCOPE_LINK Scope = unix.RT_SCOPE_LINK + SCOPE_HOST Scope = unix.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE +) + +func (s Scope) String() string { + switch s { + case SCOPE_UNIVERSE: + return "universe" + case SCOPE_SITE: + return "site" + case SCOPE_LINK: + return "link" + case SCOPE_HOST: + return "host" + case SCOPE_NOWHERE: + return "nowhere" + default: + return "unknown" + } +} + +const ( + FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE +) + +var testFlags = []flagString{ + {f: FLAG_ONLINK, s: "onlink"}, + {f: FLAG_PERVASIVE, s: "pervasive"}, +} + +func listFlags(flag int) []string { + var flags []string + for _, tf := range testFlags { + if flag&int(tf.f) != 0 { + flags = append(flags, tf.s) + } + } + return flags +} + +func (r *Route) ListFlags() []string { + return listFlags(r.Flags) +} + +func (n *NexthopInfo) ListFlags() []string { + return listFlags(n.Flags) +} + +type MPLSDestination struct { + Labels []int 
+} + +func (d *MPLSDestination) Family() int { + return nl.FAMILY_MPLS +} + +func (d *MPLSDestination) Decode(buf []byte) error { + d.Labels = nl.DecodeMPLSStack(buf) + return nil +} + +func (d *MPLSDestination) Encode() ([]byte, error) { + return nl.EncodeMPLSStack(d.Labels...), nil +} + +func (d *MPLSDestination) String() string { + s := make([]string, 0, len(d.Labels)) + for _, l := range d.Labels { + s = append(s, fmt.Sprintf("%d", l)) + } + return strings.Join(s, "/") +} + +func (d *MPLSDestination) Equal(x Destination) bool { + o, ok := x.(*MPLSDestination) + if !ok { + return false + } + if d == nil && o == nil { + return true + } + if d == nil || o == nil { + return false + } + if d.Labels == nil && o.Labels == nil { + return true + } + if d.Labels == nil || o.Labels == nil { + return false + } + if len(d.Labels) != len(o.Labels) { + return false + } + for i := range d.Labels { + if d.Labels[i] != o.Labels[i] { + return false + } + } + return true +} + +type MPLSEncap struct { + Labels []int +} + +func (e *MPLSEncap) Type() int { + return nl.LWTUNNEL_ENCAP_MPLS +} + +func (e *MPLSEncap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lack of bytes") + } + l := native.Uint16(buf) + if len(buf) < int(l) { + return fmt.Errorf("lack of bytes") + } + buf = buf[:l] + typ := native.Uint16(buf[2:]) + if typ != nl.MPLS_IPTUNNEL_DST { + return fmt.Errorf("unknown MPLS Encap Type: %d", typ) + } + e.Labels = nl.DecodeMPLSStack(buf[4:]) + return nil +} + +func (e *MPLSEncap) Encode() ([]byte, error) { + s := nl.EncodeMPLSStack(e.Labels...) + hdr := make([]byte, 4) + native.PutUint16(hdr, uint16(len(s)+4)) + native.PutUint16(hdr[2:], nl.MPLS_IPTUNNEL_DST) + return append(hdr, s...), nil +} + +func (e *MPLSEncap) String() string { + s := make([]string, 0, len(e.Labels)) + for _, l := range e.Labels { + s = append(s, fmt.Sprintf("%d", l)) + } + return strings.Join(s, "/") +} + +func (e *MPLSEncap) Equal(x Encap) bool { + o, ok := x.(*MPLSEncap) + if !ok { + return false + } + if e == nil && o == nil { + return true + } + if e == nil || o == nil { + return false + } + if e.Labels == nil && o.Labels == nil { + return true + } + if e.Labels == nil || o.Labels == nil { + return false + } + if len(e.Labels) != len(o.Labels) { + return false + } + for i := range e.Labels { + if e.Labels[i] != o.Labels[i] { + return false + } + } + return true +} + +// SEG6 definitions +type SEG6Encap struct { + Mode int + Segments []net.IP +} + +func (e *SEG6Encap) Type() int { + return nl.LWTUNNEL_ENCAP_SEG6 +} +func (e *SEG6Encap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lack of bytes") + } + // Get Length(l) & Type(typ) : 2 + 2 bytes + l := native.Uint16(buf) + if len(buf) < int(l) { + return fmt.Errorf("lack of bytes") + } + buf = buf[:l] // make sure buf size upper limit is Length + typ := native.Uint16(buf[2:]) + // LWTUNNEL_ENCAP_SEG6 has only one attr type SEG6_IPTUNNEL_SRH + if typ != nl.SEG6_IPTUNNEL_SRH { + return fmt.Errorf("unknown SEG6 Type: %d", typ) + } + + var err error + e.Mode, e.Segments, err = nl.DecodeSEG6Encap(buf[4:]) + + return err +} +func (e *SEG6Encap) Encode() ([]byte, error) { + s, err := nl.EncodeSEG6Encap(e.Mode, e.Segments) + hdr := make([]byte, 4) + native.PutUint16(hdr, uint16(len(s)+4)) + native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH) + return append(hdr, s...), err +} +func (e *SEG6Encap) String() string { + segs := make([]string, 0, len(e.Segments)) + // append segment backwards (from n to 0) since seg#0 is the last segment. 
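+	// (Per RFC 8754, Segment List[0] in an SRH is the last segment of the
+	// path, so reversing here prints segments in traversal order.)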
+ for i := len(e.Segments); i > 0; i-- { + segs = append(segs, e.Segments[i-1].String()) + } + str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode), + len(e.Segments), strings.Join(segs, " ")) + return str +} +func (e *SEG6Encap) Equal(x Encap) bool { + o, ok := x.(*SEG6Encap) + if !ok { + return false + } + if e == o { + return true + } + if e == nil || o == nil { + return false + } + if e.Mode != o.Mode { + return false + } + if len(e.Segments) != len(o.Segments) { + return false + } + for i := range e.Segments { + if !e.Segments[i].Equal(o.Segments[i]) { + return false + } + } + return true +} + +// SEG6LocalEncap definitions +type SEG6LocalEncap struct { + Flags [nl.SEG6_LOCAL_MAX]bool + Action int + Segments []net.IP // from SRH in seg6_local_lwt + Table int // table id for End.T and End.DT6 + InAddr net.IP + In6Addr net.IP + Iif int + Oif int + bpf bpfObj +} + +func (e *SEG6LocalEncap) SetProg(progFd int, progName string) error { + if progFd <= 0 { + return fmt.Errorf("seg6local bpf SetProg: invalid fd") + } + e.bpf.progFd = progFd + e.bpf.progName = progName + return nil +} + +func (e *SEG6LocalEncap) Type() int { + return nl.LWTUNNEL_ENCAP_SEG6_LOCAL +} +func (e *SEG6LocalEncap) Decode(buf []byte) error { + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.SEG6_LOCAL_ACTION: + e.Action = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_ACTION] = true + case nl.SEG6_LOCAL_SRH: + e.Segments, err = nl.DecodeSEG6Srh(attr.Value[:]) + e.Flags[nl.SEG6_LOCAL_SRH] = true + case nl.SEG6_LOCAL_TABLE: + e.Table = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_TABLE] = true + case nl.SEG6_LOCAL_NH4: + e.InAddr = net.IP(attr.Value[0:4]) + e.Flags[nl.SEG6_LOCAL_NH4] = true + case nl.SEG6_LOCAL_NH6: + e.In6Addr = net.IP(attr.Value[0:16]) + e.Flags[nl.SEG6_LOCAL_NH6] = true + case nl.SEG6_LOCAL_IIF: + e.Iif = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_IIF] = true + case nl.SEG6_LOCAL_OIF: + e.Oif = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_OIF] = true + case nl.SEG6_LOCAL_BPF: + var bpfAttrs []syscall.NetlinkRouteAttr + bpfAttrs, err = nl.ParseRouteAttr(attr.Value) + bpfobj := bpfObj{} + for _, bpfAttr := range bpfAttrs { + switch bpfAttr.Attr.Type { + case nl.LWT_BPF_PROG_FD: + bpfobj.progFd = int(native.Uint32(bpfAttr.Value)) + case nl.LWT_BPF_PROG_NAME: + bpfobj.progName = string(bpfAttr.Value) + default: + err = fmt.Errorf("seg6local bpf decode: unknown attribute: Type %d", bpfAttr.Attr) + } + } + e.bpf = bpfobj + e.Flags[nl.SEG6_LOCAL_BPF] = true + } + } + return err +} +func (e *SEG6LocalEncap) Encode() ([]byte, error) { + var err error + res := make([]byte, 8) + native.PutUint16(res, 8) // length + native.PutUint16(res[2:], nl.SEG6_LOCAL_ACTION) + native.PutUint32(res[4:], uint32(e.Action)) + if e.Flags[nl.SEG6_LOCAL_SRH] { + srh, err := nl.EncodeSEG6Srh(e.Segments) + if err != nil { + return nil, err + } + attr := make([]byte, 4) + native.PutUint16(attr, uint16(len(srh)+4)) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_SRH) + attr = append(attr, srh...) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_TABLE] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_TABLE) + native.PutUint32(attr[4:], uint32(e.Table)) + res = append(res, attr...) 
+ } + if e.Flags[nl.SEG6_LOCAL_NH4] { + attr := make([]byte, 4) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_NH4) + ipv4 := e.InAddr.To4() + if ipv4 == nil { + err = fmt.Errorf("SEG6_LOCAL_NH4 has invalid IPv4 address") + return nil, err + } + attr = append(attr, ipv4...) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_NH6] { + attr := make([]byte, 4) + native.PutUint16(attr, 20) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_NH6) + attr = append(attr, e.In6Addr...) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_IIF] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_IIF) + native.PutUint32(attr[4:], uint32(e.Iif)) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_OIF] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_OIF) + native.PutUint32(attr[4:], uint32(e.Oif)) + res = append(res, attr...) + } + if e.Flags[nl.SEG6_LOCAL_BPF] { + attr := nl.NewRtAttr(nl.SEG6_LOCAL_BPF, []byte{}) + if e.bpf.progFd != 0 { + attr.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(e.bpf.progFd))) + } + if e.bpf.progName != "" { + attr.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(e.bpf.progName)) + } + res = append(res, attr.Serialize()...) + } + return res, err +} +func (e *SEG6LocalEncap) String() string { + strs := make([]string, 0, nl.SEG6_LOCAL_MAX) + strs = append(strs, fmt.Sprintf("action %s", nl.SEG6LocalActionString(e.Action))) + + if e.Flags[nl.SEG6_LOCAL_TABLE] { + strs = append(strs, fmt.Sprintf("table %d", e.Table)) + } + if e.Flags[nl.SEG6_LOCAL_NH4] { + strs = append(strs, fmt.Sprintf("nh4 %s", e.InAddr)) + } + if e.Flags[nl.SEG6_LOCAL_NH6] { + strs = append(strs, fmt.Sprintf("nh6 %s", e.In6Addr)) + } + if e.Flags[nl.SEG6_LOCAL_IIF] { + link, err := LinkByIndex(e.Iif) + if err != nil { + strs = append(strs, fmt.Sprintf("iif %d", e.Iif)) + } else { + strs = append(strs, fmt.Sprintf("iif %s", link.Attrs().Name)) + } + } + if e.Flags[nl.SEG6_LOCAL_OIF] { + link, err := LinkByIndex(e.Oif) + if err != nil { + strs = append(strs, fmt.Sprintf("oif %d", e.Oif)) + } else { + strs = append(strs, fmt.Sprintf("oif %s", link.Attrs().Name)) + } + } + if e.Flags[nl.SEG6_LOCAL_SRH] { + segs := make([]string, 0, len(e.Segments)) + // append segment backwards (from n to 0) since seg#0 is the last segment. 
+ for i := len(e.Segments); i > 0; i-- { + segs = append(segs, e.Segments[i-1].String()) + } + strs = append(strs, fmt.Sprintf("segs %d [ %s ]", len(e.Segments), strings.Join(segs, " "))) + } + if e.Flags[nl.SEG6_LOCAL_BPF] { + strs = append(strs, fmt.Sprintf("bpf %s[%d]", e.bpf.progName, e.bpf.progFd)) + } + return strings.Join(strs, " ") +} +func (e *SEG6LocalEncap) Equal(x Encap) bool { + o, ok := x.(*SEG6LocalEncap) + if !ok { + return false + } + if e == o { + return true + } + if e == nil || o == nil { + return false + } + // compare all arrays first + for i := range e.Flags { + if e.Flags[i] != o.Flags[i] { + return false + } + } + if len(e.Segments) != len(o.Segments) { + return false + } + for i := range e.Segments { + if !e.Segments[i].Equal(o.Segments[i]) { + return false + } + } + // compare values + if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) { + return false + } + if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf { + return false + } + return true +} + +// Encap BPF definitions +type bpfObj struct { + progFd int + progName string +} +type BpfEncap struct { + progs [nl.LWT_BPF_MAX]bpfObj + headroom int +} + +// SetProg adds a bpf function to the route via netlink RTA_ENCAP. The fd must be a bpf +// program loaded with bpf(type=BPF_PROG_TYPE_LWT_*) matching the direction the program should +// be applied to (LWT_BPF_IN, LWT_BPF_OUT, LWT_BPF_XMIT). +func (e *BpfEncap) SetProg(mode, progFd int, progName string) error { + if progFd <= 0 { + return fmt.Errorf("lwt bpf SetProg: invalid fd") + } + if mode <= nl.LWT_BPF_UNSPEC || mode >= nl.LWT_BPF_XMIT_HEADROOM { + return fmt.Errorf("lwt bpf SetProg:invalid mode") + } + e.progs[mode].progFd = progFd + e.progs[mode].progName = fmt.Sprintf("%s[fd:%d]", progName, progFd) + return nil +} + +// SetXmitHeadroom sets the xmit headroom (LWT_BPF_MAX_HEADROOM) via netlink RTA_ENCAP. +// maximum headroom is LWT_BPF_MAX_HEADROOM +func (e *BpfEncap) SetXmitHeadroom(headroom int) error { + if headroom > nl.LWT_BPF_MAX_HEADROOM || headroom < 0 { + return fmt.Errorf("invalid headroom size. range is 0 - %d", nl.LWT_BPF_MAX_HEADROOM) + } + e.headroom = headroom + return nil +} + +func (e *BpfEncap) Type() int { + return nl.LWTUNNEL_ENCAP_BPF +} +func (e *BpfEncap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("lwt bpf decode: lack of bytes") + } + native := nl.NativeEndian() + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return fmt.Errorf("lwt bpf decode: failed parsing attribute. 
err: %v", err) + } + for _, attr := range attrs { + if int(attr.Attr.Type) < 1 { + // nl.LWT_BPF_UNSPEC + continue + } + if int(attr.Attr.Type) > nl.LWT_BPF_MAX { + return fmt.Errorf("lwt bpf decode: received unknown attribute type: %d", attr.Attr.Type) + } + switch int(attr.Attr.Type) { + case nl.LWT_BPF_MAX_HEADROOM: + e.headroom = int(native.Uint32(attr.Value)) + default: + bpfO := bpfObj{} + parsedAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return fmt.Errorf("lwt bpf decode: failed parsing route attribute") + } + for _, parsedAttr := range parsedAttrs { + switch int(parsedAttr.Attr.Type) { + case nl.LWT_BPF_PROG_FD: + bpfO.progFd = int(native.Uint32(parsedAttr.Value)) + case nl.LWT_BPF_PROG_NAME: + bpfO.progName = string(parsedAttr.Value) + default: + return fmt.Errorf("lwt bpf decode: received unknown attribute: type: %d, len: %d", parsedAttr.Attr.Type, parsedAttr.Attr.Len) + } + } + e.progs[attr.Attr.Type] = bpfO + } + } + return nil +} + +func (e *BpfEncap) Encode() ([]byte, error) { + buf := make([]byte, 0) + native = nl.NativeEndian() + for index, attr := range e.progs { + nlMsg := nl.NewRtAttr(index, []byte{}) + if attr.progFd != 0 { + nlMsg.AddRtAttr(nl.LWT_BPF_PROG_FD, nl.Uint32Attr(uint32(attr.progFd))) + } + if attr.progName != "" { + nlMsg.AddRtAttr(nl.LWT_BPF_PROG_NAME, nl.ZeroTerminated(attr.progName)) + } + if nlMsg.Len() > 4 { + buf = append(buf, nlMsg.Serialize()...) + } + } + if len(buf) <= 4 { + return nil, fmt.Errorf("lwt bpf encode: bpf obj definitions returned empty buffer") + } + if e.headroom > 0 { + hRoom := nl.NewRtAttr(nl.LWT_BPF_XMIT_HEADROOM, nl.Uint32Attr(uint32(e.headroom))) + buf = append(buf, hRoom.Serialize()...) + } + return buf, nil +} + +func (e *BpfEncap) String() string { + progs := make([]string, 0) + for index, obj := range e.progs { + empty := bpfObj{} + switch index { + case nl.LWT_BPF_IN: + if obj != empty { + progs = append(progs, fmt.Sprintf("in: %s", obj.progName)) + } + case nl.LWT_BPF_OUT: + if obj != empty { + progs = append(progs, fmt.Sprintf("out: %s", obj.progName)) + } + case nl.LWT_BPF_XMIT: + if obj != empty { + progs = append(progs, fmt.Sprintf("xmit: %s", obj.progName)) + } + } + } + if e.headroom > 0 { + progs = append(progs, fmt.Sprintf("xmit headroom: %d", e.headroom)) + } + return strings.Join(progs, " ") +} + +func (e *BpfEncap) Equal(x Encap) bool { + o, ok := x.(*BpfEncap) + if !ok { + return false + } + if e.headroom != o.headroom { + return false + } + for i := range o.progs { + if o.progs[i] != e.progs[i] { + return false + } + } + return true +} + +// IP6tnlEncap definition +type IP6tnlEncap struct { + ID uint64 + Dst net.IP + Src net.IP + Hoplimit uint8 + TC uint8 + Flags uint16 +} + +func (e *IP6tnlEncap) Type() int { + return nl.LWTUNNEL_ENCAP_IP6 +} + +func (e *IP6tnlEncap) Decode(buf []byte) error { + attrs, err := nl.ParseRouteAttr(buf) + if err != nil { + return err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.LWTUNNEL_IP6_ID: + e.ID = uint64(native.Uint64(attr.Value[0:4])) + case nl.LWTUNNEL_IP6_DST: + e.Dst = net.IP(attr.Value[:]) + case nl.LWTUNNEL_IP6_SRC: + e.Src = net.IP(attr.Value[:]) + case nl.LWTUNNEL_IP6_HOPLIMIT: + e.Hoplimit = attr.Value[0] + case nl.LWTUNNEL_IP6_TC: + // e.TC = attr.Value[0] + err = fmt.Errorf("decoding TC in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_FLAGS: + // e.Flags = uint16(native.Uint16(attr.Value[0:2])) + err = fmt.Errorf("decoding FLAG in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_PAD: + err = 
fmt.Errorf("decoding PAD in IP6tnlEncap is not supported") + case nl.LWTUNNEL_IP6_OPTS: + err = fmt.Errorf("decoding OPTS in IP6tnlEncap is not supported") + } + } + return err +} + +func (e *IP6tnlEncap) Encode() ([]byte, error) { + + final := []byte{} + + resID := make([]byte, 12) + native.PutUint16(resID, 12) // 2+2+8 + native.PutUint16(resID[2:], nl.LWTUNNEL_IP6_ID) + native.PutUint64(resID[4:], 0) + final = append(final, resID...) + + resDst := make([]byte, 4) + native.PutUint16(resDst, 20) // 2+2+16 + native.PutUint16(resDst[2:], nl.LWTUNNEL_IP6_DST) + resDst = append(resDst, e.Dst...) + final = append(final, resDst...) + + resSrc := make([]byte, 4) + native.PutUint16(resSrc, 20) + native.PutUint16(resSrc[2:], nl.LWTUNNEL_IP6_SRC) + resSrc = append(resSrc, e.Src...) + final = append(final, resSrc...) + + // resTc := make([]byte, 5) + // native.PutUint16(resTc, 5) + // native.PutUint16(resTc[2:], nl.LWTUNNEL_IP6_TC) + // resTc[4] = e.TC + // final = append(final,resTc...) + + resHops := make([]byte, 5) + native.PutUint16(resHops, 5) + native.PutUint16(resHops[2:], nl.LWTUNNEL_IP6_HOPLIMIT) + resHops[4] = e.Hoplimit + final = append(final, resHops...) + + // resFlags := make([]byte, 6) + // native.PutUint16(resFlags, 6) + // native.PutUint16(resFlags[2:], nl.LWTUNNEL_IP6_FLAGS) + // native.PutUint16(resFlags[4:], e.Flags) + // final = append(final,resFlags...) + + return final, nil +} + +func (e *IP6tnlEncap) String() string { + return fmt.Sprintf("id %d src %s dst %s hoplimit %d tc %d flags 0x%.4x", e.ID, e.Src, e.Dst, e.Hoplimit, e.TC, e.Flags) +} + +func (e *IP6tnlEncap) Equal(x Encap) bool { + o, ok := x.(*IP6tnlEncap) + if !ok { + return false + } + + if e.ID != o.ID || e.Flags != o.Flags || e.Hoplimit != o.Hoplimit || e.Src.Equal(o.Src) || e.Dst.Equal(o.Dst) || e.TC != o.TC { + return false + } + return true +} + +type Via struct { + AddrFamily int + Addr net.IP +} + +func (v *Via) Equal(x Destination) bool { + o, ok := x.(*Via) + if !ok { + return false + } + if v.AddrFamily == x.Family() && v.Addr.Equal(o.Addr) { + return true + } + return false +} + +func (v *Via) String() string { + return fmt.Sprintf("Family: %d, Address: %s", v.AddrFamily, v.Addr.String()) +} + +func (v *Via) Family() int { + return v.AddrFamily +} + +func (v *Via) Encode() ([]byte, error) { + buf := &bytes.Buffer{} + err := binary.Write(buf, native, uint16(v.AddrFamily)) + if err != nil { + return nil, err + } + err = binary.Write(buf, native, v.Addr) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (v *Via) Decode(b []byte) error { + if len(b) < 6 { + return fmt.Errorf("decoding failed: buffer too small (%d bytes)", len(b)) + } + v.AddrFamily = int(native.Uint16(b[0:2])) + if v.AddrFamily == nl.FAMILY_V4 { + v.Addr = net.IP(b[2:6]) + return nil + } else if v.AddrFamily == nl.FAMILY_V6 { + if len(b) < 18 { + return fmt.Errorf("decoding failed: buffer too small (%d bytes)", len(b)) + } + v.Addr = net.IP(b[2:]) + return nil + } + return fmt.Errorf("decoding failed: address family %d unknown", v.AddrFamily) +} + +// RouteAdd will add a route to the system. +// Equivalent to: `ip route add $route` +func RouteAdd(route *Route) error { + return pkgHandle.RouteAdd(route) +} + +// RouteAdd will add a route to the system. 
+// Equivalent to: `ip route add $route` +func (h *Handle) RouteAdd(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteAppend will append a route to the system. +// Equivalent to: `ip route append $route` +func RouteAppend(route *Route) error { + return pkgHandle.RouteAppend(route) +} + +// RouteAppend will append a route to the system. +// Equivalent to: `ip route append $route` +func (h *Handle) RouteAppend(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_APPEND | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteAddEcmp will add a route to the system. +func RouteAddEcmp(route *Route) error { + return pkgHandle.RouteAddEcmp(route) +} + +// RouteAddEcmp will add a route to the system. +func (h *Handle) RouteAddEcmp(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteChange will change an existing route in the system. +// Equivalent to: `ip route change $route` +func RouteChange(route *Route) error { + return pkgHandle.RouteChange(route) +} + +// RouteChange will change an existing route in the system. +// Equivalent to: `ip route change $route` +func (h *Handle) RouteChange(route *Route) error { + flags := unix.NLM_F_REPLACE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteReplace will add a route to the system. +// Equivalent to: `ip route replace $route` +func RouteReplace(route *Route) error { + return pkgHandle.RouteReplace(route) +} + +// RouteReplace will add a route to the system. +// Equivalent to: `ip route replace $route` +func (h *Handle) RouteReplace(route *Route) error { + flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK + req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err +} + +// RouteDel will delete a route from the system. +// Equivalent to: `ip route del $route` +func RouteDel(route *Route) error { + return pkgHandle.RouteDel(route) +} + +// RouteDel will delete a route from the system. 
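+// The route argument should carry the key fields that identify the route in
+// the kernel (typically Dst, plus Table and Priority when they differ from
+// the defaults).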
+// Equivalent to: `ip route del $route` +func (h *Handle) RouteDel(route *Route) error { + req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) + _, err := h.routeHandle(route, req, nl.NewRtDelMsg()) + return err +} + +func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) ([][]byte, error) { + if err := h.prepareRouteReq(route, req, msg); err != nil { + return nil, err + } + return req.Execute(unix.NETLINK_ROUTE, 0) +} + +func (h *Handle) routeHandleIter(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg, f func(msg []byte) bool) error { + if err := h.prepareRouteReq(route, req, msg); err != nil { + return err + } + return req.ExecuteIter(unix.NETLINK_ROUTE, 0, f) +} + +func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { + if req.NlMsghdr.Type != unix.RTM_GETROUTE && (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { + return fmt.Errorf("either Dst.IP, Src.IP or Gw must be set") + } + + family := -1 + var rtAttrs []*nl.RtAttr + + if route.Dst != nil && route.Dst.IP != nil { + dstLen, _ := route.Dst.Mask.Size() + msg.Dst_len = uint8(dstLen) + dstFamily := nl.GetIPFamily(route.Dst.IP) + family = dstFamily + var dstData []byte + if dstFamily == FAMILY_V4 { + dstData = route.Dst.IP.To4() + } else { + dstData = route.Dst.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) + } else if route.MPLSDst != nil { + family = nl.FAMILY_MPLS + msg.Dst_len = uint8(20) + msg.Type = unix.RTN_UNICAST + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) + } + + if route.NewDst != nil { + if family != -1 && family != route.NewDst.Family() { + return fmt.Errorf("new destination and destination are not the same address family") + } + buf, err := route.NewDst.Encode() + if err != nil { + return err + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_NEWDST, buf)) + } + + if route.Encap != nil { + buf := make([]byte, 2) + native.PutUint16(buf, uint16(route.Encap.Type())) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) + buf, err := route.Encap.Encode() + if err != nil { + return err + } + switch route.Encap.Type() { + case nl.LWTUNNEL_ENCAP_BPF: + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP|unix.NLA_F_NESTED, buf)) + default: + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP, buf)) + } + + } + + if route.Src != nil { + srcFamily := nl.GetIPFamily(route.Src) + if family != -1 && family != srcFamily { + return fmt.Errorf("source and destination ip are not the same IP family") + } + family = srcFamily + var srcData []byte + if srcFamily == FAMILY_V4 { + srcData = route.Src.To4() + } else { + srcData = route.Src.To16() + } + // The commonly used src ip for routes is actually PREFSRC + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PREFSRC, srcData)) + } + + if route.Gw != nil { + gwFamily := nl.GetIPFamily(route.Gw) + if family != -1 && family != gwFamily { + return fmt.Errorf("gateway, source, and destination ip are not the same IP family") + } + family = gwFamily + var gwData []byte + if gwFamily == FAMILY_V4 { + gwData = route.Gw.To4() + } else { + gwData = route.Gw.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) + } + + if route.Via != nil { + buf, err := route.Via.Encode() + if err != nil { + return fmt.Errorf("failed to encode RTA_VIA: %v", err) + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_VIA, buf)) + } + + if len(route.MultiPath) > 0 
{ + buf := []byte{} + for _, nh := range route.MultiPath { + rtnh := &nl.RtNexthop{ + RtNexthop: unix.RtNexthop{ + Hops: uint8(nh.Hops), + Ifindex: int32(nh.LinkIndex), + Flags: uint8(nh.Flags), + }, + } + children := []nl.NetlinkRequestData{} + if nh.Gw != nil { + gwFamily := nl.GetIPFamily(nh.Gw) + if family != -1 && family != gwFamily { + return fmt.Errorf("gateway, source, and destination ip are not the same IP family") + } + if gwFamily == FAMILY_V4 { + children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To4()))) + } else { + children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To16()))) + } + } + if nh.NewDst != nil { + if family != -1 && family != nh.NewDst.Family() { + return fmt.Errorf("new destination and destination are not the same address family") + } + buf, err := nh.NewDst.Encode() + if err != nil { + return err + } + children = append(children, nl.NewRtAttr(unix.RTA_NEWDST, buf)) + } + if nh.Encap != nil { + buf := make([]byte, 2) + native.PutUint16(buf, uint16(nh.Encap.Type())) + children = append(children, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) + buf, err := nh.Encap.Encode() + if err != nil { + return err + } + children = append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf)) + } + if nh.Via != nil { + buf, err := nh.Via.Encode() + if err != nil { + return err + } + children = append(children, nl.NewRtAttr(unix.RTA_VIA, buf)) + } + rtnh.Children = children + buf = append(buf, rtnh.Serialize()...) + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_MULTIPATH, buf)) + } + + if route.Table > 0 { + if route.Table >= 256 { + msg.Table = unix.RT_TABLE_UNSPEC + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Table)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_TABLE, b)) + } else { + msg.Table = uint8(route.Table) + } + } + + if route.Priority > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Priority)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) + } + if route.Realm > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Realm)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_FLOW, b)) + } + if route.Tos > 0 { + msg.Tos = uint8(route.Tos) + } + if route.Protocol > 0 { + msg.Protocol = uint8(route.Protocol) + } + if route.Type > 0 { + msg.Type = uint8(route.Type) + } + + var metrics []*nl.RtAttr + if route.MTU > 0 { + b := nl.Uint32Attr(uint32(route.MTU)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) + } + if route.Window > 0 { + b := nl.Uint32Attr(uint32(route.Window)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_WINDOW, b)) + } + if route.Rtt > 0 { + b := nl.Uint32Attr(uint32(route.Rtt)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTT, b)) + } + if route.RttVar > 0 { + b := nl.Uint32Attr(uint32(route.RttVar)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTTVAR, b)) + } + if route.Ssthresh > 0 { + b := nl.Uint32Attr(uint32(route.Ssthresh)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_SSTHRESH, b)) + } + if route.Cwnd > 0 { + b := nl.Uint32Attr(uint32(route.Cwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_CWND, b)) + } + if route.AdvMSS > 0 { + b := nl.Uint32Attr(uint32(route.AdvMSS)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) + } + if route.Reordering > 0 { + b := nl.Uint32Attr(uint32(route.Reordering)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_REORDERING, b)) + } + if route.Hoplimit > 0 { + b := nl.Uint32Attr(uint32(route.Hoplimit)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_HOPLIMIT, b)) + } + if 
route.InitCwnd > 0 { + b := nl.Uint32Attr(uint32(route.InitCwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_INITCWND, b)) + } + if route.Features > 0 { + b := nl.Uint32Attr(uint32(route.Features)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_FEATURES, b)) + } + if route.RtoMin > 0 { + b := nl.Uint32Attr(uint32(route.RtoMin)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTO_MIN, b)) + } + if route.InitRwnd > 0 { + b := nl.Uint32Attr(uint32(route.InitRwnd)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_INITRWND, b)) + } + if route.QuickACK > 0 { + b := nl.Uint32Attr(uint32(route.QuickACK)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_QUICKACK, b)) + } + if route.Congctl != "" { + b := nl.ZeroTerminated(route.Congctl) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_CC_ALGO, b)) + } + if route.FastOpenNoCookie > 0 { + b := nl.Uint32Attr(uint32(route.FastOpenNoCookie)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_FASTOPEN_NO_COOKIE, b)) + } + + if metrics != nil { + attr := nl.NewRtAttr(unix.RTA_METRICS, nil) + for _, metric := range metrics { + attr.AddChild(metric) + } + rtAttrs = append(rtAttrs, attr) + } + + msg.Flags = uint32(route.Flags) + msg.Scope = uint8(route.Scope) + // only overwrite family if it was not set in msg + if msg.Family == 0 { + msg.Family = uint8(family) + } + req.AddData(msg) + for _, attr := range rtAttrs { + req.AddData(attr) + } + + if (req.NlMsghdr.Type != unix.RTM_GETROUTE) || (req.NlMsghdr.Type == unix.RTM_GETROUTE && route.LinkIndex > 0) { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.LinkIndex)) + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + return nil +} + +// RouteList gets a list of routes in the system. +// Equivalent to: `ip route show`. +// The list can be filtered by link and ip family. +func RouteList(link Link, family int) ([]Route, error) { + return pkgHandle.RouteList(link, family) +} + +// RouteList gets a list of routes in the system. +// Equivalent to: `ip route show`. +// The list can be filtered by link and ip family. +func (h *Handle) RouteList(link Link, family int) ([]Route, error) { + routeFilter := &Route{} + if link != nil { + routeFilter.LinkIndex = link.Attrs().Index + + return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) + } + return h.RouteListFiltered(family, routeFilter, 0) +} + +// RouteListFiltered gets a list of routes in the system filtered with specified rules. +// All rules must be defined in RouteFilter struct +func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + return pkgHandle.RouteListFiltered(family, filter, filterMask) +} + +// RouteListFiltered gets a list of routes in the system filtered with specified rules. +// All rules must be defined in RouteFilter struct +func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + var res []Route + err := h.RouteListFilteredIter(family, filter, filterMask, func(route Route) (cont bool) { + res = append(res, route) + return true + }) + if err != nil { + return nil, err + } + return res, nil +} + +// RouteListFilteredIter passes each route that matches the filter to the given iterator func. Iteration continues +// until all routes are loaded or the func returns false. 
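+// For example (illustrative sketch; lists IPv4 routes of the main table and
+// shows where an early stop would go):
+//
+//	filter := &netlink.Route{Table: unix.RT_TABLE_MAIN}
+//	err := netlink.RouteListFilteredIter(netlink.FAMILY_V4, filter, netlink.RT_FILTER_TABLE,
+//		func(r netlink.Route) bool {
+//			fmt.Println(r)
+//			return true // return false to stop the iteration early
+//		})
+//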
+func RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { + return pkgHandle.RouteListFilteredIter(family, filter, filterMask, f) +} + +func (h *Handle) RouteListFilteredIter(family int, filter *Route, filterMask uint64, f func(Route) (cont bool)) error { + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) + rtmsg := &nl.RtMsg{} + rtmsg.Family = uint8(family) + + var parseErr error + err := h.routeHandleIter(filter, req, rtmsg, func(m []byte) bool { + msg := nl.DeserializeRtMsg(m) + if family != FAMILY_ALL && msg.Family != uint8(family) { + // Ignore routes not matching requested family + return true + } + if msg.Flags&unix.RTM_F_CLONED != 0 { + // Ignore cloned routes + return true + } + if msg.Table != unix.RT_TABLE_MAIN { + if filter == nil || filterMask&RT_FILTER_TABLE == 0 { + // Ignore non-main tables + return true + } + } + route, err := deserializeRoute(m) + if err != nil { + parseErr = err + return false + } + if filter != nil { + switch { + case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: + return true + case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: + return true + case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope: + return true + case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type: + return true + case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos: + return true + case filterMask&RT_FILTER_REALM != 0 && route.Realm != filter.Realm: + return true + case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex: + return true + case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex: + return true + case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw): + return true + case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src): + return true + case filterMask&RT_FILTER_DST != 0: + if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) { + if filter.Dst == nil { + filter.Dst = genZeroIPNet(family) + } + if !ipNetEqual(route.Dst, filter.Dst) { + return true + } + } + case filterMask&RT_FILTER_HOPLIMIT != 0 && route.Hoplimit != filter.Hoplimit: + return true + } + } + return f(route) + }) + if err != nil { + return err + } + if parseErr != nil { + return parseErr + } + return nil +} + +// deserializeRoute decodes a binary netlink message into a Route struct +func deserializeRoute(m []byte) (Route, error) { + msg := nl.DeserializeRtMsg(m) + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return Route{}, err + } + route := Route{ + Scope: Scope(msg.Scope), + Protocol: RouteProtocol(int(msg.Protocol)), + Table: int(msg.Table), + Type: int(msg.Type), + Tos: int(msg.Tos), + Flags: int(msg.Flags), + Family: int(msg.Family), + } + + var encap, encapType syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case unix.RTA_GATEWAY: + route.Gw = net.IP(attr.Value) + case unix.RTA_PREFSRC: + route.Src = net.IP(attr.Value) + case unix.RTA_DST: + if msg.Family == nl.FAMILY_MPLS { + stack := nl.DecodeMPLSStack(attr.Value) + if len(stack) == 0 || len(stack) > 1 { + return route, fmt.Errorf("invalid MPLS RTA_DST") + } + route.MPLSDst = &stack[0] + } else { + route.Dst = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), + } + } + case unix.RTA_OIF: + route.LinkIndex = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_IIF: + 
route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_PRIORITY: + route.Priority = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_FLOW: + route.Realm = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_TABLE: + route.Table = int(native.Uint32(attr.Value[0:4])) + case unix.RTA_MULTIPATH: + parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) { + if len(value) < unix.SizeofRtNexthop { + return nil, nil, fmt.Errorf("lack of bytes") + } + nh := nl.DeserializeRtNexthop(value) + if len(value) < int(nh.RtNexthop.Len) { + return nil, nil, fmt.Errorf("lack of bytes") + } + info := &NexthopInfo{ + LinkIndex: int(nh.RtNexthop.Ifindex), + Hops: int(nh.RtNexthop.Hops), + Flags: int(nh.RtNexthop.Flags), + } + attrs, err := nl.ParseRouteAttr(value[unix.SizeofRtNexthop:int(nh.RtNexthop.Len)]) + if err != nil { + return nil, nil, err + } + var encap, encapType syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case unix.RTA_GATEWAY: + info.Gw = net.IP(attr.Value) + case unix.RTA_NEWDST: + var d Destination + switch msg.Family { + case nl.FAMILY_MPLS: + d = &MPLSDestination{} + } + if err := d.Decode(attr.Value); err != nil { + return nil, nil, err + } + info.NewDst = d + case unix.RTA_ENCAP_TYPE: + encapType = attr + case unix.RTA_ENCAP: + encap = attr + case unix.RTA_VIA: + d := &Via{} + if err := d.Decode(attr.Value); err != nil { + return nil, nil, err + } + info.Via = d + } + } + + if len(encap.Value) != 0 && len(encapType.Value) != 0 { + typ := int(native.Uint16(encapType.Value[0:2])) + var e Encap + switch typ { + case nl.LWTUNNEL_ENCAP_MPLS: + e = &MPLSEncap{} + if err := e.Decode(encap.Value); err != nil { + return nil, nil, err + } + } + info.Encap = e + } + + return info, value[int(nh.RtNexthop.Len):], nil + } + rest := attr.Value + for len(rest) > 0 { + info, buf, err := parseRtNexthop(rest) + if err != nil { + return route, err + } + route.MultiPath = append(route.MultiPath, info) + rest = buf + } + case unix.RTA_NEWDST: + var d Destination + switch msg.Family { + case nl.FAMILY_MPLS: + d = &MPLSDestination{} + } + if err := d.Decode(attr.Value); err != nil { + return route, err + } + route.NewDst = d + case unix.RTA_VIA: + v := &Via{} + if err := v.Decode(attr.Value); err != nil { + return route, err + } + route.Via = v + case unix.RTA_ENCAP_TYPE: + encapType = attr + case unix.RTA_ENCAP: + encap = attr + case unix.RTA_METRICS: + metrics, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return route, err + } + for _, metric := range metrics { + switch metric.Attr.Type { + case unix.RTAX_MTU: + route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_WINDOW: + route.Window = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTT: + route.Rtt = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTTVAR: + route.RttVar = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_SSTHRESH: + route.Ssthresh = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_CWND: + route.Cwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_ADVMSS: + route.AdvMSS = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_REORDERING: + route.Reordering = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_HOPLIMIT: + route.Hoplimit = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_INITCWND: + route.InitCwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_FEATURES: + route.Features = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_RTO_MIN: + route.RtoMin = 
int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_INITRWND: + route.InitRwnd = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_QUICKACK: + route.QuickACK = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_CC_ALGO: + route.Congctl = nl.BytesToString(metric.Value) + case unix.RTAX_FASTOPEN_NO_COOKIE: + route.FastOpenNoCookie = int(native.Uint32(metric.Value[0:4])) + } + } + } + } + + // Same logic to generate "default" dst with iproute2 implementation + if route.Dst == nil { + var addLen int + var ip net.IP + switch msg.Family { + case FAMILY_V4: + addLen = net.IPv4len + ip = net.IPv4zero + case FAMILY_V6: + addLen = net.IPv6len + ip = net.IPv6zero + } + + if addLen != 0 { + route.Dst = &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(int(msg.Dst_len), 8*addLen), + } + } + } + + if len(encap.Value) != 0 && len(encapType.Value) != 0 { + typ := int(native.Uint16(encapType.Value[0:2])) + var e Encap + switch typ { + case nl.LWTUNNEL_ENCAP_MPLS: + e = &MPLSEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } + case nl.LWTUNNEL_ENCAP_SEG6: + e = &SEG6Encap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } + case nl.LWTUNNEL_ENCAP_SEG6_LOCAL: + e = &SEG6LocalEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } + case nl.LWTUNNEL_ENCAP_BPF: + e = &BpfEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } + } + route.Encap = e + } + + return route, nil +} + +// RouteGetOptions contains a set of options to use with +// RouteGetWithOptions +type RouteGetOptions struct { + Iif string + IifIndex int + Oif string + VrfName string + SrcAddr net.IP + UID *uint32 + Mark uint32 + FIBMatch bool +} + +// RouteGetWithOptions gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get <> vrf '. +func RouteGetWithOptions(destination net.IP, options *RouteGetOptions) ([]Route, error) { + return pkgHandle.RouteGetWithOptions(destination, options) +} + +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. +func RouteGet(destination net.IP) ([]Route, error) { + return pkgHandle.RouteGet(destination) +} + +// RouteGetWithOptions gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get <> vrf '. 
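+// For example (illustrative sketch; the VRF name is an assumption):
+//
+//	opts := &netlink.RouteGetOptions{VrfName: "vrf-blue"}
+//	routes, err := netlink.RouteGetWithOptions(net.ParseIP("8.8.8.8"), opts)
+//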
+func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOptions) ([]Route, error) { + req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) + family := nl.GetIPFamily(destination) + var destinationData []byte + var bitlen uint8 + if family == FAMILY_V4 { + destinationData = destination.To4() + bitlen = 32 + } else { + destinationData = destination.To16() + bitlen = 128 + } + msg := &nl.RtMsg{} + msg.Family = uint8(family) + msg.Dst_len = bitlen + if options != nil && options.SrcAddr != nil { + msg.Src_len = bitlen + } + msg.Flags = unix.RTM_F_LOOKUP_TABLE + if options != nil && options.FIBMatch { + msg.Flags |= unix.RTM_F_FIB_MATCH + } + req.AddData(msg) + + rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) + req.AddData(rtaDst) + + if options != nil { + if options.VrfName != "" { + link, err := h.LinkByName(options.VrfName) + if err != nil { + return nil, err + } + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + + iifIndex := 0 + if len(options.Iif) > 0 { + link, err := h.LinkByName(options.Iif) + if err != nil { + return nil, err + } + + iifIndex = link.Attrs().Index + } else if options.IifIndex > 0 { + iifIndex = options.IifIndex + } + + if iifIndex > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(iifIndex)) + + req.AddData(nl.NewRtAttr(unix.RTA_IIF, b)) + } + + if len(options.Oif) > 0 { + link, err := h.LinkByName(options.Oif) + if err != nil { + return nil, err + } + + b := make([]byte, 4) + native.PutUint32(b, uint32(link.Attrs().Index)) + + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } + + if options.SrcAddr != nil { + var srcAddr []byte + if family == FAMILY_V4 { + srcAddr = options.SrcAddr.To4() + } else { + srcAddr = options.SrcAddr.To16() + } + + req.AddData(nl.NewRtAttr(unix.RTA_SRC, srcAddr)) + } + + if options.UID != nil { + uid := *options.UID + b := make([]byte, 4) + native.PutUint32(b, uid) + + req.AddData(nl.NewRtAttr(unix.RTA_UID, b)) + } + + if options.Mark > 0 { + b := make([]byte, 4) + native.PutUint32(b, options.Mark) + + req.AddData(nl.NewRtAttr(unix.RTA_MARK, b)) + } + } + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + if err != nil { + return nil, err + } + + var res []Route + for _, m := range msgs { + route, err := deserializeRoute(m) + if err != nil { + return nil, err + } + res = append(res, route) + } + return res, nil +} + +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. +func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { + return h.RouteGetWithOptions(destination, nil) +} + +// RouteSubscribe takes a chan down which notifications will be sent +// when routes are added or deleted. Close the 'done' chan to stop subscription. +func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { + return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil, false, 0, nil, false) +} + +// RouteSubscribeAt works like RouteSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). +func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error { + return routeSubscribeAt(ns, netns.None(), ch, done, nil, false, 0, nil, false) +} + +// RouteSubscribeOptions contains a set of options to use with +// RouteSubscribeWithOptions. 
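+//
+// A subscription sketch (assumption: the caller owns both channels; closing
+// `done` stops the subscription):
+//
+//	ch := make(chan netlink.RouteUpdate)
+//	done := make(chan struct{})
+//	defer close(done)
+//	opts := netlink.RouteSubscribeOptions{
+//		ListExisting: true, // also dump the current routing table on start
+//	}
+//	if err := netlink.RouteSubscribeWithOptions(ch, done, opts); err != nil {
+//		// handle error
+//	}
+//	for update := range ch {
+//		fmt.Println(update.Type, update.Route)
+//	}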
+type RouteSubscribeOptions struct {
+	Namespace              *netns.NsHandle
+	ErrorCallback          func(error)
+	ListExisting           bool
+	ReceiveBufferSize      int
+	ReceiveBufferForceSize bool
+	ReceiveTimeout         *unix.Timeval
+}
+
+// RouteSubscribeWithOptions works like RouteSubscribe but enables the caller
+// to provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error {
+	if options.Namespace == nil {
+		none := netns.None()
+		options.Namespace = &none
+	}
+	return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback, options.ListExisting,
+		options.ReceiveBufferSize, options.ReceiveTimeout, options.ReceiveBufferForceSize)
+}
+
+func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error), listExisting bool,
+	rcvbuf int, rcvTimeout *unix.Timeval, rcvbufForce bool) error {
+	s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE)
+	if err != nil {
+		return err
+	}
+	if rcvTimeout != nil {
+		if err := s.SetReceiveTimeout(rcvTimeout); err != nil {
+			return err
+		}
+	}
+	if rcvbuf != 0 {
+		err = s.SetReceiveBufferSize(rcvbuf, rcvbufForce)
+		if err != nil {
+			return err
+		}
+	}
+	if done != nil {
+		go func() {
+			<-done
+			s.Close()
+		}()
+	}
+	if listExisting {
+		req := pkgHandle.newNetlinkRequest(unix.RTM_GETROUTE,
+			unix.NLM_F_DUMP)
+		infmsg := nl.NewIfInfomsg(unix.AF_UNSPEC)
+		req.AddData(infmsg)
+		if err := s.Send(req); err != nil {
+			return err
+		}
+	}
+	go func() {
+		defer close(ch)
+		for {
+			msgs, from, err := s.Receive()
+			if err != nil {
+				if cberr != nil {
+					cberr(fmt.Errorf("Receive failed: %v",
+						err))
+				}
+				return
+			}
+			if from.Pid != nl.PidKernel {
+				if cberr != nil {
+					cberr(fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel))
+				}
+				continue
+			}
+			for _, m := range msgs {
+				if m.Header.Type == unix.NLMSG_DONE {
+					continue
+				}
+				if m.Header.Type == unix.NLMSG_ERROR {
+					error := int32(native.Uint32(m.Data[0:4]))
+					if error == 0 {
+						continue
+					}
+					if cberr != nil {
+						cberr(fmt.Errorf("error message: %v",
+							syscall.Errno(-error)))
+					}
+					continue
+				}
+				route, err := deserializeRoute(m.Data)
+				if err != nil {
+					if cberr != nil {
+						cberr(err)
+					}
+					continue
+				}
+				ch <- RouteUpdate{
+					Type:    m.Header.Type,
+					NlFlags: m.Header.Flags & (unix.NLM_F_REPLACE | unix.NLM_F_EXCL | unix.NLM_F_CREATE | unix.NLM_F_APPEND),
+					Route:   route,
+				}
+			}
+		}
+	}()
+
+	return nil
+}
+
+func (p RouteProtocol) String() string {
+	switch int(p) {
+	case unix.RTPROT_BABEL:
+		return "babel"
+	case unix.RTPROT_BGP:
+		return "bgp"
+	case unix.RTPROT_BIRD:
+		return "bird"
+	case unix.RTPROT_BOOT:
+		return "boot"
+	case unix.RTPROT_DHCP:
+		return "dhcp"
+	case unix.RTPROT_DNROUTED:
+		return "dnrouted"
+	case unix.RTPROT_EIGRP:
+		return "eigrp"
+	case unix.RTPROT_GATED:
+		return "gated"
+	case unix.RTPROT_ISIS:
+		return "isis"
+	// case unix.RTPROT_KEEPALIVED:
+	// 	return "keepalived"
+	case unix.RTPROT_KERNEL:
+		return "kernel"
+	case unix.RTPROT_MROUTED:
+		return "mrouted"
+	case unix.RTPROT_MRT:
+		return "mrt"
+	case unix.RTPROT_NTK:
+		return "ntk"
+	case unix.RTPROT_OSPF:
+		return "ospf"
+	case unix.RTPROT_RA:
+		return "ra"
+	case unix.RTPROT_REDIRECT:
+		return "redirect"
+	case unix.RTPROT_RIP:
+		return "rip"
+	case unix.RTPROT_STATIC:
+		return "static"
+	case unix.RTPROT_UNSPEC:
+		return "unspec"
+	case
unix.RTPROT_XORP: + return "xorp" + case unix.RTPROT_ZEBRA: + return "zebra" + default: + return strconv.Itoa(int(p)) + } +} + +// genZeroIPNet returns 0.0.0.0/0 or ::/0 for IPv4 or IPv6, otherwise nil +func genZeroIPNet(family int) *net.IPNet { + var addLen int + var ip net.IP + switch family { + case FAMILY_V4: + addLen = net.IPv4len + ip = net.IPv4zero + case FAMILY_V6: + addLen = net.IPv6len + ip = net.IPv6zero + } + if addLen != 0 { + return &net.IPNet{ + IP: ip, + Mask: net.CIDRMask(0, 8*addLen), + } + } + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/route_unspecified.go b/vendor/github.com/vishvananda/netlink/route_unspecified.go new file mode 100644 index 000000000..db7372689 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/route_unspecified.go @@ -0,0 +1,21 @@ +// +build !linux + +package netlink + +import "strconv" + +func (r *Route) ListFlags() []string { + return []string{} +} + +func (n *NexthopInfo) ListFlags() []string { + return []string{} +} + +func (s Scope) String() string { + return "unknown" +} + +func (p RouteProtocol) String() string { + return strconv.Itoa(int(p)) +} diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go new file mode 100644 index 000000000..9d74c7cd8 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -0,0 +1,82 @@ +package netlink + +import ( + "fmt" + "net" +) + +// Rule represents a netlink rule. +type Rule struct { + Priority int + Family int + Table int + Mark uint32 + Mask *uint32 + Tos uint + TunID uint + Goto int + Src *net.IPNet + Dst *net.IPNet + Flow int + IifName string + OifName string + SuppressIfgroup int + SuppressPrefixlen int + Invert bool + Dport *RulePortRange + Sport *RulePortRange + IPProto int + UIDRange *RuleUIDRange + Protocol uint8 + Type uint8 +} + +func (r Rule) String() string { + from := "all" + if r.Src != nil && r.Src.String() != "" { + from = r.Src.String() + } + + to := "all" + if r.Dst != nil && r.Dst.String() != "" { + to = r.Dst.String() + } + + return fmt.Sprintf("ip rule %d: from %s to %s table %d %s", + r.Priority, from, to, r.Table, r.typeString()) +} + +// NewRule return empty rules. +func NewRule() *Rule { + return &Rule{ + SuppressIfgroup: -1, + SuppressPrefixlen: -1, + Priority: -1, + Mark: 0, + Mask: nil, + Goto: -1, + Flow: -1, + } +} + +// NewRulePortRange creates rule sport/dport range. +func NewRulePortRange(start, end uint16) *RulePortRange { + return &RulePortRange{Start: start, End: end} +} + +// RulePortRange represents rule sport/dport range. +type RulePortRange struct { + Start uint16 + End uint16 +} + +// NewRuleUIDRange creates rule uid range. +func NewRuleUIDRange(start, end uint32) *RuleUIDRange { + return &RuleUIDRange{Start: start, End: end} +} + +// RuleUIDRange represents rule uid range. +type RuleUIDRange struct { + Start uint32 + End uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go new file mode 100644 index 000000000..ddff99cfa --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -0,0 +1,365 @@ +package netlink + +import ( + "bytes" + "fmt" + "net" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const FibRuleInvert = 0x2 + +// RuleAdd adds a rule to the system. +// Equivalent to: ip rule add +func RuleAdd(rule *Rule) error { + return pkgHandle.RuleAdd(rule) +} + +// RuleAdd adds a rule to the system. 
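+// For example, the following sketch (field values are illustrative) installs
+// a source-based lookup rule for a dedicated table, roughly
+// `ip rule add from 192.168.1.0/24 table 100`:
+//
+//	rule := netlink.NewRule()
+//	rule.Table = 100
+//	_, src, _ := net.ParseCIDR("192.168.1.0/24")
+//	rule.Src = src
+//	if err := netlink.RuleAdd(rule); err != nil {
+//		// handle error
+//	}
+//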
+// Equivalent to: ip rule add +func (h *Handle) RuleAdd(rule *Rule) error { + req := h.newNetlinkRequest(unix.RTM_NEWRULE, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + return ruleHandle(rule, req) +} + +// RuleDel deletes a rule from the system. +// Equivalent to: ip rule del +func RuleDel(rule *Rule) error { + return pkgHandle.RuleDel(rule) +} + +// RuleDel deletes a rule from the system. +// Equivalent to: ip rule del +func (h *Handle) RuleDel(rule *Rule) error { + req := h.newNetlinkRequest(unix.RTM_DELRULE, unix.NLM_F_ACK) + return ruleHandle(rule, req) +} + +func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { + msg := nl.NewRtMsg() + msg.Family = unix.AF_INET + msg.Protocol = unix.RTPROT_BOOT + msg.Scope = unix.RT_SCOPE_UNIVERSE + msg.Table = unix.RT_TABLE_UNSPEC + msg.Type = rule.Type // usually 0, same as unix.RTN_UNSPEC + if msg.Type == 0 && req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { + msg.Type = unix.RTN_UNICAST + } + if rule.Invert { + msg.Flags |= FibRuleInvert + } + if rule.Family != 0 { + msg.Family = uint8(rule.Family) + } + if rule.Table >= 0 && rule.Table < 256 { + msg.Table = uint8(rule.Table) + } + if rule.Tos != 0 { + msg.Tos = uint8(rule.Tos) + } + + var dstFamily uint8 + var rtAttrs []*nl.RtAttr + if rule.Dst != nil && rule.Dst.IP != nil { + dstLen, _ := rule.Dst.Mask.Size() + msg.Dst_len = uint8(dstLen) + msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP)) + dstFamily = msg.Family + var dstData []byte + if msg.Family == unix.AF_INET { + dstData = rule.Dst.IP.To4() + } else { + dstData = rule.Dst.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) + } + + if rule.Src != nil && rule.Src.IP != nil { + msg.Family = uint8(nl.GetIPFamily(rule.Src.IP)) + if dstFamily != 0 && dstFamily != msg.Family { + return fmt.Errorf("source and destination ip are not the same IP family") + } + srcLen, _ := rule.Src.Mask.Size() + msg.Src_len = uint8(srcLen) + var srcData []byte + if msg.Family == unix.AF_INET { + srcData = rule.Src.IP.To4() + } else { + srcData = rule.Src.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_SRC, srcData)) + } + + req.AddData(msg) + for i := range rtAttrs { + req.AddData(rtAttrs[i]) + } + + if rule.Priority >= 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.Priority)) + req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b)) + } + if rule.Mark != 0 || rule.Mask != nil { + b := make([]byte, 4) + native.PutUint32(b, rule.Mark) + req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b)) + } + if rule.Mask != nil { + b := make([]byte, 4) + native.PutUint32(b, *rule.Mask) + req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b)) + } + if rule.Flow >= 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.Flow)) + req.AddData(nl.NewRtAttr(nl.FRA_FLOW, b)) + } + if rule.TunID > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.TunID)) + req.AddData(nl.NewRtAttr(nl.FRA_TUN_ID, b)) + } + if rule.Table >= 256 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.Table)) + req.AddData(nl.NewRtAttr(nl.FRA_TABLE, b)) + } + if msg.Table > 0 { + if rule.SuppressPrefixlen >= 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.SuppressPrefixlen)) + req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_PREFIXLEN, b)) + } + if rule.SuppressIfgroup >= 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.SuppressIfgroup)) + req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_IFGROUP, b)) + } + } + if rule.IifName != "" { + req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName+"\x00"))) + } + if rule.OifName != "" { + 
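+		// As with FRA_IIFNAME above, the kernel expects the interface
+		// name as a NUL-terminated string.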
req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName+"\x00"))) + } + if rule.Goto >= 0 { + msg.Type = nl.FR_ACT_GOTO + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.Goto)) + req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) + } + + if rule.IPProto > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(rule.IPProto)) + req.AddData(nl.NewRtAttr(nl.FRA_IP_PROTO, b)) + } + + if rule.Dport != nil { + b := rule.Dport.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_DPORT_RANGE, b)) + } + + if rule.Sport != nil { + b := rule.Sport.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_SPORT_RANGE, b)) + } + + if rule.UIDRange != nil { + b := rule.UIDRange.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_UID_RANGE, b)) + } + + if rule.Protocol > 0 { + req.AddData(nl.NewRtAttr(nl.FRA_PROTOCOL, nl.Uint8Attr(rule.Protocol))) + } + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// RuleList lists rules in the system. +// Equivalent to: ip rule list +func RuleList(family int) ([]Rule, error) { + return pkgHandle.RuleList(family) +} + +// RuleList lists rules in the system. +// Equivalent to: ip rule list +func (h *Handle) RuleList(family int) ([]Rule, error) { + return h.RuleListFiltered(family, nil, 0) +} + +// RuleListFiltered gets a list of rules in the system filtered by the +// specified rule template `filter`. +// Equivalent to: ip rule list +func RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { + return pkgHandle.RuleListFiltered(family, filter, filterMask) +} + +// RuleListFiltered lists rules in the system. +// Equivalent to: ip rule list +func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ([]Rule, error) { + req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) + msg := nl.NewIfInfomsg(family) + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) + if err != nil { + return nil, err + } + + var res = make([]Rule, 0) + for i := range msgs { + msg := nl.DeserializeRtMsg(msgs[i]) + attrs, err := nl.ParseRouteAttr(msgs[i][msg.Len():]) + if err != nil { + return nil, err + } + + rule := NewRule() + rule.Priority = 0 // The default priority from kernel + + rule.Invert = msg.Flags&FibRuleInvert > 0 + rule.Family = int(msg.Family) + rule.Tos = uint(msg.Tos) + + for j := range attrs { + switch attrs[j].Attr.Type { + case unix.RTA_TABLE: + rule.Table = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_SRC: + rule.Src = &net.IPNet{ + IP: attrs[j].Value, + Mask: net.CIDRMask(int(msg.Src_len), 8*len(attrs[j].Value)), + } + case nl.FRA_DST: + rule.Dst = &net.IPNet{ + IP: attrs[j].Value, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)), + } + case nl.FRA_FWMARK: + rule.Mark = native.Uint32(attrs[j].Value[0:4]) + case nl.FRA_FWMASK: + mask := native.Uint32(attrs[j].Value[0:4]) + rule.Mask = &mask + case nl.FRA_TUN_ID: + rule.TunID = uint(native.Uint64(attrs[j].Value[0:8])) + case nl.FRA_IIFNAME: + rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) + case nl.FRA_OIFNAME: + rule.OifName = string(attrs[j].Value[:len(attrs[j].Value)-1]) + case nl.FRA_SUPPRESS_PREFIXLEN: + i := native.Uint32(attrs[j].Value[0:4]) + if i != 0xffffffff { + rule.SuppressPrefixlen = int(i) + } + case nl.FRA_SUPPRESS_IFGROUP: + i := native.Uint32(attrs[j].Value[0:4]) + if i != 0xffffffff { + rule.SuppressIfgroup = int(i) + } + case nl.FRA_FLOW: + rule.Flow = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_GOTO: + rule.Goto = int(native.Uint32(attrs[j].Value[0:4])) 
+ case nl.FRA_PRIORITY: + rule.Priority = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_IP_PROTO: + rule.IPProto = int(native.Uint32(attrs[j].Value[0:4])) + case nl.FRA_DPORT_RANGE: + rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_SPORT_RANGE: + rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_UID_RANGE: + rule.UIDRange = NewRuleUIDRange(native.Uint32(attrs[j].Value[0:4]), native.Uint32(attrs[j].Value[4:8])) + case nl.FRA_PROTOCOL: + rule.Protocol = uint8(attrs[j].Value[0]) + } + } + + if filter != nil { + switch { + case filterMask&RT_FILTER_SRC != 0 && + (rule.Src == nil || rule.Src.String() != filter.Src.String()): + continue + case filterMask&RT_FILTER_DST != 0 && + (rule.Dst == nil || rule.Dst.String() != filter.Dst.String()): + continue + case filterMask&RT_FILTER_TABLE != 0 && + filter.Table != unix.RT_TABLE_UNSPEC && rule.Table != filter.Table: + continue + case filterMask&RT_FILTER_TOS != 0 && rule.Tos != filter.Tos: + continue + case filterMask&RT_FILTER_PRIORITY != 0 && rule.Priority != filter.Priority: + continue + case filterMask&RT_FILTER_MARK != 0 && rule.Mark != filter.Mark: + continue + case filterMask&RT_FILTER_MASK != 0 && !ptrEqual(rule.Mask, filter.Mask): + continue + } + } + + res = append(res, *rule) + } + + return res, nil +} + +func (pr *RulePortRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 2), make([]byte, 2)} + native.PutUint16(b[0], pr.Start) + native.PutUint16(b[1], pr.End) + return bytes.Join(b, []byte{}) +} + +func (pr *RuleUIDRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 4), make([]byte, 4)} + native.PutUint32(b[0], pr.Start) + native.PutUint32(b[1], pr.End) + return bytes.Join(b, []byte{}) +} + +func ptrEqual(a, b *uint32) bool { + if a == b { + return true + } + if (a == nil) || (b == nil) { + return false + } + return *a == *b +} + +func (r Rule) typeString() string { + switch r.Type { + case unix.RTN_UNSPEC: // zero + return "" + case unix.RTN_UNICAST: + return "" + case unix.RTN_LOCAL: + return "local" + case unix.RTN_BROADCAST: + return "broadcast" + case unix.RTN_ANYCAST: + return "anycast" + case unix.RTN_MULTICAST: + return "multicast" + case unix.RTN_BLACKHOLE: + return "blackhole" + case unix.RTN_UNREACHABLE: + return "unreachable" + case unix.RTN_PROHIBIT: + return "prohibit" + case unix.RTN_THROW: + return "throw" + case unix.RTN_NAT: + return "nat" + case unix.RTN_XRESOLVE: + return "xresolve" + default: + return fmt.Sprintf("type(0x%x)", r.Type) + } +} diff --git a/vendor/github.com/vishvananda/netlink/rule_nonlinux.go b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go new file mode 100644 index 000000000..2b19aa64c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/rule_nonlinux.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package netlink + +func (r Rule) typeString() string { + return "" +} diff --git a/vendor/github.com/vishvananda/netlink/socket.go b/vendor/github.com/vishvananda/netlink/socket.go new file mode 100644 index 000000000..e65efb130 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/socket.go @@ -0,0 +1,104 @@ +package netlink + +import "net" + +// SocketID identifies a single socket. +type SocketID struct { + SourcePort uint16 + DestinationPort uint16 + Source net.IP + Destination net.IP + Interface uint32 + Cookie [2]uint32 +} + +// Socket represents a netlink socket. 
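+// Its fields correspond to the kernel's inet_diag_msg reply used by the
+// sock_diag(7) subsystem: family, state, timer and retransmit counters, the
+// socket ID, plus queue sizes, uid and inode.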
+type Socket struct { + Family uint8 + State uint8 + Timer uint8 + Retrans uint8 + ID SocketID + Expires uint32 + RQueue uint32 + WQueue uint32 + UID uint32 + INode uint32 +} + +// UnixSocket represents a netlink unix socket. +type UnixSocket struct { + Type uint8 + Family uint8 + State uint8 + pad uint8 + INode uint32 + Cookie [2]uint32 +} + +// XDPSocket represents an XDP socket (and the common diagnosis part in +// particular). Please note that in contrast to [UnixSocket] the XDPSocket type +// does not feature “State” information. +type XDPSocket struct { + // xdp_diag_msg + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21 + Family uint8 + Type uint8 + pad uint16 + Ino uint32 + Cookie [2]uint32 +} + +type XDPInfo struct { + // XDP_DIAG_INFO/xdp_diag_info + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L51 + Ifindex uint32 + QueueID uint32 + + // XDP_DIAG_UID + UID uint32 + + // XDP_RX_RING + // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L56 + RxRingEntries uint32 + TxRingEntries uint32 + UmemFillRingEntries uint32 + UmemCompletionRingEntries uint32 + + // XDR_DIAG_UMEM + Umem *XDPDiagUmem + + // XDR_DIAG_STATS + Stats *XDPDiagStats +} + +const ( + XDP_DU_F_ZEROCOPY = 1 << iota +) + +// XDPDiagUmem describes the umem attached to an XDP socket. +// +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L62 +type XDPDiagUmem struct { + Size uint64 + ID uint32 + NumPages uint32 + ChunkSize uint32 + Headroom uint32 + Ifindex uint32 + QueueID uint32 + Flags uint32 + Refs uint32 +} + +// XDPDiagStats contains ring statistics for an XDP socket. +// +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L74 +type XDPDiagStats struct { + RxDropped uint64 + RxInvalid uint64 + RxFull uint64 + FillRingEmpty uint64 + TxInvalid uint64 + TxRingEmpty uint64 +} diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go new file mode 100644 index 000000000..4eb4aeafb --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -0,0 +1,590 @@ +package netlink + +import ( + "errors" + "fmt" + "net" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const ( + sizeofSocketID = 0x30 + sizeofSocketRequest = sizeofSocketID + 0x8 + sizeofSocket = sizeofSocketID + 0x18 + sizeofUnixSocketRequest = 0x18 // 24 byte + sizeofUnixSocket = 0x10 // 16 byte +) + +type socketRequest struct { + Family uint8 + Protocol uint8 + Ext uint8 + pad uint8 + States uint32 + ID SocketID +} + +type writeBuffer struct { + Bytes []byte + pos int +} + +func (b *writeBuffer) Write(c byte) { + b.Bytes[b.pos] = c + b.pos++ +} + +func (b *writeBuffer) Next(n int) []byte { + s := b.Bytes[b.pos : b.pos+n] + b.pos += n + return s +} + +func (r *socketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + b.Write(r.Ext) + b.Write(r.pad) + native.PutUint32(b.Next(4), r.States) + networkOrder.PutUint16(b.Next(2), r.ID.SourcePort) + networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort) + if r.Family == unix.AF_INET6 { + copy(b.Next(16), r.ID.Source) + copy(b.Next(16), r.ID.Destination) + } else { + copy(b.Next(16), r.ID.Source.To4()) + copy(b.Next(16), r.ID.Destination.To4()) + } + native.PutUint32(b.Next(4), r.ID.Interface) + native.PutUint32(b.Next(4), r.ID.Cookie[0]) + native.PutUint32(b.Next(4), r.ID.Cookie[1]) 
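+	// At this point the buffer matches the kernel's inet_diag_req_v2
+	// layout: ports are written in network byte order, and each address
+	// always occupies 16 bytes (an IPv4 address fills only the first 4).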
+ return b.Bytes +} + +func (r *socketRequest) Len() int { return sizeofSocketRequest } + +// According to linux/include/uapi/linux/unix_diag.h +type unixSocketRequest struct { + Family uint8 + Protocol uint8 + pad uint16 + States uint32 + INode uint32 + Show uint32 + Cookie [2]uint32 +} + +func (r *unixSocketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofUnixSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + native.PutUint16(b.Next(2), r.pad) + native.PutUint32(b.Next(4), r.States) + native.PutUint32(b.Next(4), r.INode) + native.PutUint32(b.Next(4), r.Show) + native.PutUint32(b.Next(4), r.Cookie[0]) + native.PutUint32(b.Next(4), r.Cookie[1]) + return b.Bytes +} + +func (r *unixSocketRequest) Len() int { return sizeofUnixSocketRequest } + +type readBuffer struct { + Bytes []byte + pos int +} + +func (b *readBuffer) Read() byte { + c := b.Bytes[b.pos] + b.pos++ + return c +} + +func (b *readBuffer) Next(n int) []byte { + s := b.Bytes[b.pos : b.pos+n] + b.pos += n + return s +} + +func (s *Socket) deserialize(b []byte) error { + if len(b) < sizeofSocket { + return fmt.Errorf("socket data short read (%d); want %d", len(b), sizeofSocket) + } + rb := readBuffer{Bytes: b} + s.Family = rb.Read() + s.State = rb.Read() + s.Timer = rb.Read() + s.Retrans = rb.Read() + s.ID.SourcePort = networkOrder.Uint16(rb.Next(2)) + s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2)) + if s.Family == unix.AF_INET6 { + s.ID.Source = net.IP(rb.Next(16)) + s.ID.Destination = net.IP(rb.Next(16)) + } else { + s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + } + s.ID.Interface = native.Uint32(rb.Next(4)) + s.ID.Cookie[0] = native.Uint32(rb.Next(4)) + s.ID.Cookie[1] = native.Uint32(rb.Next(4)) + s.Expires = native.Uint32(rb.Next(4)) + s.RQueue = native.Uint32(rb.Next(4)) + s.WQueue = native.Uint32(rb.Next(4)) + s.UID = native.Uint32(rb.Next(4)) + s.INode = native.Uint32(rb.Next(4)) + return nil +} + +func (u *UnixSocket) deserialize(b []byte) error { + if len(b) < sizeofUnixSocket { + return fmt.Errorf("unix diag data short read (%d); want %d", len(b), sizeofUnixSocket) + } + rb := readBuffer{Bytes: b} + u.Type = rb.Read() + u.Family = rb.Read() + u.State = rb.Read() + u.pad = rb.Read() + u.INode = native.Uint32(rb.Next(4)) + u.Cookie[0] = native.Uint32(rb.Next(4)) + u.Cookie[1] = native.Uint32(rb.Next(4)) + return nil +} + +// SocketGet returns the Socket identified by its local and remote addresses. 
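+//
+// A lookup sketch (the addresses and ports are illustrative; both endpoints
+// must be known):
+//
+//	local := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8080}
+//	remote := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 45678}
+//	sock, err := netlink.SocketGet(local, remote)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(sock.UID, sock.INode)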
+func (h *Handle) SocketGet(local, remote net.Addr) (*Socket, error) { + var protocol uint8 + var localIP, remoteIP net.IP + var localPort, remotePort uint16 + switch l := local.(type) { + case *net.TCPAddr: + r, ok := remote.(*net.TCPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP = l.IP + localPort = uint16(l.Port) + remoteIP = r.IP + remotePort = uint16(r.Port) + protocol = unix.IPPROTO_TCP + case *net.UDPAddr: + r, ok := remote.(*net.UDPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP = l.IP + localPort = uint16(l.Port) + remoteIP = r.IP + remotePort = uint16(r.Port) + protocol = unix.IPPROTO_UDP + default: + return nil, ErrNotImplemented + } + + var family uint8 + if localIP.To4() != nil && remoteIP.To4() != nil { + family = unix.AF_INET + } + + if family == 0 && localIP.To16() != nil && remoteIP.To16() != nil { + family = unix.AF_INET6 + } + + if family == 0 { + return nil, ErrNotImplemented + } + + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: protocol, + States: 0xffffffff, + ID: SocketID{ + SourcePort: localPort, + DestinationPort: remotePort, + Source: localIP, + Destination: remoteIP, + Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, + }, + }) + + msgs, err := req.Execute(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY) + if err != nil { + return nil, err + } + if len(msgs) == 0 { + return nil, errors.New("no message nor error from netlink") + } + if len(msgs) > 2 { + return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs)) + } + + sock := &Socket{} + if err := sock.deserialize(msgs[0]); err != nil { + return nil, err + } + return sock, nil +} + +// SocketGet returns the Socket identified by its local and remote addresses. +func SocketGet(local, remote net.Addr) (*Socket, error) { + return pkgHandle.SocketGet(local, remote) +} + +// SocketDestroy kills the Socket identified by its local and remote addresses. +func (h *Handle) SocketDestroy(local, remote net.Addr) error { + localTCP, ok := local.(*net.TCPAddr) + if !ok { + return ErrNotImplemented + } + remoteTCP, ok := remote.(*net.TCPAddr) + if !ok { + return ErrNotImplemented + } + localIP := localTCP.IP.To4() + if localIP == nil { + return ErrNotImplemented + } + remoteIP := remoteTCP.IP.To4() + if remoteIP == nil { + return ErrNotImplemented + } + + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + if err != nil { + return err + } + defer s.Close() + req := h.newNetlinkRequest(nl.SOCK_DESTROY, unix.NLM_F_ACK) + req.AddData(&socketRequest{ + Family: unix.AF_INET, + Protocol: unix.IPPROTO_TCP, + ID: SocketID{ + SourcePort: uint16(localTCP.Port), + DestinationPort: uint16(remoteTCP.Port), + Source: localIP, + Destination: remoteIP, + Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, + }, + }) + + _, err = req.Execute(unix.NETLINK_INET_DIAG, 0) + return err +} + +// SocketDestroy kills the Socket identified by its local and remote addresses. +func SocketDestroy(local, remote net.Addr) error { + return pkgHandle.SocketDestroy(local, remote) +} + +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. 
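+//
+// A dump sketch (unix.AF_INET is one possible family argument; only a couple
+// of fields are printed):
+//
+//	infos, err := netlink.SocketDiagTCPInfo(unix.AF_INET)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, i := range infos {
+//		if i.TCPInfo != nil {
+//			fmt.Println(i.InetDiagMsg.ID, i.TCPInfo.Rtt)
+//		}
+//	}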
+func (h *Handle) SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*InetDiagTCPInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *InetDiagTCPInfoResp + if res, err = attrsToInetDiagTCPInfoResp(attrs, sockInfo); err != nil { + return false + } + + result = append(result, res) + return true + }) + + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagTCPInfo requests INET_DIAG_INFO for TCP protocol for specified family type and return with extension TCP info. +func SocketDiagTCPInfo(family uint8) ([]*InetDiagTCPInfoResp, error) { + return pkgHandle.SocketDiagTCPInfo(family) +} + +// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +func (h *Handle) SocketDiagTCP(family uint8) ([]*Socket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_TCP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*Socket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + result = append(result, sockInfo) + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagTCP requests INET_DIAG_INFO for TCP protocol for specified family type and return related socket. +func SocketDiagTCP(family uint8) ([]*Socket, error) { + return pkgHandle.SocketDiagTCP(family) +} + +// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. 
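+//
+// Usage mirrors SocketDiagTCPInfo; a sketch:
+//
+//	infos, err := netlink.SocketDiagUDPInfo(unix.AF_INET)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, i := range infos {
+//		if i.Memory != nil {
+//			fmt.Println(i.InetDiagMsg.ID, i.Memory.RMem)
+//		}
+//	}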
+func (h *Handle) SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { + // Construct the request + var extensions uint8 + extensions = 1 << (INET_DIAG_VEGASINFO - 1) + extensions |= 1 << (INET_DIAG_INFO - 1) + extensions |= 1 << (INET_DIAG_MEMINFO - 1) + + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_UDP, + Ext: extensions, + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*InetDiagUDPInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *InetDiagUDPInfoResp + if res, err = attrsToInetDiagUDPInfoResp(attrs, sockInfo); err != nil { + return false + } + + result = append(result, res) + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagUDPInfo requests INET_DIAG_INFO for UDP protocol for specified family type and return with extension info. +func SocketDiagUDPInfo(family uint8) ([]*InetDiagUDPInfoResp, error) { + return pkgHandle.SocketDiagUDPInfo(family) +} + +// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +func (h *Handle) SocketDiagUDP(family uint8) ([]*Socket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&socketRequest{ + Family: family, + Protocol: unix.IPPROTO_UDP, + Ext: (1 << (INET_DIAG_VEGASINFO - 1)) | (1 << (INET_DIAG_INFO - 1)), + States: uint32(0xfff), // all states + }) + + // Do the query and parse the result + var result []*Socket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &Socket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + result = append(result, sockInfo) + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// SocketDiagUDP requests INET_DIAG_INFO for UDP protocol for specified family type and return related socket. +func SocketDiagUDP(family uint8) ([]*Socket, error) { + return pkgHandle.SocketDiagUDP(family) +} + +// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. 
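+//
+// A sketch that lists the bound names of unix sockets (Name is nil for
+// unnamed sockets):
+//
+//	infos, err := netlink.UnixSocketDiagInfo()
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, i := range infos {
+//		if i.Name != nil {
+//			fmt.Println(*i.Name, i.DiagMsg.INode)
+//		}
+//	}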
+func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { + // Construct the request + var extensions uint8 + extensions = 1 << UNIX_DIAG_NAME + extensions |= 1 << UNIX_DIAG_PEER + extensions |= 1 << UNIX_DIAG_RQLEN + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&unixSocketRequest{ + Family: unix.AF_UNIX, + States: ^uint32(0), // all states + Show: uint32(extensions), + }) + + var result []*UnixDiagInfoResp + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &UnixSocket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + // Diagnosis also delivers sockets with AF_INET family, filter those + if sockInfo.Family != unix.AF_UNIX { + return false + } + + var attrs []syscall.NetlinkRouteAttr + if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + return false + } + + var res *UnixDiagInfoResp + if res, err = attrsToUnixDiagInfoResp(attrs, sockInfo); err != nil { + return false + } + result = append(result, res) + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// UnixSocketDiagInfo requests UNIX_DIAG_INFO for unix sockets and return with extension info. +func UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { + return pkgHandle.UnixSocketDiagInfo() +} + +// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. +func (h *Handle) UnixSocketDiag() ([]*UnixSocket, error) { + // Construct the request + req := h.newNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&unixSocketRequest{ + Family: unix.AF_UNIX, + States: ^uint32(0), // all states + }) + + var result []*UnixSocket + var err error + err = req.ExecuteIter(unix.NETLINK_INET_DIAG, nl.SOCK_DIAG_BY_FAMILY, func(msg []byte) bool { + sockInfo := &UnixSocket{} + if err = sockInfo.deserialize(msg); err != nil { + return false + } + + // Diagnosis also delivers sockets with AF_INET family, filter those + if sockInfo.Family == unix.AF_UNIX { + result = append(result, sockInfo) + } + return true + }) + if err != nil { + return nil, err + } + return result, nil +} + +// UnixSocketDiag requests UNIX_DIAG_INFO for unix sockets. 
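+//
+// Compared to UnixSocketDiagInfo, this returns only the raw socket records
+// without the parsed attributes; a sketch:
+//
+//	socks, err := netlink.UnixSocketDiag()
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(len(socks), "unix sockets")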
+func UnixSocketDiag() ([]*UnixSocket, error) { + return pkgHandle.UnixSocketDiag() +} + +func attrsToInetDiagTCPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagTCPInfoResp, error) { + info := &InetDiagTCPInfoResp{ + InetDiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case INET_DIAG_INFO: + info.TCPInfo = &TCPInfo{} + if err := info.TCPInfo.deserialize(a.Value); err != nil { + return nil, err + } + case INET_DIAG_BBRINFO: + info.TCPBBRInfo = &TCPBBRInfo{} + if err := info.TCPBBRInfo.deserialize(a.Value); err != nil { + return nil, err + } + } + } + + return info, nil +} + +func attrsToInetDiagUDPInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *Socket) (*InetDiagUDPInfoResp, error) { + info := &InetDiagUDPInfoResp{ + InetDiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case INET_DIAG_MEMINFO: + info.Memory = &MemInfo{} + if err := info.Memory.deserialize(a.Value); err != nil { + return nil, err + } + } + } + + return info, nil +} + +func attrsToUnixDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *UnixSocket) (*UnixDiagInfoResp, error) { + info := &UnixDiagInfoResp{ + DiagMsg: sockInfo, + } + for _, a := range attrs { + switch a.Attr.Type { + case UNIX_DIAG_NAME: + name := string(a.Value[:a.Attr.Len]) + info.Name = &name + case UNIX_DIAG_PEER: + peer := native.Uint32(a.Value) + info.Peer = &peer + case UNIX_DIAG_RQLEN: + info.Queue = &QueueInfo{ + RQueue: native.Uint32(a.Value[:4]), + WQueue: native.Uint32(a.Value[4:]), + } + // default: + // fmt.Println("unknown unix attribute type", a.Attr.Type, "with data", a.Value) + } + } + + return info, nil +} diff --git a/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go new file mode 100644 index 000000000..20c82f9c7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/socket_xdp_linux.go @@ -0,0 +1,195 @@ +package netlink + +import ( + "errors" + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +const ( + sizeofXDPSocketRequest = 1 + 1 + 2 + 4 + 4 + 2*4 + sizeofXDPSocket = 0x10 +) + +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L12 +type xdpSocketRequest struct { + Family uint8 + Protocol uint8 + pad uint16 + Ino uint32 + Show uint32 + Cookie [2]uint32 +} + +func (r *xdpSocketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + native.PutUint16(b.Next(2), r.pad) + native.PutUint32(b.Next(4), r.Ino) + native.PutUint32(b.Next(4), r.Show) + native.PutUint32(b.Next(4), r.Cookie[0]) + native.PutUint32(b.Next(4), r.Cookie[1]) + return b.Bytes +} + +func (r *xdpSocketRequest) Len() int { return sizeofXDPSocketRequest } + +func (s *XDPSocket) deserialize(b []byte) error { + if len(b) < sizeofXDPSocket { + return fmt.Errorf("XDP socket data short read (%d); want %d", len(b), sizeofXDPSocket) + } + rb := readBuffer{Bytes: b} + s.Family = rb.Read() + s.Type = rb.Read() + s.pad = native.Uint16(rb.Next(2)) + s.Ino = native.Uint32(rb.Next(4)) + s.Cookie[0] = native.Uint32(rb.Next(4)) + s.Cookie[1] = native.Uint32(rb.Next(4)) + return nil +} + +// XDPSocketGet returns the XDP socket identified by its inode number and/or +// socket cookie. Specify the cookie as SOCK_ANY_COOKIE if +func SocketXDPGetInfo(ino uint32, cookie uint64) (*XDPDiagInfoResp, error) { + // We have a problem here: dumping AF_XDP sockets currently does not support + // filtering. 
We thus need to dump all XSKs and then only filter afterwards + // :( + xsks, err := SocketDiagXDP() + if err != nil { + return nil, err + } + checkCookie := cookie != SOCK_ANY_COOKIE && cookie != 0 + crumblingCookie := [2]uint32{uint32(cookie), uint32(cookie >> 32)} + checkIno := ino != 0 + var xskinfo *XDPDiagInfoResp + for _, xsk := range xsks { + if checkIno && xsk.XDPDiagMsg.Ino != ino { + continue + } + if checkCookie && xsk.XDPDiagMsg.Cookie != crumblingCookie { + continue + } + if xskinfo != nil { + return nil, errors.New("multiple matching XDP sockets") + } + xskinfo = xsk + } + if xskinfo == nil { + return nil, errors.New("no matching XDP socket") + } + return xskinfo, nil +} + +// SocketDiagXDP requests XDP_DIAG_INFO for XDP family sockets. +func SocketDiagXDP() ([]*XDPDiagInfoResp, error) { + var result []*XDPDiagInfoResp + err := socketDiagXDPExecutor(func(m syscall.NetlinkMessage) error { + sockInfo := &XDPSocket{} + if err := sockInfo.deserialize(m.Data); err != nil { + return err + } + attrs, err := nl.ParseRouteAttr(m.Data[sizeofXDPSocket:]) + if err != nil { + return err + } + + res, err := attrsToXDPDiagInfoResp(attrs, sockInfo) + if err != nil { + return err + } + + result = append(result, res) + return nil + }) + if err != nil { + return nil, err + } + return result, nil +} + +// socketDiagXDPExecutor requests XDP_DIAG_INFO for XDP family sockets. +func socketDiagXDPExecutor(receiver func(syscall.NetlinkMessage) error) error { + s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + if err != nil { + return err + } + defer s.Close() + + req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, unix.NLM_F_DUMP) + req.AddData(&xdpSocketRequest{ + Family: unix.AF_XDP, + Show: XDP_SHOW_INFO | XDP_SHOW_RING_CFG | XDP_SHOW_UMEM | XDP_SHOW_STATS, + }) + if err := s.Send(req); err != nil { + return err + } + +loop: + for { + msgs, from, err := s.Receive() + if err != nil { + return err + } + if from.Pid != nl.PidKernel { + return fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + } + if len(msgs) == 0 { + return errors.New("no message nor error from netlink") + } + + for _, m := range msgs { + switch m.Header.Type { + case unix.NLMSG_DONE: + break loop + case unix.NLMSG_ERROR: + error := int32(native.Uint32(m.Data[0:4])) + return syscall.Errno(-error) + } + if err := receiver(m); err != nil { + return err + } + } + } + return nil +} + +func attrsToXDPDiagInfoResp(attrs []syscall.NetlinkRouteAttr, sockInfo *XDPSocket) (*XDPDiagInfoResp, error) { + resp := &XDPDiagInfoResp{ + XDPDiagMsg: sockInfo, + XDPInfo: &XDPInfo{}, + } + for _, a := range attrs { + switch a.Attr.Type { + case XDP_DIAG_INFO: + resp.XDPInfo.Ifindex = native.Uint32(a.Value[0:4]) + resp.XDPInfo.QueueID = native.Uint32(a.Value[4:8]) + case XDP_DIAG_UID: + resp.XDPInfo.UID = native.Uint32(a.Value[0:4]) + case XDP_DIAG_RX_RING: + resp.XDPInfo.RxRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_TX_RING: + resp.XDPInfo.TxRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM_FILL_RING: + resp.XDPInfo.UmemFillRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM_COMPLETION_RING: + resp.XDPInfo.UmemCompletionRingEntries = native.Uint32(a.Value[0:4]) + case XDP_DIAG_UMEM: + umem := &XDPDiagUmem{} + if err := umem.deserialize(a.Value); err != nil { + return nil, err + } + resp.XDPInfo.Umem = umem + case XDP_DIAG_STATS: + stats := &XDPDiagStats{} + if err := stats.deserialize(a.Value); err != nil { + return nil, err + } + resp.XDPInfo.Stats = stats + } + } + return resp, nil +} diff 
--git a/vendor/github.com/vishvananda/netlink/tcp.go b/vendor/github.com/vishvananda/netlink/tcp.go new file mode 100644 index 000000000..43f80a0fc --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/tcp.go @@ -0,0 +1,92 @@ +package netlink + +// TCP States +const ( + TCP_ESTABLISHED = iota + 0x01 + TCP_SYN_SENT + TCP_SYN_RECV + TCP_FIN_WAIT1 + TCP_FIN_WAIT2 + TCP_TIME_WAIT + TCP_CLOSE + TCP_CLOSE_WAIT + TCP_LAST_ACK + TCP_LISTEN + TCP_CLOSING + TCP_NEW_SYN_REC + TCP_MAX_STATES +) + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Snd_wscale uint8 // no uint4 + Rcv_wscale uint8 + Delivery_rate_app_limited uint8 + Fastopen_client_fail uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ + Bytes_received uint64 /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ + Segs_out uint32 /* RFC4898 tcpEStatsPerfSegsOut */ + Segs_in uint32 /* RFC4898 tcpEStatsPerfSegsIn */ + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 /* RFC4898 tcpEStatsDataSegsIn */ + Data_segs_out uint32 /* RFC4898 tcpEStatsDataSegsOut */ + Delivery_rate uint64 + Busy_time uint64 /* Time (usec) busy sending data */ + Rwnd_limited uint64 /* Time (usec) limited by receive window */ + Sndbuf_limited uint64 /* Time (usec) limited by send buffer */ + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ + Bytes_retrans uint64 /* RFC4898 tcpEStatsPerfOctetsRetrans */ + Dsack_dups uint32 /* RFC4898 tcpEStatsStackDSACKDups */ + Reord_seen uint32 /* reordering events seen */ + Rcv_ooopack uint32 /* Out-of-order packets received */ + Snd_wnd uint32 /* peer's advertised receive window after * scaling (bytes) */ +} + +type TCPBBRInfo struct { + BBRBW uint64 + BBRMinRTT uint32 + BBRPacingGain uint32 + BBRCwndGain uint32 +} + +// According to https://man7.org/linux/man-pages/man7/sock_diag.7.html +type MemInfo struct { + RMem uint32 + WMem uint32 + FMem uint32 + TMem uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/tcp_linux.go b/vendor/github.com/vishvananda/netlink/tcp_linux.go new file mode 100644 index 000000000..e98036da5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/tcp_linux.go @@ -0,0 +1,368 @@ +package netlink + +import ( + "bytes" + "errors" + "io" +) + +const ( + tcpBBRInfoLen = 20 + memInfoLen = 16 +) + +func checkDeserErr(err error) error { + if err == io.EOF { + return nil + } + return err +} + +func (t *TCPInfo) deserialize(b []byte) error { + var err error + rb := bytes.NewBuffer(b) + + t.State, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Ca_state, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Retransmits, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Probes, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + + t.Backoff, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Options, err = rb.ReadByte() + if err != nil { + return checkDeserErr(err) 
+ } + + scales, err := rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Snd_wscale = scales >> 4 // first 4 bits + t.Rcv_wscale = scales & 0xf // last 4 bits + + rateLimAndFastOpen, err := rb.ReadByte() + if err != nil { + return checkDeserErr(err) + } + t.Delivery_rate_app_limited = rateLimAndFastOpen >> 7 // get first bit + t.Fastopen_client_fail = rateLimAndFastOpen >> 5 & 3 // get next two bits + + next := rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rto = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Ato = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_mss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_mss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Unacked = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Sacked = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Lost = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Retrans = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Fackets = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_data_sent = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_ack_sent = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_data_recv = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Last_ack_recv = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Pmtu = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_ssthresh = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rtt = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rttvar = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_ssthresh = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_cwnd = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Advmss = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Reordering = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_rtt = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_space = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Total_retrans = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Pacing_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Max_pacing_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_acked = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_received = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Segs_out = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Segs_in = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Notsent_bytes = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Min_rtt = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 
{ + return nil + } + t.Data_segs_in = native.Uint32(next) + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Data_segs_out = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Delivery_rate = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Busy_time = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Rwnd_limited = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Sndbuf_limited = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Delivered = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Delivered_ce = native.Uint32(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_sent = native.Uint64(next) + + next = rb.Next(8) + if len(next) == 0 { + return nil + } + t.Bytes_retrans = native.Uint64(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Dsack_dups = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Reord_seen = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Rcv_ooopack = native.Uint32(next) + + next = rb.Next(4) + if len(next) == 0 { + return nil + } + t.Snd_wnd = native.Uint32(next) + return nil +} + +func (t *TCPBBRInfo) deserialize(b []byte) error { + if len(b) != tcpBBRInfoLen { + return errors.New("Invalid length") + } + + rb := bytes.NewBuffer(b) + t.BBRBW = native.Uint64(rb.Next(8)) + t.BBRMinRTT = native.Uint32(rb.Next(4)) + t.BBRPacingGain = native.Uint32(rb.Next(4)) + t.BBRCwndGain = native.Uint32(rb.Next(4)) + + return nil +} + +func (m *MemInfo) deserialize(b []byte) error { + if len(b) != memInfoLen { + return errors.New("Invalid length") + } + + rb := bytes.NewBuffer(b) + m.RMem = native.Uint32(rb.Next(4)) + m.WMem = native.Uint32(rb.Next(4)) + m.FMem = native.Uint32(rb.Next(4)) + m.TMem = native.Uint32(rb.Next(4)) + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/unix_diag.go b/vendor/github.com/vishvananda/netlink/unix_diag.go new file mode 100644 index 000000000..d81776f36 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/unix_diag.go @@ -0,0 +1,27 @@ +package netlink + +// According to linux/include/uapi/linux/unix_diag.h +const ( + UNIX_DIAG_NAME = iota + UNIX_DIAG_VFS + UNIX_DIAG_PEER + UNIX_DIAG_ICONS + UNIX_DIAG_RQLEN + UNIX_DIAG_MEMINFO + UNIX_DIAG_SHUTDOWN + UNIX_DIAG_UID + UNIX_DIAG_MAX +) + +type UnixDiagInfoResp struct { + DiagMsg *UnixSocket + Name *string + Peer *uint32 + Queue *QueueInfo + Shutdown *uint8 +} + +type QueueInfo struct { + RQueue uint32 + WQueue uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/vdpa_linux.go b/vendor/github.com/vishvananda/netlink/vdpa_linux.go new file mode 100644 index 000000000..7c15986d0 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/vdpa_linux.go @@ -0,0 +1,463 @@ +package netlink + +import ( + "fmt" + "net" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/vishvananda/netlink/nl" +) + +type vdpaDevID struct { + Name string + ID uint32 +} + +// VDPADev contains info about VDPA device +type VDPADev struct { + vdpaDevID + VendorID uint32 + MaxVQS uint32 + MaxVQSize uint16 + MinVQSize uint16 +} + +// VDPADevConfig contains configuration of the VDPA device +type VDPADevConfig struct { + vdpaDevID + Features uint64 + NegotiatedFeatures uint64 + Net VDPADevConfigNet +} + +// VDPADevVStats conatins vStats for the VDPA device +type 
VDPADevVStats struct {
+	vdpaDevID
+	QueueIndex         uint32
+	Vendor             []VDPADevVStatsVendor
+	NegotiatedFeatures uint64
+}
+
+// VDPADevVStatsVendor contains the name and value for a vendor-specific vstat option
+type VDPADevVStatsVendor struct {
+	Name  string
+	Value uint64
+}
+
+// VDPADevConfigNet contains status and net config for the VDPA device
+type VDPADevConfigNet struct {
+	Status VDPADevConfigNetStatus
+	Cfg    VDPADevConfigNetCfg
+}
+
+// VDPADevConfigNetStatus contains info about net status
+type VDPADevConfigNetStatus struct {
+	LinkUp   bool
+	Announce bool
+}
+
+// VDPADevConfigNetCfg contains net config for the VDPA device
+type VDPADevConfigNetCfg struct {
+	MACAddr net.HardwareAddr
+	MaxVQP  uint16
+	MTU     uint16
+}
+
+// VDPAMGMTDev contains info about a VDPA management device
+type VDPAMGMTDev struct {
+	BusName           string
+	DevName           string
+	SupportedClasses  uint64
+	SupportedFeatures uint64
+	MaxVQS            uint32
+}
+
+// VDPANewDevParams contains parameters for a new VDPA device;
+// use SetBits to configure the required features for the device.
+// example:
+//
+//	VDPANewDevParams{Features: SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR)}
+type VDPANewDevParams struct {
+	MACAddr  net.HardwareAddr
+	MaxVQP   uint16
+	MTU      uint16
+	Features uint64
+}
+
+// SetBits sets the provided bits in the uint64 input value
+// usage example:
+// features := SetBits(0, VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_MAC_ADDR)
+func SetBits(input uint64, pos ...int) uint64 {
+	for _, p := range pos {
+		input |= 1 << uint64(p)
+	}
+	return input
+}
+
+// IsBitSet checks whether a specific bit is set in the uint64 input value
+// usage example:
+// hasNetClass := IsBitSet(mgmtDev, VIRTIO_ID_NET)
+func IsBitSet(input uint64, pos int) bool {
+	val := input & (1 << uint64(pos))
+	return val > 0
+}
+
+// VDPANewDev adds a new VDPA device
+// Equivalent to: `vdpa dev add name <name> mgmtdev <mgmtBus>/mgmtName [params]`
+func VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error {
+	return pkgHandle.VDPANewDev(name, mgmtBus, mgmtName, params)
+}
+
+// VDPADelDev removes a VDPA device
+// Equivalent to: `vdpa dev del <name>`
+func VDPADelDev(name string) error {
+	return pkgHandle.VDPADelDev(name)
+}
+
+// VDPAGetDevList returns the list of VDPA devices
+// Equivalent to: `vdpa dev show`
+func VDPAGetDevList() ([]*VDPADev, error) {
+	return pkgHandle.VDPAGetDevList()
+}
+
+// VDPAGetDevByName returns the VDPA device selected by name
+// Equivalent to: `vdpa dev show <name>`
+func VDPAGetDevByName(name string) (*VDPADev, error) {
+	return pkgHandle.VDPAGetDevByName(name)
+}
+
+// VDPAGetDevConfigList returns the list of VDPA device configurations
+// Equivalent to: `vdpa dev config show`
+func VDPAGetDevConfigList() ([]*VDPADevConfig, error) {
+	return pkgHandle.VDPAGetDevConfigList()
+}
+
+// VDPAGetDevConfigByName returns the VDPA device configuration selected by name
+// Equivalent to: `vdpa dev config show <name>`
+func VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) {
+	return pkgHandle.VDPAGetDevConfigByName(name)
+}
+
+// VDPAGetDevVStats returns vstats for the VDPA device
+// Equivalent to: `vdpa dev vstats show <name> qidx <queueIndex>`
+func VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) {
+	return pkgHandle.VDPAGetDevVStats(name, queueIndex)
+}
+
+// VDPAGetMGMTDevList returns the list of mgmt devices
+// Equivalent to: `vdpa mgmtdev show`
+func VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) {
+	return pkgHandle.VDPAGetMGMTDevList()
+}
+
+// VDPAGetMGMTDevByBusAndName returns the mgmt device selected by bus and name
+// Equivalent to: `vdpa mgmtdev show <bus>/<name>`
+func
VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) { + return pkgHandle.VDPAGetMGMTDevByBusAndName(bus, name) +} + +type vdpaNetlinkMessage []syscall.NetlinkRouteAttr + +func (id *vdpaDevID) parseIDAttribute(attr syscall.NetlinkRouteAttr) { + switch attr.Attr.Type { + case nl.VDPA_ATTR_DEV_NAME: + id.Name = nl.BytesToString(attr.Value) + case nl.VDPA_ATTR_DEV_ID: + id.ID = native.Uint32(attr.Value) + } +} + +func (netStatus *VDPADevConfigNetStatus) parseStatusAttribute(value []byte) { + a := native.Uint16(value) + netStatus.Announce = (a & VIRTIO_NET_S_ANNOUNCE) > 0 + netStatus.LinkUp = (a & VIRTIO_NET_S_LINK_UP) > 0 +} + +func (d *VDPADev) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + d.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_VENDOR_ID: + d.VendorID = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_MAX_VQS: + d.MaxVQS = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_MAX_VQ_SIZE: + d.MaxVQSize = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_MIN_VQ_SIZE: + d.MinVQSize = native.Uint16(a.Value) + } + } +} + +func (c *VDPADevConfig) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + c.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_NET_CFG_MACADDR: + c.Net.Cfg.MACAddr = a.Value + case nl.VDPA_ATTR_DEV_NET_STATUS: + c.Net.Status.parseStatusAttribute(a.Value) + case nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP: + c.Net.Cfg.MaxVQP = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_NET_CFG_MTU: + c.Net.Cfg.MTU = native.Uint16(a.Value) + case nl.VDPA_ATTR_DEV_FEATURES: + c.Features = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES: + c.NegotiatedFeatures = native.Uint64(a.Value) + } + } +} + +func (s *VDPADevVStats) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + s.parseIDAttribute(a) + switch a.Attr.Type { + case nl.VDPA_ATTR_DEV_QUEUE_INDEX: + s.QueueIndex = native.Uint32(a.Value) + case nl.VDPA_ATTR_DEV_VENDOR_ATTR_NAME: + s.Vendor = append(s.Vendor, VDPADevVStatsVendor{Name: nl.BytesToString(a.Value)}) + case nl.VDPA_ATTR_DEV_VENDOR_ATTR_VALUE: + if len(s.Vendor) == 0 { + break + } + s.Vendor[len(s.Vendor)-1].Value = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_NEGOTIATED_FEATURES: + s.NegotiatedFeatures = native.Uint64(a.Value) + } + } +} + +func (d *VDPAMGMTDev) parseAttributes(attrs vdpaNetlinkMessage) { + for _, a := range attrs { + switch a.Attr.Type { + case nl.VDPA_ATTR_MGMTDEV_BUS_NAME: + d.BusName = nl.BytesToString(a.Value) + case nl.VDPA_ATTR_MGMTDEV_DEV_NAME: + d.DevName = nl.BytesToString(a.Value) + case nl.VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES: + d.SupportedClasses = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_SUPPORTED_FEATURES: + d.SupportedFeatures = native.Uint64(a.Value) + case nl.VDPA_ATTR_DEV_MGMTDEV_MAX_VQS: + d.MaxVQS = native.Uint32(a.Value) + } + } +} + +func (h *Handle) vdpaRequest(command uint8, extraFlags int, attrs []*nl.RtAttr) ([]vdpaNetlinkMessage, error) { + f, err := h.GenlFamilyGet(nl.VDPA_GENL_NAME) + if err != nil { + return nil, err + } + req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_ACK|extraFlags) + req.AddData(&nl.Genlmsg{ + Command: command, + Version: nl.VDPA_GENL_VERSION, + }) + for _, a := range attrs { + req.AddData(a) + } + + resp, err := req.Execute(unix.NETLINK_GENERIC, 0) + if err != nil { + return nil, err + } + messages := make([]vdpaNetlinkMessage, 0, len(resp)) + for _, m := range resp { + attrs, err := nl.ParseRouteAttr(m[nl.SizeofGenlmsg:]) + if err != nil { + return nil, err + } + messages = 
append(messages, attrs) + } + return messages, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaDevGet(dev *string) ([]*VDPADev, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev))) + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + devs := make([]*VDPADev, 0, len(messages)) + for _, m := range messages { + d := &VDPADev{} + d.parseAttributes(m) + devs = append(devs, d) + } + return devs, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaDevConfigGet(dev *string) ([]*VDPADevConfig, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(*dev))) + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_CONFIG_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + cfgs := make([]*VDPADevConfig, 0, len(messages)) + for _, m := range messages { + cfg := &VDPADevConfig{} + cfg.parseAttributes(m) + cfgs = append(cfgs, cfg) + } + return cfgs, nil +} + +// dump all devices if dev is nil +func (h *Handle) vdpaMGMTDevGet(bus, dev *string) ([]*VDPAMGMTDev, error) { + var extraFlags int + var attrs []*nl.RtAttr + if dev != nil { + attrs = append(attrs, + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(*dev)), + ) + if bus != nil { + attrs = append(attrs, + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(*bus)), + ) + } + } else { + extraFlags = extraFlags | unix.NLM_F_DUMP + } + messages, err := h.vdpaRequest(nl.VDPA_CMD_MGMTDEV_GET, extraFlags, attrs) + if err != nil { + return nil, err + } + cfgs := make([]*VDPAMGMTDev, 0, len(messages)) + for _, m := range messages { + cfg := &VDPAMGMTDev{} + cfg.parseAttributes(m) + cfgs = append(cfgs, cfg) + } + return cfgs, nil +} + +// VDPANewDev adds new VDPA device +// Equivalent to: `vdpa dev add name mgmtdev /mgmtName [params]` +func (h *Handle) VDPANewDev(name, mgmtBus, mgmtName string, params VDPANewDevParams) error { + attrs := []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)), + nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_DEV_NAME, nl.ZeroTerminated(mgmtName)), + } + if mgmtBus != "" { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_MGMTDEV_BUS_NAME, nl.ZeroTerminated(mgmtBus))) + } + if len(params.MACAddr) != 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MACADDR, params.MACAddr)) + } + if params.MaxVQP > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MAX_VQP, nl.Uint16Attr(params.MaxVQP))) + } + if params.MTU > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_NET_CFG_MTU, nl.Uint16Attr(params.MTU))) + } + if params.Features > 0 { + attrs = append(attrs, nl.NewRtAttr(nl.VDPA_ATTR_DEV_FEATURES, nl.Uint64Attr(params.Features))) + } + _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_NEW, 0, attrs) + return err +} + +// VDPADelDev removes VDPA device +// Equivalent to: `vdpa dev del ` +func (h *Handle) VDPADelDev(name string) error { + _, err := h.vdpaRequest(nl.VDPA_CMD_DEV_DEL, 0, []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name))}) + return err +} + +// VDPAGetDevList returns list of VDPA devices +// Equivalent to: `vdpa dev show` +func (h *Handle) VDPAGetDevList() ([]*VDPADev, error) { + return h.vdpaDevGet(nil) +} + +// 
VDPAGetDevByName returns VDPA device selected by name +// Equivalent to: `vdpa dev show ` +func (h *Handle) VDPAGetDevByName(name string) (*VDPADev, error) { + devs, err := h.vdpaDevGet(&name) + if err != nil { + return nil, err + } + if len(devs) == 0 { + return nil, fmt.Errorf("device not found") + } + return devs[0], nil +} + +// VDPAGetDevConfigList returns list of VDPA devices configurations +// Equivalent to: `vdpa dev config show` +func (h *Handle) VDPAGetDevConfigList() ([]*VDPADevConfig, error) { + return h.vdpaDevConfigGet(nil) +} + +// VDPAGetDevConfigByName returns VDPA device configuration selected by name +// Equivalent to: `vdpa dev config show ` +func (h *Handle) VDPAGetDevConfigByName(name string) (*VDPADevConfig, error) { + cfgs, err := h.vdpaDevConfigGet(&name) + if err != nil { + return nil, err + } + if len(cfgs) == 0 { + return nil, fmt.Errorf("configuration not found") + } + return cfgs[0], nil +} + +// VDPAGetDevVStats returns vstats for VDPA device +// Equivalent to: `vdpa dev vstats show qidx ` +func (h *Handle) VDPAGetDevVStats(name string, queueIndex uint32) (*VDPADevVStats, error) { + messages, err := h.vdpaRequest(nl.VDPA_CMD_DEV_VSTATS_GET, 0, []*nl.RtAttr{ + nl.NewRtAttr(nl.VDPA_ATTR_DEV_NAME, nl.ZeroTerminated(name)), + nl.NewRtAttr(nl.VDPA_ATTR_DEV_QUEUE_INDEX, nl.Uint32Attr(queueIndex)), + }) + if err != nil { + return nil, err + } + if len(messages) == 0 { + return nil, fmt.Errorf("stats not found") + } + stats := &VDPADevVStats{} + stats.parseAttributes(messages[0]) + return stats, nil +} + +// VDPAGetMGMTDevList returns list of mgmt devices +// Equivalent to: `vdpa mgmtdev show` +func (h *Handle) VDPAGetMGMTDevList() ([]*VDPAMGMTDev, error) { + return h.vdpaMGMTDevGet(nil, nil) +} + +// VDPAGetMGMTDevByBusAndName returns mgmt devices selected by bus and name +// Equivalent to: `vdpa mgmtdev show /` +func (h *Handle) VDPAGetMGMTDevByBusAndName(bus, name string) (*VDPAMGMTDev, error) { + var busPtr *string + if bus != "" { + busPtr = &bus + } + devs, err := h.vdpaMGMTDevGet(busPtr, &name) + if err != nil { + return nil, err + } + if len(devs) == 0 { + return nil, fmt.Errorf("mgmtdev not found") + } + return devs[0], nil +} diff --git a/vendor/github.com/vishvananda/netlink/virtio.go b/vendor/github.com/vishvananda/netlink/virtio.go new file mode 100644 index 000000000..78a497bbc --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/virtio.go @@ -0,0 +1,132 @@ +package netlink + +// features for virtio net +const ( + VIRTIO_NET_F_CSUM = 0 // Host handles pkts w/ partial csum + VIRTIO_NET_F_GUEST_CSUM = 1 // Guest handles pkts w/ partial csum + VIRTIO_NET_F_CTRL_GUEST_OFFLOADS = 2 // Dynamic offload configuration. + VIRTIO_NET_F_MTU = 3 // Initial MTU advice + VIRTIO_NET_F_MAC = 5 // Host has given MAC address. + VIRTIO_NET_F_GUEST_TSO4 = 7 // Guest can handle TSOv4 in. + VIRTIO_NET_F_GUEST_TSO6 = 8 // Guest can handle TSOv6 in. + VIRTIO_NET_F_GUEST_ECN = 9 // Guest can handle TSO[6] w/ ECN in. + VIRTIO_NET_F_GUEST_UFO = 10 // Guest can handle UFO in. + VIRTIO_NET_F_HOST_TSO4 = 11 // Host can handle TSOv4 in. + VIRTIO_NET_F_HOST_TSO6 = 12 // Host can handle TSOv6 in. + VIRTIO_NET_F_HOST_ECN = 13 // Host can handle TSO[6] w/ ECN in. + VIRTIO_NET_F_HOST_UFO = 14 // Host can handle UFO in. + VIRTIO_NET_F_MRG_RXBUF = 15 // Host can merge receive buffers. 
+ VIRTIO_NET_F_STATUS = 16 // virtio_net_config.status available + VIRTIO_NET_F_CTRL_VQ = 17 // Control channel available + VIRTIO_NET_F_CTRL_RX = 18 // Control channel RX mode support + VIRTIO_NET_F_CTRL_VLAN = 19 // Control channel VLAN filtering + VIRTIO_NET_F_CTRL_RX_EXTRA = 20 // Extra RX mode control support + VIRTIO_NET_F_GUEST_ANNOUNCE = 21 // Guest can announce device on the network + VIRTIO_NET_F_MQ = 22 // Device supports Receive Flow Steering + VIRTIO_NET_F_CTRL_MAC_ADDR = 23 // Set MAC address + VIRTIO_NET_F_VQ_NOTF_COAL = 52 // Device supports virtqueue notification coalescing + VIRTIO_NET_F_NOTF_COAL = 53 // Device supports notifications coalescing + VIRTIO_NET_F_GUEST_USO4 = 54 // Guest can handle USOv4 in. + VIRTIO_NET_F_GUEST_USO6 = 55 // Guest can handle USOv6 in. + VIRTIO_NET_F_HOST_USO = 56 // Host can handle USO in. + VIRTIO_NET_F_HASH_REPORT = 57 // Supports hash report + VIRTIO_NET_F_GUEST_HDRLEN = 59 // Guest provides the exact hdr_len value. + VIRTIO_NET_F_RSS = 60 // Supports RSS RX steering + VIRTIO_NET_F_RSC_EXT = 61 // extended coalescing info + VIRTIO_NET_F_STANDBY = 62 // Act as standby for another device with the same MAC. + VIRTIO_NET_F_SPEED_DUPLEX = 63 // Device set linkspeed and duplex + VIRTIO_NET_F_GSO = 6 // Host handles pkts w/ any GSO type +) + +// virtio net status +const ( + VIRTIO_NET_S_LINK_UP = 1 // Link is up + VIRTIO_NET_S_ANNOUNCE = 2 // Announcement is needed +) + +// virtio config +const ( + // Do we get callbacks when the ring is completely used, even if we've + // suppressed them? + VIRTIO_F_NOTIFY_ON_EMPTY = 24 + // Can the device handle any descriptor layout? + VIRTIO_F_ANY_LAYOUT = 27 + // v1.0 compliant + VIRTIO_F_VERSION_1 = 32 + // If clear - device has the platform DMA (e.g. IOMMU) bypass quirk feature. + // If set - use platform DMA tools to access the memory. + // Note the reverse polarity (compared to most other features), + // this is for compatibility with legacy systems. + VIRTIO_F_ACCESS_PLATFORM = 33 + // Legacy name for VIRTIO_F_ACCESS_PLATFORM (for compatibility with old userspace) + VIRTIO_F_IOMMU_PLATFORM = VIRTIO_F_ACCESS_PLATFORM + // This feature indicates support for the packed virtqueue layout. + VIRTIO_F_RING_PACKED = 34 + // Inorder feature indicates that all buffers are used by the device + // in the same order in which they have been made available. + VIRTIO_F_IN_ORDER = 35 + // This feature indicates that memory accesses by the driver and the + // device are ordered in a way described by the platform. + VIRTIO_F_ORDER_PLATFORM = 36 + // Does the device support Single Root I/O Virtualization? + VIRTIO_F_SR_IOV = 37 + // This feature indicates that the driver passes extra data (besides + // identifying the virtqueue) in its device notifications. + VIRTIO_F_NOTIFICATION_DATA = 38 + // This feature indicates that the driver uses the data provided by the device + // as a virtqueue identifier in available buffer notifications. + VIRTIO_F_NOTIF_CONFIG_DATA = 39 + // This feature indicates that the driver can reset a queue individually. 
+ VIRTIO_F_RING_RESET = 40 +) + +// virtio device ids +const ( + VIRTIO_ID_NET = 1 // virtio net + VIRTIO_ID_BLOCK = 2 // virtio block + VIRTIO_ID_CONSOLE = 3 // virtio console + VIRTIO_ID_RNG = 4 // virtio rng + VIRTIO_ID_BALLOON = 5 // virtio balloon + VIRTIO_ID_IOMEM = 6 // virtio ioMemory + VIRTIO_ID_RPMSG = 7 // virtio remote processor messaging + VIRTIO_ID_SCSI = 8 // virtio scsi + VIRTIO_ID_9P = 9 // 9p virtio console + VIRTIO_ID_MAC80211_WLAN = 10 // virtio WLAN MAC + VIRTIO_ID_RPROC_SERIAL = 11 // virtio remoteproc serial link + VIRTIO_ID_CAIF = 12 // Virtio caif + VIRTIO_ID_MEMORY_BALLOON = 13 // virtio memory balloon + VIRTIO_ID_GPU = 16 // virtio GPU + VIRTIO_ID_CLOCK = 17 // virtio clock/timer + VIRTIO_ID_INPUT = 18 // virtio input + VIRTIO_ID_VSOCK = 19 // virtio vsock transport + VIRTIO_ID_CRYPTO = 20 // virtio crypto + VIRTIO_ID_SIGNAL_DIST = 21 // virtio signal distribution device + VIRTIO_ID_PSTORE = 22 // virtio pstore device + VIRTIO_ID_IOMMU = 23 // virtio IOMMU + VIRTIO_ID_MEM = 24 // virtio mem + VIRTIO_ID_SOUND = 25 // virtio sound + VIRTIO_ID_FS = 26 // virtio filesystem + VIRTIO_ID_PMEM = 27 // virtio pmem + VIRTIO_ID_RPMB = 28 // virtio rpmb + VIRTIO_ID_MAC80211_HWSIM = 29 // virtio mac80211-hwsim + VIRTIO_ID_VIDEO_ENCODER = 30 // virtio video encoder + VIRTIO_ID_VIDEO_DECODER = 31 // virtio video decoder + VIRTIO_ID_SCMI = 32 // virtio SCMI + VIRTIO_ID_NITRO_SEC_MOD = 33 // virtio nitro secure module + VIRTIO_ID_I2C_ADAPTER = 34 // virtio i2c adapter + VIRTIO_ID_WATCHDOG = 35 // virtio watchdog + VIRTIO_ID_CAN = 36 // virtio can + VIRTIO_ID_DMABUF = 37 // virtio dmabuf + VIRTIO_ID_PARAM_SERV = 38 // virtio parameter server + VIRTIO_ID_AUDIO_POLICY = 39 // virtio audio policy + VIRTIO_ID_BT = 40 // virtio bluetooth + VIRTIO_ID_GPIO = 41 // virtio gpio + // Virtio Transitional IDs + VIRTIO_TRANS_ID_NET = 0x1000 // transitional virtio net + VIRTIO_TRANS_ID_BLOCK = 0x1001 // transitional virtio block + VIRTIO_TRANS_ID_BALLOON = 0x1002 // transitional virtio balloon + VIRTIO_TRANS_ID_CONSOLE = 0x1003 // transitional virtio console + VIRTIO_TRANS_ID_SCSI = 0x1004 // transitional virtio SCSI + VIRTIO_TRANS_ID_RNG = 0x1005 // transitional virtio rng + VIRTIO_TRANS_ID_9P = 0x1009 // transitional virtio 9p console +) diff --git a/vendor/github.com/vishvananda/netlink/xdp_diag.go b/vendor/github.com/vishvananda/netlink/xdp_diag.go new file mode 100644 index 000000000..e88825bf5 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xdp_diag.go @@ -0,0 +1,34 @@ +package netlink + +import "github.com/vishvananda/netlink/nl" + +const SOCK_ANY_COOKIE = uint64(nl.TCPDIAG_NOCOOKIE)<<32 + uint64(nl.TCPDIAG_NOCOOKIE) + +// XDP diagnosis show flag constants to request particular information elements. 
+const ( + XDP_SHOW_INFO = 1 << iota + XDP_SHOW_RING_CFG + XDP_SHOW_UMEM + XDP_SHOW_MEMINFO + XDP_SHOW_STATS +) + +// XDP diag element constants +const ( + XDP_DIAG_NONE = iota + XDP_DIAG_INFO // when using XDP_SHOW_INFO + XDP_DIAG_UID // when using XDP_SHOW_INFO + XDP_DIAG_RX_RING // when using XDP_SHOW_RING_CFG + XDP_DIAG_TX_RING // when using XDP_SHOW_RING_CFG + XDP_DIAG_UMEM // when using XDP_SHOW_UMEM + XDP_DIAG_UMEM_FILL_RING // when using XDP_SHOW_UMEM + XDP_DIAG_UMEM_COMPLETION_RING // when using XDP_SHOW_UMEM + XDP_DIAG_MEMINFO // when using XDP_SHOW_MEMINFO + XDP_DIAG_STATS // when using XDP_SHOW_STATS +) + +// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/xdp_diag.h#L21 +type XDPDiagInfoResp struct { + XDPDiagMsg *XDPSocket + XDPInfo *XDPInfo +} diff --git a/vendor/github.com/vishvananda/netlink/xdp_linux.go b/vendor/github.com/vishvananda/netlink/xdp_linux.go new file mode 100644 index 000000000..896a406de --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xdp_linux.go @@ -0,0 +1,46 @@ +package netlink + +import ( + "bytes" + "fmt" +) + +const ( + xdrDiagUmemLen = 8 + 8*4 + xdrDiagStatsLen = 6 * 8 +) + +func (x *XDPDiagUmem) deserialize(b []byte) error { + if len(b) < xdrDiagUmemLen { + return fmt.Errorf("XDP umem diagnosis data short read (%d); want %d", len(b), xdrDiagUmemLen) + } + + rb := bytes.NewBuffer(b) + x.Size = native.Uint64(rb.Next(8)) + x.ID = native.Uint32(rb.Next(4)) + x.NumPages = native.Uint32(rb.Next(4)) + x.ChunkSize = native.Uint32(rb.Next(4)) + x.Headroom = native.Uint32(rb.Next(4)) + x.Ifindex = native.Uint32(rb.Next(4)) + x.QueueID = native.Uint32(rb.Next(4)) + x.Flags = native.Uint32(rb.Next(4)) + x.Refs = native.Uint32(rb.Next(4)) + + return nil +} + +func (x *XDPDiagStats) deserialize(b []byte) error { + if len(b) < xdrDiagStatsLen { + return fmt.Errorf("XDP diagnosis statistics short read (%d); want %d", len(b), xdrDiagStatsLen) + } + + rb := bytes.NewBuffer(b) + x.RxDropped = native.Uint64(rb.Next(8)) + x.RxInvalid = native.Uint64(rb.Next(8)) + x.RxFull = native.Uint64(rb.Next(8)) + x.FillRingEmpty = native.Uint64(rb.Next(8)) + x.TxInvalid = native.Uint64(rb.Next(8)) + x.TxRingEmpty = native.Uint64(rb.Next(8)) + + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_linux.go new file mode 100644 index 000000000..dd38ed8e0 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_linux.go @@ -0,0 +1,75 @@ +package netlink + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +// Proto is an enum representing an ipsec protocol. +type Proto uint8 + +const ( + XFRM_PROTO_ROUTE2 Proto = unix.IPPROTO_ROUTING + XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP + XFRM_PROTO_AH Proto = unix.IPPROTO_AH + XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS + XFRM_PROTO_COMP Proto = unix.IPPROTO_COMP + XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW +) + +func (p Proto) String() string { + switch p { + case XFRM_PROTO_ROUTE2: + return "route2" + case XFRM_PROTO_ESP: + return "esp" + case XFRM_PROTO_AH: + return "ah" + case XFRM_PROTO_HAO: + return "hao" + case XFRM_PROTO_COMP: + return "comp" + case XFRM_PROTO_IPSEC_ANY: + return "ipsec-any" + } + return fmt.Sprintf("%d", p) +} + +// Mode is an enum representing an ipsec transport. 
+type Mode uint8 + +const ( + XFRM_MODE_TRANSPORT Mode = iota + XFRM_MODE_TUNNEL + XFRM_MODE_ROUTEOPTIMIZATION + XFRM_MODE_IN_TRIGGER + XFRM_MODE_BEET + XFRM_MODE_MAX +) + +func (m Mode) String() string { + switch m { + case XFRM_MODE_TRANSPORT: + return "transport" + case XFRM_MODE_TUNNEL: + return "tunnel" + case XFRM_MODE_ROUTEOPTIMIZATION: + return "ro" + case XFRM_MODE_IN_TRIGGER: + return "in_trigger" + case XFRM_MODE_BEET: + return "beet" + } + return fmt.Sprintf("%d", m) +} + +// XfrmMark represents the mark associated with the state or policy +type XfrmMark struct { + Value uint32 + Mask uint32 +} + +func (m *XfrmMark) String() string { + return fmt.Sprintf("(0x%x,0x%x)", m.Value, m.Mask) +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go new file mode 100644 index 000000000..985d3a915 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -0,0 +1,101 @@ +package netlink + +import ( + "fmt" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" + "golang.org/x/sys/unix" +) + +type XfrmMsg interface { + Type() nl.XfrmMsgType +} + +type XfrmMsgExpire struct { + XfrmState *XfrmState + Hard bool +} + +func (ue *XfrmMsgExpire) Type() nl.XfrmMsgType { + return nl.XFRM_MSG_EXPIRE +} + +func parseXfrmMsgExpire(b []byte) *XfrmMsgExpire { + var e XfrmMsgExpire + + msg := nl.DeserializeXfrmUserExpire(b) + e.XfrmState = xfrmStateFromXfrmUsersaInfo(&msg.XfrmUsersaInfo) + e.Hard = msg.Hard == 1 + + return &e +} + +func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error, + types ...nl.XfrmMsgType) error { + + groups, err := xfrmMcastGroups(types) + if err != nil { + // Propagate the error instead of silently reporting success. + return err + } + s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_XFRM, groups...) + if err != nil { + return err + } + + if done != nil { + go func() { + <-done + s.Close() + }() + + } + + go func() { + defer close(ch) + for { + msgs, from, err := s.Receive() + if err != nil { + errorChan <- err + return + } + if from.Pid != nl.PidKernel { + errorChan <- fmt.Errorf("Wrong sender portid %d, expected %d", from.Pid, nl.PidKernel) + return + } + for _, m := range msgs { + switch m.Header.Type { + case nl.XFRM_MSG_EXPIRE: + ch <- parseXfrmMsgExpire(m.Data) + default: + errorChan <- fmt.Errorf("unsupported msg type: %x", m.Header.Type) + } + } + } + }() + + return nil +} + +func xfrmMcastGroups(types []nl.XfrmMsgType) ([]uint, error) { + groups := make([]uint, 0) + + if len(types) == 0 { + return nil, fmt.Errorf("no xfrm msg type specified") + } + + for _, t := range types { + var group uint + + switch t { + case nl.XFRM_MSG_EXPIRE: + group = nl.XFRMNLGRP_EXPIRE + default: + return nil, fmt.Errorf("unsupported group: %x", t) + } + + groups = append(groups, group) + } + + return groups, nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go new file mode 100644 index 000000000..d526739ce --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -0,0 +1,364 @@ +package netlink + +import ( + "fmt" + "net" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// Dir is an enum representing an ipsec template direction. 
+type Dir uint8 + +const ( + XFRM_DIR_IN Dir = iota + XFRM_DIR_OUT + XFRM_DIR_FWD + XFRM_SOCKET_IN + XFRM_SOCKET_OUT + XFRM_SOCKET_FWD +) + +func (d Dir) String() string { + switch d { + case XFRM_DIR_IN: + return "dir in" + case XFRM_DIR_OUT: + return "dir out" + case XFRM_DIR_FWD: + return "dir fwd" + case XFRM_SOCKET_IN: + return "socket in" + case XFRM_SOCKET_OUT: + return "socket out" + case XFRM_SOCKET_FWD: + return "socket fwd" + } + return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN) +} + +// PolicyAction is an enum representing an ipsec policy action. +type PolicyAction uint8 + +const ( + XFRM_POLICY_ALLOW PolicyAction = 0 + XFRM_POLICY_BLOCK PolicyAction = 1 +) + +func (a PolicyAction) String() string { + switch a { + case XFRM_POLICY_ALLOW: + return "allow" + case XFRM_POLICY_BLOCK: + return "block" + default: + return fmt.Sprintf("action %d", a) + } +} + +// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec +// policy. These rules are matched with XfrmState to determine encryption +// and authentication algorithms. +type XfrmPolicyTmpl struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + Optional int +} + +func (t XfrmPolicyTmpl) String() string { + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}", + t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid) +} + +// XfrmPolicy represents an ipsec policy. It represents the overlay network +// and has a list of XfrmPolicyTmpls representing the base addresses of +// the policy. +type XfrmPolicy struct { + Dst *net.IPNet + Src *net.IPNet + Proto Proto + DstPort int + SrcPort int + Dir Dir + Priority int + Index int + Action PolicyAction + Ifindex int + Ifid int + Mark *XfrmMark + Tmpls []XfrmPolicyTmpl +} + +func (p XfrmPolicy) String() string { + return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Action: %s, Ifindex: %d, Ifid: %d, Mark: %s, Tmpls: %s}", + p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Action, p.Ifindex, p.Ifid, p.Mark, p.Tmpls) +} + +func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { + sel.Family = uint16(nl.FAMILY_V4) + if policy.Dst != nil { + sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP)) + sel.Daddr.FromIP(policy.Dst.IP) + prefixlenD, _ := policy.Dst.Mask.Size() + sel.PrefixlenD = uint8(prefixlenD) + } + if policy.Src != nil { + sel.Saddr.FromIP(policy.Src.IP) + prefixlenS, _ := policy.Src.Mask.Size() + sel.PrefixlenS = uint8(prefixlenS) + } + sel.Proto = uint8(policy.Proto) + sel.Dport = nl.Swap16(uint16(policy.DstPort)) + sel.Sport = nl.Swap16(uint16(policy.SrcPort)) + if sel.Dport != 0 { + sel.DportMask = ^uint16(0) + } + if sel.Sport != 0 { + sel.SportMask = ^uint16(0) + } + sel.Ifindex = int32(policy.Ifindex) +} + +// XfrmPolicyAdd will add an xfrm policy to the system. +// Equivalent to: `ip xfrm policy add $policy` +func XfrmPolicyAdd(policy *XfrmPolicy) error { + return pkgHandle.XfrmPolicyAdd(policy) +} + +// XfrmPolicyAdd will add an xfrm policy to the system. +// Equivalent to: `ip xfrm policy add $policy` +func (h *Handle) XfrmPolicyAdd(policy *XfrmPolicy) error { + return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_NEWPOLICY) +} + +// XfrmPolicyUpdate will update an xfrm policy to the system. +// Equivalent to: `ip xfrm policy update $policy` +func XfrmPolicyUpdate(policy *XfrmPolicy) error { + return pkgHandle.XfrmPolicyUpdate(policy) +} + +// XfrmPolicyUpdate will update an xfrm policy to the system. 
+// Equivalent to: `ip xfrm policy update $policy` +func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error { + return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_UPDPOLICY) +} + +func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { + req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + + msg := &nl.XfrmUserpolicyInfo{} + selFromPolicy(&msg.Sel, policy) + msg.Priority = uint32(policy.Priority) + msg.Index = uint32(policy.Index) + msg.Dir = uint8(policy.Dir) + msg.Action = uint8(policy.Action) + msg.Lft.SoftByteLimit = nl.XFRM_INF + msg.Lft.HardByteLimit = nl.XFRM_INF + msg.Lft.SoftPacketLimit = nl.XFRM_INF + msg.Lft.HardPacketLimit = nl.XFRM_INF + req.AddData(msg) + + tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls)) + for i, tmpl := range policy.Tmpls { + start := i * nl.SizeofXfrmUserTmpl + userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl]) + userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst) + userTmpl.Saddr.FromIP(tmpl.Src) + userTmpl.Family = uint16(nl.GetIPFamily(tmpl.Dst)) + userTmpl.XfrmId.Proto = uint8(tmpl.Proto) + userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi)) + userTmpl.Mode = uint8(tmpl.Mode) + userTmpl.Reqid = uint32(tmpl.Reqid) + userTmpl.Optional = uint8(tmpl.Optional) + userTmpl.Aalgos = ^uint32(0) + userTmpl.Ealgos = ^uint32(0) + userTmpl.Calgos = ^uint32(0) + } + if len(tmplData) > 0 { + tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData) + req.AddData(tmpls) + } + if policy.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark)) + req.AddData(out) + } + + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } + + _, err := req.Execute(unix.NETLINK_XFRM, 0) + return err +} + +// XfrmPolicyDel will delete an xfrm policy from the system. Note that +// the Tmpls are ignored when matching the policy to delete. +// Equivalent to: `ip xfrm policy del $policy` +func XfrmPolicyDel(policy *XfrmPolicy) error { + return pkgHandle.XfrmPolicyDel(policy) +} + +// XfrmPolicyDel will delete an xfrm policy from the system. Note that +// the Tmpls are ignored when matching the policy to delete. +// Equivalent to: `ip xfrm policy del $policy` +func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error { + _, err := h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_DELPOLICY) + return err +} + +// XfrmPolicyList gets a list of xfrm policies in the system. +// Equivalent to: `ip xfrm policy show`. +// The list can be filtered by ip family. +func XfrmPolicyList(family int) ([]XfrmPolicy, error) { + return pkgHandle.XfrmPolicyList(family) +} + +// XfrmPolicyList gets a list of xfrm policies in the system. +// Equivalent to: `ip xfrm policy show`. +// The list can be filtered by ip family. +func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { + req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) + + msg := nl.NewIfInfomsg(family) + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + if err != nil { + return nil, err + } + + var res []XfrmPolicy + for _, m := range msgs { + if policy, err := parseXfrmPolicy(m, family); err == nil { + res = append(res, *policy) + } else if err == familyError { + continue + } else { + return nil, err + } + } + return res, nil +} + +// XfrmPolicyGet gets the policy described by the index or selector, if found. 
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`. +func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return pkgHandle.XfrmPolicyGet(policy) +} + +// XfrmPolicyGet gets the policy described by the index or selector, if found. +// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`. +func (h *Handle) XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_GETPOLICY) +} + +// XfrmPolicyFlush will flush the policies on the system. +// Equivalent to: `ip xfrm policy flush` +func XfrmPolicyFlush() error { + return pkgHandle.XfrmPolicyFlush() +} + +// XfrmPolicyFlush will flush the policies on the system. +// Equivalent to: `ip xfrm policy flush` +func (h *Handle) XfrmPolicyFlush() error { + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, unix.NLM_F_ACK) + _, err := req.Execute(unix.NETLINK_XFRM, 0) + return err +} + +func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) { + req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) + + msg := &nl.XfrmUserpolicyId{} + selFromPolicy(&msg.Sel, policy) + msg.Index = uint32(policy.Index) + msg.Dir = uint8(policy.Dir) + req.AddData(msg) + + if policy.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark)) + req.AddData(out) + } + + if policy.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(policy.Ifid))) + req.AddData(ifId) + } + + resType := nl.XFRM_MSG_NEWPOLICY + if nlProto == nl.XFRM_MSG_DELPOLICY { + resType = 0 + } + + msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) + if err != nil { + return nil, err + } + + if nlProto == nl.XFRM_MSG_DELPOLICY { + return nil, err + } + + return parseXfrmPolicy(msgs[0], FAMILY_ALL) +} + +func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { + msg := nl.DeserializeXfrmUserpolicyInfo(m) + + // This is mainly for the policy dump + if family != FAMILY_ALL && family != int(msg.Sel.Family) { + return nil, familyError + } + + var policy XfrmPolicy + + policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, uint16(family)) + policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, uint16(family)) + policy.Proto = Proto(msg.Sel.Proto) + policy.DstPort = int(nl.Swap16(msg.Sel.Dport)) + policy.SrcPort = int(nl.Swap16(msg.Sel.Sport)) + policy.Ifindex = int(msg.Sel.Ifindex) + policy.Priority = int(msg.Priority) + policy.Index = int(msg.Index) + policy.Dir = Dir(msg.Dir) + policy.Action = PolicyAction(msg.Action) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.XFRMA_TMPL: + max := len(attr.Value) + for i := 0; i < max; i += nl.SizeofXfrmUserTmpl { + var resTmpl XfrmPolicyTmpl + tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl]) + resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP() + resTmpl.Src = tmpl.Saddr.ToIP() + resTmpl.Proto = Proto(tmpl.XfrmId.Proto) + resTmpl.Mode = Mode(tmpl.Mode) + resTmpl.Spi = int(nl.Swap32(tmpl.XfrmId.Spi)) + resTmpl.Reqid = int(tmpl.Reqid) + resTmpl.Optional = int(tmpl.Optional) + policy.Tmpls = append(policy.Tmpls, resTmpl) + } + case nl.XFRMA_MARK: + mark := nl.DeserializeXfrmMark(attr.Value[:]) + policy.Mark = new(XfrmMark) + policy.Mark.Value = mark.Value + policy.Mark.Mask = mark.Mask + case nl.XFRMA_IF_ID: + policy.Ifid = 
int(native.Uint32(attr.Value)) + } + } + + return &policy, nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go new file mode 100644 index 000000000..554f2498c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -0,0 +1,668 @@ +package netlink + +import ( + "fmt" + "net" + "time" + "unsafe" + + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. +type XfrmStateAlgo struct { + Name string + Key []byte + TruncateLen int // Auth only + ICVLen int // AEAD only +} + +func (a XfrmStateAlgo) String() string { + base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key) + if a.TruncateLen != 0 { + base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen) + } + if a.ICVLen != 0 { + base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen) + } + return fmt.Sprintf("%s}", base) +} + +// EncapType is an enum representing the optional packet encapsulation. +type EncapType uint8 + +const ( + XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 + XFRM_ENCAP_ESPINUDP +) + +func (e EncapType) String() string { + switch e { + case XFRM_ENCAP_ESPINUDP_NONIKE: + return "espinudp-non-ike" + case XFRM_ENCAP_ESPINUDP: + return "espinudp" + } + return "unknown" +} + +// XfrmStateEncap represents the encapsulation to use for the ipsec encryption. +type XfrmStateEncap struct { + Type EncapType + SrcPort int + DstPort int + OriginalAddress net.IP +} + +func (e XfrmStateEncap) String() string { + return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}", + e.Type, e.SrcPort, e.DstPort, e.OriginalAddress) +} + +// XfrmStateLimits represents the configured limits for the state. +type XfrmStateLimits struct { + ByteSoft uint64 + ByteHard uint64 + PacketSoft uint64 + PacketHard uint64 + TimeSoft uint64 + TimeHard uint64 + TimeUseSoft uint64 + TimeUseHard uint64 +} + +// XfrmStateStats represents the current number of bytes/packets +// processed by this State, the State's installation and first use +// time and the replay window counters. +type XfrmStateStats struct { + ReplayWindow uint32 + Replay uint32 + Failed uint32 + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + +// XfrmReplayState represents the sequence number states for +// "legacy" anti-replay mode. +type XfrmReplayState struct { + OSeq uint32 + Seq uint32 + BitMap uint32 +} + +func (r XfrmReplayState) String() string { + return fmt.Sprintf("{OSeq: 0x%x, Seq: 0x%x, BitMap: 0x%x}", + r.OSeq, r.Seq, r.BitMap) +} + +// XfrmState represents the state of an ipsec policy. It optionally +// contains an XfrmStateAlgo for encryption and one for authentication. 
+type XfrmState struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + ReplayWindow int + Limits XfrmStateLimits + Statistics XfrmStateStats + Mark *XfrmMark + OutputMark *XfrmMark + Ifid int + Auth *XfrmStateAlgo + Crypt *XfrmStateAlgo + Aead *XfrmStateAlgo + Encap *XfrmStateEncap + ESN bool + DontEncapDSCP bool + OSeqMayWrap bool + Replay *XfrmReplayState + Selector *XfrmPolicy +} + +func (sa XfrmState) String() string { + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) +} +func (sa XfrmState) Print(stats bool) string { + if !stats { + return sa.String() + } + at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) + ut := "-" + if sa.Statistics.UseTime > 0 { + ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) + } + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ + "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", + sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, + sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) +} + +func printLimit(lmt uint64) string { + if lmt == ^uint64(0) { + return "(INF)" + } + return fmt.Sprintf("%d", lmt) +} +func writeStateAlgo(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgo{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeStateAlgoAuth(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgoAuth{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgTruncLen: uint32(a.TruncateLen), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeStateAlgoAead(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgoAEAD{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgICVLen: uint32(a.ICVLen), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeMark(m *XfrmMark) []byte { + mark := &nl.XfrmMark{ + Value: m.Value, + Mask: m.Mask, + } + if mark.Mask == 0 { + mark.Mask = ^uint32(0) + } + return mark.Serialize() +} + +func writeReplayEsn(replayWindow int) []byte { + replayEsn := &nl.XfrmReplayStateEsn{ + OSeq: 0, + Seq: 0, + OSeqHi: 0, + SeqHi: 0, + ReplayWindow: uint32(replayWindow), + } + + // Linux stores the bitmap to identify the already received sequence packets in blocks of uint32 elements. + // Therefore bitmap length is the minimum number of uint32 elements needed. The following is a ceiling operation. 
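+ // For example, with replayWindow = 128 and 4-byte uint32 elements, BmpLen = (128 + 32 - 1) / 32 = 4, i.e. four uint32 blocks covering 128 bitmap bits.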
+ bytesPerElem := int(unsafe.Sizeof(replayEsn.BmpLen)) // Any uint32 variable is good for this + replayEsn.BmpLen = uint32((replayWindow + (bytesPerElem * 8) - 1) / (bytesPerElem * 8)) + + return replayEsn.Serialize() +} + +func writeReplay(r *XfrmReplayState) []byte { + return (&nl.XfrmReplayState{ + OSeq: r.OSeq, + Seq: r.Seq, + BitMap: r.BitMap, + }).Serialize() +} + +// XfrmStateAdd will add an xfrm state to the system. +// Equivalent to: `ip xfrm state add $state` +func XfrmStateAdd(state *XfrmState) error { + return pkgHandle.XfrmStateAdd(state) +} + +// XfrmStateAdd will add an xfrm state to the system. +// Equivalent to: `ip xfrm state add $state` +func (h *Handle) XfrmStateAdd(state *XfrmState) error { + return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_NEWSA) +} + +// XfrmStateAllocSpi will allocate an xfrm state in the system. +// Equivalent to: `ip xfrm state allocspi` +func XfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { + return pkgHandle.xfrmStateAllocSpi(state) +} + +// XfrmStateUpdate will update an xfrm state to the system. +// Equivalent to: `ip xfrm state update $state` +func XfrmStateUpdate(state *XfrmState) error { + return pkgHandle.XfrmStateUpdate(state) +} + +// XfrmStateUpdate will update an xfrm state to the system. +// Equivalent to: `ip xfrm state update $state` +func (h *Handle) XfrmStateUpdate(state *XfrmState) error { + return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_UPDSA) +} + +func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { + + // A state with spi 0 can't be deleted so don't allow it to be set + if state.Spi == 0 { + return fmt.Errorf("Spi must be set when adding xfrm state") + } + req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + + msg := xfrmUsersaInfoFromXfrmState(state) + + if state.ESN { + if state.ReplayWindow == 0 { + return fmt.Errorf("ESN flag set without ReplayWindow") + } + msg.Flags |= nl.XFRM_STATE_ESN + msg.ReplayWindow = 0 + } + + limitsToLft(state.Limits, &msg.Lft) + req.AddData(msg) + + if state.Auth != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth)) + req.AddData(out) + } + if state.Crypt != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt)) + req.AddData(out) + } + if state.Aead != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_AEAD, writeStateAlgoAead(state.Aead)) + req.AddData(out) + } + if state.Encap != nil { + encapData := make([]byte, nl.SizeofXfrmEncapTmpl) + encap := nl.DeserializeXfrmEncapTmpl(encapData) + encap.EncapType = uint16(state.Encap.Type) + encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort)) + encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort)) + encap.EncapOa.FromIP(state.Encap.OriginalAddress) + out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData) + req.AddData(out) + } + if state.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) + req.AddData(out) + } + if state.ESN { + out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow)) + req.AddData(out) + } + if state.OutputMark != nil { + out := nl.NewRtAttr(nl.XFRMA_SET_MARK, nl.Uint32Attr(state.OutputMark.Value)) + req.AddData(out) + if state.OutputMark.Mask != 0 { + out = nl.NewRtAttr(nl.XFRMA_SET_MARK_MASK, nl.Uint32Attr(state.OutputMark.Mask)) + req.AddData(out) + } + } + if state.OSeqMayWrap || state.DontEncapDSCP { + var flags uint32 + if state.DontEncapDSCP { + flags |= nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP + } + if state.OSeqMayWrap { + flags |= nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP + } + 
out := nl.NewRtAttr(nl.XFRMA_SA_EXTRA_FLAGS, nl.Uint32Attr(flags)) + req.AddData(out) + } + if state.Replay != nil { + out := nl.NewRtAttr(nl.XFRMA_REPLAY_VAL, writeReplay(state.Replay)) + req.AddData(out) + } + + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } + + _, err := req.Execute(unix.NETLINK_XFRM, 0) + return err +} + +func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { + req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI, + unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + + msg := &nl.XfrmUserSpiInfo{} + msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state)) + // 1-255 is reserved by IANA for future use + msg.Min = 0x100 + msg.Max = 0xffffffff + req.AddData(msg) + if state.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) + req.AddData(out) + } + + msgs, err := req.Execute(unix.NETLINK_XFRM, 0) + if err != nil { + return nil, err + } + + return parseXfrmState(msgs[0], FAMILY_ALL) +} + +// XfrmStateDel will delete an xfrm state from the system. Note that +// the Algos are ignored when matching the state to delete. +// Equivalent to: `ip xfrm state del $state` +func XfrmStateDel(state *XfrmState) error { + return pkgHandle.XfrmStateDel(state) +} + +// XfrmStateDel will delete an xfrm state from the system. Note that +// the Algos are ignored when matching the state to delete. +// Equivalent to: `ip xfrm state del $state` +func (h *Handle) XfrmStateDel(state *XfrmState) error { + _, err := h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_DELSA) + return err +} + +// XfrmStateList gets a list of xfrm states in the system. +// Equivalent to: `ip [-4|-6] xfrm state show`. +// The list can be filtered by ip family. +func XfrmStateList(family int) ([]XfrmState, error) { + return pkgHandle.XfrmStateList(family) } + +// XfrmStateList gets a list of xfrm states in the system. +// Equivalent to: `ip xfrm state show`. +// The list can be filtered by ip family. +func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { + req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) + + msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + if err != nil { + return nil, err + } + + var res []XfrmState + for _, m := range msgs { + if state, err := parseXfrmState(m, family); err == nil { + res = append(res, *state) + } else if err == familyError { + continue + } else { + return nil, err + } + } + return res, nil +} + +// XfrmStateGet gets the xfrm state described by the ID, if found. +// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`. +// Only the fields which constitute the SA ID must be filled in: +// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ] +// mark is optional +func XfrmStateGet(state *XfrmState) (*XfrmState, error) { + return pkgHandle.XfrmStateGet(state) +} + +// XfrmStateGet gets the xfrm state described by the ID, if found. +// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`. 
+// Only the fields which constitute the SA ID must be filled in: +// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ] +// mark is optional +func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) { + return h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_GETSA) +} + +func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) { + req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) + + msg := &nl.XfrmUsersaId{} + msg.Family = uint16(nl.GetIPFamily(state.Dst)) + msg.Daddr.FromIP(state.Dst) + msg.Proto = uint8(state.Proto) + msg.Spi = nl.Swap32(uint32(state.Spi)) + req.AddData(msg) + + if state.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) + req.AddData(out) + } + if state.Src != nil { + out := nl.NewRtAttr(nl.XFRMA_SRCADDR, state.Src.To16()) + req.AddData(out) + } + + if state.Ifid != 0 { + ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) + req.AddData(ifId) + } + + resType := nl.XFRM_MSG_NEWSA + if nlProto == nl.XFRM_MSG_DELSA { + resType = 0 + } + + msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) + if err != nil { + return nil, err + } + + if nlProto == nl.XFRM_MSG_DELSA { + return nil, nil + } + + s, err := parseXfrmState(msgs[0], FAMILY_ALL) + if err != nil { + return nil, err + } + + return s, nil +} + +var familyError = fmt.Errorf("family error") + +func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { + var state XfrmState + state.Dst = msg.Id.Daddr.ToIP() + state.Src = msg.Saddr.ToIP() + state.Proto = Proto(msg.Id.Proto) + state.Mode = Mode(msg.Mode) + state.Spi = int(nl.Swap32(msg.Id.Spi)) + state.Reqid = int(msg.Reqid) + state.ReplayWindow = int(msg.ReplayWindow) + lftToLimits(&msg.Lft, &state.Limits) + curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) + state.Selector = &XfrmPolicy{ + Dst: msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, msg.Sel.Family), + Src: msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, msg.Sel.Family), + Proto: Proto(msg.Sel.Proto), + DstPort: int(nl.Swap16(msg.Sel.Dport)), + SrcPort: int(nl.Swap16(msg.Sel.Sport)), + Ifindex: int(msg.Sel.Ifindex), + } + + return &state +} + +func parseXfrmState(m []byte, family int) (*XfrmState, error) { + msg := nl.DeserializeXfrmUsersaInfo(m) + // This is mainly for the state dump + if family != FAMILY_ALL && family != int(msg.Family) { + return nil, familyError + } + state := xfrmStateFromXfrmUsersaInfo(msg) + attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT: + var resAlgo *XfrmStateAlgo + if attr.Attr.Type == nl.XFRMA_ALG_AUTH { + if state.Auth == nil { + state.Auth = new(XfrmStateAlgo) + } + resAlgo = state.Auth + } else { + state.Crypt = new(XfrmStateAlgo) + resAlgo = state.Crypt + } + algo := nl.DeserializeXfrmAlgo(attr.Value[:]) + (*resAlgo).Name = nl.BytesToString(algo.AlgName[:]) + (*resAlgo).Key = algo.AlgKey + case nl.XFRMA_ALG_AUTH_TRUNC: + if state.Auth == nil { + state.Auth = new(XfrmStateAlgo) + } + algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:]) + state.Auth.Name = nl.BytesToString(algo.AlgName[:]) + state.Auth.Key = algo.AlgKey + state.Auth.TruncateLen = int(algo.AlgTruncLen) + case nl.XFRMA_ALG_AEAD: + state.Aead = new(XfrmStateAlgo) + algo := nl.DeserializeXfrmAlgoAEAD(attr.Value[:]) + state.Aead.Name = nl.BytesToString(algo.AlgName[:]) + state.Aead.Key = algo.AlgKey + state.Aead.ICVLen = int(algo.AlgICVLen) + case 
nl.XFRMA_ENCAP: + encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:]) + state.Encap = new(XfrmStateEncap) + state.Encap.Type = EncapType(encap.EncapType) + state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport)) + state.Encap.DstPort = int(nl.Swap16(encap.EncapDport)) + state.Encap.OriginalAddress = encap.EncapOa.ToIP() + case nl.XFRMA_MARK: + mark := nl.DeserializeXfrmMark(attr.Value[:]) + state.Mark = new(XfrmMark) + state.Mark.Value = mark.Value + state.Mark.Mask = mark.Mask + case nl.XFRMA_SA_EXTRA_FLAGS: + flags := native.Uint32(attr.Value) + if (flags & nl.XFRM_SA_XFLAG_DONT_ENCAP_DSCP) != 0 { + state.DontEncapDSCP = true + } + if (flags & nl.XFRM_SA_XFLAG_OSEQ_MAY_WRAP) != 0 { + state.OSeqMayWrap = true + } + case nl.XFRMA_SET_MARK: + if state.OutputMark == nil { + state.OutputMark = new(XfrmMark) + } + state.OutputMark.Value = native.Uint32(attr.Value) + case nl.XFRMA_SET_MARK_MASK: + if state.OutputMark == nil { + state.OutputMark = new(XfrmMark) + } + state.OutputMark.Mask = native.Uint32(attr.Value) + if state.OutputMark.Mask == 0xffffffff { + state.OutputMark.Mask = 0 + } + case nl.XFRMA_IF_ID: + state.Ifid = int(native.Uint32(attr.Value)) + case nl.XFRMA_REPLAY_VAL: + if state.Replay == nil { + state.Replay = new(XfrmReplayState) + } + replay := nl.DeserializeXfrmReplayState(attr.Value[:]) + state.Replay.OSeq = replay.OSeq + state.Replay.Seq = replay.Seq + state.Replay.BitMap = replay.BitMap + } + } + + return state, nil +} + +// XfrmStateFlush will flush the xfrm state on the system. +// proto = 0 means any transformation protocols +// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` +func XfrmStateFlush(proto Proto) error { + return pkgHandle.XfrmStateFlush(proto) +} + +// XfrmStateFlush will flush the xfrm state on the system. 
+// proto = 0 means any transformation protocols +// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` +func (h *Handle) XfrmStateFlush(proto Proto) error { + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, unix.NLM_F_ACK) + + req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) + + _, err := req.Execute(unix.NETLINK_XFRM, 0) + return err +} + +func limitsToLft(lmts XfrmStateLimits, lft *nl.XfrmLifetimeCfg) { + if lmts.ByteSoft != 0 { + lft.SoftByteLimit = lmts.ByteSoft + } else { + lft.SoftByteLimit = nl.XFRM_INF + } + if lmts.ByteHard != 0 { + lft.HardByteLimit = lmts.ByteHard + } else { + lft.HardByteLimit = nl.XFRM_INF + } + if lmts.PacketSoft != 0 { + lft.SoftPacketLimit = lmts.PacketSoft + } else { + lft.SoftPacketLimit = nl.XFRM_INF + } + if lmts.PacketHard != 0 { + lft.HardPacketLimit = lmts.PacketHard + } else { + lft.HardPacketLimit = nl.XFRM_INF + } + lft.SoftAddExpiresSeconds = lmts.TimeSoft + lft.HardAddExpiresSeconds = lmts.TimeHard + lft.SoftUseExpiresSeconds = lmts.TimeUseSoft + lft.HardUseExpiresSeconds = lmts.TimeUseHard +} + +func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) { + *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft)) +} + +func curToStats(cur *nl.XfrmLifetimeCur, wstats *nl.XfrmStats, stats *XfrmStateStats) { + stats.Bytes = cur.Bytes + stats.Packets = cur.Packets + stats.AddTime = cur.AddTime + stats.UseTime = cur.UseTime + stats.ReplayWindow = wstats.ReplayWindow + stats.Replay = wstats.Replay + stats.Failed = wstats.IntegrityFailed +} + +func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { + msg := &nl.XfrmUsersaInfo{} + msg.Family = uint16(nl.GetIPFamily(state.Dst)) + msg.Id.Daddr.FromIP(state.Dst) + msg.Saddr.FromIP(state.Src) + msg.Id.Proto = uint8(state.Proto) + msg.Mode = uint8(state.Mode) + msg.Id.Spi = nl.Swap32(uint32(state.Spi)) + msg.Reqid = uint32(state.Reqid) + msg.ReplayWindow = uint8(state.ReplayWindow) + msg.Sel = nl.XfrmSelector{} + if state.Selector != nil { + selFromPolicy(&msg.Sel, state.Selector) + } + return msg +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go new file mode 100644 index 000000000..12fdd26d7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_unspecified.go @@ -0,0 +1,7 @@ +//go:build !linux +// +build !linux + +package netlink + +type XfrmPolicy struct{} +type XfrmState struct{} diff --git a/vendor/github.com/vishvananda/netns/.golangci.yml b/vendor/github.com/vishvananda/netns/.golangci.yml new file mode 100644 index 000000000..600bef78e --- /dev/null +++ b/vendor/github.com/vishvananda/netns/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/vendor/github.com/vishvananda/netns/LICENSE b/vendor/github.com/vishvananda/netns/LICENSE new file mode 100644 index 000000000..9f64db858 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/vishvananda/netns/README.md b/vendor/github.com/vishvananda/netns/README.md new file mode 100644 index 000000000..bdfedbe81 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/README.md @@ -0,0 +1,51 @@ +# netns - network namespaces in go # + +The netns package provides an ultra-simple interface for handling +network namespaces in go. Changing namespaces requires elevated +privileges, so in most cases this code needs to be run as root. 
+ +## Local Build and Test ## + +You can use go get command: + + go get github.com/vishvananda/netns + +Testing (requires root): + + sudo -E go test github.com/vishvananda/netns + +## Example ## + +```go +package main + +import ( + "fmt" + "net" + "runtime" + + "github.com/vishvananda/netns" +) + +func main() { + // Lock the OS Thread so we don't accidentally switch namespaces + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Save the current network namespace + origns, _ := netns.Get() + defer origns.Close() + + // Create a new network namespace + newns, _ := netns.New() + defer newns.Close() + + // Do something with the network namespace + ifaces, _ := net.Interfaces() + fmt.Printf("Interfaces: %v\n", ifaces) + + // Switch back to the original namespace + netns.Set(origns) +} + +``` diff --git a/vendor/github.com/vishvananda/netns/doc.go b/vendor/github.com/vishvananda/netns/doc.go new file mode 100644 index 000000000..cd4093a4d --- /dev/null +++ b/vendor/github.com/vishvananda/netns/doc.go @@ -0,0 +1,9 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. +package netns diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go new file mode 100644 index 000000000..2ed7c7e2f --- /dev/null +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -0,0 +1,283 @@ +package netns + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// Deprecated: use golang.org/x/sys/unix pkg instead. +const ( + CLONE_NEWUTS = unix.CLONE_NEWUTS /* New utsname group? */ + CLONE_NEWIPC = unix.CLONE_NEWIPC /* New ipcs */ + CLONE_NEWUSER = unix.CLONE_NEWUSER /* New user namespace */ + CLONE_NEWPID = unix.CLONE_NEWPID /* New pid namespace */ + CLONE_NEWNET = unix.CLONE_NEWNET /* New network namespace */ + CLONE_IO = unix.CLONE_IO /* Get io context */ +) + +const bindMountPath = "/run/netns" /* Bind mount path for named netns */ + +// Setns sets namespace using golang.org/x/sys/unix.Setns. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return unix.Setns(int(ns), nstype) +} + +// Set sets the current network namespace to the namespace represented +// by NsHandle. +func Set(ns NsHandle) (err error) { + return unix.Setns(int(ns), unix.CLONE_NEWNET) +} + +// New creates a new network namespace, sets it as current and returns +// a handle to it. 
+func New() (ns NsHandle, err error) { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { + return -1, err + } + return Get() +} + +// NewNamed creates a new named network namespace, sets it as current, +// and returns a handle to it +func NewNamed(name string) (NsHandle, error) { + if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { + err = os.MkdirAll(bindMountPath, 0755) + if err != nil { + return None(), err + } + } + + newNs, err := New() + if err != nil { + return None(), err + } + + namedPath := path.Join(bindMountPath, name) + + f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) + if err != nil { + newNs.Close() + return None(), err + } + f.Close() + + nsPath := fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid()) + err = unix.Mount(nsPath, namedPath, "bind", unix.MS_BIND, "") + if err != nil { + newNs.Close() + return None(), err + } + + return newNs, nil +} + +// DeleteNamed deletes a named network namespace +func DeleteNamed(name string) error { + namedPath := path.Join(bindMountPath, name) + + err := unix.Unmount(namedPath, unix.MNT_DETACH) + if err != nil { + return err + } + + return os.Remove(namedPath) +} + +// Get gets a handle to the current threads network namespace. +func Get() (NsHandle, error) { + return GetFromThread(os.Getpid(), unix.Gettid()) +} + +// GetFromPath gets a handle to a network namespace +// identified by the path +func GetFromPath(path string) (NsHandle, error) { + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return -1, err + } + return NsHandle(fd), nil +} + +// GetFromName gets a handle to a named network namespace such as one +// created by `ip netns add`. +func GetFromName(name string) (NsHandle, error) { + return GetFromPath(filepath.Join(bindMountPath, name)) +} + +// GetFromPid gets a handle to the network namespace of a given pid. +func GetFromPid(pid int) (NsHandle, error) { + return GetFromPath(fmt.Sprintf("/proc/%d/ns/net", pid)) +} + +// GetFromThread gets a handle to the network namespace of a given pid and tid. +func GetFromThread(pid, tid int) (NsHandle, error) { + return GetFromPath(fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)) +} + +// GetFromDocker gets a handle to the network namespace of a docker container. +// Id is prefixed matched against the running docker containers, so a short +// identifier can be used as long as it isn't ambiguous. +func GetFromDocker(id string) (NsHandle, error) { + pid, err := getPidForContainer(id) + if err != nil { + return -1, err + } + return GetFromPid(pid) +} + +// borrowed from docker/utils/utils.go +func findCgroupMountpoint(cgroupType string) (int, string, error) { + output, err := os.ReadFile("/proc/mounts") + if err != nil { + return -1, "", err + } + + // /proc/mounts has 6 fields per line, one mount per line, e.g. + // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 + for _, line := range strings.Split(string(output), "\n") { + parts := strings.Split(line, " ") + if len(parts) == 6 { + switch parts[2] { + case "cgroup2": + return 2, parts[1], nil + case "cgroup": + for _, opt := range strings.Split(parts[3], ",") { + if opt == cgroupType { + return 1, parts[1], nil + } + } + } + } + } + + return -1, "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) +} + +// Returns the relative path to the cgroup docker is running in. 
+// borrowed from docker/utils/utils.go +// modified to get the docker pid instead of using /proc/self +func getDockerCgroup(cgroupVer int, cgroupType string) (string, error) { + dockerpid, err := os.ReadFile("/var/run/docker.pid") + if err != nil { + return "", err + } + result := strings.Split(string(dockerpid), "\n") + if len(result) == 0 || len(result[0]) == 0 { + return "", fmt.Errorf("docker pid not found in /var/run/docker.pid") + } + pid, err := strconv.Atoi(result[0]) + if err != nil { + return "", err + } + output, err := os.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + if err != nil { + return "", err + } + for _, line := range strings.Split(string(output), "\n") { + parts := strings.Split(line, ":") + // any type used by docker should work + if (cgroupVer == 1 && parts[1] == cgroupType) || + (cgroupVer == 2 && parts[1] == "") { + return parts[2], nil + } + } + return "", fmt.Errorf("cgroup '%s' not found in /proc/%d/cgroup", cgroupType, pid) +} + +// Returns the first pid in a container. +// borrowed from docker/utils/utils.go +// modified to only return the first pid +// modified to glob with id +// modified to search for newer docker containers +// modified to look for cgroups v2 +func getPidForContainer(id string) (int, error) { + pid := 0 + + // memory is chosen randomly, any cgroup used by docker works + cgroupType := "memory" + + cgroupVer, cgroupRoot, err := findCgroupMountpoint(cgroupType) + if err != nil { + return pid, err + } + + cgroupDocker, err := getDockerCgroup(cgroupVer, cgroupType) + if err != nil { + return pid, err + } + + id += "*" + + var pidFile string + if cgroupVer == 1 { + pidFile = "tasks" + } else if cgroupVer == 2 { + pidFile = "cgroup.procs" + } else { + return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer) + } + + attempts := []string{ + filepath.Join(cgroupRoot, cgroupDocker, id, pidFile), + // With more recent lxc versions use, cgroup will be in lxc/ + filepath.Join(cgroupRoot, cgroupDocker, "lxc", id, pidFile), + // With more recent docker, cgroup will be in docker/ + filepath.Join(cgroupRoot, cgroupDocker, "docker", id, pidFile), + // Even more recent docker versions under systemd use docker-.scope/ + filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", pidFile), + // Even more recent docker versions under cgroup/systemd/docker// + filepath.Join(cgroupRoot, "..", "systemd", "docker", id, pidFile), + // Kubernetes with docker and CNI is even more different. Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "pod*", id, pidFile), + // Another flavor of containers location in recent kubernetes 1.11+. Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, cgroupDocker, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), + // When runs inside of a container with recent kubernetes 1.11+. 
Works for BestEffort and Burstable QoS + filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), + // Same as above but for Guaranteed QoS + filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), + } + + var filename string + for _, attempt := range attempts { + filenames, _ := filepath.Glob(attempt) + if len(filenames) > 1 { + return pid, fmt.Errorf("Ambiguous id supplied: %v", filenames) + } else if len(filenames) == 1 { + filename = filenames[0] + break + } + } + + if filename == "" { + return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) + } + + output, err := os.ReadFile(filename) + if err != nil { + return pid, err + } + + result := strings.Split(string(output), "\n") + if len(result) == 0 || len(result[0]) == 0 { + return pid, fmt.Errorf("No pid found for container") + } + + pid, err = strconv.Atoi(result[0]) + if err != nil { + return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err) + } + + return pid, nil +} diff --git a/vendor/github.com/vishvananda/netns/netns_others.go b/vendor/github.com/vishvananda/netns/netns_others.go new file mode 100644 index 000000000..048983774 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/netns_others.go @@ -0,0 +1,60 @@ +//go:build !linux +// +build !linux + +package netns + +import ( + "errors" +) + +var ( + ErrNotImplemented = errors.New("not implemented") +) + +// Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It +// is not implemented on other platforms. +// +// Deprecated: Use golang.org/x/sys/unix.Setns instead. +func Setns(ns NsHandle, nstype int) (err error) { + return ErrNotImplemented +} + +func Set(ns NsHandle) (err error) { + return ErrNotImplemented +} + +func New() (ns NsHandle, err error) { + return -1, ErrNotImplemented +} + +func NewNamed(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func DeleteNamed(name string) error { + return ErrNotImplemented +} + +func Get() (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromPath(path string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromName(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromPid(pid int) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromThread(pid, tid int) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromDocker(id string) (NsHandle, error) { + return -1, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netns/nshandle_linux.go b/vendor/github.com/vishvananda/netns/nshandle_linux.go new file mode 100644 index 000000000..1baffb66a --- /dev/null +++ b/vendor/github.com/vishvananda/netns/nshandle_linux.go @@ -0,0 +1,73 @@ +package netns + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +// NsHandle is a handle to a network namespace. It can be cast directly +// to an int and used as a file descriptor. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. This is done by comparing the device and inode that the +// file descriptors point to. +func (ns NsHandle) Equal(other NsHandle) bool { + if ns == other { + return true + } + var s1, s2 unix.Stat_t + if err := unix.Fstat(int(ns), &s1); err != nil { + return false + } + if err := unix.Fstat(int(other), &s2); err != nil { + return false + } + return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino) +} + +// String shows the file descriptor number and its dev and inode. 
+func (ns NsHandle) String() string { + if ns == -1 { + return "NS(none)" + } + var s unix.Stat_t + if err := unix.Fstat(int(ns), &s); err != nil { + return fmt.Sprintf("NS(%d: unknown)", ns) + } + return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino) +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. +func (ns NsHandle) UniqueId() string { + if ns == -1 { + return "NS(none)" + } + var s unix.Stat_t + if err := unix.Fstat(int(ns), &s); err != nil { + return "NS(unknown)" + } + return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino) +} + +// IsOpen returns true if Close() has not been called. +func (ns NsHandle) IsOpen() bool { + return ns != -1 +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is not safe to use an NsHandle after Close() is called. +func (ns *NsHandle) Close() error { + if err := unix.Close(int(*ns)); err != nil { + return err + } + *ns = -1 + return nil +} + +// None gets an empty (closed) NsHandle. +func None() NsHandle { + return NsHandle(-1) +} diff --git a/vendor/github.com/vishvananda/netns/nshandle_others.go b/vendor/github.com/vishvananda/netns/nshandle_others.go new file mode 100644 index 000000000..af727bc09 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/nshandle_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package netns + +// NsHandle is a handle to a network namespace. It can only be used on Linux, +// but provides stub methods on other platforms. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. It is only implemented on Linux. +func (ns NsHandle) Equal(_ NsHandle) bool { + return false +} + +// String shows the file descriptor number and its dev and inode. +// It is only implemented on Linux, and returns "NS(none)" on other +// platforms. +func (ns NsHandle) String() string { + return "NS(none)" +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. It is only implemented on Linux, +// and returns "NS(none)" on other platforms. +func (ns NsHandle) UniqueId() string { + return "NS(none)" +} + +// IsOpen returns true if Close() has not been called. It is only implemented +// on Linux and always returns false on other platforms. +func (ns NsHandle) IsOpen() bool { + return false +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is only implemented on Linux. +func (ns *NsHandle) Close() error { + return nil +} + +// None gets an empty (closed) NsHandle. +func None() NsHandle { + return NsHandle(-1) +} diff --git a/vendor/github.com/vmware/go-ipfix/LICENSE b/vendor/github.com/vmware/go-ipfix/LICENSE new file mode 100644 index 000000000..020c824af --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/LICENSE @@ -0,0 +1,208 @@ +go-ipfix +Copyright 2020 VMware, Inc. + +The Apache 2.0 license (the "License") set forth below applies to all parts of the go-ipfix project. +You may not use this file except in compliance with the License. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
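Taken together, the vendored netns files above encode two usage rules worth making explicit: the current network namespace is per OS thread (hence the `runtime.LockOSThread` advice in `doc.go`), and namespace identity is checked by comparing the device and inode behind the file descriptor (`NsHandle.Equal`), never the fd number. Here is a minimal error-checked sketch of that pattern, using only the vendored API shown above (`netns.Get`, `netns.New`, `netns.Set`, `NsHandle.Equal`); it requires Linux and root, and the log messages are illustrative:

```go
package main

import (
	"log"
	"runtime"

	"github.com/vishvananda/netns"
)

func main() {
	// The current netns is per OS thread: pin this goroutine so the
	// Get/New/Set sequence below all runs on the same thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	orig, err := netns.Get()
	if err != nil {
		log.Fatalf("reading current netns: %v", err)
	}
	defer orig.Close()

	// New() unshares a network namespace and leaves the thread inside it.
	created, err := netns.New()
	if err != nil {
		log.Fatalf("creating netns: %v", err)
	}
	defer created.Close()

	// Equal compares the dev/inode behind the descriptors, not the fd
	// numbers, so two handles to the same namespace compare equal.
	if created.Equal(orig) {
		log.Fatal("expected to be in a different namespace")
	}

	// Switch back to where we started.
	if err := netns.Set(orig); err != nil {
		log.Fatalf("restoring netns: %v", err)
	}
}
```

Unlike the README example, every call here is error-checked; silently ignoring a failed `Get` or `Set` can leave subsequent network calls running in the wrong namespace.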
diff --git a/vendor/github.com/vmware/go-ipfix/NOTICE b/vendor/github.com/vmware/go-ipfix/NOTICE new file mode 100644 index 000000000..5be597032 --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/NOTICE @@ -0,0 +1,6 @@ +go-ipfix +Copyright 2020 VMware, Inc. + +This product is licensed to you under the Apache 2.0 license (the "License"). You may not use this product except in compliance with the Apache 2.0 License. + +This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/ie.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/ie.go new file mode 100644 index 000000000..e18428fbc --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/ie.go @@ -0,0 +1,591 @@ +// Copyright 2020 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package entities + +import ( + "encoding/binary" + "fmt" + "math" + "net" +) + +type IEDataType uint8 + +const ( + OctetArray IEDataType = iota + Unsigned8 + Unsigned16 + Unsigned32 + Unsigned64 + Signed8 + Signed16 + Signed32 + Signed64 + Float32 + Float64 + Boolean + MacAddress + String + DateTimeSeconds + DateTimeMilliseconds + DateTimeMicroseconds + DateTimeNanoseconds + Ipv4Address + Ipv6Address + BasicList + SubTemplateList + SubTemplateMultiList + InvalidDataType = 255 +) + +const VariableLength uint16 = 65535 + +var InfoElementLength = map[IEDataType]uint16{ + OctetArray: VariableLength, + Unsigned8: 1, + Unsigned16: 2, + Unsigned32: 4, + Unsigned64: 8, + Signed8: 1, + Signed16: 2, + Signed32: 4, + Signed64: 8, + Float32: 4, + Float64: 8, + Boolean: 1, + MacAddress: 6, + String: VariableLength, + DateTimeSeconds: 4, + DateTimeMilliseconds: 8, + DateTimeMicroseconds: 8, + DateTimeNanoseconds: 8, + Ipv4Address: 4, + Ipv6Address: 16, + BasicList: VariableLength, + SubTemplateList: VariableLength, + SubTemplateMultiList: VariableLength, + InvalidDataType: 0, +} + +// InfoElement (IE) follows the specification in Section 2.1 of RFC7012 +type InfoElement struct { + // Name of the IE + Name string + // Identifier for IE; follows Section 4.3 of RFC7013 + ElementId uint16 + // dataType follows the specification in RFC7012(section 3.1)/RFC5610(section 3.1) + DataType IEDataType + // Enterprise number or 0 (0 for IANA registry) + EnterpriseId uint32 + // Length of IE + Len uint16 +} + +func NewInfoElement(name string, ieID uint16, ieType IEDataType, entID uint32, len uint16) *InfoElement { + return &InfoElement{ + Name: name, + ElementId: ieID, + DataType: ieType, + EnterpriseId: entID, + Len: len, + } +} + +func IENameToType(name string) IEDataType { + switch name { + case "octetArray": + return OctetArray + case "unsigned8": + return Unsigned8 + case "unsigned16": + return Unsigned16 + case "unsigned32": + return Unsigned32 + case "unsigned64": + return Unsigned64 + case "signed8": + return Signed8 + 
case "signed16": + return Signed16 + case "signed32": + return Signed32 + case "signed64": + return Signed64 + case "float32": + return Float32 + case "float64": + return Float64 + case "boolean": + return Boolean + case "macAddress": + return MacAddress + case "string": + return String + case "dateTimeSeconds": + return DateTimeSeconds + case "dateTimeMilliseconds": + return DateTimeMilliseconds + case "dateTimeMicroseconds": + return DateTimeMicroseconds + case "dateTimeNanoseconds": + return DateTimeNanoseconds + case "ipv4Address": + return Ipv4Address + case "ipv6Address": + return Ipv6Address + case "basicList": + return BasicList + case "subTemplateList": + return SubTemplateList + case "subTemplateMultiList": + return SubTemplateMultiList + } + return InvalidDataType +} + +// decodeToIEDataType is to decode to specific type. This is only used for testing. +func decodeToIEDataType(dataType IEDataType, val interface{}) (interface{}, error) { + value, ok := val.([]byte) + if !ok { + return nil, fmt.Errorf("error when converting value to []bytes for decoding") + } + switch dataType { + case Unsigned8: + return value[0], nil + case Unsigned16: + return binary.BigEndian.Uint16(value), nil + case Unsigned32: + return binary.BigEndian.Uint32(value), nil + case Unsigned64: + return binary.BigEndian.Uint64(value), nil + case Signed8: + return int8(value[0]), nil + case Signed16: + return int16(binary.BigEndian.Uint16(value)), nil + case Signed32: + return int32(binary.BigEndian.Uint32(value)), nil + case Signed64: + return int64(binary.BigEndian.Uint64(value)), nil + case Float32: + return math.Float32frombits(binary.BigEndian.Uint32(value)), nil + case Float64: + return math.Float64frombits(binary.BigEndian.Uint64(value)), nil + case Boolean: + if int8(value[0]) == 1 { + return true, nil + } else { + return false, nil + } + case DateTimeSeconds: + v := binary.BigEndian.Uint32(value) + return v, nil + case DateTimeMilliseconds: + v := binary.BigEndian.Uint64(value) + return v, nil + case DateTimeMicroseconds, DateTimeNanoseconds: + return nil, fmt.Errorf("API does not support micro and nano seconds types yet") + case MacAddress: + return net.HardwareAddr(value), nil + case Ipv4Address, Ipv6Address: + return net.IP(value), nil + case String: + return string(value), nil + default: + return nil, fmt.Errorf("API supports only valid information elements with datatypes given in RFC7011") + } +} + +// DecodeAndCreateInfoElementWithValue takes in the info element and its value in bytes, and +// returns appropriate InfoElementWithValue. 
+func DecodeAndCreateInfoElementWithValue(element *InfoElement, value []byte) (InfoElementWithValue, error) {
+	switch element.DataType {
+	case Unsigned8:
+		var val uint8
+		if value == nil {
+			val = 0
+		} else {
+			val = value[0]
+		}
+		return NewUnsigned8InfoElement(element, val), nil
+	case Unsigned16:
+		var val uint16
+		if value == nil {
+			val = 0
+		} else {
+			val = binary.BigEndian.Uint16(value)
+		}
+		return NewUnsigned16InfoElement(element, val), nil
+	case Unsigned32:
+		var val uint32
+		if value == nil {
+			val = 0
+		} else {
+			val = binary.BigEndian.Uint32(value)
+		}
+		return NewUnsigned32InfoElement(element, val), nil
+	case Unsigned64:
+		var val uint64
+		if value == nil {
+			val = 0
+		} else {
+			val = binary.BigEndian.Uint64(value)
+		}
+		return NewUnsigned64InfoElement(element, val), nil
+	case Signed8:
+		var val int8
+		if value == nil {
+			val = 0
+		} else {
+			val = int8(value[0])
+		}
+		return NewSigned8InfoElement(element, val), nil
+	case Signed16:
+		var val int16
+		if value == nil {
+			val = 0
+		} else {
+			val = int16(binary.BigEndian.Uint16(value))
+		}
+		return NewSigned16InfoElement(element, val), nil
+	case Signed32:
+		var val int32
+		if value == nil {
+			val = 0
+		} else {
+			val = int32(binary.BigEndian.Uint32(value))
+		}
+		return NewSigned32InfoElement(element, val), nil
+	case Signed64:
+		var val int64
+		if value == nil {
+			val = 0
+		} else {
+			val = int64(binary.BigEndian.Uint64(value))
+		}
+		return NewSigned64InfoElement(element, val), nil
+	case Float32:
+		var val float32
+		if value == nil {
+			val = 0
+		} else {
+			val = math.Float32frombits(binary.BigEndian.Uint32(value))
+		}
+		return NewFloat32InfoElement(element, val), nil
+	case Float64:
+		var val float64
+		if value == nil {
+			val = 0
+		} else {
+			val = math.Float64frombits(binary.BigEndian.Uint64(value))
+		}
+		return NewFloat64InfoElement(element, val), nil
+	case Boolean:
+		if value == nil {
+			return NewBoolInfoElement(element, false), nil
+		}
+		if int8(value[0]) == 1 {
+			return NewBoolInfoElement(element, true), nil
+		} else {
+			return NewBoolInfoElement(element, false), nil
+		}
+	case DateTimeSeconds:
+		var val uint32
+		if value == nil {
+			val = 0
+		} else {
+			val = binary.BigEndian.Uint32(value)
+		}
+		return NewDateTimeSecondsInfoElement(element, val), nil
+	case DateTimeMilliseconds:
+		var val uint64
+		if value == nil {
+			val = 0
+		} else {
+			val = binary.BigEndian.Uint64(value)
+		}
+		return NewDateTimeMillisecondsInfoElement(element, val), nil
+	case DateTimeMicroseconds, DateTimeNanoseconds:
+		return nil, fmt.Errorf("API does not support micro and nano seconds types yet")
+	case MacAddress:
+		if value == nil {
+			return NewMacAddressInfoElement(element, nil), nil
+		} else {
+			// make sure that we make a copy of the slice, instead of using it as is
+			// otherwise the underlying array for value may not be GC'd until the IE is GC'd
+			// the underlying array may be much larger than the value slice itself
+			addr := append([]byte{}, value...)
+			return NewMacAddressInfoElement(element, addr), nil
+		}
+	case Ipv4Address, Ipv6Address:
+		if value == nil {
+			return NewIPAddressInfoElement(element, nil), nil
+		} else {
+			// make sure that we make a copy of the slice, instead of using it as is
+			// otherwise the underlying array for value may not be GC'd until the IE is GC'd
+			// the underlying array may be much larger than the value slice itself
+			addr := append([]byte{}, value...)
+ return NewIPAddressInfoElement(element, addr), nil + } + case String: + var val string + if value == nil { + val = "" + } else { + val = string(value) + } + return NewStringInfoElement(element, val), nil + default: + return nil, fmt.Errorf("API supports only valid information elements with datatypes given in RFC7011") + } +} + +// EncodeToIEDataType is to encode data to specific type to the buff. This is only +// used for testing. +func EncodeToIEDataType(dataType IEDataType, val interface{}) ([]byte, error) { + switch dataType { + case Unsigned8: + v, ok := val.(uint8) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type uint8", val) + } + return []byte{v}, nil + case Unsigned16: + v, ok := val.(uint16) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type uint16", val) + } + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, v) + return b, nil + case Unsigned32: + v, ok := val.(uint32) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type uint32", val) + } + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, v) + return b, nil + case Unsigned64: + v, ok := val.(uint64) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type uint64", val) + } + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b, nil + case Signed8: + v, ok := val.(int8) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type int8", val) + } + return []byte{byte(v)}, nil + case Signed16: + v, ok := val.(int16) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type int16", val) + } + b := make([]byte, 2) + binary.BigEndian.PutUint16(b, uint16(v)) + return b, nil + case Signed32: + v, ok := val.(int32) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type int32", val) + } + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + return b, nil + case Signed64: + v, ok := val.(int64) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type int64", val) + } + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b, nil + case Float32: + v, ok := val.(float32) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type float32", val) + } + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, math.Float32bits(v)) + return b, nil + case Float64: + v, ok := val.(float64) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type float64", val) + } + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, math.Float64bits(v)) + return b, nil + case Boolean: + v, ok := val.(bool) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type bool", val) + } + b := make([]byte, 1) + // Following boolean spec from RFC7011 + if v { + b[0] = byte(int8(1)) + } else { + b[0] = byte(int8(2)) + } + return b, nil + case DateTimeSeconds: + v, ok := val.(uint32) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type uint32", val) + } + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, v) + return b, nil + case DateTimeMilliseconds: + v, ok := val.(uint64) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type uint64", val) + } + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + return b, nil + // Currently only supporting seconds and milliseconds + case DateTimeMicroseconds, DateTimeNanoseconds: + // TODO: RFC 7011 has extra spec for these data types. 
Need to follow that + return nil, fmt.Errorf("API does not support micro and nano seconds types yet") + case MacAddress: + // Expects net.Hardware type + v, ok := val.(net.HardwareAddr) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type net.HardwareAddr for this element", val) + } + return v, nil + case Ipv4Address: + // Expects net.IP type + v, ok := val.(net.IP) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type net.IP for this element", val) + } + if ipv4Add := v.To4(); ipv4Add != nil { + return ipv4Add, nil + } else { + return nil, fmt.Errorf("provided IP %v does not belong to IPv4 address family", v) + } + case Ipv6Address: + // Expects net.IP type + v, ok := val.(net.IP) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type net.IP for this element", val) + } + if ipv6Add := v.To16(); ipv6Add != nil { + return ipv6Add, nil + } else { + return nil, fmt.Errorf("provided IPv6 address %v is not of correct length", v) + } + case String: + v, ok := val.(string) + if !ok { + return nil, fmt.Errorf("val argument %v is not of type string for this element", val) + } + var encodedBytes []byte + if len(v) < 255 { + encodedBytes = make([]byte, len(v)+1) + encodedBytes[0] = uint8(len(v)) + copy(encodedBytes[1:], v) + } else if len(v) <= math.MaxUint16 { + encodedBytes = make([]byte, len(v)+3) + encodedBytes[0] = byte(255) + binary.BigEndian.PutUint16(encodedBytes[1:3], uint16(len(v))) + copy(encodedBytes[3:], v) + } else { + return nil, fmt.Errorf("provided String value is too long and cannot be encoded: len=%d, maxlen=%d", len(v), math.MaxUint16) + } + return encodedBytes, nil + } + return nil, fmt.Errorf("API supports only valid information elements with datatypes given in RFC7011") +} + +// encodeInfoElementValueToBuff is to encode data to specific type to the buff +func encodeInfoElementValueToBuff(element InfoElementWithValue, buffer []byte, index int) error { + if index+element.GetLength() > len(buffer) { + return fmt.Errorf("buffer size is not enough for encoding") + } + switch element.GetDataType() { + case Unsigned8: + copy(buffer[index:index+1], []byte{element.GetUnsigned8Value()}) + case Unsigned16: + binary.BigEndian.PutUint16(buffer[index:], element.GetUnsigned16Value()) + case Unsigned32: + binary.BigEndian.PutUint32(buffer[index:], element.GetUnsigned32Value()) + case Unsigned64: + binary.BigEndian.PutUint64(buffer[index:], element.GetUnsigned64Value()) + case Signed8: + copy(buffer[index:index+1], []byte{byte(element.GetSigned8Value())}) + case Signed16: + binary.BigEndian.PutUint16(buffer[index:], uint16(element.GetSigned16Value())) + case Signed32: + binary.BigEndian.PutUint32(buffer[index:], uint32(element.GetSigned32Value())) + case Signed64: + binary.BigEndian.PutUint64(buffer[index:], uint64(element.GetSigned64Value())) + case Float32: + binary.BigEndian.PutUint32(buffer[index:], math.Float32bits(element.GetFloat32Value())) + case Float64: + binary.BigEndian.PutUint64(buffer[index:], math.Float64bits(element.GetFloat64Value())) + case Boolean: + // Following boolean spec from RFC7011 + indicator := byte(int8(1)) + if !element.GetBooleanValue() { + indicator = byte(int8(2)) + } + copy(buffer[index:index+1], []byte{indicator}) + case DateTimeSeconds: + binary.BigEndian.PutUint32(buffer[index:], element.GetUnsigned32Value()) + case DateTimeMilliseconds: + binary.BigEndian.PutUint64(buffer[index:], element.GetUnsigned64Value()) + // Currently only supporting seconds and milliseconds + case DateTimeMicroseconds, DateTimeNanoseconds: + 
// TODO: RFC 7011 has extra spec for these data types. Need to follow that + return fmt.Errorf("API does not support micro and nano seconds types yet") + case MacAddress: + copy(buffer[index:], element.GetMacAddressValue()) + case Ipv4Address: + if ipv4Add := element.GetIPAddressValue().To4(); ipv4Add != nil { + copy(buffer[index:], ipv4Add) + } else { + return fmt.Errorf("provided IP %v does not belong to IPv4 address family", element.GetIPAddressValue()) + } + case Ipv6Address: + if ipv6Add := element.GetIPAddressValue().To16(); ipv6Add != nil { + copy(buffer[index:], ipv6Add) + } else { + return fmt.Errorf("provided IPv6 address %v is not of correct length", element.GetIPAddressValue()) + } + case String: + v := element.GetStringValue() + if len(v) < 255 { + buffer[index] = uint8(len(v)) + // See https://pkg.go.dev/builtin#copy + // As a special case, it also will copy bytes from a string to a slice of bytes. + copy(buffer[index+1:], v) + } else if len(v) <= math.MaxUint16 { + buffer[index] = byte(255) // marker byte for long strings + binary.BigEndian.PutUint16(buffer[index+1:index+3], uint16(len(v))) + copy(buffer[index+3:], v) + } else { + return fmt.Errorf("provided String value is too long and cannot be encoded: len=%d, maxlen=%d", len(v), math.MaxUint16) + } + default: + return fmt.Errorf("API supports only valid information elements with datatypes given in RFC7011") + } + return nil +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/ie_value.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/ie_value.go new file mode 100644 index 000000000..fef66b03c --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/ie_value.go @@ -0,0 +1,653 @@ +package entities + +import ( + "net" +) + +type InfoElementWithValue interface { + GetName() string + GetDataType() IEDataType + // GetInfoElement retrieves the info element. This is called after AddInfoElement. + // TODO: Handle error to make it more robust if it is called prior to AddInfoElement. 
+ GetInfoElement() *InfoElement + AddInfoElement(infoElement *InfoElement) + GetUnsigned8Value() uint8 + GetUnsigned16Value() uint16 + GetUnsigned32Value() uint32 + GetUnsigned64Value() uint64 + GetSigned8Value() int8 + GetSigned16Value() int16 + GetSigned32Value() int32 + GetSigned64Value() int64 + GetFloat32Value() float32 + GetFloat64Value() float64 + GetBooleanValue() bool + GetMacAddressValue() net.HardwareAddr + GetStringValue() string + GetIPAddressValue() net.IP + SetUnsigned8Value(val uint8) + SetUnsigned16Value(val uint16) + SetUnsigned32Value(val uint32) + SetUnsigned64Value(val uint64) + SetSigned8Value(val int8) + SetSigned16Value(val int16) + SetSigned32Value(val int32) + SetSigned64Value(val int64) + SetFloat32Value(val float32) + SetFloat64Value(val float64) + SetBooleanValue(val bool) + SetMacAddressValue(val net.HardwareAddr) + SetStringValue(val string) + SetIPAddressValue(val net.IP) + IsValueEmpty() bool + GetLength() int + ResetValue() +} + +type baseInfoElement struct { + element *InfoElement +} + +func (b *baseInfoElement) GetName() string { + return b.element.Name +} + +func (b *baseInfoElement) GetDataType() IEDataType { + return b.element.DataType +} + +func (b *baseInfoElement) GetInfoElement() *InfoElement { + return b.element +} + +func (b *baseInfoElement) AddInfoElement(infoElement *InfoElement) { + b.element = infoElement +} + +func (b *baseInfoElement) GetUnsigned8Value() uint8 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetUnsigned16Value() uint16 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetUnsigned32Value() uint32 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetUnsigned64Value() uint64 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetSigned8Value() int8 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetSigned16Value() int16 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetSigned32Value() int32 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetSigned64Value() int64 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetFloat32Value() float32 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetFloat64Value() float64 { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetBooleanValue() bool { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetMacAddressValue() net.HardwareAddr { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetStringValue() string { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) GetIPAddressValue() net.IP { + panic("accessing value of wrong data type") +} + +func (b *baseInfoElement) SetUnsigned8Value(val uint8) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetUnsigned16Value(val uint16) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetUnsigned32Value(val uint32) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetUnsigned64Value(val uint64) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetSigned8Value(val int8) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetSigned16Value(val int16) { + panic("setting value with wrong data type") +} + +func (b 
*baseInfoElement) SetSigned32Value(val int32) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetSigned64Value(val int64) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetFloat32Value(val float32) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetFloat64Value(val float64) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetBooleanValue(val bool) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetMacAddressValue(val net.HardwareAddr) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetStringValue(val string) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) SetIPAddressValue(val net.IP) { + panic("setting value with wrong data type") +} + +func (b *baseInfoElement) GetLength() int { + return int(b.element.Len) +} + +type Unsigned8InfoElement struct { + value uint8 + baseInfoElement +} + +func NewUnsigned8InfoElement(element *InfoElement, val uint8) *Unsigned8InfoElement { + infoElem := &Unsigned8InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (u8 *Unsigned8InfoElement) GetUnsigned8Value() uint8 { + return u8.value +} + +func (u8 *Unsigned8InfoElement) SetUnsigned8Value(val uint8) { + u8.value = val +} + +func (u8 *Unsigned8InfoElement) IsValueEmpty() bool { + return u8.value == 0 +} + +func (u8 *Unsigned8InfoElement) ResetValue() { + u8.value = 0 +} + +type Unsigned16InfoElement struct { + value uint16 + baseInfoElement +} + +func NewUnsigned16InfoElement(element *InfoElement, val uint16) *Unsigned16InfoElement { + infoElem := &Unsigned16InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (u16 *Unsigned16InfoElement) GetUnsigned16Value() uint16 { + return u16.value +} + +func (u16 *Unsigned16InfoElement) SetUnsigned16Value(val uint16) { + u16.value = val +} + +func (u16 *Unsigned16InfoElement) IsValueEmpty() bool { + return u16.value == 0 +} + +func (u16 *Unsigned16InfoElement) ResetValue() { + u16.value = 0 +} + +type Unsigned32InfoElement struct { + value uint32 + baseInfoElement +} + +func NewUnsigned32InfoElement(element *InfoElement, val uint32) *Unsigned32InfoElement { + infoElem := &Unsigned32InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (u32 *Unsigned32InfoElement) GetUnsigned32Value() uint32 { + return u32.value +} + +func (u32 *Unsigned32InfoElement) SetUnsigned32Value(val uint32) { + u32.value = val +} + +func (u32 *Unsigned32InfoElement) IsValueEmpty() bool { + return u32.value == 0 +} + +func (u32 *Unsigned32InfoElement) ResetValue() { + u32.value = 0 +} + +type Unsigned64InfoElement struct { + value uint64 + baseInfoElement +} + +func NewUnsigned64InfoElement(element *InfoElement, val uint64) *Unsigned64InfoElement { + infoElem := &Unsigned64InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (u64 *Unsigned64InfoElement) GetUnsigned64Value() uint64 { + return u64.value +} + +func (u64 *Unsigned64InfoElement) SetUnsigned64Value(val uint64) { + u64.value = val +} + +func (u64 *Unsigned64InfoElement) IsValueEmpty() bool { + return u64.value == 0 +} + +func (u64 *Unsigned64InfoElement) ResetValue() { + u64.value = 0 +} + +type Signed8InfoElement struct { + value int8 + baseInfoElement +} + +func NewSigned8InfoElement(element *InfoElement, val int8) *Signed8InfoElement { + infoElem := &Signed8InfoElement{ + 
value: val, + } + infoElem.element = element + return infoElem +} + +func (i8 *Signed8InfoElement) GetSigned8Value() int8 { + return i8.value +} + +func (i8 *Signed8InfoElement) SetSigned8Value(val int8) { + i8.value = val +} + +func (i8 *Signed8InfoElement) IsValueEmpty() bool { + return i8.value == 0 +} + +func (i8 *Signed8InfoElement) ResetValue() { + i8.value = 0 +} + +type Signed16InfoElement struct { + value int16 + baseInfoElement +} + +func NewSigned16InfoElement(element *InfoElement, val int16) *Signed16InfoElement { + infoElem := &Signed16InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (i16 *Signed16InfoElement) GetSigned16Value() int16 { + return i16.value +} + +func (i16 *Signed16InfoElement) SetSigned16Value(val int16) { + i16.value = val +} + +func (i16 *Signed16InfoElement) IsValueEmpty() bool { + return i16.value == 0 +} + +func (i16 *Signed16InfoElement) ResetValue() { + i16.value = 0 +} + +type Signed32InfoElement struct { + value int32 + baseInfoElement +} + +func NewSigned32InfoElement(element *InfoElement, val int32) *Signed32InfoElement { + infoElem := &Signed32InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (i32 *Signed32InfoElement) GetSigned32Value() int32 { + return i32.value +} + +func (i32 *Signed32InfoElement) SetSigned32Value(val int32) { + i32.value = val +} + +func (i32 *Signed32InfoElement) IsValueEmpty() bool { + return i32.value == 0 +} + +func (i32 *Signed32InfoElement) ResetValue() { + i32.value = 0 +} + +type Signed64InfoElement struct { + value int64 + baseInfoElement +} + +func NewSigned64InfoElement(element *InfoElement, val int64) *Signed64InfoElement { + infoElem := &Signed64InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (i64 *Signed64InfoElement) GetSigned64Value() int64 { + return i64.value +} + +func (i64 *Signed64InfoElement) SetSigned64Value(val int64) { + i64.value = val +} + +func (i64 *Signed64InfoElement) IsValueEmpty() bool { + return i64.value == 0 +} + +func (i64 *Signed64InfoElement) ResetValue() { + i64.value = 0 +} + +type Float32InfoElement struct { + value float32 + baseInfoElement +} + +func NewFloat32InfoElement(element *InfoElement, val float32) *Float32InfoElement { + infoElem := &Float32InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (f32 *Float32InfoElement) GetFloat32Value() float32 { + return f32.value +} + +func (f32 *Float32InfoElement) SetFloat32Value(val float32) { + f32.value = val +} + +func (f32 *Float32InfoElement) IsValueEmpty() bool { + return f32.value == 0 +} + +func (f32 *Float32InfoElement) ResetValue() { + f32.value = 0 +} + +type Float64InfoElement struct { + value float64 + baseInfoElement +} + +func NewFloat64InfoElement(element *InfoElement, val float64) *Float64InfoElement { + infoElem := &Float64InfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + +func (f64 *Float64InfoElement) GetFloat64Value() float64 { + return f64.value +} + +func (f64 *Float64InfoElement) SetFloat64Value(val float64) { + f64.value = val +} + +func (f64 *Float64InfoElement) IsValueEmpty() bool { + return f64.value == 0 +} + +func (f64 *Float64InfoElement) ResetValue() { + f64.value = 0 +} + +type BooleanInfoElement struct { + value bool + baseInfoElement +} + +func NewBoolInfoElement(element *InfoElement, val bool) *BooleanInfoElement { + infoElem := &BooleanInfoElement{ + value: val, + } + infoElem.element = element + return infoElem +} + 
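Each concrete wrapper above overrides only the getter/setter pair matching its IPFIX abstract data type; every other setter falls through to the panicking defaults on baseInfoElement. A minimal usage sketch, assuming the NewInfoElement signature used by the registry code later in this diff (name, element ID, data type, enterprise ID, length):

package main

import (
	"fmt"

	"github.com/vmware/go-ipfix/pkg/entities"
)

func main() {
	// sourceTransportPort: IANA element 7, unsigned16, 2 bytes on the wire.
	ie := entities.NewInfoElement("sourceTransportPort", 7, entities.Unsigned16, 0, 2)
	iev := entities.NewUnsigned16InfoElement(ie, 443)
	fmt.Println(iev.GetUnsigned16Value(), iev.GetLength()) // 443 2
	// iev.SetStringValue("https") // falls through to baseInfoElement and panics
}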
+func (b *BooleanInfoElement) GetBooleanValue() bool {
+	return b.value
+}
+
+func (b *BooleanInfoElement) SetBooleanValue(val bool) {
+	b.value = val
+}
+
+func (b *BooleanInfoElement) IsValueEmpty() bool {
+	return !b.value
+}
+
+func (b *BooleanInfoElement) ResetValue() {
+	b.value = false
+}
+
+type MacAddressInfoElement struct {
+	value net.HardwareAddr
+	baseInfoElement
+}
+
+func NewMacAddressInfoElement(element *InfoElement, val net.HardwareAddr) *MacAddressInfoElement {
+	infoElem := &MacAddressInfoElement{
+		value: val,
+	}
+	infoElem.element = element
+	return infoElem
+}
+
+func (mac *MacAddressInfoElement) GetMacAddressValue() net.HardwareAddr {
+	return mac.value
+}
+
+func (mac *MacAddressInfoElement) SetMacAddressValue(val net.HardwareAddr) {
+	mac.value = val
+}
+
+func (mac *MacAddressInfoElement) IsValueEmpty() bool {
+	return mac.value == nil
+}
+
+func (mac *MacAddressInfoElement) ResetValue() {
+	mac.value = nil
+}
+
+type StringInfoElement struct {
+	baseInfoElement
+	value string
+}
+
+func NewStringInfoElement(element *InfoElement, val string) *StringInfoElement {
+	infoElem := &StringInfoElement{
+		value: val,
+	}
+	infoElem.element = element
+	return infoElem
+}
+
+func (s *StringInfoElement) GetStringValue() string {
+	return s.value
+}
+
+// GetLength returns the on-wire length of the string including the RFC 7011
+// variable-length prefix: 1 byte for values shorter than 255 bytes, otherwise
+// 3 bytes (a 0xFF marker plus a 2-byte length).
+func (s *StringInfoElement) GetLength() int {
+	if len(s.value) < 255 {
+		return len(s.value) + 1
+	} else {
+		return len(s.value) + 3
+	}
+}
+
+func (s *StringInfoElement) SetStringValue(val string) {
+	s.value = val
+}
+
+func (s *StringInfoElement) IsValueEmpty() bool {
+	return s.value == ""
+}
+
+func (s *StringInfoElement) ResetValue() {
+	s.value = ""
+}
+
+type DateTimeSecondsInfoElement struct {
+	value uint32
+	baseInfoElement
+}
+
+func NewDateTimeSecondsInfoElement(element *InfoElement, val uint32) *DateTimeSecondsInfoElement {
+	infoElem := &DateTimeSecondsInfoElement{
+		value: val,
+	}
+	infoElem.element = element
+	return infoElem
+}
+
+func (dsec *DateTimeSecondsInfoElement) GetUnsigned32Value() uint32 {
+	return dsec.value
+}
+
+func (dsec *DateTimeSecondsInfoElement) SetUnsigned32Value(val uint32) {
+	dsec.value = val
+}
+
+func (dsec *DateTimeSecondsInfoElement) IsValueEmpty() bool {
+	return dsec.value == 0
+}
+
+func (dsec *DateTimeSecondsInfoElement) ResetValue() {
+	dsec.value = 0
+}
+
+type DateTimeMillisecondsInfoElement struct {
+	value uint64
+	baseInfoElement
+}
+
+func NewDateTimeMillisecondsInfoElement(element *InfoElement, val uint64) *DateTimeMillisecondsInfoElement {
+	infoElem := &DateTimeMillisecondsInfoElement{
+		value: val,
+	}
+	infoElem.element = element
+	return infoElem
+}
+
+func (dmsec *DateTimeMillisecondsInfoElement) GetUnsigned64Value() uint64 {
+	return dmsec.value
+}
+
+func (dmsec *DateTimeMillisecondsInfoElement) SetUnsigned64Value(val uint64) {
+	dmsec.value = val
+}
+
+func (dmsec *DateTimeMillisecondsInfoElement) IsValueEmpty() bool {
+	return dmsec.value == 0
+}
+
+func (dmsec *DateTimeMillisecondsInfoElement) ResetValue() {
+	dmsec.value = 0
+}
+
+type IPAddressInfoElement struct {
+	baseInfoElement
+	value net.IP
+}
+
+func NewIPAddressInfoElement(element *InfoElement, val net.IP) *IPAddressInfoElement {
+	infoElem := &IPAddressInfoElement{
+		value: val,
+	}
+	infoElem.element = element
+	return infoElem
+}
+
+func (ip *IPAddressInfoElement) GetIPAddressValue() net.IP {
+	return ip.value
+}
+
+func (ip *IPAddressInfoElement) SetIPAddressValue(val net.IP) {
+	ip.value = val
+}
+
+func (ip *IPAddressInfoElement) IsValueEmpty() bool {
+	return ip.value == nil
+}
+
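The +1/+3 in StringInfoElement.GetLength above mirrors RFC 7011's variable-length encoding: a value shorter than 255 bytes is prefixed with a single length byte, anything longer with a 0xFF marker followed by a two-byte length. A standalone sketch of that framing (encodeVarLen is a hypothetical helper, not part of this library):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVarLen frames val per RFC 7011 section 7: one length byte for values
// shorter than 255 bytes, otherwise 0xFF followed by a 2-byte big-endian length.
func encodeVarLen(val []byte) []byte {
	if len(val) < 255 {
		out := make([]byte, 0, len(val)+1)
		out = append(out, byte(len(val)))
		return append(out, val...)
	}
	out := make([]byte, 3, len(val)+3)
	out[0] = 0xFF
	binary.BigEndian.PutUint16(out[1:3], uint16(len(val)))
	return append(out, val...)
}

func main() {
	fmt.Println(len(encodeVarLen([]byte("eth0")))) // 5: 1 length byte + 4 payload bytes
}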
+func (ip *IPAddressInfoElement) ResetValue() { + ip.value = nil +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/message.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/message.go new file mode 100644 index 000000000..b2cab7ec8 --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/message.go @@ -0,0 +1,126 @@ +// Copyright 2020 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package entities + +import ( + "encoding/binary" +) + +const ( + MaxSocketMsgSize int = 65535 + MsgHeaderLength int = 16 +) + +// Message represents IPFIX message. +// TODO: Currently, it supports only one set. This will be extended to support multiple +// sets. +type Message struct { + msgHeader []byte + version uint16 + length uint16 + seqNumber uint32 + obsDomainID uint32 + exportTime uint32 + exportAddress string + isDecoding bool + set Set +} + +func NewMessage(isDecoding bool) *Message { + return &Message{ + msgHeader: make([]byte, MsgHeaderLength), + isDecoding: isDecoding, + } +} + +func (m *Message) GetVersion() uint16 { + return m.version +} + +func (m *Message) SetVersion(version uint16) { + m.version = version + if !m.isDecoding { + binary.BigEndian.PutUint16(m.msgHeader[0:2], version) + } +} + +func (m *Message) GetMessageLen() uint16 { + return m.length +} + +func (m *Message) SetMessageLen(len uint16) { + m.length = len + if !m.isDecoding { + binary.BigEndian.PutUint16(m.msgHeader[2:4], len) + } +} + +func (m *Message) GetSequenceNum() uint32 { + return m.seqNumber +} + +func (m *Message) SetSequenceNum(seqNum uint32) { + m.seqNumber = seqNum + if !m.isDecoding { + binary.BigEndian.PutUint32(m.msgHeader[8:12], seqNum) + } +} + +func (m *Message) GetObsDomainID() uint32 { + return m.obsDomainID +} + +func (m *Message) SetObsDomainID(obsDomainID uint32) { + m.obsDomainID = obsDomainID + if !m.isDecoding { + binary.BigEndian.PutUint32(m.msgHeader[12:], obsDomainID) + } +} + +func (m *Message) GetExportTime() uint32 { + return m.exportTime +} + +func (m *Message) SetExportTime(exportTime uint32) { + m.exportTime = exportTime + if !m.isDecoding { + binary.BigEndian.PutUint32(m.msgHeader[4:8], exportTime) + } +} + +func (m *Message) GetExportAddress() string { + return m.exportAddress +} + +func (m *Message) SetExportAddress(ipAddr string) { + m.exportAddress = ipAddr +} + +func (m *Message) GetSet() Set { + return m.set +} + +func (m *Message) AddSet(set Set) { + m.set = set +} + +func (m *Message) GetMsgHeader() []byte { + return m.msgHeader +} + +func (m *Message) ResetMsgHeader() { + m.msgHeader = nil + m.msgHeader = make([]byte, MsgHeaderLength) +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go new file mode 100644 index 000000000..8c2524de0 --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/record.go @@ -0,0 +1,289 @@ +// Copyright 2020 VMware, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package entities
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"k8s.io/klog/v2"
+)
+
+//go:generate mockgen -copyright_file ../../license_templates/license_header.raw.txt -destination=testing/mock_record.go -package=testing github.com/vmware/go-ipfix/pkg/entities Record
+
+// This file contains the encoding of fields in the record.
+// The record is built in a local buffer and written to the message buffer afterwards.
+// An alternative would be to write each field directly into the message buffer instead
+// of keeping a local buffer per record; to begin with, a local buffer is used.
+// The Record interface below exposes the relevant functions to the user.
+
+type Record interface {
+	PrepareRecord() error
+	AddInfoElement(element InfoElementWithValue) error
+	// TODO: Functions for multiple elements as well.
+	GetBuffer() []byte
+	GetTemplateID() uint16
+	GetFieldCount() uint16
+	GetOrderedElementList() []InfoElementWithValue
+	GetInfoElementWithValue(name string) (InfoElementWithValue, int, bool)
+	GetRecordLength() int
+	GetMinDataRecordLen() uint16
+	GetElementMap() map[string]interface{}
+}
+
+type baseRecord struct {
+	buffer             []byte
+	fieldCount         uint16
+	templateID         uint16
+	orderedElementList []InfoElementWithValue
+	isDecoding         bool
+	len                int
+	Record
+}
+
+type dataRecord struct {
+	baseRecord
+}
+
+func NewDataRecord(id uint16, numElements, numExtraElements int, isDecoding bool) *dataRecord {
+	return &dataRecord{
+		baseRecord{
+			fieldCount:         0,
+			templateID:         id,
+			isDecoding:         isDecoding,
+			orderedElementList: make([]InfoElementWithValue, numElements, numElements+numExtraElements),
+		},
+	}
+}
+
+func NewDataRecordFromElements(id uint16, elements []InfoElementWithValue, isDecoding bool) *dataRecord {
+	length := 0
+	if !isDecoding {
+		for idx := range elements {
+			length += elements[idx].GetLength()
+		}
+	}
+	return &dataRecord{
+		baseRecord{
+			fieldCount:         uint16(len(elements)),
+			templateID:         id,
+			isDecoding:         isDecoding,
+			len:                length,
+			orderedElementList: elements,
+		},
+	}
+}
+
+type templateRecord struct {
+	baseRecord
+	// Minimum data record length required to be sent for this template.
+	// Elements with variable length are considered to be one byte.
+ minDataRecLength uint16 + // index is used when adding elements to orderedElementList + index int +} + +func NewTemplateRecord(id uint16, numElements int, isDecoding bool) *templateRecord { + return &templateRecord{ + baseRecord{ + buffer: make([]byte, 4), + fieldCount: uint16(numElements), + templateID: id, + isDecoding: isDecoding, + orderedElementList: make([]InfoElementWithValue, numElements, numElements), + }, + 0, + 0, + } +} + +func NewTemplateRecordFromElements(id uint16, elements []InfoElementWithValue, isDecoding bool) *templateRecord { + r := &templateRecord{ + baseRecord{ + buffer: make([]byte, 4), + fieldCount: uint16(len(elements)), + templateID: id, + isDecoding: isDecoding, + orderedElementList: elements, + }, + 0, + len(elements), + } + for idx := range elements { + infoElement := elements[idx].GetInfoElement() + r.addInfoElement(infoElement) + } + return r +} + +func (b *baseRecord) GetTemplateID() uint16 { + return b.templateID +} + +func (b *baseRecord) GetFieldCount() uint16 { + return b.fieldCount +} + +func (b *baseRecord) GetOrderedElementList() []InfoElementWithValue { + return b.orderedElementList +} + +func (b *baseRecord) GetInfoElementWithValue(name string) (InfoElementWithValue, int, bool) { + for i, element := range b.orderedElementList { + if element.GetName() == name { + return element, i, true + } + } + return nil, 0, false +} + +func (b *baseRecord) GetElementMap() map[string]interface{} { + elements := make(map[string]interface{}) + orderedElements := b.GetOrderedElementList() + for _, element := range orderedElements { + switch element.GetDataType() { + case Unsigned8: + elements[element.GetName()] = element.GetUnsigned8Value() + case Unsigned16: + elements[element.GetName()] = element.GetUnsigned16Value() + case Unsigned32: + elements[element.GetName()] = element.GetUnsigned32Value() + case Unsigned64: + elements[element.GetName()] = element.GetUnsigned64Value() + case Signed8: + elements[element.GetName()] = element.GetSigned8Value() + case Signed16: + elements[element.GetName()] = element.GetSigned16Value() + case Signed32: + elements[element.GetName()] = element.GetSigned32Value() + case Signed64: + elements[element.GetName()] = element.GetSigned64Value() + case Float32: + elements[element.GetName()] = element.GetFloat32Value() + case Float64: + elements[element.GetName()] = element.GetFloat64Value() + case Boolean: + elements[element.GetName()] = element.GetBooleanValue() + case DateTimeSeconds: + elements[element.GetName()] = element.GetUnsigned32Value() + case DateTimeMilliseconds: + elements[element.GetName()] = element.GetUnsigned64Value() + case DateTimeMicroseconds, DateTimeNanoseconds: + err := fmt.Errorf("API does not support micro and nano seconds types yet") + elements[element.GetName()] = err + case MacAddress: + elements[element.GetName()] = element.GetMacAddressValue() + case Ipv4Address, Ipv6Address: + elements[element.GetName()] = element.GetIPAddressValue() + case String: + elements[element.GetName()] = element.GetStringValue() + default: + err := fmt.Errorf("API supports only valid information elements with datatypes given in RFC7011") + elements[element.GetName()] = err + } + } + return elements +} + +func (d *dataRecord) PrepareRecord() error { + // We do not have to do anything if it is data record + return nil +} + +func (d *dataRecord) GetBuffer() []byte { + if len(d.buffer) == d.len || d.isDecoding { + return d.buffer + } + d.buffer = make([]byte, d.len) + index := 0 + for _, element := range d.orderedElementList { + err := 
encodeInfoElementValueToBuff(element, d.buffer, index) + if err != nil { + klog.Error(err) + } + index += element.GetLength() + } + return d.buffer +} + +func (d *dataRecord) GetRecordLength() int { + return d.len +} + +func (d *dataRecord) AddInfoElement(element InfoElementWithValue) error { + if !d.isDecoding { + d.len = d.len + element.GetLength() + } + if len(d.orderedElementList) <= int(d.fieldCount) { + d.orderedElementList = append(d.orderedElementList, element) + } else { + d.orderedElementList[d.fieldCount] = element + } + d.fieldCount++ + return nil +} + +func (t *templateRecord) PrepareRecord() error { + // Add Template Record Header + binary.BigEndian.PutUint16(t.buffer[0:2], t.templateID) + binary.BigEndian.PutUint16(t.buffer[2:4], t.fieldCount) + return nil +} + +func (t *templateRecord) addInfoElement(infoElement *InfoElement) { + initialLength := len(t.buffer) + // Add field specifier {elementID: uint16, elementLen: uint16} + addBytes := make([]byte, 4) + binary.BigEndian.PutUint16(addBytes[0:2], infoElement.ElementId) + binary.BigEndian.PutUint16(addBytes[2:4], infoElement.Len) + t.buffer = append(t.buffer, addBytes...) + if infoElement.EnterpriseId != 0 { + // Set the MSB of elementID to 1 as per RFC7011 + t.buffer[initialLength] = t.buffer[initialLength] | 0x80 + addBytes = make([]byte, 4) + binary.BigEndian.PutUint32(addBytes, infoElement.EnterpriseId) + t.buffer = append(t.buffer, addBytes...) + } + // Keep track of minimum data record length required for sanity check + if infoElement.Len == VariableLength { + t.minDataRecLength = t.minDataRecLength + 1 + } else { + t.minDataRecLength = t.minDataRecLength + infoElement.Len + } +} + +func (t *templateRecord) AddInfoElement(element InfoElementWithValue) error { + infoElement := element.GetInfoElement() + // val could be used to specify smaller length than default? For now assert it to be nil + if !element.IsValueEmpty() { + return fmt.Errorf("template record cannot take element %v with non-empty value", infoElement.Name) + } + t.addInfoElement(infoElement) + t.orderedElementList[t.index] = element + t.index++ + return nil +} + +func (t *templateRecord) GetBuffer() []byte { + return t.buffer +} + +func (t *templateRecord) GetRecordLength() int { + return len(t.buffer) +} + +func (t *templateRecord) GetMinDataRecordLen() uint16 { + return t.minDataRecLength +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go b/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go new file mode 100644 index 000000000..972378c75 --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/entities/set.go @@ -0,0 +1,188 @@ +// Copyright 2020 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
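For orientation before the set implementation that follows: a minimal end-to-end sketch of how records, sets, and the exporting process (defined later in this diff) fit together. The collector address and the single-element template are illustrative assumptions; the template set must be sent before the data set so that the exporter's sanity check can find the template.

package main

import (
	"log"

	"github.com/vmware/go-ipfix/pkg/entities"
	"github.com/vmware/go-ipfix/pkg/exporter"
	"github.com/vmware/go-ipfix/pkg/registry"
)

func main() {
	registry.LoadRegistry()
	const templateID uint16 = 256 // the first ID handed out by NewTemplateID

	ie, err := registry.GetInfoElement("sourceTransportPort", registry.IANAEnterpriseID)
	if err != nil {
		log.Fatal(err)
	}

	// Template set: carries the element definition only; its value must stay empty.
	tmplElem, err := entities.DecodeAndCreateInfoElementWithValue(ie, nil)
	if err != nil {
		log.Fatal(err)
	}
	tmplSet := entities.NewSet(false)
	if err := tmplSet.PrepareSet(entities.Template, templateID); err != nil {
		log.Fatal(err)
	}
	if err := tmplSet.AddRecord([]entities.InfoElementWithValue{tmplElem}, templateID); err != nil {
		log.Fatal(err)
	}

	// Data set: the same element, now with a concrete value.
	dataSet := entities.NewSet(false)
	if err := dataSet.PrepareSet(entities.Data, templateID); err != nil {
		log.Fatal(err)
	}
	dataElem := entities.NewUnsigned16InfoElement(ie, 443)
	if err := dataSet.AddRecord([]entities.InfoElementWithValue{dataElem}, templateID); err != nil {
		log.Fatal(err)
	}

	// Illustrative collector address; 4739 is the IANA-assigned IPFIX port.
	ep, err := exporter.InitExportingProcess(exporter.ExporterInput{
		CollectorAddress:    "127.0.0.1:4739",
		CollectorProtocol:   "tcp",
		ObservationDomainID: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer ep.CloseConnToCollector()
	if _, err := ep.SendSet(tmplSet); err != nil { // template first
		log.Fatal(err)
	}
	if _, err := ep.SendSet(dataSet); err != nil {
		log.Fatal(err)
	}
}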
+ +package entities + +import ( + "encoding/binary" + "fmt" +) + +//go:generate mockgen -copyright_file ../../license_templates/license_header.raw.txt -destination=testing/mock_set.go -package=testing github.com/vmware/go-ipfix/pkg/entities Set + +const ( + // TemplateRefreshTimeOut is the template refresh time out for exporting process + TemplateRefreshTimeOut uint32 = 1800 + // TemplateTTL is the template time to live for collecting process + TemplateTTL = TemplateRefreshTimeOut * 3 + // TemplateSetID is the setID for template record + TemplateSetID uint16 = 2 + SetHeaderLen int = 4 +) + +type ContentType uint8 + +const ( + Template ContentType = iota + Data + // Add OptionsTemplate too when it is supported + Undefined = 255 +) + +type Set interface { + PrepareSet(setType ContentType, templateID uint16) error + ResetSet() + GetHeaderBuffer() []byte + GetSetLength() int + GetSetType() ContentType + UpdateLenInHeader() + AddRecord(elements []InfoElementWithValue, templateID uint16) error + AddRecordWithExtraElements(elements []InfoElementWithValue, numExtraElements int, templateID uint16) error + // Unlike AddRecord, AddRecordV2 uses the elements slice directly, instead of creating a new + // one. This can result in fewer memory allocations. The caller should not modify the + // contents of the slice after calling AddRecordV2. + AddRecordV2(elements []InfoElementWithValue, templateID uint16) error + GetRecords() []Record + GetNumberOfRecords() uint32 +} + +type set struct { + headerBuffer []byte + setType ContentType + records []Record + isDecoding bool + length int +} + +func NewSet(isDecoding bool) Set { + if isDecoding { + return &set{ + records: make([]Record, 0), + isDecoding: isDecoding, + } + } else { + return &set{ + headerBuffer: make([]byte, SetHeaderLen), + records: make([]Record, 0), + isDecoding: isDecoding, + length: SetHeaderLen, + } + } +} + +func (s *set) PrepareSet(setType ContentType, templateID uint16) error { + if setType == Undefined { + return fmt.Errorf("set type is not properly defined") + } else { + s.setType = setType + } + if !s.isDecoding { + // Create the set header and append it when encoding + s.createHeader(s.setType, templateID) + } + return nil +} + +func (s *set) ResetSet() { + if !s.isDecoding { + s.headerBuffer = nil + s.headerBuffer = make([]byte, SetHeaderLen) + s.length = SetHeaderLen + } + s.setType = Undefined + s.records = nil + s.records = make([]Record, 0) +} + +func (s *set) GetHeaderBuffer() []byte { + return s.headerBuffer +} + +func (s *set) GetSetLength() int { + return s.length +} + +func (s *set) GetSetType() ContentType { + return s.setType +} + +func (s *set) UpdateLenInHeader() { + // TODO:Add padding to the length when multiple sets are sent in IPFIX message + if !s.isDecoding { + // Add length to the set header + binary.BigEndian.PutUint16(s.headerBuffer[2:4], uint16(s.length)) + } +} + +func (s *set) AddRecord(elements []InfoElementWithValue, templateID uint16) error { + return s.AddRecordWithExtraElements(elements, 0, templateID) +} + +func (s *set) AddRecordWithExtraElements(elements []InfoElementWithValue, numExtraElements int, templateID uint16) error { + var record Record + if s.setType == Data { + record = NewDataRecord(templateID, len(elements), numExtraElements, s.isDecoding) + } else if s.setType == Template { + record = NewTemplateRecord(templateID, len(elements), s.isDecoding) + err := record.PrepareRecord() + if err != nil { + return err + } + } else { + return fmt.Errorf("set type is not supported") + } + for i := 
range elements { + err := record.AddInfoElement(elements[i]) + if err != nil { + return err + } + } + s.records = append(s.records, record) + s.length += record.GetRecordLength() + return nil +} + +func (s *set) AddRecordV2(elements []InfoElementWithValue, templateID uint16) error { + var record Record + if s.setType == Data { + record = NewDataRecordFromElements(templateID, elements, s.isDecoding) + } else if s.setType == Template { + record = NewTemplateRecordFromElements(templateID, elements, s.isDecoding) + err := record.PrepareRecord() + if err != nil { + return err + } + } else { + return fmt.Errorf("set type is not supported") + } + s.records = append(s.records, record) + s.length += record.GetRecordLength() + return nil +} + +func (s *set) GetRecords() []Record { + return s.records +} + +func (s *set) GetNumberOfRecords() uint32 { + return uint32(len(s.records)) +} + +func (s *set) createHeader(setType ContentType, templateID uint16) { + if setType == Template { + binary.BigEndian.PutUint16(s.headerBuffer[0:2], TemplateSetID) + } else if setType == Data { + binary.BigEndian.PutUint16(s.headerBuffer[0:2], templateID) + } +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/exporter/msg.go b/vendor/github.com/vmware/go-ipfix/pkg/exporter/msg.go new file mode 100644 index 000000000..1fb256122 --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/exporter/msg.go @@ -0,0 +1,56 @@ +// Copyright 2023 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exporter + +import ( + "fmt" + "time" + + "github.com/vmware/go-ipfix/pkg/entities" +) + +func CreateIPFIXMsg(set entities.Set, obsDomainID uint32, seqNumber uint32, exportTime time.Time) ([]byte, error) { + // Create a new message and use it to send the set. + msg := entities.NewMessage(false) + + // Check if message is exceeding the limit after adding the set. Include message + // header length too. + msgLen := entities.MsgHeaderLength + set.GetSetLength() + if msgLen > entities.MaxSocketMsgSize { + // This is applicable for both TCP and UDP sockets. + return nil, fmt.Errorf("message size exceeds max socket buffer size") + } + + // Set the fields in the message header. + // IPFIX version number is 10. 
+	// https://www.iana.org/assignments/ipfix/ipfix.xhtml#ipfix-version-numbers
+	// The 16-byte message header filled in by the Set* calls below is laid out per
+	// RFC 7011 section 3.1: version (bytes 0-2), length (2-4), export time (4-8),
+	// sequence number (8-12), observation domain ID (12-16).
+	msg.SetVersion(10)
+	msg.SetObsDomainID(obsDomainID)
+	msg.SetMessageLen(uint16(msgLen))
+	msg.SetExportTime(uint32(exportTime.Unix()))
+	msg.SetSequenceNum(seqNumber)
+
+	bytesSlice := make([]byte, msgLen)
+	copy(bytesSlice[:entities.MsgHeaderLength], msg.GetMsgHeader())
+	copy(bytesSlice[entities.MsgHeaderLength:entities.MsgHeaderLength+entities.SetHeaderLen], set.GetHeaderBuffer())
+	index := entities.MsgHeaderLength + entities.SetHeaderLen
+	for _, record := range set.GetRecords() {
+		// recLen rather than len, to avoid shadowing the builtin.
+		recLen := record.GetRecordLength()
+		copy(bytesSlice[index:index+recLen], record.GetBuffer())
+		index += recLen
+	}
+
+	return bytesSlice, nil
+}
diff --git a/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go b/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go
new file mode 100644
index 000000000..bce94d2ef
--- /dev/null
+++ b/vendor/github.com/vmware/go-ipfix/pkg/exporter/process.go
@@ -0,0 +1,485 @@
+// Copyright 2020 VMware, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exporter
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/pion/dtls/v2"
+	"k8s.io/klog/v2"
+
+	"github.com/vmware/go-ipfix/pkg/entities"
+)
+
+const startTemplateID uint16 = 255
+const defaultCheckConnInterval = 10 * time.Second
+const defaultJSONBufferLen = 5000
+
+type templateValue struct {
+	elements      []*entities.InfoElement
+	minDataRecLen uint16
+}
+
+// 1. Tested with one exporting process per exporter. Multiple collectors can be
+// supported by creating separate exporting process instances; this has not been tested yet.
+// 2. Only one observation point per observation domain is supported,
+// so no observation point ID is defined.
+// 3. Supports only TCP and UDP; one session at a time. SCTP is not supported.
+// 4. UDP should send MTU-sized packets as per RFC 7011. We do not honor that yet;
+// we rely on IP fragmentation and assume data loss in the network is minimal.
+// We will revisit this if there are any issues, and may take PathMTU from the user
+// as part of the exporter input.
+type ExportingProcess struct {
+	connToCollector net.Conn
+	obsDomainID     uint32
+	seqNumber       uint32
+	templateID      uint16
+	templatesMap    map[uint16]templateValue
+	templateRefCh   chan struct{}
+	templateMutex   sync.Mutex
+	sendJSONRecord  bool
+	jsonBufferLen   int
+}
+
+type ExporterTLSClientConfig struct {
+	// ServerName is passed to the server for SNI and is used in the client to check server
+	// certificates against. If ServerName is empty, the hostname used to contact the
+	// server is used.
+	ServerName string
+	// CAData holds PEM-encoded bytes for trusted root certificates for server.
+	CAData []byte
+	// CertData holds PEM-encoded bytes.
+	CertData []byte
+	// KeyData holds PEM-encoded bytes.
+	KeyData []byte
+}
+
+type ExporterInput struct {
+	// CollectorAddress needs to be provided in hostIP:port format.
+	CollectorAddress string
+	// CollectorProtocol needs to be provided in lower case format.
+	// We support "tcp" and "udp" protocols.
+	CollectorProtocol   string
+	ObservationDomainID uint32
+	TempRefTimeout      uint32
+	// TLSClientConfig is set to use an encrypted connection to the collector.
+	TLSClientConfig   *ExporterTLSClientConfig
+	IsIPv6            bool
+	SendJSONRecord    bool
+	JSONBufferLen     int
+	CheckConnInterval time.Duration
+}
+
+// InitExportingProcess takes in the collector address (hostIP:port format), obsID (observation ID)
+// and tempRefTimeout (template refresh timeout). tempRefTimeout applies only to
+// collectors listening over UDP; its unit is seconds. For TCP, any value can be
+// passed. For UDP, a value of 0 falls back to the 1800s default.
+//
+// PathMTU is recommended for UDP transport. If not given a valid value, i.e., either
+// 0 or a value above 1500, a default value of 512B is used as per RFC 7011.
+// PathMTU is optional for TCP, as the max socket buffer size of 65535 is used. It can
+// be provided as 0.
+// JSONBufferLen is recommended when sending JSON records. If not given a valid value,
+// a default of 5000B is used.
+func InitExportingProcess(input ExporterInput) (*ExportingProcess, error) {
+	var conn net.Conn
+	var err error
+	if input.TLSClientConfig != nil {
+		tlsConfig := input.TLSClientConfig
+		if input.CollectorProtocol == "tcp" { // use TLS
+			config, configErr := createClientConfig(tlsConfig)
+			if configErr != nil {
+				return nil, configErr
+			}
+			conn, err = tls.Dial(input.CollectorProtocol, input.CollectorAddress, config)
+			if err != nil {
+				klog.Errorf("Cannot create the TLS connection to the collector %s: %v", input.CollectorAddress, err)
+				return nil, err
+			}
+		} else if input.CollectorProtocol == "udp" { // use DTLS
+			// TODO: support client authentication
+			if len(tlsConfig.CertData) > 0 || len(tlsConfig.KeyData) > 0 {
+				klog.Error("Client authentication is not supported yet for DTLS; cert and key data will be ignored")
+			}
+			roots := x509.NewCertPool()
+			ok := roots.AppendCertsFromPEM(tlsConfig.CAData)
+			if !ok {
+				return nil, fmt.Errorf("failed to parse root certificate")
+			}
+			config := &dtls.Config{
+				RootCAs:              roots,
+				ExtendedMasterSecret: dtls.RequireExtendedMasterSecret,
+				ServerName:           tlsConfig.ServerName,
+			}
+			udpAddr, err := net.ResolveUDPAddr(input.CollectorProtocol, input.CollectorAddress)
+			if err != nil {
+				return nil, err
+			}
+			conn, err = dtls.Dial(udpAddr.Network(), udpAddr, config)
+			if err != nil {
+				klog.Errorf("Cannot create the DTLS connection to the collector %s: %v", udpAddr.String(), err)
+				return nil, err
+			}
+		}
+	} else {
+		conn, err = net.Dial(input.CollectorProtocol, input.CollectorAddress)
+		if err != nil {
+			klog.Errorf("Cannot create the connection to the collector %s: %v", input.CollectorAddress, err)
+			return nil, err
+		}
+	}
+	expProc := &ExportingProcess{
+		connToCollector: conn,
+		obsDomainID:     input.ObservationDomainID,
+		seqNumber:       0,
+		templateID:      startTemplateID,
+		templatesMap:    make(map[uint16]templateValue),
+		templateRefCh:   make(chan struct{}),
+		sendJSONRecord:  input.SendJSONRecord,
+	}
+
+	// Start a goroutine that checks whether the connection to the collector is still open.
+	if input.CollectorProtocol == "tcp" {
+		interval := input.CheckConnInterval
+		if interval == 0 {
+			interval = defaultCheckConnInterval
+		}
+		go func() {
+			ticker := time.NewTicker(interval)
+			oneByteForRead := make([]byte, 1)
+			defer ticker.Stop()
+			for {
+				select {
+				case <-ticker.C:
+					isConnected := expProc.checkConnToCollector(oneByteForRead)
+					if !isConnected {
+						expProc.CloseConnToCollector()
+						klog.Error("Connection to the collector is closed; stopping the connection check")
+						return
+					}
+				}
+			}
+		}()
+	}
+
+	// Template refresh logic is only for UDP transport.
+	if input.CollectorProtocol == "udp" {
+		if input.TempRefTimeout == 0 {
+			// Default value
+			input.TempRefTimeout = entities.TemplateRefreshTimeOut
+		}
+		go func() {
+			ticker := time.NewTicker(time.Duration(input.TempRefTimeout) * time.Second)
+			defer ticker.Stop()
+			for {
+				select {
+				case <-expProc.templateRefCh:
+					// The channel is closed by CloseConnToCollector; stop refreshing templates.
+					return
+				case <-ticker.C:
+					err := expProc.sendRefreshedTemplates()
+					if err != nil {
+						// Other option is sending messages through channel to library consumers
+						klog.Errorf("Error when sending refreshed templates: %v. Closing the connection to the IPFIX collector", err)
+						expProc.CloseConnToCollector()
+					}
+				}
+			}
+		}()
+	}
+	if expProc.sendJSONRecord {
+		if input.JSONBufferLen <= 0 {
+			expProc.jsonBufferLen = defaultJSONBufferLen
+		} else {
+			expProc.jsonBufferLen = input.JSONBufferLen
+		}
+	}
+	return expProc, nil
+}
+
+func (ep *ExportingProcess) SendSet(set entities.Set) (int, error) {
+	// Iterate over all records in the set.
+	setType := set.GetSetType()
+	if setType == entities.Undefined {
+		return 0, fmt.Errorf("set type is not properly defined")
+	}
+	for _, record := range set.GetRecords() {
+		if setType == entities.Template {
+			ep.updateTemplate(record.GetTemplateID(), record.GetOrderedElementList(), record.GetMinDataRecordLen())
+		} else if setType == entities.Data {
+			err := ep.dataRecSanityCheck(record)
+			if err != nil {
+				return 0, fmt.Errorf("error when doing sanity check: %v", err)
+			}
+		}
+	}
+	// Update the length in set header before sending the message.
+	set.UpdateLenInHeader()
+
+	var bytesSent int
+	var err error
+	if !ep.sendJSONRecord {
+		bytesSent, err = ep.createAndSendIPFIXMsg(set)
+	} else {
+		if setType == entities.Data {
+			bytesSent, err = ep.createAndSendJSONMsg(set)
+		}
+	}
+	if err != nil {
+		return bytesSent, err
+	}
+	return bytesSent, nil
+}
+
+func (ep *ExportingProcess) GetMsgSizeLimit() int {
+	return entities.MaxSocketMsgSize
+}
+
+func (ep *ExportingProcess) CloseConnToCollector() {
+	if !isChanClosed(ep.templateRefCh) {
+		close(ep.templateRefCh) // Close template refresh channel
+	}
+	err := ep.connToCollector.Close()
+	// Just log the error that happened when closing the connection. Not returning error as we do not expect library
+	// consumers to exit their programs with this error.
+	if err != nil {
+		klog.Errorf("Error when closing connection to collector: %v", err)
+	}
+}
+
+// checkConnToCollector checks whether the connection from the exporter is still open
+// by trying to read from the connection. A closed connection returns EOF from the read.
+func (ep *ExportingProcess) checkConnToCollector(oneByteForRead []byte) bool {
+	ep.connToCollector.SetReadDeadline(time.Now().Add(time.Millisecond))
+	if _, err := ep.connToCollector.Read(oneByteForRead); err == io.EOF {
+		return false
+	}
+	return true
+}
+
+// NewTemplateID is called to get an ID when creating a new template record.
+func (ep *ExportingProcess) NewTemplateID() uint16 {
+	ep.templateID++
+	return ep.templateID
+}
+
+// createAndSendIPFIXMsg takes in a set as input, creates the IPFIX message, and sends it out.
+// TODO: This method will change when we support sending multiple sets.
+func (ep *ExportingProcess) createAndSendIPFIXMsg(set entities.Set) (int, error) { + if set.GetSetType() == entities.Data { + ep.seqNumber = ep.seqNumber + set.GetNumberOfRecords() + } + bytesSlice, err := CreateIPFIXMsg(set, ep.obsDomainID, ep.seqNumber, time.Now()) + if err != nil { + return 0, err + } + + // Send the message on the exporter connection. + bytesSent, err := ep.connToCollector.Write(bytesSlice) + + if err != nil { + return bytesSent, fmt.Errorf("error when sending message on the connection: %v", err) + } else if bytesSent != len(bytesSlice) { + return bytesSent, fmt.Errorf("could not send the complete message on the connection") + } + + return bytesSent, nil +} + +// createAndSendJSONMsg takes in a set as input, creates the JSON record, and sends it out. +func (ep *ExportingProcess) createAndSendJSONMsg(set entities.Set) (int, error) { + var bytesSent int + for _, record := range set.GetRecords() { + elements := make(map[string]interface{}) + orderedElements := record.GetOrderedElementList() + for _, element := range orderedElements { + switch element.GetDataType() { + case entities.Unsigned8: + elements[element.GetName()] = element.GetUnsigned8Value() + case entities.Unsigned16: + elements[element.GetName()] = element.GetUnsigned16Value() + case entities.Unsigned32: + elements[element.GetName()] = element.GetUnsigned32Value() + case entities.Unsigned64: + elements[element.GetName()] = element.GetUnsigned64Value() + case entities.Signed8: + elements[element.GetName()] = element.GetSigned8Value() + case entities.Signed16: + elements[element.GetName()] = element.GetSigned16Value() + case entities.Signed32: + elements[element.GetName()] = element.GetSigned32Value() + case entities.Signed64: + elements[element.GetName()] = element.GetSigned64Value() + case entities.Float32: + elements[element.GetName()] = element.GetFloat32Value() + case entities.Float64: + elements[element.GetName()] = element.GetFloat64Value() + case entities.Boolean: + elements[element.GetName()] = element.GetBooleanValue() + case entities.DateTimeSeconds: + elements[element.GetName()] = element.GetUnsigned32Value() + case entities.DateTimeMilliseconds: + elements[element.GetName()] = element.GetUnsigned64Value() + case entities.DateTimeMicroseconds, entities.DateTimeNanoseconds: + return bytesSent, fmt.Errorf("API does not support micro and nano seconds types yet") + case entities.MacAddress: + elements[element.GetName()] = element.GetMacAddressValue() + case entities.Ipv4Address, entities.Ipv6Address: + elements[element.GetName()] = element.GetIPAddressValue() + case entities.String: + elements[element.GetName()] = element.GetStringValue() + default: + return bytesSent, fmt.Errorf("API supports only valid information elements with datatypes given in RFC7011") + } + } + message := make(map[string]interface{}, 2) + message["ipfix"] = elements + message["@timestamp"] = time.Now().Format(time.RFC3339) + writer := bytes.NewBuffer(make([]byte, 0, ep.jsonBufferLen)) + encoder := json.NewEncoder(writer) + err := encoder.Encode(message) + if err != nil { + return bytesSent, fmt.Errorf("error when encoding message to JSON: %v", err) + } + // Send the message on the exporter connection. 
+		n, err := ep.connToCollector.Write(writer.Bytes())
+		if err != nil {
+			return n, fmt.Errorf("error when sending message on the connection: %v", err)
+		}
+		bytesSent += n
+	}
+	return bytesSent, nil
+}
+
+func (ep *ExportingProcess) updateTemplate(id uint16, elements []entities.InfoElementWithValue, minDataRecLen uint16) {
+	ep.templateMutex.Lock()
+	defer ep.templateMutex.Unlock()
+
+	if _, exist := ep.templatesMap[id]; exist {
+		return
+	}
+	ep.templatesMap[id] = templateValue{
+		make([]*entities.InfoElement, len(elements)),
+		minDataRecLen,
+	}
+	for i, elem := range elements {
+		ep.templatesMap[id].elements[i] = elem.GetInfoElement()
+	}
+}
+
+//nolint:unused // Keeping this function for reference.
+func (ep *ExportingProcess) deleteTemplate(id uint16) error {
+	ep.templateMutex.Lock()
+	defer ep.templateMutex.Unlock()
+
+	if _, exist := ep.templatesMap[id]; !exist {
+		return fmt.Errorf("template %d does not exist in exporting process", id)
+	}
+	delete(ep.templatesMap, id)
+	return nil
+}
+
+func (ep *ExportingProcess) sendRefreshedTemplates() error {
+	// Send a refreshed template for every template in the template map.
+	templateSets := make([]entities.Set, 0)
+
+	ep.templateMutex.Lock()
+	for templateID, tempValue := range ep.templatesMap {
+		tempSet := entities.NewSet(false)
+		if err := tempSet.PrepareSet(entities.Template, templateID); err != nil {
+			ep.templateMutex.Unlock()
+			return err
+		}
+		elements := make([]entities.InfoElementWithValue, len(tempValue.elements))
+		var err error
+		for i, element := range tempValue.elements {
+			if elements[i], err = entities.DecodeAndCreateInfoElementWithValue(element, nil); err != nil {
+				ep.templateMutex.Unlock()
+				return err
+			}
+		}
+		err = tempSet.AddRecord(elements, templateID)
+		if err != nil {
+			ep.templateMutex.Unlock()
+			return err
+		}
+		templateSets = append(templateSets, tempSet)
+	}
+	// Unlock before SendSet: updateTemplate, called from SendSet, takes the same mutex.
+	ep.templateMutex.Unlock()
+
+	for _, templateSet := range templateSets {
+		if _, err := ep.SendSet(templateSet); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ep *ExportingProcess) dataRecSanityCheck(rec entities.Record) error {
+	templateID := rec.GetTemplateID()
+
+	ep.templateMutex.Lock()
+	defer ep.templateMutex.Unlock()
+
+	if _, exist := ep.templatesMap[templateID]; !exist {
+		return fmt.Errorf("process: templateID %d does not exist in exporting process", templateID)
+	}
+	if rec.GetFieldCount() != uint16(len(ep.templatesMap[templateID].elements)) {
+		return fmt.Errorf("process: field count of data does not match templateID %d", templateID)
+	}
+	if len(rec.GetBuffer()) < int(ep.templatesMap[templateID].minDataRecLen) {
+		return fmt.Errorf("process: Data Record does not pass the min required length (%d) check for template ID %d", ep.templatesMap[templateID].minDataRecLen, templateID)
+	}
+	return nil
+}
+
+func isChanClosed(ch <-chan struct{}) bool {
+	select {
+	case <-ch:
+		return true
+	default:
+	}
+	return false
+}
+
+func createClientConfig(config *ExporterTLSClientConfig) (*tls.Config, error) {
+	roots := x509.NewCertPool()
+	ok := roots.AppendCertsFromPEM(config.CAData)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse root certificate")
+	}
+	if config.CertData == nil {
+		return &tls.Config{
+			RootCAs:    roots,
+			MinVersion: tls.VersionTLS12,
+			ServerName: config.ServerName,
+		}, nil
+	}
+	cert, err := tls.X509KeyPair(config.CertData, config.KeyData)
+	if err != nil {
+		return nil, err
+	}
+	return &tls.Config{
+		Certificates: []tls.Certificate{cert},
+		RootCAs:      roots,
+		MinVersion:   tls.VersionTLS12,
+		ServerName:   config.ServerName,
+	}, nil
+}
diff --git
a/vendor/github.com/vmware/go-ipfix/pkg/registry/registry.go b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry.go new file mode 100644 index 000000000..de160026f --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry.go @@ -0,0 +1,201 @@ +// Copyright 2020 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package registry + +import ( + "fmt" + "strings" + + "github.com/vmware/go-ipfix/pkg/entities" +) + +const ( + // AntreaEnterpriseID is the enterprise ID for Antrea Information Elements + AntreaEnterpriseID uint32 = 56506 + // IANAEnterpriseID is the enterprise ID for IANA Information Elements + IANAEnterpriseID uint32 = 0 + // Enterprise ID for reverse Information Elements + IANAReversedEnterpriseID uint32 = 29305 +) + +// enum for flowType field in Antrea registry. +const ( + FlowTypeIntraNode = uint8(1) + FlowTypeInterNode = uint8(2) + FlowTypeToExternal = uint8(3) + FlowTypeFromExternal = uint8(4) +) + +// enum for ingressNetworkPolicyRuleAction and egressNetworkPolicyRuleAction field in Antrea registry. +const ( + NetworkPolicyRuleActionNoAction = uint8(0) + NetworkPolicyRuleActionAllow = uint8(1) + NetworkPolicyRuleActionDrop = uint8(2) + NetworkPolicyRuleActionReject = uint8(3) +) + +// enum for ingressNetworkPolicyType and egressNetworkPolicyType field in Antrea registry. +const ( + PolicyTypeK8sNetworkPolicy = uint8(1) + PolicyTypeAntreaNetworkPolicy = uint8(2) + PolicyTypeAntreaClusterNetworkPolicy = uint8(3) +) + +// placeholder of NetworkPolicyRulePriority for K8s Network Policy. +const ( + K8sNetworkPolicyRulePriority = int32(-1) +) + +// enum for flowEndReason field in IANA registry. 
+// List of RFC supported reasons: https://www.iana.org/assignments/ipfix/ipfix.xhtml#ipfix-flow-end-reason +const ( + IdleTimeoutReason = uint8(0x01) + ActiveTimeoutReason = uint8(0x02) + EndOfFlowReason = uint8(0x03) +) + +var ( + // globalRegistryByID shows mapping EnterpriseID -> Info element ID -> Info element + globalRegistryByID map[uint32]map[uint16]*entities.InfoElement + // globalRegistryByName shows mapping EnterpriseID -> Info element name -> Info element + globalRegistryByName map[uint32]map[string]*entities.InfoElement +) + +func InitNewRegistry(customEnterpriseID uint32) error { + if globalRegistryByID == nil { + return fmt.Errorf("Please use LoadRegistry before registering custom registry") + } + globalRegistryByID[customEnterpriseID] = make(map[uint16]*entities.InfoElement) + globalRegistryByName[customEnterpriseID] = make(map[string]*entities.InfoElement) + return nil +} + +func PutInfoElement(ie entities.InfoElement, enterpriseID uint32) error { + if _, exist := globalRegistryByName[enterpriseID]; !exist { + return fmt.Errorf("Registry with EnterpriseID %d is not Initialized, Please use InitNewRegistry", ie.EnterpriseId) + } else if _, exist = globalRegistryByName[enterpriseID][ie.Name]; exist { + return fmt.Errorf("Information element %s in registry with EnterpriseID %d has already been registered", ie.Name, ie.EnterpriseId) + } + globalRegistryByID[ie.EnterpriseId][ie.ElementId] = &ie + globalRegistryByName[ie.EnterpriseId][ie.Name] = &ie + return nil +} + +func LoadRegistry() { + globalRegistryByID = make(map[uint32]map[uint16]*entities.InfoElement) + globalRegistryByID[AntreaEnterpriseID] = make(map[uint16]*entities.InfoElement) + globalRegistryByID[IANAEnterpriseID] = make(map[uint16]*entities.InfoElement) + globalRegistryByID[IANAReversedEnterpriseID] = make(map[uint16]*entities.InfoElement) + + globalRegistryByName = make(map[uint32]map[string]*entities.InfoElement) + globalRegistryByName[AntreaEnterpriseID] = make(map[string]*entities.InfoElement) + globalRegistryByName[IANAEnterpriseID] = make(map[string]*entities.InfoElement) + globalRegistryByName[IANAReversedEnterpriseID] = make(map[string]*entities.InfoElement) + + loadIANARegistry() + loadAntreaRegistry() +} + +func GetInfoElementFromID(elementID uint16, enterpriseID uint32) (*entities.InfoElement, error) { + if _, exist := globalRegistryByID[enterpriseID]; !exist { + return nil, fmt.Errorf("Registry with EnterpriseID %d is not supported.", enterpriseID) + } + if element, exist := globalRegistryByID[enterpriseID][elementID]; !exist { + return element, fmt.Errorf("Information element with elementID %d in registry with enterpriseID %d cannot be found.", elementID, enterpriseID) + } else { + return element, nil + } +} + +func GetInfoElement(name string, enterpriseID uint32) (*entities.InfoElement, error) { + if _, exist := globalRegistryByName[enterpriseID]; !exist { + return nil, fmt.Errorf("Registry with EnterpriseID %d is not supported.", enterpriseID) + } + if element, exist := globalRegistryByName[enterpriseID][name]; !exist { + return element, fmt.Errorf("Information element with name %s in registry with enterpriseID %d cannot be found.", name, enterpriseID) + } else { + return element, nil + } +} + +func registerInfoElement(ie entities.InfoElement, enterpriseID uint32) error { + if _, exist := globalRegistryByName[enterpriseID]; !exist { + return fmt.Errorf("Registry with EnterpriseID %d is not supported.", ie.EnterpriseId) + } else if _, exist = globalRegistryByName[enterpriseID][ie.Name]; exist { + 
return fmt.Errorf("Information element %s in registry with EnterpriseID %d has already been registered", ie.Name, ie.EnterpriseId) + } + globalRegistryByID[ie.EnterpriseId][ie.ElementId] = &ie + globalRegistryByName[ie.EnterpriseId][ie.Name] = &ie + + if ie.EnterpriseId == IANAEnterpriseID { // handle reverse information element for IANA registry + reverseIE, err := getIANAReverseInfoElement(ie.Name) + if err == nil { // the information element has reverse information element + globalRegistryByID[IANAReversedEnterpriseID][reverseIE.ElementId] = reverseIE + globalRegistryByName[IANAReversedEnterpriseID][reverseIE.Name] = reverseIE + } + } + return nil +} + +func getIANAReverseInfoElement(name string) (*entities.InfoElement, error) { + var exist bool + var ie *entities.InfoElement + if ie, exist = globalRegistryByName[IANAEnterpriseID][name]; !exist { + err := fmt.Errorf("IANA Registry: There is no information element with name %s", name) + return ie, err + } + if !isReversible(ie.Name) { + err := fmt.Errorf("IANA Registry: The information element %s is not reverse element", name) + return ie, err + } + reverseName := "reverse" + if ie.Name != "" { + reverseName += strings.ToUpper(ie.Name[:1]) + ie.Name[1:] + } + return entities.NewInfoElement(reverseName, ie.ElementId, ie.DataType, IANAReversedEnterpriseID, ie.Len), nil +} + +// Non-reversible Information Elements follow Section 6.1 of RFC5103 +var nonReversibleIEs = map[string]bool{ + "biflowDirection": true, + "collectorIPv4Address": true, + "collectorIPv6Address": true, + "collectorTransportPort": true, + "commonPropertiesId": true, + "exportedMessageTotalCount": true, + "exportedOctetTotalCount": true, + "exportedFlowRecordTotalCount": true, + "exporterIPv4Address": true, + "exporterIPv6Address": true, + "exporterTransportPort": true, + "exportInterface": true, + "exportProtocolVersion": true, + "exportTransportProtocol": true, + "flowId": true, + "flowKeyIndicator": true, + "ignoredPacketTotalCount": true, + "ignoredOctetTotalCount": true, + "notSentFlowTotalCount": true, + "notSentPacketTotalCount": true, + "notSentOctetTotalCount": true, + "observationDomainId": true, + "observedFlowTotalCount": true, + "paddingOctets": true, + "templateId": true, +} + +func isReversible(name string) bool { + return !nonReversibleIEs[name] +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_IANA.go b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_IANA.go new file mode 100644 index 000000000..9cbb38c26 --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_IANA.go @@ -0,0 +1,490 @@ +// Copyright 2020 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package registry + +import ( + "github.com/vmware/go-ipfix/pkg/entities" +) + +// AUTO GENERATED, DO NOT CHANGE + +func loadIANARegistry() { + registerInfoElement(*entities.NewInfoElement("octetDeltaCount", 1, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("packetDeltaCount", 2, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("deltaFlowCount", 3, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("protocolIdentifier", 4, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipClassOfService", 5, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("tcpControlBits", 6, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("sourceTransportPort", 7, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("sourceIPv4Address", 8, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("sourceIPv4PrefixLength", 9, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ingressInterface", 10, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("destinationTransportPort", 11, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("destinationIPv4Address", 12, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("destinationIPv4PrefixLength", 13, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("egressInterface", 14, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("ipNextHopIPv4Address", 15, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("bgpSourceAsNumber", 16, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("bgpDestinationAsNumber", 17, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("bgpNextHopIPv4Address", 18, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("postMCastPacketDeltaCount", 19, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postMCastOctetDeltaCount", 20, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowEndSysUpTime", 21, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("flowStartSysUpTime", 22, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("postOctetDeltaCount", 23, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postPacketDeltaCount", 24, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("minimumIpTotalLength", 25, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("maximumIpTotalLength", 26, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("sourceIPv6Address", 27, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("destinationIPv6Address", 28, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("sourceIPv6PrefixLength", 29, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("destinationIPv6PrefixLength", 30, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("flowLabelIPv6", 31, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("icmpTypeCodeIPv4", 32, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("igmpType", 33, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("samplingInterval", 34, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplingAlgorithm", 35, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("flowActiveTimeout", 36, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("flowIdleTimeout", 37, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("engineType", 38, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("engineId", 39, 1, 0, 1), 0) + 
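+// The generated calls in this file pass entities.NewInfoElement the arguments
+// (name, elementID, dataType, enterpriseID, length). The numeric dataType values
+// correspond to the data-type constants used elsewhere in this diff (inferred by
+// cross-checking usages: e.g., 1 = Unsigned8, 2 = Unsigned16, 4 = Unsigned64,
+// 13 = String, 14 = DateTimeSeconds, 18 = Ipv4Address, 19 = Ipv6Address), and a
+// length of 65535 marks a variable-length element.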
registerInfoElement(*entities.NewInfoElement("exportedOctetTotalCount", 40, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("exportedMessageTotalCount", 41, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("exportedFlowRecordTotalCount", 42, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ipv4RouterSc", 43, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("sourceIPv4Prefix", 44, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("destinationIPv4Prefix", 45, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelType", 46, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelIPv4Address", 47, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplerId", 48, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("samplerMode", 49, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("samplerRandomInterval", 50, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("classId", 51, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("minimumTTL", 52, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("maximumTTL", 53, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("fragmentIdentification", 54, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("postIpClassOfService", 55, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("sourceMacAddress", 56, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("postDestinationMacAddress", 57, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("vlanId", 58, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("postVlanId", 59, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("ipVersion", 60, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("flowDirection", 61, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipNextHopIPv6Address", 62, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("bgpNextHopIPv6Address", 63, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("ipv6ExtensionHeaders", 64, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("Assigned for NetFlow v9 compatibility", 0, 255, 0, 0), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelStackSection", 70, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection2", 71, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection3", 72, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection4", 73, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection5", 74, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection6", 75, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection7", 76, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection8", 77, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection9", 78, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection10", 79, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("destinationMacAddress", 80, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("postSourceMacAddress", 81, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("interfaceName", 82, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("interfaceDescription", 83, 13, 0, 65535), 0) + 
registerInfoElement(*entities.NewInfoElement("samplerName", 84, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("octetTotalCount", 85, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("packetTotalCount", 86, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flagsAndSamplerId", 87, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("fragmentOffset", 88, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("forwardingStatus", 89, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("mplsVpnRouteDistinguisher", 90, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelPrefixLength", 91, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("srcTrafficIndex", 92, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("dstTrafficIndex", 93, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("applicationDescription", 94, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("applicationId", 95, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("applicationName", 96, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("Assigned for NetFlow v9 compatibility", 97, 255, 0, 0), 0) + registerInfoElement(*entities.NewInfoElement("postIpDiffServCodePoint", 98, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("multicastReplicationFactor", 99, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("className", 100, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("classificationEngineId", 101, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("layer2packetSectionOffset", 102, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("layer2packetSectionSize", 103, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("layer2packetSectionData", 104, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("Assigned for NetFlow v9 compatibility", 0, 255, 0, 0), 0) + registerInfoElement(*entities.NewInfoElement("bgpNextAdjacentAsNumber", 128, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("bgpPrevAdjacentAsNumber", 129, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("exporterIPv4Address", 130, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("exporterIPv6Address", 131, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("droppedOctetDeltaCount", 132, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("droppedPacketDeltaCount", 133, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("droppedOctetTotalCount", 134, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("droppedPacketTotalCount", 135, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowEndReason", 136, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("commonPropertiesId", 137, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("observationPointId", 138, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("icmpTypeCodeIPv6", 139, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelIPv6Address", 140, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("lineCardId", 141, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("portId", 142, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("meteringProcessId", 143, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("exportingProcessId", 144, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("templateId", 145, 2, 0, 2), 0) + 
registerInfoElement(*entities.NewInfoElement("wlanChannelId", 146, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("wlanSSID", 147, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("flowId", 148, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("observationDomainId", 149, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("flowStartSeconds", 150, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("flowEndSeconds", 151, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("flowStartMilliseconds", 152, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowEndMilliseconds", 153, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowStartMicroseconds", 154, 16, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowEndMicroseconds", 155, 16, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowStartNanoseconds", 156, 17, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowEndNanoseconds", 157, 17, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowStartDeltaMicroseconds", 158, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("flowEndDeltaMicroseconds", 159, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("systemInitTimeMilliseconds", 160, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowDurationMilliseconds", 161, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("flowDurationMicroseconds", 162, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("observedFlowTotalCount", 163, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ignoredPacketTotalCount", 164, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ignoredOctetTotalCount", 165, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("notSentFlowTotalCount", 166, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("notSentPacketTotalCount", 167, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("notSentOctetTotalCount", 168, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("destinationIPv6Prefix", 169, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("sourceIPv6Prefix", 170, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("postOctetTotalCount", 171, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postPacketTotalCount", 172, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowKeyIndicator", 173, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postMCastPacketTotalCount", 174, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postMCastOctetTotalCount", 175, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("icmpTypeIPv4", 176, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("icmpCodeIPv4", 177, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("icmpTypeIPv6", 178, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("icmpCodeIPv6", 179, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("udpSourcePort", 180, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("udpDestinationPort", 181, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("tcpSourcePort", 182, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("tcpDestinationPort", 183, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("tcpSequenceNumber", 184, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("tcpAcknowledgementNumber", 185, 3, 0, 4), 0) + 
registerInfoElement(*entities.NewInfoElement("tcpWindowSize", 186, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("tcpUrgentPointer", 187, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("tcpHeaderLength", 188, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipHeaderLength", 189, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("totalLengthIPv4", 190, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("payloadLengthIPv6", 191, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("ipTTL", 192, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("nextHeaderIPv6", 193, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("mplsPayloadLength", 194, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("ipDiffServCodePoint", 195, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipPrecedence", 196, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("fragmentFlags", 197, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("octetDeltaSumOfSquares", 198, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("octetTotalSumOfSquares", 199, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelTTL", 200, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackLength", 201, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackDepth", 202, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mplsTopLabelExp", 203, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipPayloadLength", 204, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("udpMessageLength", 205, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("isMulticast", 206, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipv4IHL", 207, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ipv4Options", 208, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("tcpOptions", 209, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("paddingOctets", 210, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("collectorIPv4Address", 211, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("collectorIPv6Address", 212, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("exportInterface", 213, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("exportProtocolVersion", 214, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("exportTransportProtocol", 215, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("collectorTransportPort", 216, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("exporterTransportPort", 217, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("tcpSynTotalCount", 218, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("tcpFinTotalCount", 219, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("tcpRstTotalCount", 220, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("tcpPshTotalCount", 221, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("tcpAckTotalCount", 222, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("tcpUrgTotalCount", 223, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ipTotalLength", 224, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postNATSourceIPv4Address", 225, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("postNATDestinationIPv4Address", 226, 18, 0, 4), 0) + 
registerInfoElement(*entities.NewInfoElement("postNAPTSourceTransportPort", 227, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("postNAPTDestinationTransportPort", 228, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("natOriginatingAddressRealm", 229, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("natEvent", 230, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("initiatorOctets", 231, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("responderOctets", 232, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("firewallEvent", 233, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ingressVRFID", 234, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("egressVRFID", 235, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("VRFname", 236, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("postMplsTopLabelExp", 237, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("tcpWindowScale", 238, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("biflowDirection", 239, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ethernetHeaderLength", 240, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("ethernetPayloadLength", 241, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("ethernetTotalLength", 242, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("dot1qVlanId", 243, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("dot1qPriority", 244, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("dot1qCustomerVlanId", 245, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("dot1qCustomerPriority", 246, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("metroEvcId", 247, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("metroEvcType", 248, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("pseudoWireId", 249, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("pseudoWireType", 250, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("pseudoWireControlWord", 251, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("ingressPhysicalInterface", 252, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("egressPhysicalInterface", 253, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("postDot1qVlanId", 254, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("postDot1qCustomerVlanId", 255, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("ethernetType", 256, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("postIpPrecedence", 257, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("collectionTimeMilliseconds", 258, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("exportSctpStreamId", 259, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("maxExportSeconds", 260, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("maxFlowEndSeconds", 261, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("messageMD5Checksum", 262, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("messageScope", 263, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("minExportSeconds", 264, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("minFlowStartSeconds", 265, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("opaqueOctets", 266, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("sessionScope", 267, 1, 0, 1), 0) + 
registerInfoElement(*entities.NewInfoElement("maxFlowEndMicroseconds", 268, 16, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("maxFlowEndMilliseconds", 269, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("maxFlowEndNanoseconds", 270, 17, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("minFlowStartMicroseconds", 271, 16, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("minFlowStartMilliseconds", 272, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("minFlowStartNanoseconds", 273, 17, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("collectorCertificate", 274, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("exporterCertificate", 275, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("dataRecordsReliability", 276, 11, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("observationPointType", 277, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("newConnectionDeltaCount", 278, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("connectionSumDurationSeconds", 279, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("connectionTransactionId", 280, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postNATSourceIPv6Address", 281, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("postNATDestinationIPv6Address", 282, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("natPoolId", 283, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("natPoolName", 284, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("anonymizationFlags", 285, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("anonymizationTechnique", 286, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("informationElementIndex", 287, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("p2pTechnology", 288, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("tunnelTechnology", 289, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("encryptedTechnology", 290, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("basicList", 291, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("subTemplateList", 292, 21, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("subTemplateMultiList", 293, 22, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpValidityState", 294, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("IPSecSPI", 295, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("greKey", 296, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("natType", 297, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("initiatorPackets", 298, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("responderPackets", 299, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("observationDomainName", 300, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("selectionSequenceId", 301, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("selectorId", 302, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("informationElementId", 303, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("selectorAlgorithm", 304, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("samplingPacketInterval", 305, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplingPacketSpace", 306, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplingTimeInterval", 307, 3, 0, 4), 0) + 
registerInfoElement(*entities.NewInfoElement("samplingTimeSpace", 308, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplingSize", 309, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplingPopulation", 310, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("samplingProbability", 311, 10, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("dataLinkFrameSize", 312, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("ipHeaderPacketSection", 313, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("ipPayloadPacketSection", 314, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("dataLinkFrameSection", 315, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsLabelStackSection", 316, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mplsPayloadPacketSection", 317, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("selectorIdTotalPktsObserved", 318, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("selectorIdTotalPktsSelected", 319, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("absoluteError", 320, 10, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("relativeError", 321, 10, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("observationTimeSeconds", 322, 14, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("observationTimeMilliseconds", 323, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("observationTimeMicroseconds", 324, 16, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("observationTimeNanoseconds", 325, 17, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("digestHashValue", 326, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashIPPayloadOffset", 327, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashIPPayloadSize", 328, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashOutputRangeMin", 329, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashOutputRangeMax", 330, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashSelectedRangeMin", 331, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashSelectedRangeMax", 332, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashDigestOutput", 333, 11, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("hashInitialiserValue", 334, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("selectorName", 335, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("upperCILimit", 336, 10, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("lowerCILimit", 337, 10, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("confidenceLevel", 338, 10, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("informationElementDataType", 339, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("informationElementDescription", 340, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("informationElementName", 341, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("informationElementRangeBegin", 342, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("informationElementRangeEnd", 343, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("informationElementSemantics", 344, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("informationElementUnits", 345, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("privateEnterpriseNumber", 346, 3, 0, 4), 0) + 
registerInfoElement(*entities.NewInfoElement("virtualStationInterfaceId", 347, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("virtualStationInterfaceName", 348, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("virtualStationUUID", 349, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("virtualStationName", 350, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("layer2SegmentId", 351, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("layer2OctetDeltaCount", 352, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("layer2OctetTotalCount", 353, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ingressUnicastPacketTotalCount", 354, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ingressMulticastPacketTotalCount", 355, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ingressBroadcastPacketTotalCount", 356, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("egressUnicastPacketTotalCount", 357, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("egressBroadcastPacketTotalCount", 358, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("monitoringIntervalStartMilliSeconds", 359, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("monitoringIntervalEndMilliSeconds", 360, 15, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("portRangeStart", 361, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("portRangeEnd", 362, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("portRangeStepSize", 363, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("portRangeNumPorts", 364, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("staMacAddress", 365, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("staIPv4Address", 366, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("wtpMacAddress", 367, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("ingressInterfaceType", 368, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("egressInterfaceType", 369, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("rtpSequenceNumber", 370, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("userName", 371, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("applicationCategoryName", 372, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("applicationSubCategoryName", 373, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("applicationGroupName", 374, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("originalFlowsPresent", 375, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("originalFlowsInitiated", 376, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("originalFlowsCompleted", 377, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("distinctCountOfSourceIPAddress", 378, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("distinctCountOfDestinationIPAddress", 379, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("distinctCountOfSourceIPv4Address", 380, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("distinctCountOfDestinationIPv4Address", 381, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("distinctCountOfSourceIPv6Address", 382, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("distinctCountOfDestinationIPv6Address", 383, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("valueDistributionMethod", 384, 1, 0, 1), 0) + 
registerInfoElement(*entities.NewInfoElement("rfc3550JitterMilliseconds", 385, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("rfc3550JitterMicroseconds", 386, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("rfc3550JitterNanoseconds", 387, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("dot1qDEI", 388, 11, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("dot1qCustomerDEI", 389, 11, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("flowSelectorAlgorithm", 390, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("flowSelectedOctetDeltaCount", 391, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowSelectedPacketDeltaCount", 392, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowSelectedFlowDeltaCount", 393, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("selectorIDTotalFlowsObserved", 394, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("selectorIDTotalFlowsSelected", 395, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("samplingFlowInterval", 396, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("samplingFlowSpacing", 397, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowSamplingTimeInterval", 398, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("flowSamplingTimeSpacing", 399, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("hashFlowDomain", 400, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("transportOctetDeltaCount", 401, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("transportPacketDeltaCount", 402, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("originalExporterIPv4Address", 403, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("originalExporterIPv6Address", 404, 19, 0, 16), 0) + registerInfoElement(*entities.NewInfoElement("originalObservationDomainId", 405, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("intermediateProcessId", 406, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("ignoredDataRecordTotalCount", 407, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("dataLinkFrameType", 408, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("sectionOffset", 409, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("sectionExportedOctets", 410, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("dot1qServiceInstanceTag", 411, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("dot1qServiceInstanceId", 412, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("dot1qServiceInstancePriority", 413, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("dot1qCustomerSourceMacAddress", 414, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("dot1qCustomerDestinationMacAddress", 415, 12, 0, 6), 0) + registerInfoElement(*entities.NewInfoElement("", 416, 255, 0, 0), 0) + registerInfoElement(*entities.NewInfoElement("postLayer2OctetDeltaCount", 417, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postMCastLayer2OctetDeltaCount", 418, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("", 419, 255, 0, 0), 0) + registerInfoElement(*entities.NewInfoElement("postLayer2OctetTotalCount", 420, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("postMCastLayer2OctetTotalCount", 421, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("minimumLayer2TotalLength", 422, 4, 0, 8), 0) + 
registerInfoElement(*entities.NewInfoElement("maximumLayer2TotalLength", 423, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("droppedLayer2OctetDeltaCount", 424, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("droppedLayer2OctetTotalCount", 425, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("ignoredLayer2OctetTotalCount", 426, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("notSentLayer2OctetTotalCount", 427, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("layer2OctetDeltaSumOfSquares", 428, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("layer2OctetTotalSumOfSquares", 429, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("layer2FrameDeltaCount", 430, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("layer2FrameTotalCount", 431, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("pseudoWireDestinationIPv4Address", 432, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("ignoredLayer2FrameTotalCount", 433, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueInteger", 434, 7, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueOctetString", 435, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueOID", 436, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueBits", 437, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueIPAddress", 438, 18, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueCounter", 439, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueGauge", 440, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueTimeTicks", 441, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueUnsigned", 442, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueTable", 443, 21, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectValueRow", 444, 21, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectIdentifier", 445, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibSubIdentifier", 446, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("mibIndexIndicator", 447, 4, 0, 8), 0) + registerInfoElement(*entities.NewInfoElement("mibCaptureTimeSemantics", 448, 1, 0, 1), 0) + registerInfoElement(*entities.NewInfoElement("mibContextEngineID", 449, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibContextName", 450, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectName", 451, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectDescription", 452, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibObjectSyntax", 453, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mibModuleName", 454, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mobileIMSI", 455, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("mobileMSISDN", 456, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("httpStatusCode", 457, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("sourceTransportPortsLimit", 458, 2, 0, 2), 0) + registerInfoElement(*entities.NewInfoElement("httpRequestMethod", 459, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("httpRequestHost", 460, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("httpRequestTarget", 461, 13, 
0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("httpMessageVersion", 462, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("natInstanceID", 463, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("internalAddressRealm", 464, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("externalAddressRealm", 465, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("natQuotaExceededEvent", 466, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("natThresholdEvent", 467, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("httpUserAgent", 468, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("httpContentType", 469, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("httpReasonPhrase", 470, 13, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("maxSessionEntries", 471, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("maxBIBEntries", 472, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("maxEntriesPerUser", 473, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("maxSubscribers", 474, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("maxFragmentsPendingReassembly", 475, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("addressPoolHighThreshold", 476, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("addressPoolLowThreshold", 477, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("addressPortMappingHighThreshold", 478, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("addressPortMappingLowThreshold", 479, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("addressPortMappingPerUserHighThreshold", 480, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("globalAddressMappingHighThreshold", 481, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("vpnIdentifier", 482, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpCommunity", 483, 3, 0, 4), 0) + registerInfoElement(*entities.NewInfoElement("bgpSourceCommunityList", 484, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpDestinationCommunityList", 485, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpExtendedCommunity", 486, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpSourceExtendedCommunityList", 487, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpDestinationExtendedCommunityList", 488, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpLargeCommunity", 489, 0, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpSourceLargeCommunityList", 490, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("bgpDestinationLargeCommunityList", 491, 20, 0, 65535), 0) + registerInfoElement(*entities.NewInfoElement("Unassigned", 0, 255, 0, 0), 0) +} diff --git a/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_antrea.csv b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_antrea.csv new file mode 100644 index 000000000..725c436fb --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_antrea.csv @@ -0,0 +1,59 @@ +ElementID,Name,Abstract Data Type,Data Type Semantics,Status,Description,Units,Range,References,Requester,Revision,Date,Enterprise ID, +100,sourcePodNamespace,string,,current,,,,,,,,56506, +101,sourcePodName,string,,current,,,,,,,,56506, +102,destinationPodNamespace,string,,current,,,,,,,,56506, +103,destinationPodName,string,,current,,,,,,,,56506, 
+104,sourceNodeName,string,,current,,,,,,,,56506, +105,destinationNodeName,string,,current,,,,,,,,56506, +106,destinationClusterIPv4,ipv4Address,,current,,,,,,,,56506, +107,destinationClusterIPv6,ipv6Address,,current,,,,,,,,56506, +108,destinationServicePort,unsigned16,,current,,,,,,,,56506, +109,destinationServicePortName,string,,current,,,,,,,,56506, +110,ingressNetworkPolicyName,string,,current,,,,,,,,56506, +111,ingressNetworkPolicyNamespace,string,,current,,,,,,,,56506, +112,egressNetworkPolicyName,string,,current,,,,,,,,56506, +113,egressNetworkPolicyNamespace,string,,current,,,,,,,,56506, +114,ingressNetworkPolicyUID,string,,current,,,,,,,,56506, +115,ingressNetworkPolicyType,unsigned8,,current,Supported Actions(uint8 value): NetworkPolicyTypeK8sNetworkPolicy(1) NetworkPolicyTypeAntreaNetworkPolicy(2) NetworkPolicyTypeAntreaClusterNetworkPolicy(3),,,,,,,56506, +116,ingressNetworkPolicyRulePriority,signed32,,current,Only applicable to Antrea Network Policy and Antrea Cluster Network Policy,,,,,,,56506, +117,egressNetworkPolicyUID,string,,current,,,,,,,,56506, +118,egressNetworkPolicyType,unsigned8,,current,Supported Actions(uint8 value): NetworkPolicyTypeK8sNetworkPolicy(1) NetworkPolicyTypeAntreaNetworkPolicy(2) NetworkPolicyTypeAntreaClusterNetworkPolicy(3),,,,,,,56506, +119,egressNetworkPolicyRulePriority,signed32,,current,Only applicable to Antrea Network Policy and Antrea Cluster Network Policy,,,,,,,56506, +120,packetTotalCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +121,octetTotalCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +122,packetDeltaCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +123,octetDeltaCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +124,reversePacketTotalCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +125,reverseOctetTotalCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +126,reversePacketDeltaCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +127,reverseOctetDeltaCountFromSourceNode,unsigned64,,current,,,,,,,,56506, +128,packetTotalCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +129,octetTotalCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +130,packetDeltaCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +131,octetDeltaCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +132,reversePacketTotalCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +133,reverseOctetTotalCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +134,reversePacketDeltaCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +135,reverseOctetDeltaCountFromDestinationNode,unsigned64,,current,,,,,,,,56506, +136,tcpState,string,,current,,,,,,,,56506, +137,flowType,unsigned8,,current,The type of flow is based on the location of the source and destination Pods. 
Supported Types(uint8 value): FlowTypeIntraNode(1) FlowTypeInterNode(2) FlowTypeToExternal(3) and FlowTypeFromExternal(4),,,,,,,56506, +138,tcpStatePrevList,string,,current,,,,,,,,56506, +139,ingressNetworkPolicyRuleAction,unsigned8,,current,Supported Actions(uint8 value): NetworkPolicyRuleActionNoAction(0) NetworkPolicyRuleActionAllow(1) NetworkPolicyRuleActionDrop(2) NetworkPolicyRuleActionReject(3),,,,,,,56506, +140,egressNetworkPolicyRuleAction,unsigned8,,current,Supported Actions(uint8 value): NetworkPolicyRuleActionAllow(1) NetworkPolicyRuleActionDrop(2) NetworkPolicyRuleActionReject(3),,,,,,,56506, +141,ingressNetworkPolicyRuleName,string,,current,,,,,,,,56506, +142,egressNetworkPolicyRuleName,string,,current,,,,,,,,56506, +143,sourcePodLabels,string,,current,,,,,,,,56506, +144,destinationPodLabels,string,,current,,,,,,,,56506, +145,throughput,unsigned64,,current,,,,,,,,56506, +146,reverseThroughput,unsigned64,,current,,,,,,,,56506, +147,throughputFromSourceNode,unsigned64,,current,,,,,,,,56506, +148,throughputFromDestinationNode,unsigned64,,current,,,,,,,,56506, +149,reverseThroughputFromSourceNode,unsigned64,,current,,,,,,,,56506, +150,reverseThroughputFromDestinationNode,unsigned64,,current,,,,,,,,56506, +151,flowEndSecondsFromSourceNode,dateTimeSeconds,,current,,,,,,,,56506, +152,flowEndSecondsFromDestinationNode,dateTimeSeconds,,current,,,,,,,,56506, +153,egressName,string,,current,,,,,,,,56506, +154,egressIP,string,,current,,,,,,,,56506, +155,appProtocolName,string,,current,,,,,,,,56506, +156,httpVals,string,,current,,,,,,,,56506, +157,egressNodeName,string,,current,,,,,,,,56506, diff --git a/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_antrea.go b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_antrea.go new file mode 100644 index 000000000..b180427dc --- /dev/null +++ b/vendor/github.com/vmware/go-ipfix/pkg/registry/registry_antrea.go @@ -0,0 +1,82 @@ +// Copyright 2021 VMware, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package registry + +import ( + "github.com/vmware/go-ipfix/pkg/entities" +) + +// AUTO GENERATED, DO NOT CHANGE + +func loadAntreaRegistry() { + registerInfoElement(*entities.NewInfoElement("sourcePodNamespace", 100, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("sourcePodName", 101, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("destinationPodNamespace", 102, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("destinationPodName", 103, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("sourceNodeName", 104, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("destinationNodeName", 105, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("destinationClusterIPv4", 106, 18, 56506, 4), 56506) + registerInfoElement(*entities.NewInfoElement("destinationClusterIPv6", 107, 19, 56506, 16), 56506) + registerInfoElement(*entities.NewInfoElement("destinationServicePort", 108, 2, 56506, 2), 56506) + registerInfoElement(*entities.NewInfoElement("destinationServicePortName", 109, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyName", 110, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyNamespace", 111, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyName", 112, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyNamespace", 113, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyUID", 114, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyType", 115, 1, 56506, 1), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyRulePriority", 116, 7, 56506, 4), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyUID", 117, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyType", 118, 1, 56506, 1), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyRulePriority", 119, 7, 56506, 4), 56506) + registerInfoElement(*entities.NewInfoElement("packetTotalCountFromSourceNode", 120, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("octetTotalCountFromSourceNode", 121, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("packetDeltaCountFromSourceNode", 122, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("octetDeltaCountFromSourceNode", 123, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reversePacketTotalCountFromSourceNode", 124, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseOctetTotalCountFromSourceNode", 125, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reversePacketDeltaCountFromSourceNode", 126, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseOctetDeltaCountFromSourceNode", 127, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("packetTotalCountFromDestinationNode", 128, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("octetTotalCountFromDestinationNode", 129, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("packetDeltaCountFromDestinationNode", 130, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("octetDeltaCountFromDestinationNode", 131, 4, 56506, 8), 56506) + 
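+ // Note: 56506 is Antrea's IANA-assigned Private Enterprise Number. It is
+ // passed both to entities.NewInfoElement, as the element's enterprise ID,
+ // and to registerInfoElement, which files the element under that
+ // enterprise's registry. A minimal consumer-side lookup sketch, assuming
+ // this package's public LoadRegistry/GetInfoElement API:
+ //
+ //   registry.LoadRegistry()
+ //   ie, err := registry.GetInfoElement("sourcePodName", registry.AntreaEnterpriseID)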
registerInfoElement(*entities.NewInfoElement("reversePacketTotalCountFromDestinationNode", 132, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseOctetTotalCountFromDestinationNode", 133, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reversePacketDeltaCountFromDestinationNode", 134, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseOctetDeltaCountFromDestinationNode", 135, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("tcpState", 136, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("flowType", 137, 1, 56506, 1), 56506) + registerInfoElement(*entities.NewInfoElement("tcpStatePrevList", 138, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyRuleAction", 139, 1, 56506, 1), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyRuleAction", 140, 1, 56506, 1), 56506) + registerInfoElement(*entities.NewInfoElement("ingressNetworkPolicyRuleName", 141, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("egressNetworkPolicyRuleName", 142, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("sourcePodLabels", 143, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("destinationPodLabels", 144, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("throughput", 145, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseThroughput", 146, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("throughputFromSourceNode", 147, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("throughputFromDestinationNode", 148, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseThroughputFromSourceNode", 149, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("reverseThroughputFromDestinationNode", 150, 4, 56506, 8), 56506) + registerInfoElement(*entities.NewInfoElement("flowEndSecondsFromSourceNode", 151, 14, 56506, 4), 56506) + registerInfoElement(*entities.NewInfoElement("flowEndSecondsFromDestinationNode", 152, 14, 56506, 4), 56506) + registerInfoElement(*entities.NewInfoElement("egressName", 153, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("egressIP", 154, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("appProtocolName", 155, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("httpVals", 156, 13, 56506, 65535), 56506) + registerInfoElement(*entities.NewInfoElement("egressNodeName", 157, 13, 56506, 65535), 56506) +} diff --git a/vendor/github.com/xdg-go/pbkdf2/.gitignore b/vendor/github.com/xdg-go/pbkdf2/.gitignore new file mode 100644 index 000000000..f1c181ec9 --- /dev/null +++ b/vendor/github.com/xdg-go/pbkdf2/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/xdg-go/pbkdf2/LICENSE b/vendor/github.com/xdg-go/pbkdf2/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/vendor/github.com/xdg-go/pbkdf2/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/xdg-go/pbkdf2/README.md b/vendor/github.com/xdg-go/pbkdf2/README.md new file mode 100644 index 000000000..d2824e456 --- /dev/null +++ b/vendor/github.com/xdg-go/pbkdf2/README.md @@ -0,0 +1,17 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/pbkdf2.svg)](https://pkg.go.dev/github.com/xdg-go/pbkdf2) +[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/pbkdf2)](https://goreportcard.com/report/github.com/xdg-go/pbkdf2) +[![Github Actions](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/pbkdf2/actions/workflows/test.yml) + +# pbkdf2 – Go implementation of PBKDF2 + +## Description + +Package pbkdf2 provides password-based key derivation based on +[RFC 8018](https://tools.ietf.org/html/rfc8018). + +## Copyright and License + +Copyright 2021 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). 
You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go b/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go new file mode 100644 index 000000000..029945ca4 --- /dev/null +++ b/vendor/github.com/xdg-go/pbkdf2/pbkdf2.go @@ -0,0 +1,76 @@ +// Copyright 2021 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package pbkdf2 implements password-based key derivation using the PBKDF2 +// algorithm described in RFC 2898 and RFC 8018. +// +// It provides a drop-in replacement for `golang.org/x/crypto/pbkdf2`, with +// the following benefits: +// +// - Released as a module with semantic versioning +// +// - Does not pull in dependencies for unrelated `x/crypto/*` packages +// +// - Supports Go 1.9+ +// +// See https://tools.ietf.org/html/rfc8018#section-4 for security considerations +// in the selection of a salt and iteration count. +package pbkdf2 + +import ( + "crypto/hmac" + "encoding/binary" + "hash" +) + +// Key generates a derived key from a password using the PBKDF2 algorithm. The +// inputs include salt bytes, the iteration count, desired key length, and a +// constructor for a hashing function. For example, for a 32-byte key using +// SHA-256: +// +// key := Key([]byte("trustNo1"), salt, 10000, 32, sha256.New) +func Key(password, salt []byte, iterCount, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hLen := prf.Size() + numBlocks := keyLen / hLen + // Get an extra block if keyLen is not an even number of hLen blocks. + if keyLen%hLen > 0 { + numBlocks++ + } + + Ti := make([]byte, hLen) + Uj := make([]byte, hLen) + dk := make([]byte, 0, hLen*numBlocks) + buf := make([]byte, 4) + + for i := uint32(1); i <= uint32(numBlocks); i++ { + // Initialize Uj for j == 1 from salt and block index. + // Initialize Ti = U1. + binary.BigEndian.PutUint32(buf, i) + prf.Reset() + prf.Write(salt) + prf.Write(buf) + Uj = Uj[:0] + Uj = prf.Sum(Uj) + + // Ti = U1 ^ U2 ^ ... ^ Ux + copy(Ti, Uj) + for j := 2; j <= iterCount; j++ { + prf.Reset() + prf.Write(Uj) + Uj = Uj[:0] + Uj = prf.Sum(Uj) + for k := range Uj { + Ti[k] ^= Uj[k] + } + } + + // DK = concat(T1, T2, ... Tn) + dk = append(dk, Ti...) + } + + return dk[0:keyLen] +} diff --git a/vendor/github.com/xdg-go/scram/.gitignore b/vendor/github.com/xdg-go/scram/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/xdg-go/scram/CHANGELOG.md b/vendor/github.com/xdg-go/scram/CHANGELOG.md new file mode 100644 index 000000000..b833be5e2 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/CHANGELOG.md @@ -0,0 +1,26 @@ +# CHANGELOG + +## v1.1.2 - 2022-12-07 + +- Bump stringprep dependency to v1.0.4 for upstream CVE fix. + +## v1.1.1 - 2022-03-03 + +- Bump stringprep dependency to v1.0.3 for upstream CVE fix. + +## v1.1.0 - 2022-01-16 + +- Add SHA-512 hash generator function for convenience. + +## v1.0.2 - 2021-03-28 + +- Switch PBKDF2 dependency to github.com/xdg-go/pbkdf2 to + minimize transitive dependencies and support Go 1.9+. + +## v1.0.1 - 2021-03-27 + +- Bump stringprep dependency to v1.0.2 for Go 1.11 support. 
+ +## v1.0.0 - 2021-03-27 + +- First release as a Go module diff --git a/vendor/github.com/xdg-go/scram/LICENSE b/vendor/github.com/xdg-go/scram/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/xdg-go/scram/README.md b/vendor/github.com/xdg-go/scram/README.md new file mode 100644 index 000000000..3a46f5ceb --- /dev/null +++ b/vendor/github.com/xdg-go/scram/README.md @@ -0,0 +1,72 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/scram.svg)](https://pkg.go.dev/github.com/xdg-go/scram) +[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/scram)](https://goreportcard.com/report/github.com/xdg-go/scram) +[![Github Actions](https://github.com/xdg-go/scram/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/scram/actions/workflows/test.yml) + +# scram – Go implementation of RFC-5802 + +## Description + +Package scram provides client and server implementations of the Salted +Challenge Response Authentication Mechanism (SCRAM) described in +[RFC-5802](https://tools.ietf.org/html/rfc5802) and +[RFC-7677](https://tools.ietf.org/html/rfc7677). + +It includes both client and server side support. + +Channel binding and extensions are not (yet) supported. + +## Examples + +### Client side + + package main + + import "github.com/xdg-go/scram" + + func main() { + // Get Client with username, password and (optional) authorization ID. + clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "") + if err != nil { + panic(err) + } + + // Prepare the authentication conversation. Use the empty string as the + // initial server message argument to start the conversation. + conv := clientSHA1.NewConversation() + var serverMsg string + + // Get the first message, send it and read the response. + firstMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(firstMsg) + + // Get the second message, send it, and read the response. + secondMsg, err := conv.Step(serverMsg) + if err != nil { + panic(err) + } + serverMsg = sendClientMsg(secondMsg) + + // Validate the server's final message. We have no further message to + // send so ignore that return value. + _, err = conv.Step(serverMsg) + if err != nil { + panic(err) + } + + return + } + + func sendClientMsg(s string) string { + // A real implementation would send this to a server and read a reply. + return "" + } + +## Copyright and License + +Copyright 2018 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg-go/scram/client.go b/vendor/github.com/xdg-go/scram/client.go new file mode 100644 index 000000000..5b53021b3 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/client.go @@ -0,0 +1,130 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "sync" + + "github.com/xdg-go/pbkdf2" +) + +// Client implements the client side of SCRAM authentication. It holds +// configuration values needed to initialize new client-side conversations for +// a specific username, password and authorization ID tuple. Client caches +// the computationally-expensive parts of a SCRAM conversation as described in +// RFC-5802. If repeated authentication conversations may be required for a +// user (e.g. disconnect/reconnect), the user's Client should be preserved. 
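A minimal sketch of that reuse pattern, assuming the loop body stands in for real connection handling (not part of the package):

```go
package main

import (
	"log"

	"github.com/xdg-go/scram"
)

func main() {
	// One long-lived Client: derived keys are cached per (salt, iteration
	// count) pair, so reconnects skip the expensive PBKDF2 computation.
	client, err := scram.SHA256.NewClient("mulder", "trustno1", "")
	if err != nil {
		log.Fatal(err)
	}
	// Each (re)connect gets a fresh conversation from the same Client.
	for attempt := 0; attempt < 3; attempt++ {
		conv := client.NewConversation()
		_ = conv // drive conv.Step(...) against the server transport here
	}
}
```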
+//
+// For security reasons, Clients have a default minimum PBKDF2 iteration count
+// of 4096. If a server requests a smaller iteration count, an authentication
+// conversation will error.
+//
+// A Client can also be used by a server application to construct the hashed
+// authentication values to be stored for a new user. See GetStoredCredentials()
+// for more.
+type Client struct {
+	sync.RWMutex
+	username string
+	password string
+	authzID  string
+	minIters int
+	nonceGen NonceGeneratorFcn
+	hashGen  HashGeneratorFcn
+	cache    map[KeyFactors]derivedKeys
+}
+
+func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client {
+	return &Client{
+		username: username,
+		password: password,
+		authzID:  authzID,
+		minIters: 4096,
+		nonceGen: defaultNonceGenerator,
+		hashGen:  fcn,
+		cache:    make(map[KeyFactors]derivedKeys),
+	}
+}
+
+// WithMinIterations changes the minimum required PBKDF2 iteration count.
+func (c *Client) WithMinIterations(n int) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.minIters = n
+	return c
+}
+
+// WithNonceGenerator replaces the default nonce generator (base64 encoding of
+// 24 bytes from crypto/rand) with a custom generator. This is provided for
+// testing or for users with custom nonce requirements.
+func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.nonceGen = ng
+	return c
+}
+
+// NewConversation constructs a client-side authentication conversation.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (c *Client) NewConversation() *ClientConversation {
+	c.RLock()
+	defer c.RUnlock()
+	return &ClientConversation{
+		client:   c,
+		nonceGen: c.nonceGen,
+		hashGen:  c.hashGen,
+		minIters: c.minIters,
+	}
+}
+
+func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys {
+	dk, ok := c.getCache(kf)
+	if !ok {
+		dk = c.computeKeys(kf)
+		c.setCache(kf, dk)
+	}
+	return dk
+}
+
+// GetStoredCredentials takes a salt and iteration count structure and
+// provides the values that must be stored by a server to authenticate a
+// user. These values are what the Server credential lookup function must
+// return for a given username.
+func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials {
+	dk := c.getDerivedKeys(kf)
+	return StoredCredentials{
+		KeyFactors: kf,
+		StoredKey:  dk.StoredKey,
+		ServerKey:  dk.ServerKey,
+	}
+}
+
+func (c *Client) computeKeys(kf KeyFactors) derivedKeys {
+	h := c.hashGen()
+	saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen)
+	clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key"))
+
+	return derivedKeys{
+		ClientKey: clientKey,
+		StoredKey: computeHash(c.hashGen, clientKey),
+		ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")),
+	}
+}
+
+func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) {
+	c.RLock()
+	defer c.RUnlock()
+	dk, ok := c.cache[kf]
+	return dk, ok
+}
+
+func (c *Client) setCache(kf KeyFactors, dk derivedKeys) {
+	c.Lock()
+	defer c.Unlock()
+	c.cache[kf] = dk
+	return
+}
diff --git a/vendor/github.com/xdg-go/scram/client_conv.go b/vendor/github.com/xdg-go/scram/client_conv.go
new file mode 100644
index 000000000..834056889
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/client_conv.go
@@ -0,0 +1,149 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" + "strings" +) + +type clientState int + +const ( + clientStarting clientState = iota + clientFirst + clientFinal + clientDone +) + +// ClientConversation implements the client-side of an authentication +// conversation with a server. A new conversation must be created for +// each authentication attempt. +type ClientConversation struct { + client *Client + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + minIters int + state clientState + valid bool + gs2 string + nonce string + c1b string + serveSig []byte +} + +// Step takes a string provided from a server (or just an empty string for the +// very first conversation step) and attempts to move the authentication +// conversation forward. It returns a string to be sent to the server or an +// error if the server message is invalid. Calling Step after a conversation +// completes is also an error. +func (cc *ClientConversation) Step(challenge string) (response string, err error) { + switch cc.state { + case clientStarting: + cc.state = clientFirst + response, err = cc.firstMsg() + case clientFirst: + cc.state = clientFinal + response, err = cc.finalMsg(challenge) + case clientFinal: + cc.state = clientDone + response, err = cc.validateServer(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (cc *ClientConversation) Done() bool { + return cc.state == clientDone +} + +// Valid returns true if the conversation successfully authenticated with the +// server, including counter-validation that the server actually has the +// user's stored credentials. 
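Step, Done, and Valid combine into a simple driver loop. A sketch follows, with sendToServer as an assumed stand-in for real network I/O:

```go
package scramclient

import (
	"errors"

	"github.com/xdg-go/scram"
)

// sendToServer stands in for a real transport: it sends one SCRAM message
// and returns the server's reply. (Hypothetical; not part of the package.)
func sendToServer(msg string) string { return "" }

// authenticate drives a client-side conversation to completion.
func authenticate(client *scram.Client) error {
	conv := client.NewConversation()
	var serverMsg string
	for !conv.Done() {
		msg, err := conv.Step(serverMsg)
		if err != nil {
			return err
		}
		if msg != "" { // the final Step returns an empty message
			serverMsg = sendToServer(msg)
		}
	}
	if !conv.Valid() {
		return errors.New("server could not prove it holds the stored credentials")
	}
	return nil
}
```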
+func (cc *ClientConversation) Valid() bool { + return cc.valid +} + +func (cc *ClientConversation) firstMsg() (string, error) { + // Values are cached for use in final message parameters + cc.gs2 = cc.gs2Header() + cc.nonce = cc.client.nonceGen() + cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce) + + return cc.gs2 + cc.c1b, nil +} + +func (cc *ClientConversation) finalMsg(s1 string) (string, error) { + msg, err := parseServerFirst(s1) + if err != nil { + return "", err + } + + // Check nonce prefix and update + if !strings.HasPrefix(msg.nonce, cc.nonce) { + return "", errors.New("server nonce did not extend client nonce") + } + cc.nonce = msg.nonce + + // Check iteration count vs minimum + if msg.iters < cc.minIters { + return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters) + } + + // Create client-final-message-without-proof + c2wop := fmt.Sprintf( + "c=%s,r=%s", + base64.StdEncoding.EncodeToString([]byte(cc.gs2)), + cc.nonce, + ) + + // Create auth message + authMsg := cc.c1b + "," + s1 + "," + c2wop + + // Get derived keys from client cache + dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters}) + + // Create proof as clientkey XOR clientsignature + clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg)) + clientProof := xorBytes(dk.ClientKey, clientSignature) + proof := base64.StdEncoding.EncodeToString(clientProof) + + // Cache ServerSignature for later validation + cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg)) + + return fmt.Sprintf("%s,p=%s", c2wop, proof), nil +} + +func (cc *ClientConversation) validateServer(s2 string) (string, error) { + msg, err := parseServerFinal(s2) + if err != nil { + return "", err + } + + if len(msg.err) > 0 { + return "", fmt.Errorf("server error: %s", msg.err) + } + + if !hmac.Equal(msg.verifier, cc.serveSig) { + return "", errors.New("server validation failed") + } + + cc.valid = true + return "", nil +} + +func (cc *ClientConversation) gs2Header() string { + if cc.client.authzID == "" { + return "n,," + } + return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID)) +} diff --git a/vendor/github.com/xdg-go/scram/common.go b/vendor/github.com/xdg-go/scram/common.go new file mode 100644 index 000000000..cb705cb74 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/common.go @@ -0,0 +1,97 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "strings" +) + +// NonceGeneratorFcn defines a function that returns a string of high-quality +// random printable ASCII characters EXCLUDING the comma (',') character. The +// default nonce generator provides Base64 encoding of 24 bytes from +// crypto/rand. +type NonceGeneratorFcn func() string + +// derivedKeys collects the three cryptographically derived values +// into one struct for caching. +type derivedKeys struct { + ClientKey []byte + StoredKey []byte + ServerKey []byte +} + +// KeyFactors represent the two server-provided factors needed to compute +// client credentials for authentication. Salt is decoded bytes (i.e. not +// base64), but in string form so that KeyFactors can be used as a map key for +// cached credentials. 
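A sketch of that provisioning flow, where the 16-byte salt and the 10000 iteration count are illustrative choices rather than package requirements:

```go
package provision

import (
	"crypto/rand"

	"github.com/xdg-go/scram"
)

// NewUserCredentials derives the values a server should persist for a new
// user, using a Client purely as a key-derivation helper.
func NewUserCredentials(username, password string) (scram.StoredCredentials, error) {
	client, err := scram.SHA256.NewClient(username, password, "")
	if err != nil {
		return scram.StoredCredentials{}, err
	}
	salt := make([]byte, 16) // illustrative salt length
	if _, err := rand.Read(salt); err != nil {
		return scram.StoredCredentials{}, err
	}
	kf := scram.KeyFactors{Salt: string(salt), Iters: 10000} // illustrative count
	return client.GetStoredCredentials(kf), nil
}
```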
+type KeyFactors struct { + Salt string + Iters int +} + +// StoredCredentials are the values that a server must store for a given +// username to allow authentication. They include the salt and iteration +// count, plus the derived values to authenticate a client and for the server +// to authenticate itself back to the client. +// +// NOTE: these are specific to a given hash function. To allow a user to +// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of +// StoredCredentials must be created and stored, one for each hash function. +type StoredCredentials struct { + KeyFactors + StoredKey []byte + ServerKey []byte +} + +// CredentialLookup is a callback to provide StoredCredentials for a given +// username. This is used to configure Server objects. +// +// NOTE: these are specific to a given hash function. The callback provided +// to a Server with a given hash function must provide the corresponding +// StoredCredentials. +type CredentialLookup func(string) (StoredCredentials, error) + +func defaultNonceGenerator() string { + raw := make([]byte, 24) + nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw))) + rand.Read(raw) + base64.StdEncoding.Encode(nonce, raw) + return string(nonce) +} + +func encodeName(s string) string { + return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1) +} + +func decodeName(s string) (string, error) { + // TODO Check for = not followed by 2C or 3D + return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil +} + +func computeHash(hg HashGeneratorFcn, b []byte) []byte { + h := hg() + h.Write(b) + return h.Sum(nil) +} + +func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte { + mac := hmac.New(hg, key) + mac.Write(data) + return mac.Sum(nil) +} + +func xorBytes(a, b []byte) []byte { + // TODO check a & b are same length, or just xor to smallest + xor := make([]byte, len(a)) + for i := range a { + xor[i] = a[i] ^ b[i] + } + return xor +} diff --git a/vendor/github.com/xdg-go/scram/doc.go b/vendor/github.com/xdg-go/scram/doc.go new file mode 100644 index 000000000..82e8aeed8 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/doc.go @@ -0,0 +1,26 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package scram provides client and server implementations of the Salted +// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802 +// and RFC-7677. +// +// Usage +// +// The scram package provides variables, `SHA1`, `SHA256`, and `SHA512`, that +// are used to construct Client or Server objects. +// +// clientSHA1, err := scram.SHA1.NewClient(username, password, authID) +// clientSHA256, err := scram.SHA256.NewClient(username, password, authID) +// clientSHA512, err := scram.SHA512.NewClient(username, password, authID) +// +// serverSHA1, err := scram.SHA1.NewServer(credentialLookupFcn) +// serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn) +// serverSHA512, err := scram.SHA512.NewServer(credentialLookupFcn) +// +// These objects are used to construct ClientConversation or +// ServerConversation objects that are used to carry out authentication. 
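For the server side, a CredentialLookup callback is needed; here is a minimal in-memory sketch, assuming the store is filled by a provisioning step such as the one above:

```go
package scramserver

import (
	"fmt"

	"github.com/xdg-go/scram"
)

// store maps usernames to credentials saved at provisioning time; filling it
// is left out of this sketch.
var store = map[string]scram.StoredCredentials{}

// lookup satisfies scram.CredentialLookup.
func lookup(username string) (scram.StoredCredentials, error) {
	creds, ok := store[username]
	if !ok {
		return scram.StoredCredentials{}, fmt.Errorf("unknown user %q", username)
	}
	return creds, nil
}

func newSHA256Server() (*scram.Server, error) {
	return scram.SHA256.NewServer(lookup)
}
```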
+package scram
diff --git a/vendor/github.com/xdg-go/scram/parse.go b/vendor/github.com/xdg-go/scram/parse.go
new file mode 100644
index 000000000..722f6043d
--- /dev/null
+++ b/vendor/github.com/xdg-go/scram/parse.go
@@ -0,0 +1,205 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type c1Msg struct {
+	gs2Header string
+	authzID   string
+	username  string
+	nonce     string
+	c1b       string
+}
+
+type c2Msg struct {
+	cbind []byte
+	nonce string
+	proof []byte
+	c2wop string
+}
+
+type s1Msg struct {
+	nonce string
+	salt  []byte
+	iters int
+}
+
+type s2Msg struct {
+	verifier []byte
+	err      string
+}
+
+func parseField(s, k string) (string, error) {
+	t := strings.TrimPrefix(s, k+"=")
+	if t == s {
+		return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k)
+	}
+	return t, nil
+}
+
+func parseGS2Flag(s string) (string, error) {
+	if s[0] == 'p' {
+		return "", fmt.Errorf("channel binding requested but not supported")
+	}
+
+	if s == "n" || s == "y" {
+		return s, nil
+	}
+
+	return "", fmt.Errorf("error parsing '%s' for gs2 flag", s)
+}
+
+func parseFieldBase64(s, k string) ([]byte, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return nil, err
+	}
+
+	dec, err := base64.StdEncoding.DecodeString(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dec, nil
+}
+
+func parseFieldInt(s, k string) (int, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return 0, err
+	}
+
+	num, err := strconv.Atoi(raw)
+	if err != nil {
+		return 0, fmt.Errorf("error parsing field '%s': %v", k, err)
+	}
+
+	return num, nil
+}
+
+func parseClientFirst(c1 string) (msg c1Msg, err error) {
+
+	fields := strings.Split(c1, ",")
+	if len(fields) < 4 {
+		err = errors.New("not enough fields in first client message")
+		return
+	}
+
+	gs2flag, err := parseGS2Flag(fields[0])
+	if err != nil {
+		return
+	}
+
+	// 'a' field is optional
+	if len(fields[1]) > 0 {
+		msg.authzID, err = parseField(fields[1], "a")
+		if err != nil {
+			return
+		}
+	}
+
+	// Recombine and save the gs2 header
+	msg.gs2Header = gs2flag + "," + msg.authzID + ","
+
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(fields[2], "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	msg.username, err = parseField(fields[2], "n")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[3], "r")
+	if err != nil {
+		return
+	}
+
+	msg.c1b = strings.Join(fields[2:], ",")
+
+	return
+}
+
+func parseClientFinal(c2 string) (msg c2Msg, err error) {
+	fields := strings.Split(c2, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in final client message")
+		return
+	}
+
+	msg.cbind, err = parseFieldBase64(fields[0], "c")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[1], "r")
+	if err != nil {
+		return
+	}
+
+	// Extension fields may come between nonce and proof, so we
+	// grab the *last* fields as proof.
+	msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p")
+	if err != nil {
+		return
+	}
+
+	msg.c2wop = c2[:strings.LastIndex(c2, ",")]
+
+	return
+}
+
+func parseServerFirst(s1 string) (msg s1Msg, err error) {
+
+	// Check for unsupported extensions field "m".
+ if strings.HasPrefix(s1, "m=") { + err = errors.New("SCRAM message extensions are not supported") + return + } + + fields := strings.Split(s1, ",") + if len(fields) < 3 { + err = errors.New("not enough fields in first server message") + return + } + + msg.nonce, err = parseField(fields[0], "r") + if err != nil { + return + } + + msg.salt, err = parseFieldBase64(fields[1], "s") + if err != nil { + return + } + + msg.iters, err = parseFieldInt(fields[2], "i") + + return +} + +func parseServerFinal(s2 string) (msg s2Msg, err error) { + fields := strings.Split(s2, ",") + + msg.verifier, err = parseFieldBase64(fields[0], "v") + if err == nil { + return + } + + msg.err, err = parseField(fields[0], "e") + + return +} diff --git a/vendor/github.com/xdg-go/scram/scram.go b/vendor/github.com/xdg-go/scram/scram.go new file mode 100644 index 000000000..a7b366027 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/scram.go @@ -0,0 +1,71 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + "github.com/xdg-go/stringprep" +) + +// HashGeneratorFcn abstracts a factory function that returns a hash.Hash +// value to be used for SCRAM operations. Generally, one would use the +// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most +// common forms of SCRAM. +type HashGeneratorFcn func() hash.Hash + +// SHA1 is a function that returns a crypto/sha1 hasher and should be used to +// create Client objects configured for SHA-1 hashing. +var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() } + +// SHA256 is a function that returns a crypto/sha256 hasher and should be used +// to create Client objects configured for SHA-256 hashing. +var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() } + +// SHA512 is a function that returns a crypto/sha512 hasher and should be used +// to create Client objects configured for SHA-512 hashing. +var SHA512 HashGeneratorFcn = func() hash.Hash { return sha512.New() } + +// NewClient constructs a SCRAM client component based on a given hash.Hash +// factory receiver. This constructor will normalize the username, password +// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If +// SASLprep fails, the method returns an error. +func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) { + var userprep, passprep, authprep string + var err error + + if userprep, err = stringprep.SASLprep.Prepare(username); err != nil { + return nil, fmt.Errorf("Error SASLprepping username '%s': %v", username, err) + } + if passprep, err = stringprep.SASLprep.Prepare(password); err != nil { + return nil, fmt.Errorf("Error SASLprepping password '%s': %v", password, err) + } + if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil { + return nil, fmt.Errorf("Error SASLprepping authzID '%s': %v", authzID, err) + } + + return newClient(userprep, passprep, authprep, f), nil +} + +// NewClientUnprepped acts like NewClient, except none of the arguments will +// be normalized via SASLprep. This is not generally recommended, but is +// provided for users that may have custom normalization needs. 
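Because the constructors hang off HashGeneratorFcn, another hash can be plugged in the same way. A sketch with SHA-224, an arbitrary choice that only works if the peer speaks SCRAM over the same hash:

```go
package main

import (
	"crypto/sha256"
	"hash"
	"log"

	"github.com/xdg-go/scram"
)

// SHA224 mirrors the package's SHA1/SHA256/SHA512 variables; the peer must
// support SCRAM over the same hash for this to be useful.
var SHA224 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New224() }

func main() {
	client, err := SHA224.NewClient("mulder", "trustno1", "")
	if err != nil {
		log.Fatal(err)
	}
	_ = client.NewConversation()
}
```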
+func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) { + return newClient(username, password, authzID, f), nil +} + +// NewServer constructs a SCRAM server component based on a given hash.Hash +// factory receiver. To be maximally generic, it uses dependency injection to +// handle credential lookup, which is the process of turning a username string +// into a struct with stored credentials for authentication. +func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) { + return newServer(cl, f) +} diff --git a/vendor/github.com/xdg-go/scram/server.go b/vendor/github.com/xdg-go/scram/server.go new file mode 100644 index 000000000..b119b3615 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/server.go @@ -0,0 +1,50 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import "sync" + +// Server implements the server side of SCRAM authentication. It holds +// configuration values needed to initialize new server-side conversations. +// Generally, this can be persistent within an application. +type Server struct { + sync.RWMutex + credentialCB CredentialLookup + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn +} + +func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) { + return &Server{ + credentialCB: cl, + nonceGen: defaultNonceGenerator, + hashGen: fcn, + }, nil +} + +// WithNonceGenerator replaces the default nonce generator (base64 encoding of +// 24 bytes from crypto/rand) with a custom generator. This is provided for +// testing or for users with custom nonce requirements. +func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server { + s.Lock() + defer s.Unlock() + s.nonceGen = ng + return s +} + +// NewConversation constructs a server-side authentication conversation. +// Conversations cannot be reused, so this must be called for each new +// authentication attempt. +func (s *Server) NewConversation() *ServerConversation { + s.RLock() + defer s.RUnlock() + return &ServerConversation{ + nonceGen: s.nonceGen, + hashGen: s.hashGen, + credentialCB: s.credentialCB, + } +} diff --git a/vendor/github.com/xdg-go/scram/server_conv.go b/vendor/github.com/xdg-go/scram/server_conv.go new file mode 100644 index 000000000..9c8838c38 --- /dev/null +++ b/vendor/github.com/xdg-go/scram/server_conv.go @@ -0,0 +1,151 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package scram + +import ( + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" +) + +type serverState int + +const ( + serverFirst serverState = iota + serverFinal + serverDone +) + +// ServerConversation implements the server-side of an authentication +// conversation with a client. A new conversation must be created for +// each authentication attempt. 
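Step, Done, and Valid suggest the following server-side driver loop; recvFromClient and sendToClient are assumed stand-ins for a real transport:

```go
package scramserve

import (
	"log"

	"github.com/xdg-go/scram"
)

// recvFromClient and sendToClient stand in for a real transport (assumed).
func recvFromClient() string  { return "" }
func sendToClient(msg string) {}

// handleAuth drives one server-side conversation to completion.
func handleAuth(server *scram.Server) {
	conv := server.NewConversation()
	for !conv.Done() {
		reply, err := conv.Step(recvFromClient())
		if err != nil {
			sendToClient(reply) // may carry a SCRAM "e=..." error string
			log.Printf("authentication failed: %v", err)
			return
		}
		sendToClient(reply)
	}
	if conv.Valid() {
		log.Printf("authenticated user %q", conv.Username())
	}
}
```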
+type ServerConversation struct { + nonceGen NonceGeneratorFcn + hashGen HashGeneratorFcn + credentialCB CredentialLookup + state serverState + credential StoredCredentials + valid bool + gs2Header string + username string + authzID string + nonce string + c1b string + s1 string +} + +// Step takes a string provided from a client and attempts to move the +// authentication conversation forward. It returns a string to be sent to the +// client or an error if the client message is invalid. Calling Step after a +// conversation completes is also an error. +func (sc *ServerConversation) Step(challenge string) (response string, err error) { + switch sc.state { + case serverFirst: + sc.state = serverFinal + response, err = sc.firstMsg(challenge) + case serverFinal: + sc.state = serverDone + response, err = sc.finalMsg(challenge) + default: + response, err = "", errors.New("Conversation already completed") + } + return +} + +// Done returns true if the conversation is completed or has errored. +func (sc *ServerConversation) Done() bool { + return sc.state == serverDone +} + +// Valid returns true if the conversation successfully authenticated the +// client. +func (sc *ServerConversation) Valid() bool { + return sc.valid +} + +// Username returns the client-provided username. This is valid to call +// if the first conversation Step() is successful. +func (sc *ServerConversation) Username() string { + return sc.username +} + +// AuthzID returns the (optional) client-provided authorization identity, if +// any. If one was not provided, it returns the empty string. This is valid +// to call if the first conversation Step() is successful. +func (sc *ServerConversation) AuthzID() string { + return sc.authzID +} + +func (sc *ServerConversation) firstMsg(c1 string) (string, error) { + msg, err := parseClientFirst(c1) + if err != nil { + sc.state = serverDone + return "", err + } + + sc.gs2Header = msg.gs2Header + sc.username = msg.username + sc.authzID = msg.authzID + + sc.credential, err = sc.credentialCB(msg.username) + if err != nil { + sc.state = serverDone + return "e=unknown-user", err + } + + sc.nonce = msg.nonce + sc.nonceGen() + sc.c1b = msg.c1b + sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d", + sc.nonce, + base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)), + sc.credential.Iters, + ) + + return sc.s1, nil +} + +// For errors, returns server error message as well as non-nil error. Callers +// can choose whether to send server error or not. +func (sc *ServerConversation) finalMsg(c2 string) (string, error) { + msg, err := parseClientFinal(c2) + if err != nil { + return "", err + } + + // Check channel binding matches what we expect; in this case, we expect + // just the gs2 header we received as we don't support channel binding + // with a data payload. If we add binding, we need to independently + // compute the header to match here. 
+ if string(msg.cbind) != sc.gs2Header { + return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header) + } + + // Check nonce received matches what we sent + if msg.nonce != sc.nonce { + return "e=other-error", errors.New("nonce received did not match nonce sent") + } + + // Create auth message + authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop + + // Retrieve ClientKey from proof and verify it + clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg)) + clientKey := xorBytes([]byte(msg.proof), clientSignature) + storedKey := computeHash(sc.hashGen, clientKey) + + // Compare with constant-time function + if !hmac.Equal(storedKey, sc.credential.StoredKey) { + return "e=invalid-proof", errors.New("challenge proof invalid") + } + + sc.valid = true + + // Compute and return server verifier + serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg)) + return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil +} diff --git a/vendor/github.com/xdg-go/stringprep/.gitignore b/vendor/github.com/xdg-go/stringprep/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/xdg-go/stringprep/CHANGELOG.md b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md new file mode 100644 index 000000000..04b9753cd --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/CHANGELOG.md @@ -0,0 +1,36 @@ +# CHANGELOG + + +## [v1.0.4] - 2022-12-07 + +### Maintenance + +- Bump golang.org/x/text to v0.3.8 due to CVE-2022-32149 + + +## [v1.0.3] - 2022-03-01 + +### Maintenance + +- Bump golang.org/x/text to v0.3.7 due to CVE-2021-38561 + + +## [v1.0.2] - 2021-03-27 + +### Maintenance + +- Change minimum Go version to 1.11 + + +## [v1.0.1] - 2021-03-24 + +### Bug Fixes + +- Add go.mod file + + +## [v1.0.0] - 2018-02-21 + +[v1.0.2]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.2 +[v1.0.1]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.1 +[v1.0.0]: https://github.com/xdg-go/stringprep/releases/tag/v1.0.0 diff --git a/vendor/github.com/xdg-go/stringprep/LICENSE b/vendor/github.com/xdg-go/stringprep/LICENSE new file mode 100644 index 000000000..67db85882 --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/xdg-go/stringprep/README.md b/vendor/github.com/xdg-go/stringprep/README.md new file mode 100644 index 000000000..83ea5346d --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/README.md @@ -0,0 +1,28 @@ +[![Go Reference](https://pkg.go.dev/badge/github.com/xdg-go/stringprep.svg)](https://pkg.go.dev/github.com/xdg-go/stringprep) +[![Go Report Card](https://goreportcard.com/badge/github.com/xdg-go/stringprep)](https://goreportcard.com/report/github.com/xdg-go/stringprep) +[![Github Actions](https://github.com/xdg-go/stringprep/actions/workflows/test.yml/badge.svg)](https://github.com/xdg-go/stringprep/actions/workflows/test.yml) + +# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep + +## Synopsis + +``` + import "github.com/xdg-go/stringprep" + + prepped := stringprep.SASLprep.Prepare("TrustNô1") + +``` + +## Description + +This library provides an implementation of the stringprep algorithm +(RFC-3454) in Go, including all data tables. + +A pre-built SASLprep (RFC-4013) profile is provided as well. + +## Copyright and License + +Copyright 2018 by David A. Golden. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"). You may +obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/github.com/xdg-go/stringprep/bidi.go b/vendor/github.com/xdg-go/stringprep/bidi.go new file mode 100644 index 000000000..6f6d321df --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/bidi.go @@ -0,0 +1,73 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var errHasLCat = "BiDi string can't have runes from category L" +var errFirstRune = "BiDi string first rune must have category R or AL" +var errLastRune = "BiDi string last rune must have category R or AL" + +// Check for prohibited characters from table C.8 +func checkBiDiProhibitedRune(s string) error { + for _, r := range s { + if TableC8.Contains(r) { + return Error{Msg: errProhibited, Rune: r} + } + } + return nil +} + +// Check for LCat characters from table D.2 +func checkBiDiLCat(s string) error { + for _, r := range s { + if TableD2.Contains(r) { + return Error{Msg: errHasLCat, Rune: r} + } + } + return nil +} + +// Check first and last characters are in table D.1; requires non-empty string +func checkBadFirstAndLastRandALCat(s string) error { + rs := []rune(s) + if !TableD1.Contains(rs[0]) { + return Error{Msg: errFirstRune, Rune: rs[0]} + } + n := len(rs) - 1 + if !TableD1.Contains(rs[n]) { + return Error{Msg: errLastRune, Rune: rs[n]} + } + return nil +} + +// Look for RandALCat characters from table D.1 +func hasBiDiRandALCat(s string) bool { + for _, r := range s { + if TableD1.Contains(r) { + return true + } + } + return false +} + +// Check that BiDi rules are satisfied ; let empty string pass this rule +func passesBiDiRules(s string) error { + if len(s) == 0 { + return nil + } + if err := checkBiDiProhibitedRune(s); err != nil { + return err + } + if hasBiDiRandALCat(s) { + if err := checkBiDiLCat(s); err != nil { + return err + } + if err := checkBadFirstAndLastRandALCat(s); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/xdg-go/stringprep/doc.go b/vendor/github.com/xdg-go/stringprep/doc.go new file mode 100644 index 000000000..b319e081b --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/doc.go @@ -0,0 +1,10 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package stringprep provides data tables and algorithms for RFC-3454, +// including errata (as of 2018-02). It also provides a profile for +// SASLprep as defined in RFC-4013. +package stringprep diff --git a/vendor/github.com/xdg-go/stringprep/error.go b/vendor/github.com/xdg-go/stringprep/error.go new file mode 100644 index 000000000..7403e4991 --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/error.go @@ -0,0 +1,14 @@ +package stringprep + +import "fmt" + +// Error describes problems encountered during stringprep, including what rune +// was problematic. +type Error struct { + Msg string + Rune rune +} + +func (e Error) Error() string { + return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune) +} diff --git a/vendor/github.com/xdg-go/stringprep/map.go b/vendor/github.com/xdg-go/stringprep/map.go new file mode 100644 index 000000000..e56a0dd43 --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/map.go @@ -0,0 +1,21 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +// Mapping represents a stringprep mapping, from a single rune to zero or more +// runes. 
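A toy illustration of that shape, with mappings invented for the example rather than taken from the RFC tables:

```go
package main

import (
	"fmt"

	"github.com/xdg-go/stringprep"
)

func main() {
	// Map to zero runes to delete a rune, or to several runes to expand it.
	toy := stringprep.Mapping{
		0x00AD: {},               // soft hyphen: delete
		0x00DF: {0x0073, 0x0073}, // 'ß' expands to "ss"
	}
	if repl, ok := toy.Map(0x00DF); ok {
		fmt.Println(string(repl)) // prints "ss"
	}
	if _, ok := toy.Map('a'); !ok {
		fmt.Println("no mapping for 'a'")
	}
}
```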
+type Mapping map[rune][]rune + +// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping. +// The ok return value is false if the rune was not found. +func (m Mapping) Map(r rune) (replacement []rune, ok bool) { + rs, ok := m[r] + if !ok { + return nil, false + } + return rs, true +} diff --git a/vendor/github.com/xdg-go/stringprep/profile.go b/vendor/github.com/xdg-go/stringprep/profile.go new file mode 100644 index 000000000..5a73be9e5 --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/profile.go @@ -0,0 +1,75 @@ +package stringprep + +import ( + "golang.org/x/text/unicode/norm" +) + +// Profile represents a stringprep profile. +type Profile struct { + Mappings []Mapping + Normalize bool + Prohibits []Set + CheckBiDi bool +} + +var errProhibited = "prohibited character" + +// Prepare transforms an input string to an output string following +// the rules defined in the profile as defined by RFC-3454. +func (p Profile) Prepare(s string) (string, error) { + // Optimistically, assume output will be same length as input + temp := make([]rune, 0, len(s)) + + // Apply maps + for _, r := range s { + rs, ok := p.applyMaps(r) + if ok { + temp = append(temp, rs...) + } else { + temp = append(temp, r) + } + } + + // Normalize + var out string + if p.Normalize { + out = norm.NFKC.String(string(temp)) + } else { + out = string(temp) + } + + // Check prohibited + for _, r := range out { + if p.runeIsProhibited(r) { + return "", Error{Msg: errProhibited, Rune: r} + } + } + + // Check BiDi allowed + if p.CheckBiDi { + if err := passesBiDiRules(out); err != nil { + return "", err + } + } + + return out, nil +} + +func (p Profile) applyMaps(r rune) ([]rune, bool) { + for _, m := range p.Mappings { + rs, ok := m.Map(r) + if ok { + return rs, true + } + } + return nil, false +} + +func (p Profile) runeIsProhibited(r rune) bool { + for _, s := range p.Prohibits { + if s.Contains(r) { + return true + } + } + return false +} diff --git a/vendor/github.com/xdg-go/stringprep/saslprep.go b/vendor/github.com/xdg-go/stringprep/saslprep.go new file mode 100644 index 000000000..40013488b --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/saslprep.go @@ -0,0 +1,52 @@ +package stringprep + +var mapNonASCIISpaceToASCIISpace = Mapping{ + 0x00A0: []rune{0x0020}, + 0x1680: []rune{0x0020}, + 0x2000: []rune{0x0020}, + 0x2001: []rune{0x0020}, + 0x2002: []rune{0x0020}, + 0x2003: []rune{0x0020}, + 0x2004: []rune{0x0020}, + 0x2005: []rune{0x0020}, + 0x2006: []rune{0x0020}, + 0x2007: []rune{0x0020}, + 0x2008: []rune{0x0020}, + 0x2009: []rune{0x0020}, + 0x200A: []rune{0x0020}, + 0x200B: []rune{0x0020}, + 0x202F: []rune{0x0020}, + 0x205F: []rune{0x0020}, + 0x3000: []rune{0x0020}, +} + +// SASLprep is a pre-defined stringprep profile for user names and passwords +// as described in RFC-4013. +// +// Because the stringprep distinction between query and stored strings was +// intended for compatibility across profile versions, but SASLprep was never +// updated and is now deprecated, this profile only operates in stored +// strings mode, prohibiting unassigned code points. 
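+// +// A minimal usage sketch (illustrative only; Prepare returns an error when +// the input contains prohibited runes or fails the BiDi checks): +// +//    prepped, err := stringprep.SASLprep.Prepare("TrustNô1") +//    if err != nil { +//        // reject the input rather than using it as a credential +//    }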
+var SASLprep Profile = saslprep + +var saslprep = Profile{ + Mappings: []Mapping{ + TableB1, + mapNonASCIISpaceToASCIISpace, + }, + Normalize: true, + Prohibits: []Set{ + TableA1, + TableC1_2, + TableC2_1, + TableC2_2, + TableC3, + TableC4, + TableC5, + TableC6, + TableC7, + TableC8, + TableC9, + }, + CheckBiDi: true, +} diff --git a/vendor/github.com/xdg-go/stringprep/set.go b/vendor/github.com/xdg-go/stringprep/set.go new file mode 100644 index 000000000..c837e28c8 --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/set.go @@ -0,0 +1,36 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +import "sort" + +// RuneRange represents a close-ended range of runes: [N,M]. For a range +// consisting of a single rune, N and M will be equal. +type RuneRange [2]rune + +// Contains returns true if a rune is within the bounds of the RuneRange. +func (rr RuneRange) Contains(r rune) bool { + return rr[0] <= r && r <= rr[1] +} + +func (rr RuneRange) isAbove(r rune) bool { + return r <= rr[0] +} + +// Set represents a stringprep data table used to identify runes of a +// particular type. +type Set []RuneRange + +// Contains returns true if a rune is within any of the RuneRanges in the +// Set. +func (s Set) Contains(r rune) bool { + i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) }) + if i < len(s) && s[i].Contains(r) { + return true + } + return false +} diff --git a/vendor/github.com/xdg-go/stringprep/tables.go b/vendor/github.com/xdg-go/stringprep/tables.go new file mode 100644 index 000000000..c3fc1fa0f --- /dev/null +++ b/vendor/github.com/xdg-go/stringprep/tables.go @@ -0,0 +1,3215 @@ +// Copyright 2018 by David A. Golden. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package stringprep + +var tableA1 = Set{ + RuneRange{0x0221, 0x0221}, + RuneRange{0x0234, 0x024F}, + RuneRange{0x02AE, 0x02AF}, + RuneRange{0x02EF, 0x02FF}, + RuneRange{0x0350, 0x035F}, + RuneRange{0x0370, 0x0373}, + RuneRange{0x0376, 0x0379}, + RuneRange{0x037B, 0x037D}, + RuneRange{0x037F, 0x0383}, + RuneRange{0x038B, 0x038B}, + RuneRange{0x038D, 0x038D}, + RuneRange{0x03A2, 0x03A2}, + RuneRange{0x03CF, 0x03CF}, + RuneRange{0x03F7, 0x03FF}, + RuneRange{0x0487, 0x0487}, + RuneRange{0x04CF, 0x04CF}, + RuneRange{0x04F6, 0x04F7}, + RuneRange{0x04FA, 0x04FF}, + RuneRange{0x0510, 0x0530}, + RuneRange{0x0557, 0x0558}, + RuneRange{0x0560, 0x0560}, + RuneRange{0x0588, 0x0588}, + RuneRange{0x058B, 0x0590}, + RuneRange{0x05A2, 0x05A2}, + RuneRange{0x05BA, 0x05BA}, + RuneRange{0x05C5, 0x05CF}, + RuneRange{0x05EB, 0x05EF}, + RuneRange{0x05F5, 0x060B}, + RuneRange{0x060D, 0x061A}, + RuneRange{0x061C, 0x061E}, + RuneRange{0x0620, 0x0620}, + RuneRange{0x063B, 0x063F}, + RuneRange{0x0656, 0x065F}, + RuneRange{0x06EE, 0x06EF}, + RuneRange{0x06FF, 0x06FF}, + RuneRange{0x070E, 0x070E}, + RuneRange{0x072D, 0x072F}, + RuneRange{0x074B, 0x077F}, + RuneRange{0x07B2, 0x0900}, + RuneRange{0x0904, 0x0904}, + RuneRange{0x093A, 0x093B}, + RuneRange{0x094E, 0x094F}, + RuneRange{0x0955, 0x0957}, + RuneRange{0x0971, 0x0980}, + RuneRange{0x0984, 0x0984}, + RuneRange{0x098D, 0x098E}, + RuneRange{0x0991, 0x0992}, + RuneRange{0x09A9, 0x09A9}, + RuneRange{0x09B1, 0x09B1}, + RuneRange{0x09B3, 0x09B5}, + RuneRange{0x09BA, 0x09BB}, + RuneRange{0x09BD, 0x09BD}, + RuneRange{0x09C5, 0x09C6}, + RuneRange{0x09C9, 0x09CA}, + RuneRange{0x09CE, 0x09D6}, + RuneRange{0x09D8, 0x09DB}, + RuneRange{0x09DE, 0x09DE}, + RuneRange{0x09E4, 0x09E5}, + RuneRange{0x09FB, 0x0A01}, + RuneRange{0x0A03, 0x0A04}, + RuneRange{0x0A0B, 0x0A0E}, + RuneRange{0x0A11, 0x0A12}, + RuneRange{0x0A29, 0x0A29}, + RuneRange{0x0A31, 0x0A31}, + RuneRange{0x0A34, 0x0A34}, + RuneRange{0x0A37, 0x0A37}, + RuneRange{0x0A3A, 0x0A3B}, + RuneRange{0x0A3D, 0x0A3D}, + RuneRange{0x0A43, 0x0A46}, + RuneRange{0x0A49, 0x0A4A}, + RuneRange{0x0A4E, 0x0A58}, + RuneRange{0x0A5D, 0x0A5D}, + RuneRange{0x0A5F, 0x0A65}, + RuneRange{0x0A75, 0x0A80}, + RuneRange{0x0A84, 0x0A84}, + RuneRange{0x0A8C, 0x0A8C}, + RuneRange{0x0A8E, 0x0A8E}, + RuneRange{0x0A92, 0x0A92}, + RuneRange{0x0AA9, 0x0AA9}, + RuneRange{0x0AB1, 0x0AB1}, + RuneRange{0x0AB4, 0x0AB4}, + RuneRange{0x0ABA, 0x0ABB}, + RuneRange{0x0AC6, 0x0AC6}, + RuneRange{0x0ACA, 0x0ACA}, + RuneRange{0x0ACE, 0x0ACF}, + RuneRange{0x0AD1, 0x0ADF}, + RuneRange{0x0AE1, 0x0AE5}, + RuneRange{0x0AF0, 0x0B00}, + RuneRange{0x0B04, 0x0B04}, + RuneRange{0x0B0D, 0x0B0E}, + RuneRange{0x0B11, 0x0B12}, + RuneRange{0x0B29, 0x0B29}, + RuneRange{0x0B31, 0x0B31}, + RuneRange{0x0B34, 0x0B35}, + RuneRange{0x0B3A, 0x0B3B}, + RuneRange{0x0B44, 0x0B46}, + RuneRange{0x0B49, 0x0B4A}, + RuneRange{0x0B4E, 0x0B55}, + RuneRange{0x0B58, 0x0B5B}, + RuneRange{0x0B5E, 0x0B5E}, + RuneRange{0x0B62, 0x0B65}, + RuneRange{0x0B71, 0x0B81}, + RuneRange{0x0B84, 0x0B84}, + RuneRange{0x0B8B, 0x0B8D}, + RuneRange{0x0B91, 0x0B91}, + RuneRange{0x0B96, 0x0B98}, + RuneRange{0x0B9B, 0x0B9B}, + RuneRange{0x0B9D, 0x0B9D}, + RuneRange{0x0BA0, 0x0BA2}, + RuneRange{0x0BA5, 0x0BA7}, + RuneRange{0x0BAB, 0x0BAD}, + RuneRange{0x0BB6, 0x0BB6}, + RuneRange{0x0BBA, 0x0BBD}, + RuneRange{0x0BC3, 0x0BC5}, + RuneRange{0x0BC9, 0x0BC9}, + RuneRange{0x0BCE, 0x0BD6}, + RuneRange{0x0BD8, 0x0BE6}, + RuneRange{0x0BF3, 0x0C00}, + 
RuneRange{0x0C04, 0x0C04}, + RuneRange{0x0C0D, 0x0C0D}, + RuneRange{0x0C11, 0x0C11}, + RuneRange{0x0C29, 0x0C29}, + RuneRange{0x0C34, 0x0C34}, + RuneRange{0x0C3A, 0x0C3D}, + RuneRange{0x0C45, 0x0C45}, + RuneRange{0x0C49, 0x0C49}, + RuneRange{0x0C4E, 0x0C54}, + RuneRange{0x0C57, 0x0C5F}, + RuneRange{0x0C62, 0x0C65}, + RuneRange{0x0C70, 0x0C81}, + RuneRange{0x0C84, 0x0C84}, + RuneRange{0x0C8D, 0x0C8D}, + RuneRange{0x0C91, 0x0C91}, + RuneRange{0x0CA9, 0x0CA9}, + RuneRange{0x0CB4, 0x0CB4}, + RuneRange{0x0CBA, 0x0CBD}, + RuneRange{0x0CC5, 0x0CC5}, + RuneRange{0x0CC9, 0x0CC9}, + RuneRange{0x0CCE, 0x0CD4}, + RuneRange{0x0CD7, 0x0CDD}, + RuneRange{0x0CDF, 0x0CDF}, + RuneRange{0x0CE2, 0x0CE5}, + RuneRange{0x0CF0, 0x0D01}, + RuneRange{0x0D04, 0x0D04}, + RuneRange{0x0D0D, 0x0D0D}, + RuneRange{0x0D11, 0x0D11}, + RuneRange{0x0D29, 0x0D29}, + RuneRange{0x0D3A, 0x0D3D}, + RuneRange{0x0D44, 0x0D45}, + RuneRange{0x0D49, 0x0D49}, + RuneRange{0x0D4E, 0x0D56}, + RuneRange{0x0D58, 0x0D5F}, + RuneRange{0x0D62, 0x0D65}, + RuneRange{0x0D70, 0x0D81}, + RuneRange{0x0D84, 0x0D84}, + RuneRange{0x0D97, 0x0D99}, + RuneRange{0x0DB2, 0x0DB2}, + RuneRange{0x0DBC, 0x0DBC}, + RuneRange{0x0DBE, 0x0DBF}, + RuneRange{0x0DC7, 0x0DC9}, + RuneRange{0x0DCB, 0x0DCE}, + RuneRange{0x0DD5, 0x0DD5}, + RuneRange{0x0DD7, 0x0DD7}, + RuneRange{0x0DE0, 0x0DF1}, + RuneRange{0x0DF5, 0x0E00}, + RuneRange{0x0E3B, 0x0E3E}, + RuneRange{0x0E5C, 0x0E80}, + RuneRange{0x0E83, 0x0E83}, + RuneRange{0x0E85, 0x0E86}, + RuneRange{0x0E89, 0x0E89}, + RuneRange{0x0E8B, 0x0E8C}, + RuneRange{0x0E8E, 0x0E93}, + RuneRange{0x0E98, 0x0E98}, + RuneRange{0x0EA0, 0x0EA0}, + RuneRange{0x0EA4, 0x0EA4}, + RuneRange{0x0EA6, 0x0EA6}, + RuneRange{0x0EA8, 0x0EA9}, + RuneRange{0x0EAC, 0x0EAC}, + RuneRange{0x0EBA, 0x0EBA}, + RuneRange{0x0EBE, 0x0EBF}, + RuneRange{0x0EC5, 0x0EC5}, + RuneRange{0x0EC7, 0x0EC7}, + RuneRange{0x0ECE, 0x0ECF}, + RuneRange{0x0EDA, 0x0EDB}, + RuneRange{0x0EDE, 0x0EFF}, + RuneRange{0x0F48, 0x0F48}, + RuneRange{0x0F6B, 0x0F70}, + RuneRange{0x0F8C, 0x0F8F}, + RuneRange{0x0F98, 0x0F98}, + RuneRange{0x0FBD, 0x0FBD}, + RuneRange{0x0FCD, 0x0FCE}, + RuneRange{0x0FD0, 0x0FFF}, + RuneRange{0x1022, 0x1022}, + RuneRange{0x1028, 0x1028}, + RuneRange{0x102B, 0x102B}, + RuneRange{0x1033, 0x1035}, + RuneRange{0x103A, 0x103F}, + RuneRange{0x105A, 0x109F}, + RuneRange{0x10C6, 0x10CF}, + RuneRange{0x10F9, 0x10FA}, + RuneRange{0x10FC, 0x10FF}, + RuneRange{0x115A, 0x115E}, + RuneRange{0x11A3, 0x11A7}, + RuneRange{0x11FA, 0x11FF}, + RuneRange{0x1207, 0x1207}, + RuneRange{0x1247, 0x1247}, + RuneRange{0x1249, 0x1249}, + RuneRange{0x124E, 0x124F}, + RuneRange{0x1257, 0x1257}, + RuneRange{0x1259, 0x1259}, + RuneRange{0x125E, 0x125F}, + RuneRange{0x1287, 0x1287}, + RuneRange{0x1289, 0x1289}, + RuneRange{0x128E, 0x128F}, + RuneRange{0x12AF, 0x12AF}, + RuneRange{0x12B1, 0x12B1}, + RuneRange{0x12B6, 0x12B7}, + RuneRange{0x12BF, 0x12BF}, + RuneRange{0x12C1, 0x12C1}, + RuneRange{0x12C6, 0x12C7}, + RuneRange{0x12CF, 0x12CF}, + RuneRange{0x12D7, 0x12D7}, + RuneRange{0x12EF, 0x12EF}, + RuneRange{0x130F, 0x130F}, + RuneRange{0x1311, 0x1311}, + RuneRange{0x1316, 0x1317}, + RuneRange{0x131F, 0x131F}, + RuneRange{0x1347, 0x1347}, + RuneRange{0x135B, 0x1360}, + RuneRange{0x137D, 0x139F}, + RuneRange{0x13F5, 0x1400}, + RuneRange{0x1677, 0x167F}, + RuneRange{0x169D, 0x169F}, + RuneRange{0x16F1, 0x16FF}, + RuneRange{0x170D, 0x170D}, + RuneRange{0x1715, 0x171F}, + RuneRange{0x1737, 0x173F}, + RuneRange{0x1754, 0x175F}, + RuneRange{0x176D, 0x176D}, + RuneRange{0x1771, 0x1771}, + 
RuneRange{0x1774, 0x177F}, + RuneRange{0x17DD, 0x17DF}, + RuneRange{0x17EA, 0x17FF}, + RuneRange{0x180F, 0x180F}, + RuneRange{0x181A, 0x181F}, + RuneRange{0x1878, 0x187F}, + RuneRange{0x18AA, 0x1DFF}, + RuneRange{0x1E9C, 0x1E9F}, + RuneRange{0x1EFA, 0x1EFF}, + RuneRange{0x1F16, 0x1F17}, + RuneRange{0x1F1E, 0x1F1F}, + RuneRange{0x1F46, 0x1F47}, + RuneRange{0x1F4E, 0x1F4F}, + RuneRange{0x1F58, 0x1F58}, + RuneRange{0x1F5A, 0x1F5A}, + RuneRange{0x1F5C, 0x1F5C}, + RuneRange{0x1F5E, 0x1F5E}, + RuneRange{0x1F7E, 0x1F7F}, + RuneRange{0x1FB5, 0x1FB5}, + RuneRange{0x1FC5, 0x1FC5}, + RuneRange{0x1FD4, 0x1FD5}, + RuneRange{0x1FDC, 0x1FDC}, + RuneRange{0x1FF0, 0x1FF1}, + RuneRange{0x1FF5, 0x1FF5}, + RuneRange{0x1FFF, 0x1FFF}, + RuneRange{0x2053, 0x2056}, + RuneRange{0x2058, 0x205E}, + RuneRange{0x2064, 0x2069}, + RuneRange{0x2072, 0x2073}, + RuneRange{0x208F, 0x209F}, + RuneRange{0x20B2, 0x20CF}, + RuneRange{0x20EB, 0x20FF}, + RuneRange{0x213B, 0x213C}, + RuneRange{0x214C, 0x2152}, + RuneRange{0x2184, 0x218F}, + RuneRange{0x23CF, 0x23FF}, + RuneRange{0x2427, 0x243F}, + RuneRange{0x244B, 0x245F}, + RuneRange{0x24FF, 0x24FF}, + RuneRange{0x2614, 0x2615}, + RuneRange{0x2618, 0x2618}, + RuneRange{0x267E, 0x267F}, + RuneRange{0x268A, 0x2700}, + RuneRange{0x2705, 0x2705}, + RuneRange{0x270A, 0x270B}, + RuneRange{0x2728, 0x2728}, + RuneRange{0x274C, 0x274C}, + RuneRange{0x274E, 0x274E}, + RuneRange{0x2753, 0x2755}, + RuneRange{0x2757, 0x2757}, + RuneRange{0x275F, 0x2760}, + RuneRange{0x2795, 0x2797}, + RuneRange{0x27B0, 0x27B0}, + RuneRange{0x27BF, 0x27CF}, + RuneRange{0x27EC, 0x27EF}, + RuneRange{0x2B00, 0x2E7F}, + RuneRange{0x2E9A, 0x2E9A}, + RuneRange{0x2EF4, 0x2EFF}, + RuneRange{0x2FD6, 0x2FEF}, + RuneRange{0x2FFC, 0x2FFF}, + RuneRange{0x3040, 0x3040}, + RuneRange{0x3097, 0x3098}, + RuneRange{0x3100, 0x3104}, + RuneRange{0x312D, 0x3130}, + RuneRange{0x318F, 0x318F}, + RuneRange{0x31B8, 0x31EF}, + RuneRange{0x321D, 0x321F}, + RuneRange{0x3244, 0x3250}, + RuneRange{0x327C, 0x327E}, + RuneRange{0x32CC, 0x32CF}, + RuneRange{0x32FF, 0x32FF}, + RuneRange{0x3377, 0x337A}, + RuneRange{0x33DE, 0x33DF}, + RuneRange{0x33FF, 0x33FF}, + RuneRange{0x4DB6, 0x4DFF}, + RuneRange{0x9FA6, 0x9FFF}, + RuneRange{0xA48D, 0xA48F}, + RuneRange{0xA4C7, 0xABFF}, + RuneRange{0xD7A4, 0xD7FF}, + RuneRange{0xFA2E, 0xFA2F}, + RuneRange{0xFA6B, 0xFAFF}, + RuneRange{0xFB07, 0xFB12}, + RuneRange{0xFB18, 0xFB1C}, + RuneRange{0xFB37, 0xFB37}, + RuneRange{0xFB3D, 0xFB3D}, + RuneRange{0xFB3F, 0xFB3F}, + RuneRange{0xFB42, 0xFB42}, + RuneRange{0xFB45, 0xFB45}, + RuneRange{0xFBB2, 0xFBD2}, + RuneRange{0xFD40, 0xFD4F}, + RuneRange{0xFD90, 0xFD91}, + RuneRange{0xFDC8, 0xFDCF}, + RuneRange{0xFDFD, 0xFDFF}, + RuneRange{0xFE10, 0xFE1F}, + RuneRange{0xFE24, 0xFE2F}, + RuneRange{0xFE47, 0xFE48}, + RuneRange{0xFE53, 0xFE53}, + RuneRange{0xFE67, 0xFE67}, + RuneRange{0xFE6C, 0xFE6F}, + RuneRange{0xFE75, 0xFE75}, + RuneRange{0xFEFD, 0xFEFE}, + RuneRange{0xFF00, 0xFF00}, + RuneRange{0xFFBF, 0xFFC1}, + RuneRange{0xFFC8, 0xFFC9}, + RuneRange{0xFFD0, 0xFFD1}, + RuneRange{0xFFD8, 0xFFD9}, + RuneRange{0xFFDD, 0xFFDF}, + RuneRange{0xFFE7, 0xFFE7}, + RuneRange{0xFFEF, 0xFFF8}, + RuneRange{0x10000, 0x102FF}, + RuneRange{0x1031F, 0x1031F}, + RuneRange{0x10324, 0x1032F}, + RuneRange{0x1034B, 0x103FF}, + RuneRange{0x10426, 0x10427}, + RuneRange{0x1044E, 0x1CFFF}, + RuneRange{0x1D0F6, 0x1D0FF}, + RuneRange{0x1D127, 0x1D129}, + RuneRange{0x1D1DE, 0x1D3FF}, + RuneRange{0x1D455, 0x1D455}, + RuneRange{0x1D49D, 0x1D49D}, + RuneRange{0x1D4A0, 0x1D4A1}, + RuneRange{0x1D4A3, 
0x1D4A4}, + RuneRange{0x1D4A7, 0x1D4A8}, + RuneRange{0x1D4AD, 0x1D4AD}, + RuneRange{0x1D4BA, 0x1D4BA}, + RuneRange{0x1D4BC, 0x1D4BC}, + RuneRange{0x1D4C1, 0x1D4C1}, + RuneRange{0x1D4C4, 0x1D4C4}, + RuneRange{0x1D506, 0x1D506}, + RuneRange{0x1D50B, 0x1D50C}, + RuneRange{0x1D515, 0x1D515}, + RuneRange{0x1D51D, 0x1D51D}, + RuneRange{0x1D53A, 0x1D53A}, + RuneRange{0x1D53F, 0x1D53F}, + RuneRange{0x1D545, 0x1D545}, + RuneRange{0x1D547, 0x1D549}, + RuneRange{0x1D551, 0x1D551}, + RuneRange{0x1D6A4, 0x1D6A7}, + RuneRange{0x1D7CA, 0x1D7CD}, + RuneRange{0x1D800, 0x1FFFD}, + RuneRange{0x2A6D7, 0x2F7FF}, + RuneRange{0x2FA1E, 0x2FFFD}, + RuneRange{0x30000, 0x3FFFD}, + RuneRange{0x40000, 0x4FFFD}, + RuneRange{0x50000, 0x5FFFD}, + RuneRange{0x60000, 0x6FFFD}, + RuneRange{0x70000, 0x7FFFD}, + RuneRange{0x80000, 0x8FFFD}, + RuneRange{0x90000, 0x9FFFD}, + RuneRange{0xA0000, 0xAFFFD}, + RuneRange{0xB0000, 0xBFFFD}, + RuneRange{0xC0000, 0xCFFFD}, + RuneRange{0xD0000, 0xDFFFD}, + RuneRange{0xE0000, 0xE0000}, + RuneRange{0xE0002, 0xE001F}, + RuneRange{0xE0080, 0xEFFFD}, +} + +// TableA1 represents RFC-3454 Table A.1. +var TableA1 Set = tableA1 + +var tableB1 = Mapping{ + 0x00AD: []rune{}, // Map to nothing + 0x034F: []rune{}, // Map to nothing + 0x180B: []rune{}, // Map to nothing + 0x180C: []rune{}, // Map to nothing + 0x180D: []rune{}, // Map to nothing + 0x200B: []rune{}, // Map to nothing + 0x200C: []rune{}, // Map to nothing + 0x200D: []rune{}, // Map to nothing + 0x2060: []rune{}, // Map to nothing + 0xFE00: []rune{}, // Map to nothing + 0xFE01: []rune{}, // Map to nothing + 0xFE02: []rune{}, // Map to nothing + 0xFE03: []rune{}, // Map to nothing + 0xFE04: []rune{}, // Map to nothing + 0xFE05: []rune{}, // Map to nothing + 0xFE06: []rune{}, // Map to nothing + 0xFE07: []rune{}, // Map to nothing + 0xFE08: []rune{}, // Map to nothing + 0xFE09: []rune{}, // Map to nothing + 0xFE0A: []rune{}, // Map to nothing + 0xFE0B: []rune{}, // Map to nothing + 0xFE0C: []rune{}, // Map to nothing + 0xFE0D: []rune{}, // Map to nothing + 0xFE0E: []rune{}, // Map to nothing + 0xFE0F: []rune{}, // Map to nothing + 0xFEFF: []rune{}, // Map to nothing +} + +// TableB1 represents RFC-3454 Table B.1. 
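+// Every entry maps to the empty rune slice ("map to nothing"), so applying +// the table deletes the listed code points (soft hyphen, zero-width +// characters, variation selectors, byte order mark). An illustrative lookup: +// +//    if rs, ok := TableB1.Map(0x00AD); ok && len(rs) == 0 { +//        // U+00AD maps to nothing and is dropped from prepared output +//    }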
+var TableB1 Mapping = tableB1 + +var tableB2 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: 
[]rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, // Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: 
[]rune{0x01F3}, // Case map + 0x01F4: []rune{0x01F5}, // Case map + 0x01F6: []rune{0x0195}, // Case map + 0x01F7: []rune{0x01BF}, // Case map + 0x01F8: []rune{0x01F9}, // Case map + 0x01FA: []rune{0x01FB}, // Case map + 0x01FC: []rune{0x01FD}, // Case map + 0x01FE: []rune{0x01FF}, // Case map + 0x0200: []rune{0x0201}, // Case map + 0x0202: []rune{0x0203}, // Case map + 0x0204: []rune{0x0205}, // Case map + 0x0206: []rune{0x0207}, // Case map + 0x0208: []rune{0x0209}, // Case map + 0x020A: []rune{0x020B}, // Case map + 0x020C: []rune{0x020D}, // Case map + 0x020E: []rune{0x020F}, // Case map + 0x0210: []rune{0x0211}, // Case map + 0x0212: []rune{0x0213}, // Case map + 0x0214: []rune{0x0215}, // Case map + 0x0216: []rune{0x0217}, // Case map + 0x0218: []rune{0x0219}, // Case map + 0x021A: []rune{0x021B}, // Case map + 0x021C: []rune{0x021D}, // Case map + 0x021E: []rune{0x021F}, // Case map + 0x0220: []rune{0x019E}, // Case map + 0x0222: []rune{0x0223}, // Case map + 0x0224: []rune{0x0225}, // Case map + 0x0226: []rune{0x0227}, // Case map + 0x0228: []rune{0x0229}, // Case map + 0x022A: []rune{0x022B}, // Case map + 0x022C: []rune{0x022D}, // Case map + 0x022E: []rune{0x022F}, // Case map + 0x0230: []rune{0x0231}, // Case map + 0x0232: []rune{0x0233}, // Case map + 0x0345: []rune{0x03B9}, // Case map + 0x037A: []rune{0x0020, 0x03B9}, // Additional folding + 0x0386: []rune{0x03AC}, // Case map + 0x0388: []rune{0x03AD}, // Case map + 0x0389: []rune{0x03AE}, // Case map + 0x038A: []rune{0x03AF}, // Case map + 0x038C: []rune{0x03CC}, // Case map + 0x038E: []rune{0x03CD}, // Case map + 0x038F: []rune{0x03CE}, // Case map + 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x0391: []rune{0x03B1}, // Case map + 0x0392: []rune{0x03B2}, // Case map + 0x0393: []rune{0x03B3}, // Case map + 0x0394: []rune{0x03B4}, // Case map + 0x0395: []rune{0x03B5}, // Case map + 0x0396: []rune{0x03B6}, // Case map + 0x0397: []rune{0x03B7}, // Case map + 0x0398: []rune{0x03B8}, // Case map + 0x0399: []rune{0x03B9}, // Case map + 0x039A: []rune{0x03BA}, // Case map + 0x039B: []rune{0x03BB}, // Case map + 0x039C: []rune{0x03BC}, // Case map + 0x039D: []rune{0x03BD}, // Case map + 0x039E: []rune{0x03BE}, // Case map + 0x039F: []rune{0x03BF}, // Case map + 0x03A0: []rune{0x03C0}, // Case map + 0x03A1: []rune{0x03C1}, // Case map + 0x03A3: []rune{0x03C3}, // Case map + 0x03A4: []rune{0x03C4}, // Case map + 0x03A5: []rune{0x03C5}, // Case map + 0x03A6: []rune{0x03C6}, // Case map + 0x03A7: []rune{0x03C7}, // Case map + 0x03A8: []rune{0x03C8}, // Case map + 0x03A9: []rune{0x03C9}, // Case map + 0x03AA: []rune{0x03CA}, // Case map + 0x03AB: []rune{0x03CB}, // Case map + 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x03C2: []rune{0x03C3}, // Case map + 0x03D0: []rune{0x03B2}, // Case map + 0x03D1: []rune{0x03B8}, // Case map + 0x03D2: []rune{0x03C5}, // Additional folding + 0x03D3: []rune{0x03CD}, // Additional folding + 0x03D4: []rune{0x03CB}, // Additional folding + 0x03D5: []rune{0x03C6}, // Case map + 0x03D6: []rune{0x03C0}, // Case map + 0x03D8: []rune{0x03D9}, // Case map + 0x03DA: []rune{0x03DB}, // Case map + 0x03DC: []rune{0x03DD}, // Case map + 0x03DE: []rune{0x03DF}, // Case map + 0x03E0: []rune{0x03E1}, // Case map + 0x03E2: []rune{0x03E3}, // Case map + 0x03E4: []rune{0x03E5}, // Case map + 0x03E6: []rune{0x03E7}, // Case map + 0x03E8: []rune{0x03E9}, // Case map + 0x03EA: []rune{0x03EB}, // Case map + 0x03EC: []rune{0x03ED}, // Case map + 0x03EE: []rune{0x03EF}, // Case map + 0x03F0: []rune{0x03BA}, 
// Case map + 0x03F1: []rune{0x03C1}, // Case map + 0x03F2: []rune{0x03C3}, // Case map + 0x03F4: []rune{0x03B8}, // Case map + 0x03F5: []rune{0x03B5}, // Case map + 0x0400: []rune{0x0450}, // Case map + 0x0401: []rune{0x0451}, // Case map + 0x0402: []rune{0x0452}, // Case map + 0x0403: []rune{0x0453}, // Case map + 0x0404: []rune{0x0454}, // Case map + 0x0405: []rune{0x0455}, // Case map + 0x0406: []rune{0x0456}, // Case map + 0x0407: []rune{0x0457}, // Case map + 0x0408: []rune{0x0458}, // Case map + 0x0409: []rune{0x0459}, // Case map + 0x040A: []rune{0x045A}, // Case map + 0x040B: []rune{0x045B}, // Case map + 0x040C: []rune{0x045C}, // Case map + 0x040D: []rune{0x045D}, // Case map + 0x040E: []rune{0x045E}, // Case map + 0x040F: []rune{0x045F}, // Case map + 0x0410: []rune{0x0430}, // Case map + 0x0411: []rune{0x0431}, // Case map + 0x0412: []rune{0x0432}, // Case map + 0x0413: []rune{0x0433}, // Case map + 0x0414: []rune{0x0434}, // Case map + 0x0415: []rune{0x0435}, // Case map + 0x0416: []rune{0x0436}, // Case map + 0x0417: []rune{0x0437}, // Case map + 0x0418: []rune{0x0438}, // Case map + 0x0419: []rune{0x0439}, // Case map + 0x041A: []rune{0x043A}, // Case map + 0x041B: []rune{0x043B}, // Case map + 0x041C: []rune{0x043C}, // Case map + 0x041D: []rune{0x043D}, // Case map + 0x041E: []rune{0x043E}, // Case map + 0x041F: []rune{0x043F}, // Case map + 0x0420: []rune{0x0440}, // Case map + 0x0421: []rune{0x0441}, // Case map + 0x0422: []rune{0x0442}, // Case map + 0x0423: []rune{0x0443}, // Case map + 0x0424: []rune{0x0444}, // Case map + 0x0425: []rune{0x0445}, // Case map + 0x0426: []rune{0x0446}, // Case map + 0x0427: []rune{0x0447}, // Case map + 0x0428: []rune{0x0448}, // Case map + 0x0429: []rune{0x0449}, // Case map + 0x042A: []rune{0x044A}, // Case map + 0x042B: []rune{0x044B}, // Case map + 0x042C: []rune{0x044C}, // Case map + 0x042D: []rune{0x044D}, // Case map + 0x042E: []rune{0x044E}, // Case map + 0x042F: []rune{0x044F}, // Case map + 0x0460: []rune{0x0461}, // Case map + 0x0462: []rune{0x0463}, // Case map + 0x0464: []rune{0x0465}, // Case map + 0x0466: []rune{0x0467}, // Case map + 0x0468: []rune{0x0469}, // Case map + 0x046A: []rune{0x046B}, // Case map + 0x046C: []rune{0x046D}, // Case map + 0x046E: []rune{0x046F}, // Case map + 0x0470: []rune{0x0471}, // Case map + 0x0472: []rune{0x0473}, // Case map + 0x0474: []rune{0x0475}, // Case map + 0x0476: []rune{0x0477}, // Case map + 0x0478: []rune{0x0479}, // Case map + 0x047A: []rune{0x047B}, // Case map + 0x047C: []rune{0x047D}, // Case map + 0x047E: []rune{0x047F}, // Case map + 0x0480: []rune{0x0481}, // Case map + 0x048A: []rune{0x048B}, // Case map + 0x048C: []rune{0x048D}, // Case map + 0x048E: []rune{0x048F}, // Case map + 0x0490: []rune{0x0491}, // Case map + 0x0492: []rune{0x0493}, // Case map + 0x0494: []rune{0x0495}, // Case map + 0x0496: []rune{0x0497}, // Case map + 0x0498: []rune{0x0499}, // Case map + 0x049A: []rune{0x049B}, // Case map + 0x049C: []rune{0x049D}, // Case map + 0x049E: []rune{0x049F}, // Case map + 0x04A0: []rune{0x04A1}, // Case map + 0x04A2: []rune{0x04A3}, // Case map + 0x04A4: []rune{0x04A5}, // Case map + 0x04A6: []rune{0x04A7}, // Case map + 0x04A8: []rune{0x04A9}, // Case map + 0x04AA: []rune{0x04AB}, // Case map + 0x04AC: []rune{0x04AD}, // Case map + 0x04AE: []rune{0x04AF}, // Case map + 0x04B0: []rune{0x04B1}, // Case map + 0x04B2: []rune{0x04B3}, // Case map + 0x04B4: []rune{0x04B5}, // Case map + 0x04B6: []rune{0x04B7}, // Case map + 0x04B8: []rune{0x04B9}, // Case map + 
0x04BA: []rune{0x04BB}, // Case map + 0x04BC: []rune{0x04BD}, // Case map + 0x04BE: []rune{0x04BF}, // Case map + 0x04C1: []rune{0x04C2}, // Case map + 0x04C3: []rune{0x04C4}, // Case map + 0x04C5: []rune{0x04C6}, // Case map + 0x04C7: []rune{0x04C8}, // Case map + 0x04C9: []rune{0x04CA}, // Case map + 0x04CB: []rune{0x04CC}, // Case map + 0x04CD: []rune{0x04CE}, // Case map + 0x04D0: []rune{0x04D1}, // Case map + 0x04D2: []rune{0x04D3}, // Case map + 0x04D4: []rune{0x04D5}, // Case map + 0x04D6: []rune{0x04D7}, // Case map + 0x04D8: []rune{0x04D9}, // Case map + 0x04DA: []rune{0x04DB}, // Case map + 0x04DC: []rune{0x04DD}, // Case map + 0x04DE: []rune{0x04DF}, // Case map + 0x04E0: []rune{0x04E1}, // Case map + 0x04E2: []rune{0x04E3}, // Case map + 0x04E4: []rune{0x04E5}, // Case map + 0x04E6: []rune{0x04E7}, // Case map + 0x04E8: []rune{0x04E9}, // Case map + 0x04EA: []rune{0x04EB}, // Case map + 0x04EC: []rune{0x04ED}, // Case map + 0x04EE: []rune{0x04EF}, // Case map + 0x04F0: []rune{0x04F1}, // Case map + 0x04F2: []rune{0x04F3}, // Case map + 0x04F4: []rune{0x04F5}, // Case map + 0x04F8: []rune{0x04F9}, // Case map + 0x0500: []rune{0x0501}, // Case map + 0x0502: []rune{0x0503}, // Case map + 0x0504: []rune{0x0505}, // Case map + 0x0506: []rune{0x0507}, // Case map + 0x0508: []rune{0x0509}, // Case map + 0x050A: []rune{0x050B}, // Case map + 0x050C: []rune{0x050D}, // Case map + 0x050E: []rune{0x050F}, // Case map + 0x0531: []rune{0x0561}, // Case map + 0x0532: []rune{0x0562}, // Case map + 0x0533: []rune{0x0563}, // Case map + 0x0534: []rune{0x0564}, // Case map + 0x0535: []rune{0x0565}, // Case map + 0x0536: []rune{0x0566}, // Case map + 0x0537: []rune{0x0567}, // Case map + 0x0538: []rune{0x0568}, // Case map + 0x0539: []rune{0x0569}, // Case map + 0x053A: []rune{0x056A}, // Case map + 0x053B: []rune{0x056B}, // Case map + 0x053C: []rune{0x056C}, // Case map + 0x053D: []rune{0x056D}, // Case map + 0x053E: []rune{0x056E}, // Case map + 0x053F: []rune{0x056F}, // Case map + 0x0540: []rune{0x0570}, // Case map + 0x0541: []rune{0x0571}, // Case map + 0x0542: []rune{0x0572}, // Case map + 0x0543: []rune{0x0573}, // Case map + 0x0544: []rune{0x0574}, // Case map + 0x0545: []rune{0x0575}, // Case map + 0x0546: []rune{0x0576}, // Case map + 0x0547: []rune{0x0577}, // Case map + 0x0548: []rune{0x0578}, // Case map + 0x0549: []rune{0x0579}, // Case map + 0x054A: []rune{0x057A}, // Case map + 0x054B: []rune{0x057B}, // Case map + 0x054C: []rune{0x057C}, // Case map + 0x054D: []rune{0x057D}, // Case map + 0x054E: []rune{0x057E}, // Case map + 0x054F: []rune{0x057F}, // Case map + 0x0550: []rune{0x0580}, // Case map + 0x0551: []rune{0x0581}, // Case map + 0x0552: []rune{0x0582}, // Case map + 0x0553: []rune{0x0583}, // Case map + 0x0554: []rune{0x0584}, // Case map + 0x0555: []rune{0x0585}, // Case map + 0x0556: []rune{0x0586}, // Case map + 0x0587: []rune{0x0565, 0x0582}, // Case map + 0x1E00: []rune{0x1E01}, // Case map + 0x1E02: []rune{0x1E03}, // Case map + 0x1E04: []rune{0x1E05}, // Case map + 0x1E06: []rune{0x1E07}, // Case map + 0x1E08: []rune{0x1E09}, // Case map + 0x1E0A: []rune{0x1E0B}, // Case map + 0x1E0C: []rune{0x1E0D}, // Case map + 0x1E0E: []rune{0x1E0F}, // Case map + 0x1E10: []rune{0x1E11}, // Case map + 0x1E12: []rune{0x1E13}, // Case map + 0x1E14: []rune{0x1E15}, // Case map + 0x1E16: []rune{0x1E17}, // Case map + 0x1E18: []rune{0x1E19}, // Case map + 0x1E1A: []rune{0x1E1B}, // Case map + 0x1E1C: []rune{0x1E1D}, // Case map + 0x1E1E: []rune{0x1E1F}, // Case map + 0x1E20: 
[]rune{0x1E21}, // Case map + 0x1E22: []rune{0x1E23}, // Case map + 0x1E24: []rune{0x1E25}, // Case map + 0x1E26: []rune{0x1E27}, // Case map + 0x1E28: []rune{0x1E29}, // Case map + 0x1E2A: []rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, // Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, // Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: []rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: []rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // 
Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, // Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map + 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // 
Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map + 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: []rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x20A8: []rune{0x0072, 0x0073}, // Additional folding + 
0x2102: []rune{0x0063}, // Additional folding + 0x2103: []rune{0x00B0, 0x0063}, // Additional folding + 0x2107: []rune{0x025B}, // Additional folding + 0x2109: []rune{0x00B0, 0x0066}, // Additional folding + 0x210B: []rune{0x0068}, // Additional folding + 0x210C: []rune{0x0068}, // Additional folding + 0x210D: []rune{0x0068}, // Additional folding + 0x2110: []rune{0x0069}, // Additional folding + 0x2111: []rune{0x0069}, // Additional folding + 0x2112: []rune{0x006C}, // Additional folding + 0x2115: []rune{0x006E}, // Additional folding + 0x2116: []rune{0x006E, 0x006F}, // Additional folding + 0x2119: []rune{0x0070}, // Additional folding + 0x211A: []rune{0x0071}, // Additional folding + 0x211B: []rune{0x0072}, // Additional folding + 0x211C: []rune{0x0072}, // Additional folding + 0x211D: []rune{0x0072}, // Additional folding + 0x2120: []rune{0x0073, 0x006D}, // Additional folding + 0x2121: []rune{0x0074, 0x0065, 0x006C}, // Additional folding + 0x2122: []rune{0x0074, 0x006D}, // Additional folding + 0x2124: []rune{0x007A}, // Additional folding + 0x2126: []rune{0x03C9}, // Case map + 0x2128: []rune{0x007A}, // Additional folding + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x212C: []rune{0x0062}, // Additional folding + 0x212D: []rune{0x0063}, // Additional folding + 0x2130: []rune{0x0065}, // Additional folding + 0x2131: []rune{0x0066}, // Additional folding + 0x2133: []rune{0x006D}, // Additional folding + 0x213E: []rune{0x03B3}, // Additional folding + 0x213F: []rune{0x03C0}, // Additional folding + 0x2145: []rune{0x0064}, // Additional folding + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, // Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0x3371: []rune{0x0068, 0x0070, 0x0061}, // Additional folding + 0x3373: []rune{0x0061, 0x0075}, // Additional folding + 0x3375: []rune{0x006F, 0x0076}, // Additional folding + 0x3380: []rune{0x0070, 0x0061}, // Additional folding + 0x3381: []rune{0x006E, 0x0061}, // Additional folding + 0x3382: []rune{0x03BC, 0x0061}, // Additional folding + 
0x3383: []rune{0x006D, 0x0061}, // Additional folding + 0x3384: []rune{0x006B, 0x0061}, // Additional folding + 0x3385: []rune{0x006B, 0x0062}, // Additional folding + 0x3386: []rune{0x006D, 0x0062}, // Additional folding + 0x3387: []rune{0x0067, 0x0062}, // Additional folding + 0x338A: []rune{0x0070, 0x0066}, // Additional folding + 0x338B: []rune{0x006E, 0x0066}, // Additional folding + 0x338C: []rune{0x03BC, 0x0066}, // Additional folding + 0x3390: []rune{0x0068, 0x007A}, // Additional folding + 0x3391: []rune{0x006B, 0x0068, 0x007A}, // Additional folding + 0x3392: []rune{0x006D, 0x0068, 0x007A}, // Additional folding + 0x3393: []rune{0x0067, 0x0068, 0x007A}, // Additional folding + 0x3394: []rune{0x0074, 0x0068, 0x007A}, // Additional folding + 0x33A9: []rune{0x0070, 0x0061}, // Additional folding + 0x33AA: []rune{0x006B, 0x0070, 0x0061}, // Additional folding + 0x33AB: []rune{0x006D, 0x0070, 0x0061}, // Additional folding + 0x33AC: []rune{0x0067, 0x0070, 0x0061}, // Additional folding + 0x33B4: []rune{0x0070, 0x0076}, // Additional folding + 0x33B5: []rune{0x006E, 0x0076}, // Additional folding + 0x33B6: []rune{0x03BC, 0x0076}, // Additional folding + 0x33B7: []rune{0x006D, 0x0076}, // Additional folding + 0x33B8: []rune{0x006B, 0x0076}, // Additional folding + 0x33B9: []rune{0x006D, 0x0076}, // Additional folding + 0x33BA: []rune{0x0070, 0x0077}, // Additional folding + 0x33BB: []rune{0x006E, 0x0077}, // Additional folding + 0x33BC: []rune{0x03BC, 0x0077}, // Additional folding + 0x33BD: []rune{0x006D, 0x0077}, // Additional folding + 0x33BE: []rune{0x006B, 0x0077}, // Additional folding + 0x33BF: []rune{0x006D, 0x0077}, // Additional folding + 0x33C0: []rune{0x006B, 0x03C9}, // Additional folding + 0x33C1: []rune{0x006D, 0x03C9}, // Additional folding + 0x33C3: []rune{0x0062, 0x0071}, // Additional folding + 0x33C6: []rune{0x0063, 0x2215, 0x006B, 0x0067}, // Additional folding + 0x33C7: []rune{0x0063, 0x006F, 0x002E}, // Additional folding + 0x33C8: []rune{0x0064, 0x0062}, // Additional folding + 0x33C9: []rune{0x0067, 0x0079}, // Additional folding + 0x33CB: []rune{0x0068, 0x0070}, // Additional folding + 0x33CD: []rune{0x006B, 0x006B}, // Additional folding + 0x33CE: []rune{0x006B, 0x006D}, // Additional folding + 0x33D7: []rune{0x0070, 0x0068}, // Additional folding + 0x33D9: []rune{0x0070, 0x0070, 0x006D}, // Additional folding + 0x33DA: []rune{0x0070, 0x0072}, // Additional folding + 0x33DC: []rune{0x0073, 0x0076}, // Additional folding + 0x33DD: []rune{0x0077, 0x0062}, // Additional folding + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: []rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: 
[]rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: []rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: []rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map + 0x1D400: []rune{0x0061}, // Additional folding + 0x1D401: []rune{0x0062}, // Additional folding + 0x1D402: []rune{0x0063}, // Additional folding + 0x1D403: []rune{0x0064}, // Additional folding + 0x1D404: []rune{0x0065}, // Additional folding + 0x1D405: []rune{0x0066}, // Additional folding + 0x1D406: []rune{0x0067}, // Additional folding + 0x1D407: []rune{0x0068}, // Additional folding + 0x1D408: []rune{0x0069}, // Additional folding + 0x1D409: []rune{0x006A}, // Additional folding + 0x1D40A: []rune{0x006B}, // Additional folding + 0x1D40B: []rune{0x006C}, // Additional folding + 0x1D40C: []rune{0x006D}, // Additional folding + 0x1D40D: []rune{0x006E}, // Additional folding + 0x1D40E: []rune{0x006F}, // Additional folding + 0x1D40F: []rune{0x0070}, // Additional folding + 0x1D410: []rune{0x0071}, // Additional folding + 0x1D411: []rune{0x0072}, // Additional folding + 0x1D412: []rune{0x0073}, // Additional folding + 0x1D413: []rune{0x0074}, // Additional folding + 0x1D414: []rune{0x0075}, // Additional folding + 0x1D415: []rune{0x0076}, // Additional folding + 0x1D416: []rune{0x0077}, // Additional folding + 0x1D417: []rune{0x0078}, // Additional folding + 0x1D418: []rune{0x0079}, // Additional folding + 0x1D419: []rune{0x007A}, // Additional folding + 0x1D434: []rune{0x0061}, // Additional folding + 0x1D435: []rune{0x0062}, // Additional folding + 0x1D436: []rune{0x0063}, // Additional folding + 0x1D437: []rune{0x0064}, // Additional folding + 
0x1D438: []rune{0x0065}, // Additional folding + 0x1D439: []rune{0x0066}, // Additional folding + 0x1D43A: []rune{0x0067}, // Additional folding + 0x1D43B: []rune{0x0068}, // Additional folding + 0x1D43C: []rune{0x0069}, // Additional folding + 0x1D43D: []rune{0x006A}, // Additional folding + 0x1D43E: []rune{0x006B}, // Additional folding + 0x1D43F: []rune{0x006C}, // Additional folding + 0x1D440: []rune{0x006D}, // Additional folding + 0x1D441: []rune{0x006E}, // Additional folding + 0x1D442: []rune{0x006F}, // Additional folding + 0x1D443: []rune{0x0070}, // Additional folding + 0x1D444: []rune{0x0071}, // Additional folding + 0x1D445: []rune{0x0072}, // Additional folding + 0x1D446: []rune{0x0073}, // Additional folding + 0x1D447: []rune{0x0074}, // Additional folding + 0x1D448: []rune{0x0075}, // Additional folding + 0x1D449: []rune{0x0076}, // Additional folding + 0x1D44A: []rune{0x0077}, // Additional folding + 0x1D44B: []rune{0x0078}, // Additional folding + 0x1D44C: []rune{0x0079}, // Additional folding + 0x1D44D: []rune{0x007A}, // Additional folding + 0x1D468: []rune{0x0061}, // Additional folding + 0x1D469: []rune{0x0062}, // Additional folding + 0x1D46A: []rune{0x0063}, // Additional folding + 0x1D46B: []rune{0x0064}, // Additional folding + 0x1D46C: []rune{0x0065}, // Additional folding + 0x1D46D: []rune{0x0066}, // Additional folding + 0x1D46E: []rune{0x0067}, // Additional folding + 0x1D46F: []rune{0x0068}, // Additional folding + 0x1D470: []rune{0x0069}, // Additional folding + 0x1D471: []rune{0x006A}, // Additional folding + 0x1D472: []rune{0x006B}, // Additional folding + 0x1D473: []rune{0x006C}, // Additional folding + 0x1D474: []rune{0x006D}, // Additional folding + 0x1D475: []rune{0x006E}, // Additional folding + 0x1D476: []rune{0x006F}, // Additional folding + 0x1D477: []rune{0x0070}, // Additional folding + 0x1D478: []rune{0x0071}, // Additional folding + 0x1D479: []rune{0x0072}, // Additional folding + 0x1D47A: []rune{0x0073}, // Additional folding + 0x1D47B: []rune{0x0074}, // Additional folding + 0x1D47C: []rune{0x0075}, // Additional folding + 0x1D47D: []rune{0x0076}, // Additional folding + 0x1D47E: []rune{0x0077}, // Additional folding + 0x1D47F: []rune{0x0078}, // Additional folding + 0x1D480: []rune{0x0079}, // Additional folding + 0x1D481: []rune{0x007A}, // Additional folding + 0x1D49C: []rune{0x0061}, // Additional folding + 0x1D49E: []rune{0x0063}, // Additional folding + 0x1D49F: []rune{0x0064}, // Additional folding + 0x1D4A2: []rune{0x0067}, // Additional folding + 0x1D4A5: []rune{0x006A}, // Additional folding + 0x1D4A6: []rune{0x006B}, // Additional folding + 0x1D4A9: []rune{0x006E}, // Additional folding + 0x1D4AA: []rune{0x006F}, // Additional folding + 0x1D4AB: []rune{0x0070}, // Additional folding + 0x1D4AC: []rune{0x0071}, // Additional folding + 0x1D4AE: []rune{0x0073}, // Additional folding + 0x1D4AF: []rune{0x0074}, // Additional folding + 0x1D4B0: []rune{0x0075}, // Additional folding + 0x1D4B1: []rune{0x0076}, // Additional folding + 0x1D4B2: []rune{0x0077}, // Additional folding + 0x1D4B3: []rune{0x0078}, // Additional folding + 0x1D4B4: []rune{0x0079}, // Additional folding + 0x1D4B5: []rune{0x007A}, // Additional folding + 0x1D4D0: []rune{0x0061}, // Additional folding + 0x1D4D1: []rune{0x0062}, // Additional folding + 0x1D4D2: []rune{0x0063}, // Additional folding + 0x1D4D3: []rune{0x0064}, // Additional folding + 0x1D4D4: []rune{0x0065}, // Additional folding + 0x1D4D5: []rune{0x0066}, // Additional folding + 0x1D4D6: []rune{0x0067}, 
// Additional folding + 0x1D4D7: []rune{0x0068}, // Additional folding + 0x1D4D8: []rune{0x0069}, // Additional folding + 0x1D4D9: []rune{0x006A}, // Additional folding + 0x1D4DA: []rune{0x006B}, // Additional folding + 0x1D4DB: []rune{0x006C}, // Additional folding + 0x1D4DC: []rune{0x006D}, // Additional folding + 0x1D4DD: []rune{0x006E}, // Additional folding + 0x1D4DE: []rune{0x006F}, // Additional folding + 0x1D4DF: []rune{0x0070}, // Additional folding + 0x1D4E0: []rune{0x0071}, // Additional folding + 0x1D4E1: []rune{0x0072}, // Additional folding + 0x1D4E2: []rune{0x0073}, // Additional folding + 0x1D4E3: []rune{0x0074}, // Additional folding + 0x1D4E4: []rune{0x0075}, // Additional folding + 0x1D4E5: []rune{0x0076}, // Additional folding + 0x1D4E6: []rune{0x0077}, // Additional folding + 0x1D4E7: []rune{0x0078}, // Additional folding + 0x1D4E8: []rune{0x0079}, // Additional folding + 0x1D4E9: []rune{0x007A}, // Additional folding + 0x1D504: []rune{0x0061}, // Additional folding + 0x1D505: []rune{0x0062}, // Additional folding + 0x1D507: []rune{0x0064}, // Additional folding + 0x1D508: []rune{0x0065}, // Additional folding + 0x1D509: []rune{0x0066}, // Additional folding + 0x1D50A: []rune{0x0067}, // Additional folding + 0x1D50D: []rune{0x006A}, // Additional folding + 0x1D50E: []rune{0x006B}, // Additional folding + 0x1D50F: []rune{0x006C}, // Additional folding + 0x1D510: []rune{0x006D}, // Additional folding + 0x1D511: []rune{0x006E}, // Additional folding + 0x1D512: []rune{0x006F}, // Additional folding + 0x1D513: []rune{0x0070}, // Additional folding + 0x1D514: []rune{0x0071}, // Additional folding + 0x1D516: []rune{0x0073}, // Additional folding + 0x1D517: []rune{0x0074}, // Additional folding + 0x1D518: []rune{0x0075}, // Additional folding + 0x1D519: []rune{0x0076}, // Additional folding + 0x1D51A: []rune{0x0077}, // Additional folding + 0x1D51B: []rune{0x0078}, // Additional folding + 0x1D51C: []rune{0x0079}, // Additional folding + 0x1D538: []rune{0x0061}, // Additional folding + 0x1D539: []rune{0x0062}, // Additional folding + 0x1D53B: []rune{0x0064}, // Additional folding + 0x1D53C: []rune{0x0065}, // Additional folding + 0x1D53D: []rune{0x0066}, // Additional folding + 0x1D53E: []rune{0x0067}, // Additional folding + 0x1D540: []rune{0x0069}, // Additional folding + 0x1D541: []rune{0x006A}, // Additional folding + 0x1D542: []rune{0x006B}, // Additional folding + 0x1D543: []rune{0x006C}, // Additional folding + 0x1D544: []rune{0x006D}, // Additional folding + 0x1D546: []rune{0x006F}, // Additional folding + 0x1D54A: []rune{0x0073}, // Additional folding + 0x1D54B: []rune{0x0074}, // Additional folding + 0x1D54C: []rune{0x0075}, // Additional folding + 0x1D54D: []rune{0x0076}, // Additional folding + 0x1D54E: []rune{0x0077}, // Additional folding + 0x1D54F: []rune{0x0078}, // Additional folding + 0x1D550: []rune{0x0079}, // Additional folding + 0x1D56C: []rune{0x0061}, // Additional folding + 0x1D56D: []rune{0x0062}, // Additional folding + 0x1D56E: []rune{0x0063}, // Additional folding + 0x1D56F: []rune{0x0064}, // Additional folding + 0x1D570: []rune{0x0065}, // Additional folding + 0x1D571: []rune{0x0066}, // Additional folding + 0x1D572: []rune{0x0067}, // Additional folding + 0x1D573: []rune{0x0068}, // Additional folding + 0x1D574: []rune{0x0069}, // Additional folding + 0x1D575: []rune{0x006A}, // Additional folding + 0x1D576: []rune{0x006B}, // Additional folding + 0x1D577: []rune{0x006C}, // Additional folding + 0x1D578: []rune{0x006D}, // Additional folding + 
0x1D579: []rune{0x006E}, // Additional folding + 0x1D57A: []rune{0x006F}, // Additional folding + 0x1D57B: []rune{0x0070}, // Additional folding + 0x1D57C: []rune{0x0071}, // Additional folding + 0x1D57D: []rune{0x0072}, // Additional folding + 0x1D57E: []rune{0x0073}, // Additional folding + 0x1D57F: []rune{0x0074}, // Additional folding + 0x1D580: []rune{0x0075}, // Additional folding + 0x1D581: []rune{0x0076}, // Additional folding + 0x1D582: []rune{0x0077}, // Additional folding + 0x1D583: []rune{0x0078}, // Additional folding + 0x1D584: []rune{0x0079}, // Additional folding + 0x1D585: []rune{0x007A}, // Additional folding + 0x1D5A0: []rune{0x0061}, // Additional folding + 0x1D5A1: []rune{0x0062}, // Additional folding + 0x1D5A2: []rune{0x0063}, // Additional folding + 0x1D5A3: []rune{0x0064}, // Additional folding + 0x1D5A4: []rune{0x0065}, // Additional folding + 0x1D5A5: []rune{0x0066}, // Additional folding + 0x1D5A6: []rune{0x0067}, // Additional folding + 0x1D5A7: []rune{0x0068}, // Additional folding + 0x1D5A8: []rune{0x0069}, // Additional folding + 0x1D5A9: []rune{0x006A}, // Additional folding + 0x1D5AA: []rune{0x006B}, // Additional folding + 0x1D5AB: []rune{0x006C}, // Additional folding + 0x1D5AC: []rune{0x006D}, // Additional folding + 0x1D5AD: []rune{0x006E}, // Additional folding + 0x1D5AE: []rune{0x006F}, // Additional folding + 0x1D5AF: []rune{0x0070}, // Additional folding + 0x1D5B0: []rune{0x0071}, // Additional folding + 0x1D5B1: []rune{0x0072}, // Additional folding + 0x1D5B2: []rune{0x0073}, // Additional folding + 0x1D5B3: []rune{0x0074}, // Additional folding + 0x1D5B4: []rune{0x0075}, // Additional folding + 0x1D5B5: []rune{0x0076}, // Additional folding + 0x1D5B6: []rune{0x0077}, // Additional folding + 0x1D5B7: []rune{0x0078}, // Additional folding + 0x1D5B8: []rune{0x0079}, // Additional folding + 0x1D5B9: []rune{0x007A}, // Additional folding + 0x1D5D4: []rune{0x0061}, // Additional folding + 0x1D5D5: []rune{0x0062}, // Additional folding + 0x1D5D6: []rune{0x0063}, // Additional folding + 0x1D5D7: []rune{0x0064}, // Additional folding + 0x1D5D8: []rune{0x0065}, // Additional folding + 0x1D5D9: []rune{0x0066}, // Additional folding + 0x1D5DA: []rune{0x0067}, // Additional folding + 0x1D5DB: []rune{0x0068}, // Additional folding + 0x1D5DC: []rune{0x0069}, // Additional folding + 0x1D5DD: []rune{0x006A}, // Additional folding + 0x1D5DE: []rune{0x006B}, // Additional folding + 0x1D5DF: []rune{0x006C}, // Additional folding + 0x1D5E0: []rune{0x006D}, // Additional folding + 0x1D5E1: []rune{0x006E}, // Additional folding + 0x1D5E2: []rune{0x006F}, // Additional folding + 0x1D5E3: []rune{0x0070}, // Additional folding + 0x1D5E4: []rune{0x0071}, // Additional folding + 0x1D5E5: []rune{0x0072}, // Additional folding + 0x1D5E6: []rune{0x0073}, // Additional folding + 0x1D5E7: []rune{0x0074}, // Additional folding + 0x1D5E8: []rune{0x0075}, // Additional folding + 0x1D5E9: []rune{0x0076}, // Additional folding + 0x1D5EA: []rune{0x0077}, // Additional folding + 0x1D5EB: []rune{0x0078}, // Additional folding + 0x1D5EC: []rune{0x0079}, // Additional folding + 0x1D5ED: []rune{0x007A}, // Additional folding + 0x1D608: []rune{0x0061}, // Additional folding + 0x1D609: []rune{0x0062}, // Additional folding + 0x1D60A: []rune{0x0063}, // Additional folding + 0x1D60B: []rune{0x0064}, // Additional folding + 0x1D60C: []rune{0x0065}, // Additional folding + 0x1D60D: []rune{0x0066}, // Additional folding + 0x1D60E: []rune{0x0067}, // Additional folding + 0x1D60F: []rune{0x0068}, 
// Additional folding + 0x1D610: []rune{0x0069}, // Additional folding + 0x1D611: []rune{0x006A}, // Additional folding + 0x1D612: []rune{0x006B}, // Additional folding + 0x1D613: []rune{0x006C}, // Additional folding + 0x1D614: []rune{0x006D}, // Additional folding + 0x1D615: []rune{0x006E}, // Additional folding + 0x1D616: []rune{0x006F}, // Additional folding + 0x1D617: []rune{0x0070}, // Additional folding + 0x1D618: []rune{0x0071}, // Additional folding + 0x1D619: []rune{0x0072}, // Additional folding + 0x1D61A: []rune{0x0073}, // Additional folding + 0x1D61B: []rune{0x0074}, // Additional folding + 0x1D61C: []rune{0x0075}, // Additional folding + 0x1D61D: []rune{0x0076}, // Additional folding + 0x1D61E: []rune{0x0077}, // Additional folding + 0x1D61F: []rune{0x0078}, // Additional folding + 0x1D620: []rune{0x0079}, // Additional folding + 0x1D621: []rune{0x007A}, // Additional folding + 0x1D63C: []rune{0x0061}, // Additional folding + 0x1D63D: []rune{0x0062}, // Additional folding + 0x1D63E: []rune{0x0063}, // Additional folding + 0x1D63F: []rune{0x0064}, // Additional folding + 0x1D640: []rune{0x0065}, // Additional folding + 0x1D641: []rune{0x0066}, // Additional folding + 0x1D642: []rune{0x0067}, // Additional folding + 0x1D643: []rune{0x0068}, // Additional folding + 0x1D644: []rune{0x0069}, // Additional folding + 0x1D645: []rune{0x006A}, // Additional folding + 0x1D646: []rune{0x006B}, // Additional folding + 0x1D647: []rune{0x006C}, // Additional folding + 0x1D648: []rune{0x006D}, // Additional folding + 0x1D649: []rune{0x006E}, // Additional folding + 0x1D64A: []rune{0x006F}, // Additional folding + 0x1D64B: []rune{0x0070}, // Additional folding + 0x1D64C: []rune{0x0071}, // Additional folding + 0x1D64D: []rune{0x0072}, // Additional folding + 0x1D64E: []rune{0x0073}, // Additional folding + 0x1D64F: []rune{0x0074}, // Additional folding + 0x1D650: []rune{0x0075}, // Additional folding + 0x1D651: []rune{0x0076}, // Additional folding + 0x1D652: []rune{0x0077}, // Additional folding + 0x1D653: []rune{0x0078}, // Additional folding + 0x1D654: []rune{0x0079}, // Additional folding + 0x1D655: []rune{0x007A}, // Additional folding + 0x1D670: []rune{0x0061}, // Additional folding + 0x1D671: []rune{0x0062}, // Additional folding + 0x1D672: []rune{0x0063}, // Additional folding + 0x1D673: []rune{0x0064}, // Additional folding + 0x1D674: []rune{0x0065}, // Additional folding + 0x1D675: []rune{0x0066}, // Additional folding + 0x1D676: []rune{0x0067}, // Additional folding + 0x1D677: []rune{0x0068}, // Additional folding + 0x1D678: []rune{0x0069}, // Additional folding + 0x1D679: []rune{0x006A}, // Additional folding + 0x1D67A: []rune{0x006B}, // Additional folding + 0x1D67B: []rune{0x006C}, // Additional folding + 0x1D67C: []rune{0x006D}, // Additional folding + 0x1D67D: []rune{0x006E}, // Additional folding + 0x1D67E: []rune{0x006F}, // Additional folding + 0x1D67F: []rune{0x0070}, // Additional folding + 0x1D680: []rune{0x0071}, // Additional folding + 0x1D681: []rune{0x0072}, // Additional folding + 0x1D682: []rune{0x0073}, // Additional folding + 0x1D683: []rune{0x0074}, // Additional folding + 0x1D684: []rune{0x0075}, // Additional folding + 0x1D685: []rune{0x0076}, // Additional folding + 0x1D686: []rune{0x0077}, // Additional folding + 0x1D687: []rune{0x0078}, // Additional folding + 0x1D688: []rune{0x0079}, // Additional folding + 0x1D689: []rune{0x007A}, // Additional folding + 0x1D6A8: []rune{0x03B1}, // Additional folding + 0x1D6A9: []rune{0x03B2}, // Additional folding + 
0x1D6AA: []rune{0x03B3}, // Additional folding + 0x1D6AB: []rune{0x03B4}, // Additional folding + 0x1D6AC: []rune{0x03B5}, // Additional folding + 0x1D6AD: []rune{0x03B6}, // Additional folding + 0x1D6AE: []rune{0x03B7}, // Additional folding + 0x1D6AF: []rune{0x03B8}, // Additional folding + 0x1D6B0: []rune{0x03B9}, // Additional folding + 0x1D6B1: []rune{0x03BA}, // Additional folding + 0x1D6B2: []rune{0x03BB}, // Additional folding + 0x1D6B3: []rune{0x03BC}, // Additional folding + 0x1D6B4: []rune{0x03BD}, // Additional folding + 0x1D6B5: []rune{0x03BE}, // Additional folding + 0x1D6B6: []rune{0x03BF}, // Additional folding + 0x1D6B7: []rune{0x03C0}, // Additional folding + 0x1D6B8: []rune{0x03C1}, // Additional folding + 0x1D6B9: []rune{0x03B8}, // Additional folding + 0x1D6BA: []rune{0x03C3}, // Additional folding + 0x1D6BB: []rune{0x03C4}, // Additional folding + 0x1D6BC: []rune{0x03C5}, // Additional folding + 0x1D6BD: []rune{0x03C6}, // Additional folding + 0x1D6BE: []rune{0x03C7}, // Additional folding + 0x1D6BF: []rune{0x03C8}, // Additional folding + 0x1D6C0: []rune{0x03C9}, // Additional folding + 0x1D6D3: []rune{0x03C3}, // Additional folding + 0x1D6E2: []rune{0x03B1}, // Additional folding + 0x1D6E3: []rune{0x03B2}, // Additional folding + 0x1D6E4: []rune{0x03B3}, // Additional folding + 0x1D6E5: []rune{0x03B4}, // Additional folding + 0x1D6E6: []rune{0x03B5}, // Additional folding + 0x1D6E7: []rune{0x03B6}, // Additional folding + 0x1D6E8: []rune{0x03B7}, // Additional folding + 0x1D6E9: []rune{0x03B8}, // Additional folding + 0x1D6EA: []rune{0x03B9}, // Additional folding + 0x1D6EB: []rune{0x03BA}, // Additional folding + 0x1D6EC: []rune{0x03BB}, // Additional folding + 0x1D6ED: []rune{0x03BC}, // Additional folding + 0x1D6EE: []rune{0x03BD}, // Additional folding + 0x1D6EF: []rune{0x03BE}, // Additional folding + 0x1D6F0: []rune{0x03BF}, // Additional folding + 0x1D6F1: []rune{0x03C0}, // Additional folding + 0x1D6F2: []rune{0x03C1}, // Additional folding + 0x1D6F3: []rune{0x03B8}, // Additional folding + 0x1D6F4: []rune{0x03C3}, // Additional folding + 0x1D6F5: []rune{0x03C4}, // Additional folding + 0x1D6F6: []rune{0x03C5}, // Additional folding + 0x1D6F7: []rune{0x03C6}, // Additional folding + 0x1D6F8: []rune{0x03C7}, // Additional folding + 0x1D6F9: []rune{0x03C8}, // Additional folding + 0x1D6FA: []rune{0x03C9}, // Additional folding + 0x1D70D: []rune{0x03C3}, // Additional folding + 0x1D71C: []rune{0x03B1}, // Additional folding + 0x1D71D: []rune{0x03B2}, // Additional folding + 0x1D71E: []rune{0x03B3}, // Additional folding + 0x1D71F: []rune{0x03B4}, // Additional folding + 0x1D720: []rune{0x03B5}, // Additional folding + 0x1D721: []rune{0x03B6}, // Additional folding + 0x1D722: []rune{0x03B7}, // Additional folding + 0x1D723: []rune{0x03B8}, // Additional folding + 0x1D724: []rune{0x03B9}, // Additional folding + 0x1D725: []rune{0x03BA}, // Additional folding + 0x1D726: []rune{0x03BB}, // Additional folding + 0x1D727: []rune{0x03BC}, // Additional folding + 0x1D728: []rune{0x03BD}, // Additional folding + 0x1D729: []rune{0x03BE}, // Additional folding + 0x1D72A: []rune{0x03BF}, // Additional folding + 0x1D72B: []rune{0x03C0}, // Additional folding + 0x1D72C: []rune{0x03C1}, // Additional folding + 0x1D72D: []rune{0x03B8}, // Additional folding + 0x1D72E: []rune{0x03C3}, // Additional folding + 0x1D72F: []rune{0x03C4}, // Additional folding + 0x1D730: []rune{0x03C5}, // Additional folding + 0x1D731: []rune{0x03C6}, // Additional folding + 0x1D732: []rune{0x03C7}, 
// Additional folding + 0x1D733: []rune{0x03C8}, // Additional folding + 0x1D734: []rune{0x03C9}, // Additional folding + 0x1D747: []rune{0x03C3}, // Additional folding + 0x1D756: []rune{0x03B1}, // Additional folding + 0x1D757: []rune{0x03B2}, // Additional folding + 0x1D758: []rune{0x03B3}, // Additional folding + 0x1D759: []rune{0x03B4}, // Additional folding + 0x1D75A: []rune{0x03B5}, // Additional folding + 0x1D75B: []rune{0x03B6}, // Additional folding + 0x1D75C: []rune{0x03B7}, // Additional folding + 0x1D75D: []rune{0x03B8}, // Additional folding + 0x1D75E: []rune{0x03B9}, // Additional folding + 0x1D75F: []rune{0x03BA}, // Additional folding + 0x1D760: []rune{0x03BB}, // Additional folding + 0x1D761: []rune{0x03BC}, // Additional folding + 0x1D762: []rune{0x03BD}, // Additional folding + 0x1D763: []rune{0x03BE}, // Additional folding + 0x1D764: []rune{0x03BF}, // Additional folding + 0x1D765: []rune{0x03C0}, // Additional folding + 0x1D766: []rune{0x03C1}, // Additional folding + 0x1D767: []rune{0x03B8}, // Additional folding + 0x1D768: []rune{0x03C3}, // Additional folding + 0x1D769: []rune{0x03C4}, // Additional folding + 0x1D76A: []rune{0x03C5}, // Additional folding + 0x1D76B: []rune{0x03C6}, // Additional folding + 0x1D76C: []rune{0x03C7}, // Additional folding + 0x1D76D: []rune{0x03C8}, // Additional folding + 0x1D76E: []rune{0x03C9}, // Additional folding + 0x1D781: []rune{0x03C3}, // Additional folding + 0x1D790: []rune{0x03B1}, // Additional folding + 0x1D791: []rune{0x03B2}, // Additional folding + 0x1D792: []rune{0x03B3}, // Additional folding + 0x1D793: []rune{0x03B4}, // Additional folding + 0x1D794: []rune{0x03B5}, // Additional folding + 0x1D795: []rune{0x03B6}, // Additional folding + 0x1D796: []rune{0x03B7}, // Additional folding + 0x1D797: []rune{0x03B8}, // Additional folding + 0x1D798: []rune{0x03B9}, // Additional folding + 0x1D799: []rune{0x03BA}, // Additional folding + 0x1D79A: []rune{0x03BB}, // Additional folding + 0x1D79B: []rune{0x03BC}, // Additional folding + 0x1D79C: []rune{0x03BD}, // Additional folding + 0x1D79D: []rune{0x03BE}, // Additional folding + 0x1D79E: []rune{0x03BF}, // Additional folding + 0x1D79F: []rune{0x03C0}, // Additional folding + 0x1D7A0: []rune{0x03C1}, // Additional folding + 0x1D7A1: []rune{0x03B8}, // Additional folding + 0x1D7A2: []rune{0x03C3}, // Additional folding + 0x1D7A3: []rune{0x03C4}, // Additional folding + 0x1D7A4: []rune{0x03C5}, // Additional folding + 0x1D7A5: []rune{0x03C6}, // Additional folding + 0x1D7A6: []rune{0x03C7}, // Additional folding + 0x1D7A7: []rune{0x03C8}, // Additional folding + 0x1D7A8: []rune{0x03C9}, // Additional folding + 0x1D7BB: []rune{0x03C3}, // Additional folding +} + +// TableB2 represents RFC-3454 Table B.2. 
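The entries above complete the tableB2 literal: RFC 3454 Table B.2 is the case-folding mapping used with NFKC, taking each source rune to a replacement sequence that may be longer than one rune (the squared-unit signs and ligatures above all expand). A minimal sketch of how such a table is applied, assuming Mapping is a map[rune][]rune as the literals suggest; foldRunes is a hypothetical helper, not this package's API:

package main

import "fmt"

// Mapping mirrors the assumed shape of the tables above: one source
// rune maps to its replacement rune sequence.
type Mapping map[rune][]rune

// foldRunes applies a mapping table to a string: runes with an entry
// are replaced by their expansion, all others pass through unchanged.
// Hypothetical helper, for illustration only.
func foldRunes(s string, table Mapping) string {
	out := make([]rune, 0, len(s))
	for _, r := range s {
		if repl, ok := table[r]; ok {
			out = append(out, repl...)
			continue
		}
		out = append(out, r)
	}
	return string(out)
}

func main() {
	// Tiny excerpt of Table B.2 above: the fi ligature U+FB01 folds to "fi".
	demo := Mapping{0xFB01: {0x0066, 0x0069}}
	fmt.Println(foldRunes("\uFB01le", demo)) // prints "file"
}

Because replacements are appended in order, multi-rune expansions such as 0xFB01 → "fi" need no special casing.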
+var TableB2 Mapping = tableB2 + +var tableB3 = Mapping{ + 0x0041: []rune{0x0061}, // Case map + 0x0042: []rune{0x0062}, // Case map + 0x0043: []rune{0x0063}, // Case map + 0x0044: []rune{0x0064}, // Case map + 0x0045: []rune{0x0065}, // Case map + 0x0046: []rune{0x0066}, // Case map + 0x0047: []rune{0x0067}, // Case map + 0x0048: []rune{0x0068}, // Case map + 0x0049: []rune{0x0069}, // Case map + 0x004A: []rune{0x006A}, // Case map + 0x004B: []rune{0x006B}, // Case map + 0x004C: []rune{0x006C}, // Case map + 0x004D: []rune{0x006D}, // Case map + 0x004E: []rune{0x006E}, // Case map + 0x004F: []rune{0x006F}, // Case map + 0x0050: []rune{0x0070}, // Case map + 0x0051: []rune{0x0071}, // Case map + 0x0052: []rune{0x0072}, // Case map + 0x0053: []rune{0x0073}, // Case map + 0x0054: []rune{0x0074}, // Case map + 0x0055: []rune{0x0075}, // Case map + 0x0056: []rune{0x0076}, // Case map + 0x0057: []rune{0x0077}, // Case map + 0x0058: []rune{0x0078}, // Case map + 0x0059: []rune{0x0079}, // Case map + 0x005A: []rune{0x007A}, // Case map + 0x00B5: []rune{0x03BC}, // Case map + 0x00C0: []rune{0x00E0}, // Case map + 0x00C1: []rune{0x00E1}, // Case map + 0x00C2: []rune{0x00E2}, // Case map + 0x00C3: []rune{0x00E3}, // Case map + 0x00C4: []rune{0x00E4}, // Case map + 0x00C5: []rune{0x00E5}, // Case map + 0x00C6: []rune{0x00E6}, // Case map + 0x00C7: []rune{0x00E7}, // Case map + 0x00C8: []rune{0x00E8}, // Case map + 0x00C9: []rune{0x00E9}, // Case map + 0x00CA: []rune{0x00EA}, // Case map + 0x00CB: []rune{0x00EB}, // Case map + 0x00CC: []rune{0x00EC}, // Case map + 0x00CD: []rune{0x00ED}, // Case map + 0x00CE: []rune{0x00EE}, // Case map + 0x00CF: []rune{0x00EF}, // Case map + 0x00D0: []rune{0x00F0}, // Case map + 0x00D1: []rune{0x00F1}, // Case map + 0x00D2: []rune{0x00F2}, // Case map + 0x00D3: []rune{0x00F3}, // Case map + 0x00D4: []rune{0x00F4}, // Case map + 0x00D5: []rune{0x00F5}, // Case map + 0x00D6: []rune{0x00F6}, // Case map + 0x00D8: []rune{0x00F8}, // Case map + 0x00D9: []rune{0x00F9}, // Case map + 0x00DA: []rune{0x00FA}, // Case map + 0x00DB: []rune{0x00FB}, // Case map + 0x00DC: []rune{0x00FC}, // Case map + 0x00DD: []rune{0x00FD}, // Case map + 0x00DE: []rune{0x00FE}, // Case map + 0x00DF: []rune{0x0073, 0x0073}, // Case map + 0x0100: []rune{0x0101}, // Case map + 0x0102: []rune{0x0103}, // Case map + 0x0104: []rune{0x0105}, // Case map + 0x0106: []rune{0x0107}, // Case map + 0x0108: []rune{0x0109}, // Case map + 0x010A: []rune{0x010B}, // Case map + 0x010C: []rune{0x010D}, // Case map + 0x010E: []rune{0x010F}, // Case map + 0x0110: []rune{0x0111}, // Case map + 0x0112: []rune{0x0113}, // Case map + 0x0114: []rune{0x0115}, // Case map + 0x0116: []rune{0x0117}, // Case map + 0x0118: []rune{0x0119}, // Case map + 0x011A: []rune{0x011B}, // Case map + 0x011C: []rune{0x011D}, // Case map + 0x011E: []rune{0x011F}, // Case map + 0x0120: []rune{0x0121}, // Case map + 0x0122: []rune{0x0123}, // Case map + 0x0124: []rune{0x0125}, // Case map + 0x0126: []rune{0x0127}, // Case map + 0x0128: []rune{0x0129}, // Case map + 0x012A: []rune{0x012B}, // Case map + 0x012C: []rune{0x012D}, // Case map + 0x012E: []rune{0x012F}, // Case map + 0x0130: []rune{0x0069, 0x0307}, // Case map + 0x0132: []rune{0x0133}, // Case map + 0x0134: []rune{0x0135}, // Case map + 0x0136: []rune{0x0137}, // Case map + 0x0139: []rune{0x013A}, // Case map + 0x013B: []rune{0x013C}, // Case map + 0x013D: []rune{0x013E}, // Case map + 0x013F: []rune{0x0140}, // Case map + 0x0141: []rune{0x0142}, // Case map + 0x0143: 
[]rune{0x0144}, // Case map + 0x0145: []rune{0x0146}, // Case map + 0x0147: []rune{0x0148}, // Case map + 0x0149: []rune{0x02BC, 0x006E}, // Case map + 0x014A: []rune{0x014B}, // Case map + 0x014C: []rune{0x014D}, // Case map + 0x014E: []rune{0x014F}, // Case map + 0x0150: []rune{0x0151}, // Case map + 0x0152: []rune{0x0153}, // Case map + 0x0154: []rune{0x0155}, // Case map + 0x0156: []rune{0x0157}, // Case map + 0x0158: []rune{0x0159}, // Case map + 0x015A: []rune{0x015B}, // Case map + 0x015C: []rune{0x015D}, // Case map + 0x015E: []rune{0x015F}, // Case map + 0x0160: []rune{0x0161}, // Case map + 0x0162: []rune{0x0163}, // Case map + 0x0164: []rune{0x0165}, // Case map + 0x0166: []rune{0x0167}, // Case map + 0x0168: []rune{0x0169}, // Case map + 0x016A: []rune{0x016B}, // Case map + 0x016C: []rune{0x016D}, // Case map + 0x016E: []rune{0x016F}, // Case map + 0x0170: []rune{0x0171}, // Case map + 0x0172: []rune{0x0173}, // Case map + 0x0174: []rune{0x0175}, // Case map + 0x0176: []rune{0x0177}, // Case map + 0x0178: []rune{0x00FF}, // Case map + 0x0179: []rune{0x017A}, // Case map + 0x017B: []rune{0x017C}, // Case map + 0x017D: []rune{0x017E}, // Case map + 0x017F: []rune{0x0073}, // Case map + 0x0181: []rune{0x0253}, // Case map + 0x0182: []rune{0x0183}, // Case map + 0x0184: []rune{0x0185}, // Case map + 0x0186: []rune{0x0254}, // Case map + 0x0187: []rune{0x0188}, // Case map + 0x0189: []rune{0x0256}, // Case map + 0x018A: []rune{0x0257}, // Case map + 0x018B: []rune{0x018C}, // Case map + 0x018E: []rune{0x01DD}, // Case map + 0x018F: []rune{0x0259}, // Case map + 0x0190: []rune{0x025B}, // Case map + 0x0191: []rune{0x0192}, // Case map + 0x0193: []rune{0x0260}, // Case map + 0x0194: []rune{0x0263}, // Case map + 0x0196: []rune{0x0269}, // Case map + 0x0197: []rune{0x0268}, // Case map + 0x0198: []rune{0x0199}, // Case map + 0x019C: []rune{0x026F}, // Case map + 0x019D: []rune{0x0272}, // Case map + 0x019F: []rune{0x0275}, // Case map + 0x01A0: []rune{0x01A1}, // Case map + 0x01A2: []rune{0x01A3}, // Case map + 0x01A4: []rune{0x01A5}, // Case map + 0x01A6: []rune{0x0280}, // Case map + 0x01A7: []rune{0x01A8}, // Case map + 0x01A9: []rune{0x0283}, // Case map + 0x01AC: []rune{0x01AD}, // Case map + 0x01AE: []rune{0x0288}, // Case map + 0x01AF: []rune{0x01B0}, // Case map + 0x01B1: []rune{0x028A}, // Case map + 0x01B2: []rune{0x028B}, // Case map + 0x01B3: []rune{0x01B4}, // Case map + 0x01B5: []rune{0x01B6}, // Case map + 0x01B7: []rune{0x0292}, // Case map + 0x01B8: []rune{0x01B9}, // Case map + 0x01BC: []rune{0x01BD}, // Case map + 0x01C4: []rune{0x01C6}, // Case map + 0x01C5: []rune{0x01C6}, // Case map + 0x01C7: []rune{0x01C9}, // Case map + 0x01C8: []rune{0x01C9}, // Case map + 0x01CA: []rune{0x01CC}, // Case map + 0x01CB: []rune{0x01CC}, // Case map + 0x01CD: []rune{0x01CE}, // Case map + 0x01CF: []rune{0x01D0}, // Case map + 0x01D1: []rune{0x01D2}, // Case map + 0x01D3: []rune{0x01D4}, // Case map + 0x01D5: []rune{0x01D6}, // Case map + 0x01D7: []rune{0x01D8}, // Case map + 0x01D9: []rune{0x01DA}, // Case map + 0x01DB: []rune{0x01DC}, // Case map + 0x01DE: []rune{0x01DF}, // Case map + 0x01E0: []rune{0x01E1}, // Case map + 0x01E2: []rune{0x01E3}, // Case map + 0x01E4: []rune{0x01E5}, // Case map + 0x01E6: []rune{0x01E7}, // Case map + 0x01E8: []rune{0x01E9}, // Case map + 0x01EA: []rune{0x01EB}, // Case map + 0x01EC: []rune{0x01ED}, // Case map + 0x01EE: []rune{0x01EF}, // Case map + 0x01F0: []rune{0x006A, 0x030C}, // Case map + 0x01F1: []rune{0x01F3}, // Case map + 0x01F2: 
[]rune{0x01F3}, // Case map + 0x01F4: []rune{0x01F5}, // Case map + 0x01F6: []rune{0x0195}, // Case map + 0x01F7: []rune{0x01BF}, // Case map + 0x01F8: []rune{0x01F9}, // Case map + 0x01FA: []rune{0x01FB}, // Case map + 0x01FC: []rune{0x01FD}, // Case map + 0x01FE: []rune{0x01FF}, // Case map + 0x0200: []rune{0x0201}, // Case map + 0x0202: []rune{0x0203}, // Case map + 0x0204: []rune{0x0205}, // Case map + 0x0206: []rune{0x0207}, // Case map + 0x0208: []rune{0x0209}, // Case map + 0x020A: []rune{0x020B}, // Case map + 0x020C: []rune{0x020D}, // Case map + 0x020E: []rune{0x020F}, // Case map + 0x0210: []rune{0x0211}, // Case map + 0x0212: []rune{0x0213}, // Case map + 0x0214: []rune{0x0215}, // Case map + 0x0216: []rune{0x0217}, // Case map + 0x0218: []rune{0x0219}, // Case map + 0x021A: []rune{0x021B}, // Case map + 0x021C: []rune{0x021D}, // Case map + 0x021E: []rune{0x021F}, // Case map + 0x0220: []rune{0x019E}, // Case map + 0x0222: []rune{0x0223}, // Case map + 0x0224: []rune{0x0225}, // Case map + 0x0226: []rune{0x0227}, // Case map + 0x0228: []rune{0x0229}, // Case map + 0x022A: []rune{0x022B}, // Case map + 0x022C: []rune{0x022D}, // Case map + 0x022E: []rune{0x022F}, // Case map + 0x0230: []rune{0x0231}, // Case map + 0x0232: []rune{0x0233}, // Case map + 0x0345: []rune{0x03B9}, // Case map + 0x0386: []rune{0x03AC}, // Case map + 0x0388: []rune{0x03AD}, // Case map + 0x0389: []rune{0x03AE}, // Case map + 0x038A: []rune{0x03AF}, // Case map + 0x038C: []rune{0x03CC}, // Case map + 0x038E: []rune{0x03CD}, // Case map + 0x038F: []rune{0x03CE}, // Case map + 0x0390: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x0391: []rune{0x03B1}, // Case map + 0x0392: []rune{0x03B2}, // Case map + 0x0393: []rune{0x03B3}, // Case map + 0x0394: []rune{0x03B4}, // Case map + 0x0395: []rune{0x03B5}, // Case map + 0x0396: []rune{0x03B6}, // Case map + 0x0397: []rune{0x03B7}, // Case map + 0x0398: []rune{0x03B8}, // Case map + 0x0399: []rune{0x03B9}, // Case map + 0x039A: []rune{0x03BA}, // Case map + 0x039B: []rune{0x03BB}, // Case map + 0x039C: []rune{0x03BC}, // Case map + 0x039D: []rune{0x03BD}, // Case map + 0x039E: []rune{0x03BE}, // Case map + 0x039F: []rune{0x03BF}, // Case map + 0x03A0: []rune{0x03C0}, // Case map + 0x03A1: []rune{0x03C1}, // Case map + 0x03A3: []rune{0x03C3}, // Case map + 0x03A4: []rune{0x03C4}, // Case map + 0x03A5: []rune{0x03C5}, // Case map + 0x03A6: []rune{0x03C6}, // Case map + 0x03A7: []rune{0x03C7}, // Case map + 0x03A8: []rune{0x03C8}, // Case map + 0x03A9: []rune{0x03C9}, // Case map + 0x03AA: []rune{0x03CA}, // Case map + 0x03AB: []rune{0x03CB}, // Case map + 0x03B0: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x03C2: []rune{0x03C3}, // Case map + 0x03D0: []rune{0x03B2}, // Case map + 0x03D1: []rune{0x03B8}, // Case map + 0x03D5: []rune{0x03C6}, // Case map + 0x03D6: []rune{0x03C0}, // Case map + 0x03D8: []rune{0x03D9}, // Case map + 0x03DA: []rune{0x03DB}, // Case map + 0x03DC: []rune{0x03DD}, // Case map + 0x03DE: []rune{0x03DF}, // Case map + 0x03E0: []rune{0x03E1}, // Case map + 0x03E2: []rune{0x03E3}, // Case map + 0x03E4: []rune{0x03E5}, // Case map + 0x03E6: []rune{0x03E7}, // Case map + 0x03E8: []rune{0x03E9}, // Case map + 0x03EA: []rune{0x03EB}, // Case map + 0x03EC: []rune{0x03ED}, // Case map + 0x03EE: []rune{0x03EF}, // Case map + 0x03F0: []rune{0x03BA}, // Case map + 0x03F1: []rune{0x03C1}, // Case map + 0x03F2: []rune{0x03C3}, // Case map + 0x03F4: []rune{0x03B8}, // Case map + 0x03F5: []rune{0x03B5}, // Case map + 0x0400: []rune{0x0450}, // Case 
map + 0x0401: []rune{0x0451}, // Case map + 0x0402: []rune{0x0452}, // Case map + 0x0403: []rune{0x0453}, // Case map + 0x0404: []rune{0x0454}, // Case map + 0x0405: []rune{0x0455}, // Case map + 0x0406: []rune{0x0456}, // Case map + 0x0407: []rune{0x0457}, // Case map + 0x0408: []rune{0x0458}, // Case map + 0x0409: []rune{0x0459}, // Case map + 0x040A: []rune{0x045A}, // Case map + 0x040B: []rune{0x045B}, // Case map + 0x040C: []rune{0x045C}, // Case map + 0x040D: []rune{0x045D}, // Case map + 0x040E: []rune{0x045E}, // Case map + 0x040F: []rune{0x045F}, // Case map + 0x0410: []rune{0x0430}, // Case map + 0x0411: []rune{0x0431}, // Case map + 0x0412: []rune{0x0432}, // Case map + 0x0413: []rune{0x0433}, // Case map + 0x0414: []rune{0x0434}, // Case map + 0x0415: []rune{0x0435}, // Case map + 0x0416: []rune{0x0436}, // Case map + 0x0417: []rune{0x0437}, // Case map + 0x0418: []rune{0x0438}, // Case map + 0x0419: []rune{0x0439}, // Case map + 0x041A: []rune{0x043A}, // Case map + 0x041B: []rune{0x043B}, // Case map + 0x041C: []rune{0x043C}, // Case map + 0x041D: []rune{0x043D}, // Case map + 0x041E: []rune{0x043E}, // Case map + 0x041F: []rune{0x043F}, // Case map + 0x0420: []rune{0x0440}, // Case map + 0x0421: []rune{0x0441}, // Case map + 0x0422: []rune{0x0442}, // Case map + 0x0423: []rune{0x0443}, // Case map + 0x0424: []rune{0x0444}, // Case map + 0x0425: []rune{0x0445}, // Case map + 0x0426: []rune{0x0446}, // Case map + 0x0427: []rune{0x0447}, // Case map + 0x0428: []rune{0x0448}, // Case map + 0x0429: []rune{0x0449}, // Case map + 0x042A: []rune{0x044A}, // Case map + 0x042B: []rune{0x044B}, // Case map + 0x042C: []rune{0x044C}, // Case map + 0x042D: []rune{0x044D}, // Case map + 0x042E: []rune{0x044E}, // Case map + 0x042F: []rune{0x044F}, // Case map + 0x0460: []rune{0x0461}, // Case map + 0x0462: []rune{0x0463}, // Case map + 0x0464: []rune{0x0465}, // Case map + 0x0466: []rune{0x0467}, // Case map + 0x0468: []rune{0x0469}, // Case map + 0x046A: []rune{0x046B}, // Case map + 0x046C: []rune{0x046D}, // Case map + 0x046E: []rune{0x046F}, // Case map + 0x0470: []rune{0x0471}, // Case map + 0x0472: []rune{0x0473}, // Case map + 0x0474: []rune{0x0475}, // Case map + 0x0476: []rune{0x0477}, // Case map + 0x0478: []rune{0x0479}, // Case map + 0x047A: []rune{0x047B}, // Case map + 0x047C: []rune{0x047D}, // Case map + 0x047E: []rune{0x047F}, // Case map + 0x0480: []rune{0x0481}, // Case map + 0x048A: []rune{0x048B}, // Case map + 0x048C: []rune{0x048D}, // Case map + 0x048E: []rune{0x048F}, // Case map + 0x0490: []rune{0x0491}, // Case map + 0x0492: []rune{0x0493}, // Case map + 0x0494: []rune{0x0495}, // Case map + 0x0496: []rune{0x0497}, // Case map + 0x0498: []rune{0x0499}, // Case map + 0x049A: []rune{0x049B}, // Case map + 0x049C: []rune{0x049D}, // Case map + 0x049E: []rune{0x049F}, // Case map + 0x04A0: []rune{0x04A1}, // Case map + 0x04A2: []rune{0x04A3}, // Case map + 0x04A4: []rune{0x04A5}, // Case map + 0x04A6: []rune{0x04A7}, // Case map + 0x04A8: []rune{0x04A9}, // Case map + 0x04AA: []rune{0x04AB}, // Case map + 0x04AC: []rune{0x04AD}, // Case map + 0x04AE: []rune{0x04AF}, // Case map + 0x04B0: []rune{0x04B1}, // Case map + 0x04B2: []rune{0x04B3}, // Case map + 0x04B4: []rune{0x04B5}, // Case map + 0x04B6: []rune{0x04B7}, // Case map + 0x04B8: []rune{0x04B9}, // Case map + 0x04BA: []rune{0x04BB}, // Case map + 0x04BC: []rune{0x04BD}, // Case map + 0x04BE: []rune{0x04BF}, // Case map + 0x04C1: []rune{0x04C2}, // Case map + 0x04C3: []rune{0x04C4}, // Case map + 0x04C5: 
[]rune{0x04C6}, // Case map + 0x04C7: []rune{0x04C8}, // Case map + 0x04C9: []rune{0x04CA}, // Case map + 0x04CB: []rune{0x04CC}, // Case map + 0x04CD: []rune{0x04CE}, // Case map + 0x04D0: []rune{0x04D1}, // Case map + 0x04D2: []rune{0x04D3}, // Case map + 0x04D4: []rune{0x04D5}, // Case map + 0x04D6: []rune{0x04D7}, // Case map + 0x04D8: []rune{0x04D9}, // Case map + 0x04DA: []rune{0x04DB}, // Case map + 0x04DC: []rune{0x04DD}, // Case map + 0x04DE: []rune{0x04DF}, // Case map + 0x04E0: []rune{0x04E1}, // Case map + 0x04E2: []rune{0x04E3}, // Case map + 0x04E4: []rune{0x04E5}, // Case map + 0x04E6: []rune{0x04E7}, // Case map + 0x04E8: []rune{0x04E9}, // Case map + 0x04EA: []rune{0x04EB}, // Case map + 0x04EC: []rune{0x04ED}, // Case map + 0x04EE: []rune{0x04EF}, // Case map + 0x04F0: []rune{0x04F1}, // Case map + 0x04F2: []rune{0x04F3}, // Case map + 0x04F4: []rune{0x04F5}, // Case map + 0x04F8: []rune{0x04F9}, // Case map + 0x0500: []rune{0x0501}, // Case map + 0x0502: []rune{0x0503}, // Case map + 0x0504: []rune{0x0505}, // Case map + 0x0506: []rune{0x0507}, // Case map + 0x0508: []rune{0x0509}, // Case map + 0x050A: []rune{0x050B}, // Case map + 0x050C: []rune{0x050D}, // Case map + 0x050E: []rune{0x050F}, // Case map + 0x0531: []rune{0x0561}, // Case map + 0x0532: []rune{0x0562}, // Case map + 0x0533: []rune{0x0563}, // Case map + 0x0534: []rune{0x0564}, // Case map + 0x0535: []rune{0x0565}, // Case map + 0x0536: []rune{0x0566}, // Case map + 0x0537: []rune{0x0567}, // Case map + 0x0538: []rune{0x0568}, // Case map + 0x0539: []rune{0x0569}, // Case map + 0x053A: []rune{0x056A}, // Case map + 0x053B: []rune{0x056B}, // Case map + 0x053C: []rune{0x056C}, // Case map + 0x053D: []rune{0x056D}, // Case map + 0x053E: []rune{0x056E}, // Case map + 0x053F: []rune{0x056F}, // Case map + 0x0540: []rune{0x0570}, // Case map + 0x0541: []rune{0x0571}, // Case map + 0x0542: []rune{0x0572}, // Case map + 0x0543: []rune{0x0573}, // Case map + 0x0544: []rune{0x0574}, // Case map + 0x0545: []rune{0x0575}, // Case map + 0x0546: []rune{0x0576}, // Case map + 0x0547: []rune{0x0577}, // Case map + 0x0548: []rune{0x0578}, // Case map + 0x0549: []rune{0x0579}, // Case map + 0x054A: []rune{0x057A}, // Case map + 0x054B: []rune{0x057B}, // Case map + 0x054C: []rune{0x057C}, // Case map + 0x054D: []rune{0x057D}, // Case map + 0x054E: []rune{0x057E}, // Case map + 0x054F: []rune{0x057F}, // Case map + 0x0550: []rune{0x0580}, // Case map + 0x0551: []rune{0x0581}, // Case map + 0x0552: []rune{0x0582}, // Case map + 0x0553: []rune{0x0583}, // Case map + 0x0554: []rune{0x0584}, // Case map + 0x0555: []rune{0x0585}, // Case map + 0x0556: []rune{0x0586}, // Case map + 0x0587: []rune{0x0565, 0x0582}, // Case map + 0x1E00: []rune{0x1E01}, // Case map + 0x1E02: []rune{0x1E03}, // Case map + 0x1E04: []rune{0x1E05}, // Case map + 0x1E06: []rune{0x1E07}, // Case map + 0x1E08: []rune{0x1E09}, // Case map + 0x1E0A: []rune{0x1E0B}, // Case map + 0x1E0C: []rune{0x1E0D}, // Case map + 0x1E0E: []rune{0x1E0F}, // Case map + 0x1E10: []rune{0x1E11}, // Case map + 0x1E12: []rune{0x1E13}, // Case map + 0x1E14: []rune{0x1E15}, // Case map + 0x1E16: []rune{0x1E17}, // Case map + 0x1E18: []rune{0x1E19}, // Case map + 0x1E1A: []rune{0x1E1B}, // Case map + 0x1E1C: []rune{0x1E1D}, // Case map + 0x1E1E: []rune{0x1E1F}, // Case map + 0x1E20: []rune{0x1E21}, // Case map + 0x1E22: []rune{0x1E23}, // Case map + 0x1E24: []rune{0x1E25}, // Case map + 0x1E26: []rune{0x1E27}, // Case map + 0x1E28: []rune{0x1E29}, // Case map + 0x1E2A: 
[]rune{0x1E2B}, // Case map + 0x1E2C: []rune{0x1E2D}, // Case map + 0x1E2E: []rune{0x1E2F}, // Case map + 0x1E30: []rune{0x1E31}, // Case map + 0x1E32: []rune{0x1E33}, // Case map + 0x1E34: []rune{0x1E35}, // Case map + 0x1E36: []rune{0x1E37}, // Case map + 0x1E38: []rune{0x1E39}, // Case map + 0x1E3A: []rune{0x1E3B}, // Case map + 0x1E3C: []rune{0x1E3D}, // Case map + 0x1E3E: []rune{0x1E3F}, // Case map + 0x1E40: []rune{0x1E41}, // Case map + 0x1E42: []rune{0x1E43}, // Case map + 0x1E44: []rune{0x1E45}, // Case map + 0x1E46: []rune{0x1E47}, // Case map + 0x1E48: []rune{0x1E49}, // Case map + 0x1E4A: []rune{0x1E4B}, // Case map + 0x1E4C: []rune{0x1E4D}, // Case map + 0x1E4E: []rune{0x1E4F}, // Case map + 0x1E50: []rune{0x1E51}, // Case map + 0x1E52: []rune{0x1E53}, // Case map + 0x1E54: []rune{0x1E55}, // Case map + 0x1E56: []rune{0x1E57}, // Case map + 0x1E58: []rune{0x1E59}, // Case map + 0x1E5A: []rune{0x1E5B}, // Case map + 0x1E5C: []rune{0x1E5D}, // Case map + 0x1E5E: []rune{0x1E5F}, // Case map + 0x1E60: []rune{0x1E61}, // Case map + 0x1E62: []rune{0x1E63}, // Case map + 0x1E64: []rune{0x1E65}, // Case map + 0x1E66: []rune{0x1E67}, // Case map + 0x1E68: []rune{0x1E69}, // Case map + 0x1E6A: []rune{0x1E6B}, // Case map + 0x1E6C: []rune{0x1E6D}, // Case map + 0x1E6E: []rune{0x1E6F}, // Case map + 0x1E70: []rune{0x1E71}, // Case map + 0x1E72: []rune{0x1E73}, // Case map + 0x1E74: []rune{0x1E75}, // Case map + 0x1E76: []rune{0x1E77}, // Case map + 0x1E78: []rune{0x1E79}, // Case map + 0x1E7A: []rune{0x1E7B}, // Case map + 0x1E7C: []rune{0x1E7D}, // Case map + 0x1E7E: []rune{0x1E7F}, // Case map + 0x1E80: []rune{0x1E81}, // Case map + 0x1E82: []rune{0x1E83}, // Case map + 0x1E84: []rune{0x1E85}, // Case map + 0x1E86: []rune{0x1E87}, // Case map + 0x1E88: []rune{0x1E89}, // Case map + 0x1E8A: []rune{0x1E8B}, // Case map + 0x1E8C: []rune{0x1E8D}, // Case map + 0x1E8E: []rune{0x1E8F}, // Case map + 0x1E90: []rune{0x1E91}, // Case map + 0x1E92: []rune{0x1E93}, // Case map + 0x1E94: []rune{0x1E95}, // Case map + 0x1E96: []rune{0x0068, 0x0331}, // Case map + 0x1E97: []rune{0x0074, 0x0308}, // Case map + 0x1E98: []rune{0x0077, 0x030A}, // Case map + 0x1E99: []rune{0x0079, 0x030A}, // Case map + 0x1E9A: []rune{0x0061, 0x02BE}, // Case map + 0x1E9B: []rune{0x1E61}, // Case map + 0x1EA0: []rune{0x1EA1}, // Case map + 0x1EA2: []rune{0x1EA3}, // Case map + 0x1EA4: []rune{0x1EA5}, // Case map + 0x1EA6: []rune{0x1EA7}, // Case map + 0x1EA8: []rune{0x1EA9}, // Case map + 0x1EAA: []rune{0x1EAB}, // Case map + 0x1EAC: []rune{0x1EAD}, // Case map + 0x1EAE: []rune{0x1EAF}, // Case map + 0x1EB0: []rune{0x1EB1}, // Case map + 0x1EB2: []rune{0x1EB3}, // Case map + 0x1EB4: []rune{0x1EB5}, // Case map + 0x1EB6: []rune{0x1EB7}, // Case map + 0x1EB8: []rune{0x1EB9}, // Case map + 0x1EBA: []rune{0x1EBB}, // Case map + 0x1EBC: []rune{0x1EBD}, // Case map + 0x1EBE: []rune{0x1EBF}, // Case map + 0x1EC0: []rune{0x1EC1}, // Case map + 0x1EC2: []rune{0x1EC3}, // Case map + 0x1EC4: []rune{0x1EC5}, // Case map + 0x1EC6: []rune{0x1EC7}, // Case map + 0x1EC8: []rune{0x1EC9}, // Case map + 0x1ECA: []rune{0x1ECB}, // Case map + 0x1ECC: []rune{0x1ECD}, // Case map + 0x1ECE: []rune{0x1ECF}, // Case map + 0x1ED0: []rune{0x1ED1}, // Case map + 0x1ED2: []rune{0x1ED3}, // Case map + 0x1ED4: []rune{0x1ED5}, // Case map + 0x1ED6: []rune{0x1ED7}, // Case map + 0x1ED8: []rune{0x1ED9}, // Case map + 0x1EDA: []rune{0x1EDB}, // Case map + 0x1EDC: []rune{0x1EDD}, // Case map + 0x1EDE: []rune{0x1EDF}, // Case map + 0x1EE0: []rune{0x1EE1}, // 
Case map + 0x1EE2: []rune{0x1EE3}, // Case map + 0x1EE4: []rune{0x1EE5}, // Case map + 0x1EE6: []rune{0x1EE7}, // Case map + 0x1EE8: []rune{0x1EE9}, // Case map + 0x1EEA: []rune{0x1EEB}, // Case map + 0x1EEC: []rune{0x1EED}, // Case map + 0x1EEE: []rune{0x1EEF}, // Case map + 0x1EF0: []rune{0x1EF1}, // Case map + 0x1EF2: []rune{0x1EF3}, // Case map + 0x1EF4: []rune{0x1EF5}, // Case map + 0x1EF6: []rune{0x1EF7}, // Case map + 0x1EF8: []rune{0x1EF9}, // Case map + 0x1F08: []rune{0x1F00}, // Case map + 0x1F09: []rune{0x1F01}, // Case map + 0x1F0A: []rune{0x1F02}, // Case map + 0x1F0B: []rune{0x1F03}, // Case map + 0x1F0C: []rune{0x1F04}, // Case map + 0x1F0D: []rune{0x1F05}, // Case map + 0x1F0E: []rune{0x1F06}, // Case map + 0x1F0F: []rune{0x1F07}, // Case map + 0x1F18: []rune{0x1F10}, // Case map + 0x1F19: []rune{0x1F11}, // Case map + 0x1F1A: []rune{0x1F12}, // Case map + 0x1F1B: []rune{0x1F13}, // Case map + 0x1F1C: []rune{0x1F14}, // Case map + 0x1F1D: []rune{0x1F15}, // Case map + 0x1F28: []rune{0x1F20}, // Case map + 0x1F29: []rune{0x1F21}, // Case map + 0x1F2A: []rune{0x1F22}, // Case map + 0x1F2B: []rune{0x1F23}, // Case map + 0x1F2C: []rune{0x1F24}, // Case map + 0x1F2D: []rune{0x1F25}, // Case map + 0x1F2E: []rune{0x1F26}, // Case map + 0x1F2F: []rune{0x1F27}, // Case map + 0x1F38: []rune{0x1F30}, // Case map + 0x1F39: []rune{0x1F31}, // Case map + 0x1F3A: []rune{0x1F32}, // Case map + 0x1F3B: []rune{0x1F33}, // Case map + 0x1F3C: []rune{0x1F34}, // Case map + 0x1F3D: []rune{0x1F35}, // Case map + 0x1F3E: []rune{0x1F36}, // Case map + 0x1F3F: []rune{0x1F37}, // Case map + 0x1F48: []rune{0x1F40}, // Case map + 0x1F49: []rune{0x1F41}, // Case map + 0x1F4A: []rune{0x1F42}, // Case map + 0x1F4B: []rune{0x1F43}, // Case map + 0x1F4C: []rune{0x1F44}, // Case map + 0x1F4D: []rune{0x1F45}, // Case map + 0x1F50: []rune{0x03C5, 0x0313}, // Case map + 0x1F52: []rune{0x03C5, 0x0313, 0x0300}, // Case map + 0x1F54: []rune{0x03C5, 0x0313, 0x0301}, // Case map + 0x1F56: []rune{0x03C5, 0x0313, 0x0342}, // Case map + 0x1F59: []rune{0x1F51}, // Case map + 0x1F5B: []rune{0x1F53}, // Case map + 0x1F5D: []rune{0x1F55}, // Case map + 0x1F5F: []rune{0x1F57}, // Case map + 0x1F68: []rune{0x1F60}, // Case map + 0x1F69: []rune{0x1F61}, // Case map + 0x1F6A: []rune{0x1F62}, // Case map + 0x1F6B: []rune{0x1F63}, // Case map + 0x1F6C: []rune{0x1F64}, // Case map + 0x1F6D: []rune{0x1F65}, // Case map + 0x1F6E: []rune{0x1F66}, // Case map + 0x1F6F: []rune{0x1F67}, // Case map + 0x1F80: []rune{0x1F00, 0x03B9}, // Case map + 0x1F81: []rune{0x1F01, 0x03B9}, // Case map + 0x1F82: []rune{0x1F02, 0x03B9}, // Case map + 0x1F83: []rune{0x1F03, 0x03B9}, // Case map + 0x1F84: []rune{0x1F04, 0x03B9}, // Case map + 0x1F85: []rune{0x1F05, 0x03B9}, // Case map + 0x1F86: []rune{0x1F06, 0x03B9}, // Case map + 0x1F87: []rune{0x1F07, 0x03B9}, // Case map + 0x1F88: []rune{0x1F00, 0x03B9}, // Case map + 0x1F89: []rune{0x1F01, 0x03B9}, // Case map + 0x1F8A: []rune{0x1F02, 0x03B9}, // Case map + 0x1F8B: []rune{0x1F03, 0x03B9}, // Case map + 0x1F8C: []rune{0x1F04, 0x03B9}, // Case map + 0x1F8D: []rune{0x1F05, 0x03B9}, // Case map + 0x1F8E: []rune{0x1F06, 0x03B9}, // Case map + 0x1F8F: []rune{0x1F07, 0x03B9}, // Case map + 0x1F90: []rune{0x1F20, 0x03B9}, // Case map + 0x1F91: []rune{0x1F21, 0x03B9}, // Case map + 0x1F92: []rune{0x1F22, 0x03B9}, // Case map + 0x1F93: []rune{0x1F23, 0x03B9}, // Case map + 0x1F94: []rune{0x1F24, 0x03B9}, // Case map + 0x1F95: []rune{0x1F25, 0x03B9}, // Case map + 0x1F96: []rune{0x1F26, 0x03B9}, // Case map 
+ 0x1F97: []rune{0x1F27, 0x03B9}, // Case map + 0x1F98: []rune{0x1F20, 0x03B9}, // Case map + 0x1F99: []rune{0x1F21, 0x03B9}, // Case map + 0x1F9A: []rune{0x1F22, 0x03B9}, // Case map + 0x1F9B: []rune{0x1F23, 0x03B9}, // Case map + 0x1F9C: []rune{0x1F24, 0x03B9}, // Case map + 0x1F9D: []rune{0x1F25, 0x03B9}, // Case map + 0x1F9E: []rune{0x1F26, 0x03B9}, // Case map + 0x1F9F: []rune{0x1F27, 0x03B9}, // Case map + 0x1FA0: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA1: []rune{0x1F61, 0x03B9}, // Case map + 0x1FA2: []rune{0x1F62, 0x03B9}, // Case map + 0x1FA3: []rune{0x1F63, 0x03B9}, // Case map + 0x1FA4: []rune{0x1F64, 0x03B9}, // Case map + 0x1FA5: []rune{0x1F65, 0x03B9}, // Case map + 0x1FA6: []rune{0x1F66, 0x03B9}, // Case map + 0x1FA7: []rune{0x1F67, 0x03B9}, // Case map + 0x1FA8: []rune{0x1F60, 0x03B9}, // Case map + 0x1FA9: []rune{0x1F61, 0x03B9}, // Case map + 0x1FAA: []rune{0x1F62, 0x03B9}, // Case map + 0x1FAB: []rune{0x1F63, 0x03B9}, // Case map + 0x1FAC: []rune{0x1F64, 0x03B9}, // Case map + 0x1FAD: []rune{0x1F65, 0x03B9}, // Case map + 0x1FAE: []rune{0x1F66, 0x03B9}, // Case map + 0x1FAF: []rune{0x1F67, 0x03B9}, // Case map + 0x1FB2: []rune{0x1F70, 0x03B9}, // Case map + 0x1FB3: []rune{0x03B1, 0x03B9}, // Case map + 0x1FB4: []rune{0x03AC, 0x03B9}, // Case map + 0x1FB6: []rune{0x03B1, 0x0342}, // Case map + 0x1FB7: []rune{0x03B1, 0x0342, 0x03B9}, // Case map + 0x1FB8: []rune{0x1FB0}, // Case map + 0x1FB9: []rune{0x1FB1}, // Case map + 0x1FBA: []rune{0x1F70}, // Case map + 0x1FBB: []rune{0x1F71}, // Case map + 0x1FBC: []rune{0x03B1, 0x03B9}, // Case map + 0x1FBE: []rune{0x03B9}, // Case map + 0x1FC2: []rune{0x1F74, 0x03B9}, // Case map + 0x1FC3: []rune{0x03B7, 0x03B9}, // Case map + 0x1FC4: []rune{0x03AE, 0x03B9}, // Case map + 0x1FC6: []rune{0x03B7, 0x0342}, // Case map + 0x1FC7: []rune{0x03B7, 0x0342, 0x03B9}, // Case map + 0x1FC8: []rune{0x1F72}, // Case map + 0x1FC9: []rune{0x1F73}, // Case map + 0x1FCA: []rune{0x1F74}, // Case map + 0x1FCB: []rune{0x1F75}, // Case map + 0x1FCC: []rune{0x03B7, 0x03B9}, // Case map + 0x1FD2: []rune{0x03B9, 0x0308, 0x0300}, // Case map + 0x1FD3: []rune{0x03B9, 0x0308, 0x0301}, // Case map + 0x1FD6: []rune{0x03B9, 0x0342}, // Case map + 0x1FD7: []rune{0x03B9, 0x0308, 0x0342}, // Case map + 0x1FD8: []rune{0x1FD0}, // Case map + 0x1FD9: []rune{0x1FD1}, // Case map + 0x1FDA: []rune{0x1F76}, // Case map + 0x1FDB: []rune{0x1F77}, // Case map + 0x1FE2: []rune{0x03C5, 0x0308, 0x0300}, // Case map + 0x1FE3: []rune{0x03C5, 0x0308, 0x0301}, // Case map + 0x1FE4: []rune{0x03C1, 0x0313}, // Case map + 0x1FE6: []rune{0x03C5, 0x0342}, // Case map + 0x1FE7: []rune{0x03C5, 0x0308, 0x0342}, // Case map + 0x1FE8: []rune{0x1FE0}, // Case map + 0x1FE9: []rune{0x1FE1}, // Case map + 0x1FEA: []rune{0x1F7A}, // Case map + 0x1FEB: []rune{0x1F7B}, // Case map + 0x1FEC: []rune{0x1FE5}, // Case map + 0x1FF2: []rune{0x1F7C, 0x03B9}, // Case map + 0x1FF3: []rune{0x03C9, 0x03B9}, // Case map + 0x1FF4: []rune{0x03CE, 0x03B9}, // Case map + 0x1FF6: []rune{0x03C9, 0x0342}, // Case map + 0x1FF7: []rune{0x03C9, 0x0342, 0x03B9}, // Case map + 0x1FF8: []rune{0x1F78}, // Case map + 0x1FF9: []rune{0x1F79}, // Case map + 0x1FFA: []rune{0x1F7C}, // Case map + 0x1FFB: []rune{0x1F7D}, // Case map + 0x1FFC: []rune{0x03C9, 0x03B9}, // Case map + 0x2126: []rune{0x03C9}, // Case map + 0x212A: []rune{0x006B}, // Case map + 0x212B: []rune{0x00E5}, // Case map + 0x2160: []rune{0x2170}, // Case map + 0x2161: []rune{0x2171}, // Case map + 0x2162: []rune{0x2172}, // Case map + 0x2163: []rune{0x2173}, 
// Case map + 0x2164: []rune{0x2174}, // Case map + 0x2165: []rune{0x2175}, // Case map + 0x2166: []rune{0x2176}, // Case map + 0x2167: []rune{0x2177}, // Case map + 0x2168: []rune{0x2178}, // Case map + 0x2169: []rune{0x2179}, // Case map + 0x216A: []rune{0x217A}, // Case map + 0x216B: []rune{0x217B}, // Case map + 0x216C: []rune{0x217C}, // Case map + 0x216D: []rune{0x217D}, // Case map + 0x216E: []rune{0x217E}, // Case map + 0x216F: []rune{0x217F}, // Case map + 0x24B6: []rune{0x24D0}, // Case map + 0x24B7: []rune{0x24D1}, // Case map + 0x24B8: []rune{0x24D2}, // Case map + 0x24B9: []rune{0x24D3}, // Case map + 0x24BA: []rune{0x24D4}, // Case map + 0x24BB: []rune{0x24D5}, // Case map + 0x24BC: []rune{0x24D6}, // Case map + 0x24BD: []rune{0x24D7}, // Case map + 0x24BE: []rune{0x24D8}, // Case map + 0x24BF: []rune{0x24D9}, // Case map + 0x24C0: []rune{0x24DA}, // Case map + 0x24C1: []rune{0x24DB}, // Case map + 0x24C2: []rune{0x24DC}, // Case map + 0x24C3: []rune{0x24DD}, // Case map + 0x24C4: []rune{0x24DE}, // Case map + 0x24C5: []rune{0x24DF}, // Case map + 0x24C6: []rune{0x24E0}, // Case map + 0x24C7: []rune{0x24E1}, // Case map + 0x24C8: []rune{0x24E2}, // Case map + 0x24C9: []rune{0x24E3}, // Case map + 0x24CA: []rune{0x24E4}, // Case map + 0x24CB: []rune{0x24E5}, // Case map + 0x24CC: []rune{0x24E6}, // Case map + 0x24CD: []rune{0x24E7}, // Case map + 0x24CE: []rune{0x24E8}, // Case map + 0x24CF: []rune{0x24E9}, // Case map + 0xFB00: []rune{0x0066, 0x0066}, // Case map + 0xFB01: []rune{0x0066, 0x0069}, // Case map + 0xFB02: []rune{0x0066, 0x006C}, // Case map + 0xFB03: []rune{0x0066, 0x0066, 0x0069}, // Case map + 0xFB04: []rune{0x0066, 0x0066, 0x006C}, // Case map + 0xFB05: []rune{0x0073, 0x0074}, // Case map + 0xFB06: []rune{0x0073, 0x0074}, // Case map + 0xFB13: []rune{0x0574, 0x0576}, // Case map + 0xFB14: []rune{0x0574, 0x0565}, // Case map + 0xFB15: []rune{0x0574, 0x056B}, // Case map + 0xFB16: []rune{0x057E, 0x0576}, // Case map + 0xFB17: []rune{0x0574, 0x056D}, // Case map + 0xFF21: []rune{0xFF41}, // Case map + 0xFF22: []rune{0xFF42}, // Case map + 0xFF23: []rune{0xFF43}, // Case map + 0xFF24: []rune{0xFF44}, // Case map + 0xFF25: []rune{0xFF45}, // Case map + 0xFF26: []rune{0xFF46}, // Case map + 0xFF27: []rune{0xFF47}, // Case map + 0xFF28: []rune{0xFF48}, // Case map + 0xFF29: []rune{0xFF49}, // Case map + 0xFF2A: []rune{0xFF4A}, // Case map + 0xFF2B: []rune{0xFF4B}, // Case map + 0xFF2C: []rune{0xFF4C}, // Case map + 0xFF2D: []rune{0xFF4D}, // Case map + 0xFF2E: []rune{0xFF4E}, // Case map + 0xFF2F: []rune{0xFF4F}, // Case map + 0xFF30: []rune{0xFF50}, // Case map + 0xFF31: []rune{0xFF51}, // Case map + 0xFF32: []rune{0xFF52}, // Case map + 0xFF33: []rune{0xFF53}, // Case map + 0xFF34: []rune{0xFF54}, // Case map + 0xFF35: []rune{0xFF55}, // Case map + 0xFF36: []rune{0xFF56}, // Case map + 0xFF37: []rune{0xFF57}, // Case map + 0xFF38: []rune{0xFF58}, // Case map + 0xFF39: []rune{0xFF59}, // Case map + 0xFF3A: []rune{0xFF5A}, // Case map + 0x10400: []rune{0x10428}, // Case map + 0x10401: []rune{0x10429}, // Case map + 0x10402: []rune{0x1042A}, // Case map + 0x10403: []rune{0x1042B}, // Case map + 0x10404: []rune{0x1042C}, // Case map + 0x10405: []rune{0x1042D}, // Case map + 0x10406: []rune{0x1042E}, // Case map + 0x10407: []rune{0x1042F}, // Case map + 0x10408: []rune{0x10430}, // Case map + 0x10409: []rune{0x10431}, // Case map + 0x1040A: []rune{0x10432}, // Case map + 0x1040B: []rune{0x10433}, // Case map + 0x1040C: []rune{0x10434}, // Case map + 0x1040D: 
[]rune{0x10435}, // Case map + 0x1040E: []rune{0x10436}, // Case map + 0x1040F: []rune{0x10437}, // Case map + 0x10410: []rune{0x10438}, // Case map + 0x10411: []rune{0x10439}, // Case map + 0x10412: []rune{0x1043A}, // Case map + 0x10413: []rune{0x1043B}, // Case map + 0x10414: []rune{0x1043C}, // Case map + 0x10415: []rune{0x1043D}, // Case map + 0x10416: []rune{0x1043E}, // Case map + 0x10417: []rune{0x1043F}, // Case map + 0x10418: []rune{0x10440}, // Case map + 0x10419: []rune{0x10441}, // Case map + 0x1041A: []rune{0x10442}, // Case map + 0x1041B: []rune{0x10443}, // Case map + 0x1041C: []rune{0x10444}, // Case map + 0x1041D: []rune{0x10445}, // Case map + 0x1041E: []rune{0x10446}, // Case map + 0x1041F: []rune{0x10447}, // Case map + 0x10420: []rune{0x10448}, // Case map + 0x10421: []rune{0x10449}, // Case map + 0x10422: []rune{0x1044A}, // Case map + 0x10423: []rune{0x1044B}, // Case map + 0x10424: []rune{0x1044C}, // Case map + 0x10425: []rune{0x1044D}, // Case map +} + +// TableB3 represents RFC-3454 Table B.3. +var TableB3 Mapping = tableB3 + +var tableC1_1 = Set{ + RuneRange{0x0020, 0x0020}, // SPACE +} + +// TableC1_1 represents RFC-3454 Table C.1.1. +var TableC1_1 Set = tableC1_1 + +var tableC1_2 = Set{ + RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE + RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK + RuneRange{0x2000, 0x2000}, // EN QUAD + RuneRange{0x2001, 0x2001}, // EM QUAD + RuneRange{0x2002, 0x2002}, // EN SPACE + RuneRange{0x2003, 0x2003}, // EM SPACE + RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE + RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE + RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE + RuneRange{0x2007, 0x2007}, // FIGURE SPACE + RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE + RuneRange{0x2009, 0x2009}, // THIN SPACE + RuneRange{0x200A, 0x200A}, // HAIR SPACE + RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE + RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE + RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE + RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE +} + +// TableC1_2 represents RFC-3454 Table C.1.2. +var TableC1_2 Set = tableC1_2 + +var tableC2_1 = Set{ + RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS] + RuneRange{0x007F, 0x007F}, // DELETE +} + +// TableC2_1 represents RFC-3454 Table C.2.1. +var TableC2_1 Set = tableC2_1 + +var tableC2_2 = Set{ + RuneRange{0x0080, 0x009F}, // [CONTROL CHARACTERS] + RuneRange{0x06DD, 0x06DD}, // ARABIC END OF AYAH + RuneRange{0x070F, 0x070F}, // SYRIAC ABBREVIATION MARK + RuneRange{0x180E, 0x180E}, // MONGOLIAN VOWEL SEPARATOR + RuneRange{0x200C, 0x200C}, // ZERO WIDTH NON-JOINER + RuneRange{0x200D, 0x200D}, // ZERO WIDTH JOINER + RuneRange{0x2028, 0x2028}, // LINE SEPARATOR + RuneRange{0x2029, 0x2029}, // PARAGRAPH SEPARATOR + RuneRange{0x2060, 0x2060}, // WORD JOINER + RuneRange{0x2061, 0x2061}, // FUNCTION APPLICATION + RuneRange{0x2062, 0x2062}, // INVISIBLE TIMES + RuneRange{0x2063, 0x2063}, // INVISIBLE SEPARATOR + RuneRange{0x206A, 0x206F}, // [CONTROL CHARACTERS] + RuneRange{0xFEFF, 0xFEFF}, // ZERO WIDTH NO-BREAK SPACE + RuneRange{0xFFF9, 0xFFFC}, // [CONTROL CHARACTERS] + RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS] +} + +// TableC2_2 represents RFC-3454 Table C.2.2. +var TableC2_2 Set = tableC2_2 + +var tableC3 = Set{ + RuneRange{0xE000, 0xF8FF}, // [PRIVATE USE, PLANE 0] + RuneRange{0xF0000, 0xFFFFD}, // [PRIVATE USE, PLANE 15] + RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16] +} + +// TableC3 represents RFC-3454 Table C.3. 
+var TableC3 Set = tableC3
+
+var tableC4 = Set{
+	RuneRange{0xFDD0, 0xFDEF},     // [NONCHARACTER CODE POINTS]
+	RuneRange{0xFFFE, 0xFFFF},     // [NONCHARACTER CODE POINTS]
+	RuneRange{0x1FFFE, 0x1FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x2FFFE, 0x2FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x3FFFE, 0x3FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x4FFFE, 0x4FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x5FFFE, 0x5FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x6FFFE, 0x6FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x7FFFE, 0x7FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x8FFFE, 0x8FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x9FFFE, 0x9FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xAFFFE, 0xAFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xBFFFE, 0xBFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xCFFFE, 0xCFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xDFFFE, 0xDFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xEFFFE, 0xEFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xFFFFE, 0xFFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS]
+}
+
+// TableC4 represents RFC-3454 Table C.4.
+var TableC4 Set = tableC4
+
+var tableC5 = Set{
+	RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES]
+}
+
+// TableC5 represents RFC-3454 Table C.5.
+var TableC5 Set = tableC5
+
+var tableC6 = Set{
+	RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR
+	RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR
+	RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR
+	RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER
+	RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER
+}
+
+// TableC6 represents RFC-3454 Table C.6.
+var TableC6 Set = tableC6
+
+var tableC7 = Set{
+	RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS]
+}
+
+// TableC7 represents RFC-3454 Table C.7.
+var TableC7 Set = tableC7
+
+var tableC8 = Set{
+	RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK
+	RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK
+	RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK
+	RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK
+	RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING
+	RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING
+	RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING
+	RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE
+	RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE
+	RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING
+	RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING
+	RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING
+	RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING
+	RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES
+	RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES
+}
+
+// TableC8 represents RFC-3454 Table C.8.
+var TableC8 Set = tableC8
+
+var tableC9 = Set{
+	RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG
+	RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS]
+}
+
+// TableC9 represents RFC-3454 Table C.9.
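The C.x tables above switch from mappings to prohibition sets: each is a list of inclusive code-point ranges, and a rune is prohibited if it falls inside any range. A short sketch of the membership test, assuming RuneRange is an inclusive [2]rune pair and Set a slice of them, as the literals suggest; contains is a hypothetical stand-in for whatever lookup the package actually exposes:

package main

import "fmt"

// RuneRange and Set mirror the assumed table shapes above: an
// inclusive [lo, hi] pair and a slice of such pairs.
type RuneRange [2]rune
type Set []RuneRange

// contains reports whether r falls inside any range of the set.
// Hypothetical helper, for illustration only.
func contains(s Set, r rune) bool {
	for _, rr := range s {
		if rr[0] <= r && r <= rr[1] {
			return true
		}
	}
	return false
}

func main() {
	// Excerpt of Table C.2.1 above: ASCII control characters plus DELETE.
	c21 := Set{{0x0000, 0x001F}, {0x007F, 0x007F}}
	fmt.Println(contains(c21, '\n')) // true: U+000A is a prohibited control
	fmt.Println(contains(c21, 'a'))  // false: plain letters are allowed
}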
+var TableC9 Set = tableC9 + +var tableD1 = Set{ + RuneRange{0x05BE, 0x05BE}, + RuneRange{0x05C0, 0x05C0}, + RuneRange{0x05C3, 0x05C3}, + RuneRange{0x05D0, 0x05EA}, + RuneRange{0x05F0, 0x05F4}, + RuneRange{0x061B, 0x061B}, + RuneRange{0x061F, 0x061F}, + RuneRange{0x0621, 0x063A}, + RuneRange{0x0640, 0x064A}, + RuneRange{0x066D, 0x066F}, + RuneRange{0x0671, 0x06D5}, + RuneRange{0x06DD, 0x06DD}, + RuneRange{0x06E5, 0x06E6}, + RuneRange{0x06FA, 0x06FE}, + RuneRange{0x0700, 0x070D}, + RuneRange{0x0710, 0x0710}, + RuneRange{0x0712, 0x072C}, + RuneRange{0x0780, 0x07A5}, + RuneRange{0x07B1, 0x07B1}, + RuneRange{0x200F, 0x200F}, + RuneRange{0xFB1D, 0xFB1D}, + RuneRange{0xFB1F, 0xFB28}, + RuneRange{0xFB2A, 0xFB36}, + RuneRange{0xFB38, 0xFB3C}, + RuneRange{0xFB3E, 0xFB3E}, + RuneRange{0xFB40, 0xFB41}, + RuneRange{0xFB43, 0xFB44}, + RuneRange{0xFB46, 0xFBB1}, + RuneRange{0xFBD3, 0xFD3D}, + RuneRange{0xFD50, 0xFD8F}, + RuneRange{0xFD92, 0xFDC7}, + RuneRange{0xFDF0, 0xFDFC}, + RuneRange{0xFE70, 0xFE74}, + RuneRange{0xFE76, 0xFEFC}, +} + +// TableD1 represents RFC-3454 Table D.1. +var TableD1 Set = tableD1 + +var tableD2 = Set{ + RuneRange{0x0041, 0x005A}, + RuneRange{0x0061, 0x007A}, + RuneRange{0x00AA, 0x00AA}, + RuneRange{0x00B5, 0x00B5}, + RuneRange{0x00BA, 0x00BA}, + RuneRange{0x00C0, 0x00D6}, + RuneRange{0x00D8, 0x00F6}, + RuneRange{0x00F8, 0x0220}, + RuneRange{0x0222, 0x0233}, + RuneRange{0x0250, 0x02AD}, + RuneRange{0x02B0, 0x02B8}, + RuneRange{0x02BB, 0x02C1}, + RuneRange{0x02D0, 0x02D1}, + RuneRange{0x02E0, 0x02E4}, + RuneRange{0x02EE, 0x02EE}, + RuneRange{0x037A, 0x037A}, + RuneRange{0x0386, 0x0386}, + RuneRange{0x0388, 0x038A}, + RuneRange{0x038C, 0x038C}, + RuneRange{0x038E, 0x03A1}, + RuneRange{0x03A3, 0x03CE}, + RuneRange{0x03D0, 0x03F5}, + RuneRange{0x0400, 0x0482}, + RuneRange{0x048A, 0x04CE}, + RuneRange{0x04D0, 0x04F5}, + RuneRange{0x04F8, 0x04F9}, + RuneRange{0x0500, 0x050F}, + RuneRange{0x0531, 0x0556}, + RuneRange{0x0559, 0x055F}, + RuneRange{0x0561, 0x0587}, + RuneRange{0x0589, 0x0589}, + RuneRange{0x0903, 0x0903}, + RuneRange{0x0905, 0x0939}, + RuneRange{0x093D, 0x0940}, + RuneRange{0x0949, 0x094C}, + RuneRange{0x0950, 0x0950}, + RuneRange{0x0958, 0x0961}, + RuneRange{0x0964, 0x0970}, + RuneRange{0x0982, 0x0983}, + RuneRange{0x0985, 0x098C}, + RuneRange{0x098F, 0x0990}, + RuneRange{0x0993, 0x09A8}, + RuneRange{0x09AA, 0x09B0}, + RuneRange{0x09B2, 0x09B2}, + RuneRange{0x09B6, 0x09B9}, + RuneRange{0x09BE, 0x09C0}, + RuneRange{0x09C7, 0x09C8}, + RuneRange{0x09CB, 0x09CC}, + RuneRange{0x09D7, 0x09D7}, + RuneRange{0x09DC, 0x09DD}, + RuneRange{0x09DF, 0x09E1}, + RuneRange{0x09E6, 0x09F1}, + RuneRange{0x09F4, 0x09FA}, + RuneRange{0x0A05, 0x0A0A}, + RuneRange{0x0A0F, 0x0A10}, + RuneRange{0x0A13, 0x0A28}, + RuneRange{0x0A2A, 0x0A30}, + RuneRange{0x0A32, 0x0A33}, + RuneRange{0x0A35, 0x0A36}, + RuneRange{0x0A38, 0x0A39}, + RuneRange{0x0A3E, 0x0A40}, + RuneRange{0x0A59, 0x0A5C}, + RuneRange{0x0A5E, 0x0A5E}, + RuneRange{0x0A66, 0x0A6F}, + RuneRange{0x0A72, 0x0A74}, + RuneRange{0x0A83, 0x0A83}, + RuneRange{0x0A85, 0x0A8B}, + RuneRange{0x0A8D, 0x0A8D}, + RuneRange{0x0A8F, 0x0A91}, + RuneRange{0x0A93, 0x0AA8}, + RuneRange{0x0AAA, 0x0AB0}, + RuneRange{0x0AB2, 0x0AB3}, + RuneRange{0x0AB5, 0x0AB9}, + RuneRange{0x0ABD, 0x0AC0}, + RuneRange{0x0AC9, 0x0AC9}, + RuneRange{0x0ACB, 0x0ACC}, + RuneRange{0x0AD0, 0x0AD0}, + RuneRange{0x0AE0, 0x0AE0}, + RuneRange{0x0AE6, 0x0AEF}, + RuneRange{0x0B02, 0x0B03}, + RuneRange{0x0B05, 0x0B0C}, + RuneRange{0x0B0F, 0x0B10}, + RuneRange{0x0B13, 0x0B28}, + 
RuneRange{0x0B2A, 0x0B30}, + RuneRange{0x0B32, 0x0B33}, + RuneRange{0x0B36, 0x0B39}, + RuneRange{0x0B3D, 0x0B3E}, + RuneRange{0x0B40, 0x0B40}, + RuneRange{0x0B47, 0x0B48}, + RuneRange{0x0B4B, 0x0B4C}, + RuneRange{0x0B57, 0x0B57}, + RuneRange{0x0B5C, 0x0B5D}, + RuneRange{0x0B5F, 0x0B61}, + RuneRange{0x0B66, 0x0B70}, + RuneRange{0x0B83, 0x0B83}, + RuneRange{0x0B85, 0x0B8A}, + RuneRange{0x0B8E, 0x0B90}, + RuneRange{0x0B92, 0x0B95}, + RuneRange{0x0B99, 0x0B9A}, + RuneRange{0x0B9C, 0x0B9C}, + RuneRange{0x0B9E, 0x0B9F}, + RuneRange{0x0BA3, 0x0BA4}, + RuneRange{0x0BA8, 0x0BAA}, + RuneRange{0x0BAE, 0x0BB5}, + RuneRange{0x0BB7, 0x0BB9}, + RuneRange{0x0BBE, 0x0BBF}, + RuneRange{0x0BC1, 0x0BC2}, + RuneRange{0x0BC6, 0x0BC8}, + RuneRange{0x0BCA, 0x0BCC}, + RuneRange{0x0BD7, 0x0BD7}, + RuneRange{0x0BE7, 0x0BF2}, + RuneRange{0x0C01, 0x0C03}, + RuneRange{0x0C05, 0x0C0C}, + RuneRange{0x0C0E, 0x0C10}, + RuneRange{0x0C12, 0x0C28}, + RuneRange{0x0C2A, 0x0C33}, + RuneRange{0x0C35, 0x0C39}, + RuneRange{0x0C41, 0x0C44}, + RuneRange{0x0C60, 0x0C61}, + RuneRange{0x0C66, 0x0C6F}, + RuneRange{0x0C82, 0x0C83}, + RuneRange{0x0C85, 0x0C8C}, + RuneRange{0x0C8E, 0x0C90}, + RuneRange{0x0C92, 0x0CA8}, + RuneRange{0x0CAA, 0x0CB3}, + RuneRange{0x0CB5, 0x0CB9}, + RuneRange{0x0CBE, 0x0CBE}, + RuneRange{0x0CC0, 0x0CC4}, + RuneRange{0x0CC7, 0x0CC8}, + RuneRange{0x0CCA, 0x0CCB}, + RuneRange{0x0CD5, 0x0CD6}, + RuneRange{0x0CDE, 0x0CDE}, + RuneRange{0x0CE0, 0x0CE1}, + RuneRange{0x0CE6, 0x0CEF}, + RuneRange{0x0D02, 0x0D03}, + RuneRange{0x0D05, 0x0D0C}, + RuneRange{0x0D0E, 0x0D10}, + RuneRange{0x0D12, 0x0D28}, + RuneRange{0x0D2A, 0x0D39}, + RuneRange{0x0D3E, 0x0D40}, + RuneRange{0x0D46, 0x0D48}, + RuneRange{0x0D4A, 0x0D4C}, + RuneRange{0x0D57, 0x0D57}, + RuneRange{0x0D60, 0x0D61}, + RuneRange{0x0D66, 0x0D6F}, + RuneRange{0x0D82, 0x0D83}, + RuneRange{0x0D85, 0x0D96}, + RuneRange{0x0D9A, 0x0DB1}, + RuneRange{0x0DB3, 0x0DBB}, + RuneRange{0x0DBD, 0x0DBD}, + RuneRange{0x0DC0, 0x0DC6}, + RuneRange{0x0DCF, 0x0DD1}, + RuneRange{0x0DD8, 0x0DDF}, + RuneRange{0x0DF2, 0x0DF4}, + RuneRange{0x0E01, 0x0E30}, + RuneRange{0x0E32, 0x0E33}, + RuneRange{0x0E40, 0x0E46}, + RuneRange{0x0E4F, 0x0E5B}, + RuneRange{0x0E81, 0x0E82}, + RuneRange{0x0E84, 0x0E84}, + RuneRange{0x0E87, 0x0E88}, + RuneRange{0x0E8A, 0x0E8A}, + RuneRange{0x0E8D, 0x0E8D}, + RuneRange{0x0E94, 0x0E97}, + RuneRange{0x0E99, 0x0E9F}, + RuneRange{0x0EA1, 0x0EA3}, + RuneRange{0x0EA5, 0x0EA5}, + RuneRange{0x0EA7, 0x0EA7}, + RuneRange{0x0EAA, 0x0EAB}, + RuneRange{0x0EAD, 0x0EB0}, + RuneRange{0x0EB2, 0x0EB3}, + RuneRange{0x0EBD, 0x0EBD}, + RuneRange{0x0EC0, 0x0EC4}, + RuneRange{0x0EC6, 0x0EC6}, + RuneRange{0x0ED0, 0x0ED9}, + RuneRange{0x0EDC, 0x0EDD}, + RuneRange{0x0F00, 0x0F17}, + RuneRange{0x0F1A, 0x0F34}, + RuneRange{0x0F36, 0x0F36}, + RuneRange{0x0F38, 0x0F38}, + RuneRange{0x0F3E, 0x0F47}, + RuneRange{0x0F49, 0x0F6A}, + RuneRange{0x0F7F, 0x0F7F}, + RuneRange{0x0F85, 0x0F85}, + RuneRange{0x0F88, 0x0F8B}, + RuneRange{0x0FBE, 0x0FC5}, + RuneRange{0x0FC7, 0x0FCC}, + RuneRange{0x0FCF, 0x0FCF}, + RuneRange{0x1000, 0x1021}, + RuneRange{0x1023, 0x1027}, + RuneRange{0x1029, 0x102A}, + RuneRange{0x102C, 0x102C}, + RuneRange{0x1031, 0x1031}, + RuneRange{0x1038, 0x1038}, + RuneRange{0x1040, 0x1057}, + RuneRange{0x10A0, 0x10C5}, + RuneRange{0x10D0, 0x10F8}, + RuneRange{0x10FB, 0x10FB}, + RuneRange{0x1100, 0x1159}, + RuneRange{0x115F, 0x11A2}, + RuneRange{0x11A8, 0x11F9}, + RuneRange{0x1200, 0x1206}, + RuneRange{0x1208, 0x1246}, + RuneRange{0x1248, 0x1248}, + RuneRange{0x124A, 0x124D}, + 
RuneRange{0x1250, 0x1256}, + RuneRange{0x1258, 0x1258}, + RuneRange{0x125A, 0x125D}, + RuneRange{0x1260, 0x1286}, + RuneRange{0x1288, 0x1288}, + RuneRange{0x128A, 0x128D}, + RuneRange{0x1290, 0x12AE}, + RuneRange{0x12B0, 0x12B0}, + RuneRange{0x12B2, 0x12B5}, + RuneRange{0x12B8, 0x12BE}, + RuneRange{0x12C0, 0x12C0}, + RuneRange{0x12C2, 0x12C5}, + RuneRange{0x12C8, 0x12CE}, + RuneRange{0x12D0, 0x12D6}, + RuneRange{0x12D8, 0x12EE}, + RuneRange{0x12F0, 0x130E}, + RuneRange{0x1310, 0x1310}, + RuneRange{0x1312, 0x1315}, + RuneRange{0x1318, 0x131E}, + RuneRange{0x1320, 0x1346}, + RuneRange{0x1348, 0x135A}, + RuneRange{0x1361, 0x137C}, + RuneRange{0x13A0, 0x13F4}, + RuneRange{0x1401, 0x1676}, + RuneRange{0x1681, 0x169A}, + RuneRange{0x16A0, 0x16F0}, + RuneRange{0x1700, 0x170C}, + RuneRange{0x170E, 0x1711}, + RuneRange{0x1720, 0x1731}, + RuneRange{0x1735, 0x1736}, + RuneRange{0x1740, 0x1751}, + RuneRange{0x1760, 0x176C}, + RuneRange{0x176E, 0x1770}, + RuneRange{0x1780, 0x17B6}, + RuneRange{0x17BE, 0x17C5}, + RuneRange{0x17C7, 0x17C8}, + RuneRange{0x17D4, 0x17DA}, + RuneRange{0x17DC, 0x17DC}, + RuneRange{0x17E0, 0x17E9}, + RuneRange{0x1810, 0x1819}, + RuneRange{0x1820, 0x1877}, + RuneRange{0x1880, 0x18A8}, + RuneRange{0x1E00, 0x1E9B}, + RuneRange{0x1EA0, 0x1EF9}, + RuneRange{0x1F00, 0x1F15}, + RuneRange{0x1F18, 0x1F1D}, + RuneRange{0x1F20, 0x1F45}, + RuneRange{0x1F48, 0x1F4D}, + RuneRange{0x1F50, 0x1F57}, + RuneRange{0x1F59, 0x1F59}, + RuneRange{0x1F5B, 0x1F5B}, + RuneRange{0x1F5D, 0x1F5D}, + RuneRange{0x1F5F, 0x1F7D}, + RuneRange{0x1F80, 0x1FB4}, + RuneRange{0x1FB6, 0x1FBC}, + RuneRange{0x1FBE, 0x1FBE}, + RuneRange{0x1FC2, 0x1FC4}, + RuneRange{0x1FC6, 0x1FCC}, + RuneRange{0x1FD0, 0x1FD3}, + RuneRange{0x1FD6, 0x1FDB}, + RuneRange{0x1FE0, 0x1FEC}, + RuneRange{0x1FF2, 0x1FF4}, + RuneRange{0x1FF6, 0x1FFC}, + RuneRange{0x200E, 0x200E}, + RuneRange{0x2071, 0x2071}, + RuneRange{0x207F, 0x207F}, + RuneRange{0x2102, 0x2102}, + RuneRange{0x2107, 0x2107}, + RuneRange{0x210A, 0x2113}, + RuneRange{0x2115, 0x2115}, + RuneRange{0x2119, 0x211D}, + RuneRange{0x2124, 0x2124}, + RuneRange{0x2126, 0x2126}, + RuneRange{0x2128, 0x2128}, + RuneRange{0x212A, 0x212D}, + RuneRange{0x212F, 0x2131}, + RuneRange{0x2133, 0x2139}, + RuneRange{0x213D, 0x213F}, + RuneRange{0x2145, 0x2149}, + RuneRange{0x2160, 0x2183}, + RuneRange{0x2336, 0x237A}, + RuneRange{0x2395, 0x2395}, + RuneRange{0x249C, 0x24E9}, + RuneRange{0x3005, 0x3007}, + RuneRange{0x3021, 0x3029}, + RuneRange{0x3031, 0x3035}, + RuneRange{0x3038, 0x303C}, + RuneRange{0x3041, 0x3096}, + RuneRange{0x309D, 0x309F}, + RuneRange{0x30A1, 0x30FA}, + RuneRange{0x30FC, 0x30FF}, + RuneRange{0x3105, 0x312C}, + RuneRange{0x3131, 0x318E}, + RuneRange{0x3190, 0x31B7}, + RuneRange{0x31F0, 0x321C}, + RuneRange{0x3220, 0x3243}, + RuneRange{0x3260, 0x327B}, + RuneRange{0x327F, 0x32B0}, + RuneRange{0x32C0, 0x32CB}, + RuneRange{0x32D0, 0x32FE}, + RuneRange{0x3300, 0x3376}, + RuneRange{0x337B, 0x33DD}, + RuneRange{0x33E0, 0x33FE}, + RuneRange{0x3400, 0x4DB5}, + RuneRange{0x4E00, 0x9FA5}, + RuneRange{0xA000, 0xA48C}, + RuneRange{0xAC00, 0xD7A3}, + RuneRange{0xD800, 0xFA2D}, + RuneRange{0xFA30, 0xFA6A}, + RuneRange{0xFB00, 0xFB06}, + RuneRange{0xFB13, 0xFB17}, + RuneRange{0xFF21, 0xFF3A}, + RuneRange{0xFF41, 0xFF5A}, + RuneRange{0xFF66, 0xFFBE}, + RuneRange{0xFFC2, 0xFFC7}, + RuneRange{0xFFCA, 0xFFCF}, + RuneRange{0xFFD2, 0xFFD7}, + RuneRange{0xFFDA, 0xFFDC}, + RuneRange{0x10300, 0x1031E}, + RuneRange{0x10320, 0x10323}, + RuneRange{0x10330, 0x1034A}, + RuneRange{0x10400, 0x10425}, + 
RuneRange{0x10428, 0x1044D}, + RuneRange{0x1D000, 0x1D0F5}, + RuneRange{0x1D100, 0x1D126}, + RuneRange{0x1D12A, 0x1D166}, + RuneRange{0x1D16A, 0x1D172}, + RuneRange{0x1D183, 0x1D184}, + RuneRange{0x1D18C, 0x1D1A9}, + RuneRange{0x1D1AE, 0x1D1DD}, + RuneRange{0x1D400, 0x1D454}, + RuneRange{0x1D456, 0x1D49C}, + RuneRange{0x1D49E, 0x1D49F}, + RuneRange{0x1D4A2, 0x1D4A2}, + RuneRange{0x1D4A5, 0x1D4A6}, + RuneRange{0x1D4A9, 0x1D4AC}, + RuneRange{0x1D4AE, 0x1D4B9}, + RuneRange{0x1D4BB, 0x1D4BB}, + RuneRange{0x1D4BD, 0x1D4C0}, + RuneRange{0x1D4C2, 0x1D4C3}, + RuneRange{0x1D4C5, 0x1D505}, + RuneRange{0x1D507, 0x1D50A}, + RuneRange{0x1D50D, 0x1D514}, + RuneRange{0x1D516, 0x1D51C}, + RuneRange{0x1D51E, 0x1D539}, + RuneRange{0x1D53B, 0x1D53E}, + RuneRange{0x1D540, 0x1D544}, + RuneRange{0x1D546, 0x1D546}, + RuneRange{0x1D54A, 0x1D550}, + RuneRange{0x1D552, 0x1D6A3}, + RuneRange{0x1D6A8, 0x1D7C9}, + RuneRange{0x20000, 0x2A6D6}, + RuneRange{0x2F800, 0x2FA1D}, + RuneRange{0xF0000, 0xFFFFD}, + RuneRange{0x100000, 0x10FFFD}, +} + +// TableD2 represents RFC-3454 Table D.2. +var TableD2 Set = tableD2 diff --git a/vendor/github.com/xrash/smetrics/.travis.yml b/vendor/github.com/xrash/smetrics/.travis.yml new file mode 100644 index 000000000..d1cd67ff9 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - 1.11 + - 1.12 + - 1.13 + - 1.14.x + - master +script: + - cd tests && make diff --git a/vendor/github.com/xrash/smetrics/LICENSE b/vendor/github.com/xrash/smetrics/LICENSE new file mode 100644 index 000000000..80445682f --- /dev/null +++ b/vendor/github.com/xrash/smetrics/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2016 Felipe da Cunha Gonçalves +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/xrash/smetrics/README.md b/vendor/github.com/xrash/smetrics/README.md new file mode 100644 index 000000000..5e0c1a463 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/README.md @@ -0,0 +1,49 @@ +[![Build Status](https://travis-ci.org/xrash/smetrics.svg?branch=master)](http://travis-ci.org/xrash/smetrics) + +# smetrics + +`smetrics` is "string metrics". + +Package smetrics provides a bunch of algorithms for calculating the distance between strings. + +There are implementations for calculating the popular Levenshtein distance (aka Edit Distance or Wagner-Fischer), as well as the Jaro distance, the Jaro-Winkler distance, and more. 
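+ +Note that these implementations operate on bytes rather than runes, so multi-byte UTF-8 characters are compared byte by byte. The following minimal sketch (not part of the upstream examples) shows `Hamming`, the only function here that also returns an error, which it does when the inputs have different lengths: + +```go +package main + +import ( + "fmt" + + "github.com/xrash/smetrics" +) + +func main() { + // Hamming requires both strings to have the same length; + // "karolin" and "kathrin" differ in exactly three positions. + d, err := smetrics.Hamming("karolin", "kathrin") + if err != nil { + fmt.Println("error:", err) + return + } + fmt.Println("distance:", d) // prints: distance: 3 +} +```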
+ +# How to import + +```go +import "github.com/xrash/smetrics" +``` + +# Documentation + +Go to [https://pkg.go.dev/github.com/xrash/smetrics](https://pkg.go.dev/github.com/xrash/smetrics) for complete documentation. + +# Example + +```go +package main + +import ( + "github.com/xrash/smetrics" +) + +func main() { + smetrics.WagnerFischer("POTATO", "POTATTO", 1, 1, 2) + smetrics.WagnerFischer("MOUSE", "HOUSE", 2, 2, 4) + + smetrics.Ukkonen("POTATO", "POTATTO", 1, 1, 2) + smetrics.Ukkonen("MOUSE", "HOUSE", 2, 2, 4) + + smetrics.Jaro("AL", "AL") + smetrics.Jaro("MARTHA", "MARHTA") + + smetrics.JaroWinkler("AL", "AL", 0.7, 4) + smetrics.JaroWinkler("MARTHA", "MARHTA", 0.7, 4) + + smetrics.Soundex("Euler") + smetrics.Soundex("Ellery") + + smetrics.Hamming("aaa", "aaa") + smetrics.Hamming("aaa", "aab") +} +``` diff --git a/vendor/github.com/xrash/smetrics/doc.go b/vendor/github.com/xrash/smetrics/doc.go new file mode 100644 index 000000000..21bc986c9 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/doc.go @@ -0,0 +1,19 @@ +/* +Package smetrics provides a bunch of algorithms for calculating +the distance between strings. + +There are implementations for calculating the popular Levenshtein +distance (aka Edit Distance or Wagner-Fischer), as well as the Jaro +distance, the Jaro-Winkler distance, and more. + +For the Levenshtein distance, you can use the functions WagnerFischer() +and Ukkonen(). Read the documentation on these functions. + +For the Jaro and Jaro-Winkler algorithms, check the functions +Jaro() and JaroWinkler(). Read the documentation on these functions. + +For the Soundex algorithm, check the function Soundex(). + +For the Hamming distance algorithm, check the function Hamming(). +*/ +package smetrics diff --git a/vendor/github.com/xrash/smetrics/hamming.go b/vendor/github.com/xrash/smetrics/hamming.go new file mode 100644 index 000000000..505d3e5da --- /dev/null +++ b/vendor/github.com/xrash/smetrics/hamming.go @@ -0,0 +1,25 @@ +package smetrics + +import ( + "fmt" +) + +// The Hamming distance is the minimum number of substitutions required to change string A into string B. Both strings must have the same length. If the strings have different lengths, the function returns an error. +func Hamming(a, b string) (int, error) { + al := len(a) + bl := len(b) + + if al != bl { + return -1, fmt.Errorf("strings have different lengths (len(a)=%d, len(b)=%d)", al, bl) + } + + var difference = 0 + + for i := range a { + if a[i] != b[i] { + difference = difference + 1 + } + } + + return difference, nil +} diff --git a/vendor/github.com/xrash/smetrics/jaro-winkler.go b/vendor/github.com/xrash/smetrics/jaro-winkler.go new file mode 100644 index 000000000..abdb28883 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/jaro-winkler.go @@ -0,0 +1,28 @@ +package smetrics + +import ( + "math" +) + +// The Jaro-Winkler distance. The result is 1 for equal strings, and 0 for completely different strings. It is commonly used in record linkage, so it aims to be accurate for common typos in real-world names such as person names and street names. +// Jaro-Winkler is a modification of the Jaro algorithm. It works by first running Jaro, then boosting the score of exact matches at the beginning of the strings. Because of that, it introduces two more parameters: the boostThreshold and the prefixSize. These are commonly set to 0.7 and 4, respectively.
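+// For example: Jaro("MARTHA", "MARHTA") is roughly 0.944, and the two strings share the 3-character prefix "MAR", so JaroWinkler("MARTHA", "MARHTA", 0.7, 4) returns roughly 0.944 + 0.1*3*(1-0.944), which is about 0.961.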
+func JaroWinkler(a, b string, boostThreshold float64, prefixSize int) float64 { + j := Jaro(a, b) + + if j <= boostThreshold { + return j + } + + prefixSize = int(math.Min(float64(len(a)), math.Min(float64(prefixSize), float64(len(b))))) + + var prefixMatch float64 + for i := 0; i < prefixSize; i++ { + if a[i] == b[i] { + prefixMatch++ + } else { + break + } + } + + return j + 0.1*prefixMatch*(1.0-j) +} diff --git a/vendor/github.com/xrash/smetrics/jaro.go b/vendor/github.com/xrash/smetrics/jaro.go new file mode 100644 index 000000000..75f924e11 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/jaro.go @@ -0,0 +1,86 @@ +package smetrics + +import ( + "math" +) + +// The Jaro distance. The result is 1 for equal strings, and 0 for completely different strings. +func Jaro(a, b string) float64 { + // If both strings are zero-length, they are completely equal, + // therefore return 1. + if len(a) == 0 && len(b) == 0 { + return 1 + } + + // If one string is zero-length, strings are completely different, + // therefore return 0. + if len(a) == 0 || len(b) == 0 { + return 0 + } + + // Define the necessary variables for the algorithm. + la := float64(len(a)) + lb := float64(len(b)) + matchRange := int(math.Max(0, math.Floor(math.Max(la, lb)/2.0)-1)) + matchesA := make([]bool, len(a)) + matchesB := make([]bool, len(b)) + var matches float64 = 0 + + // Step 1: Matches + // Loop through each character of the first string, + // looking for a matching character in the second string. + for i := 0; i < len(a); i++ { + start := int(math.Max(0, float64(i-matchRange))) + end := int(math.Min(lb-1, float64(i+matchRange))) + + for j := start; j <= end; j++ { + if matchesB[j] { + continue + } + + if a[i] == b[j] { + matchesA[i] = true + matchesB[j] = true + matches++ + break + } + } + } + + // If there are no matches, strings are completely different, + // therefore return 0. + if matches == 0 { + return 0 + } + + // Step 2: Transpositions + // Loop through the matches' arrays, looking for + // unaligned matches. Count the number of unaligned matches. + unaligned := 0 + j := 0 + for i := 0; i < len(a); i++ { + if !matchesA[i] { + continue + } + + for !matchesB[j] { + j++ + } + + if a[i] != b[j] { + unaligned++ + } + + j++ + } + + // The number of unaligned matches divided by two is the number of _transpositions_. + transpositions := math.Floor(float64(unaligned / 2)) + + // The Jaro distance is the average of these three numbers: + // 1. matches / length of string A + // 2. matches / length of string B + // 3. (matches - transpositions) / matches + // The sum of the three, divided by three, is the final result. + return ((matches / la) + (matches / lb) + ((matches - transpositions) / matches)) / 3.0 +} diff --git a/vendor/github.com/xrash/smetrics/soundex.go b/vendor/github.com/xrash/smetrics/soundex.go new file mode 100644 index 000000000..18c3aef72 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/soundex.go @@ -0,0 +1,63 @@ +package smetrics + +import ( + "strings" +) + +// The Soundex encoding. It is a phonetic algorithm that considers how the words sound in English. Soundex maps a string to a 4-byte code consisting of the first letter of the original string and three numbers. Strings that sound similar should map to the same code.
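+// For example, with this implementation both "Euler" and "Ellery" encode to "E460", and both "Robert" and "Rupert" encode to "R163". Note that the input is assumed to be non-empty: the first byte is read unconditionally, so an empty string causes a panic.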
+func Soundex(s string) string { + b := strings.Builder{} + b.Grow(4) + + p := s[0] + if p <= 'z' && p >= 'a' { + p -= 32 // convert to uppercase + } + b.WriteByte(p) + + n := 0 + for i := 1; i < len(s); i++ { + c := s[i] + + if c <= 'z' && c >= 'a' { + c -= 32 // convert to uppercase + } else if c < 'A' || c > 'Z' { + continue + } + + if c == p { + continue + } + + p = c + + switch c { + case 'B', 'P', 'F', 'V': + c = '1' + case 'C', 'S', 'K', 'G', 'J', 'Q', 'X', 'Z': + c = '2' + case 'D', 'T': + c = '3' + case 'L': + c = '4' + case 'M', 'N': + c = '5' + case 'R': + c = '6' + default: + continue + } + + b.WriteByte(c) + n++ + if n == 3 { + break + } + } + + for i := n; i < 3; i++ { + b.WriteByte('0') + } + + return b.String() +} diff --git a/vendor/github.com/xrash/smetrics/ukkonen.go b/vendor/github.com/xrash/smetrics/ukkonen.go new file mode 100644 index 000000000..3c5579cd9 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/ukkonen.go @@ -0,0 +1,94 @@ +package smetrics + +import ( + "math" +) + +// The Ukkonen algorithm for calculating the Levenshtein distance. The algorithm is described in http://www.cs.helsinki.fi/u/ukkonen/InfCont85.PDF, or in docs/InfCont85.PDF. It runs in O(t * min(m, n)) time, where t is the actual distance between strings a and b. It needs O(min(t, m, n)) space. This function might be preferred over WagnerFischer() for *very* similar strings. But test it out yourself. +// The first two parameters are the two strings to be compared. The last three parameters are the insertion cost, the deletion cost and the substitution cost. These are normally defined as 1, 1 and 2 respectively. +func Ukkonen(a, b string, icost, dcost, scost int) int { + var lowerCost int + + if icost < dcost && icost < scost { + lowerCost = icost + } else if dcost < scost { + lowerCost = dcost + } else { + lowerCost = scost + } + + infinite := math.MaxInt32 / 2 + + var r []int + var k, kprime, p, t int + var ins, del, sub int + + if len(a) > len(b) { + t = (len(a) - len(b) + 1) * lowerCost + } else { + t = (len(b) - len(a) + 1) * lowerCost + } + + for { + if (t / lowerCost) < (len(b) - len(a)) { + continue + } + + // The commented-out line below is the expression from the original Ukkonen + // paper. The paper only requires that this expression be minimized, and the + // uncommented integer version satisfies that without floating-point math, + // so it is faster.
+ // p = int(math.Floor(0.5*((float64(t)/float64(lowerCost)) - float64(len(b) - len(a))))) + p = ((t / lowerCost) - (len(b) - len(a))) / 2 + + k = -p + kprime = k + + rowlength := (len(b) - len(a)) + (2 * p) + + r = make([]int, rowlength+2) + + for i := 0; i < rowlength+2; i++ { + r[i] = infinite + } + + for i := 0; i <= len(a); i++ { + for j := 0; j <= rowlength; j++ { + if i == j+k && i == 0 { + r[j] = 0 + } else { + if j-1 < 0 { + ins = infinite + } else { + ins = r[j-1] + icost + } + + del = r[j+1] + dcost + sub = r[j] + scost + + if i-1 < 0 || i-1 >= len(a) || j+k-1 >= len(b) || j+k-1 < 0 { + sub = infinite + } else if a[i-1] == b[j+k-1] { + sub = r[j] + } + + if ins < del && ins < sub { + r[j] = ins + } else if del < sub { + r[j] = del + } else { + r[j] = sub + } + } + } + k++ + } + + if r[(len(b)-len(a))+(2*p)+kprime] <= t { + break + } else { + t *= 2 + } + } + + return r[(len(b)-len(a))+(2*p)+kprime] +} diff --git a/vendor/github.com/xrash/smetrics/wagner-fischer.go b/vendor/github.com/xrash/smetrics/wagner-fischer.go new file mode 100644 index 000000000..9883aea04 --- /dev/null +++ b/vendor/github.com/xrash/smetrics/wagner-fischer.go @@ -0,0 +1,48 @@ +package smetrics + +// The Wagner-Fischer algorithm for calculating the Levenshtein distance. +// The first two parameters are the two strings to be compared. The last three parameters are the insertion cost, the deletion cost and the substitution cost. These are normally defined as 1, 1 and 2 respectively. +func WagnerFischer(a, b string, icost, dcost, scost int) int { + + // Allocate both rows. + row1 := make([]int, len(b)+1) + row2 := make([]int, len(b)+1) + var tmp []int + + // Initialize the first row. + for i := 1; i <= len(b); i++ { + row1[i] = i * icost + } + + // For each row... + for i := 1; i <= len(a); i++ { + row2[0] = i * dcost + + // For each column... + for j := 1; j <= len(b); j++ { + if a[i-1] == b[j-1] { + row2[j] = row1[j-1] + } else { + ins := row2[j-1] + icost + del := row1[j] + dcost + sub := row1[j-1] + scost + + if ins < del && ins < sub { + row2[j] = ins + } else if del < sub { + row2[j] = del + } else { + row2[j] = sub + } + } + } + + // Swap the rows at the end of each row. + tmp = row1 + row1 = row2 + row2 = tmp + } + + // Because we swapped the rows, the final result is in row1 instead of row2. 
+ return row1[len(row1)-1] +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore new file mode 100644 index 000000000..6bf3abc41 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -0,0 +1,9 @@ +ot +fo +te +collison +consequentially +ans +nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc new file mode 100644 index 000000000..e2cb3ea94 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -0,0 +1,10 @@ +# https://github.com/codespell-project/codespell +[codespell] +builtin = clear,rare,informal +check-filenames = +check-hidden = +ignore-words = .codespellignore +interactive = 1 +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools +uri-ignore-words-list = * +write = diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 000000000..314766e91 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 000000000..895c7664b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,22 @@ +.DS_Store +Thumbs.db + +.tools/ +venv/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* +go.work +go.work.sum + +gen/ + +/example/dice/dice +/example/namedtracer/namedtracer +/example/otel-collector/otel-collector +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 000000000..d09555506 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,311 @@ +# See https://github.com/golangci/golangci-lint#config-file +run: + issues-exit-code: 1 #Default + tests: true #Default + +linters: + # Disable everything by default so upgrades to not include new "default + # enabled" linters. + disable-all: true + # Specifically enable linters we want to use. + enable: + - asasalint + - bodyclose + - depguard + - errcheck + - errorlint + - godot + - gofumpt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - tenv + - testifylint + - typecheck + - unconvert + - unused + - unparam + +issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. + exclude-rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" + linters: + - revive + # Yes, they are, but it's okay in a test. 
+ - path: _test\.go + text: "exported func.*returns unexported type.*which can be annoying to use" + linters: + - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + # It's okay to not run gosec in a test. + - path: _test\.go + linters: + - gosec + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... + - EXC0013 + +linters-settings: + depguard: + rules: + non-tests: + files: + - "!$test" + - "!**/*test/*.go" + - "!**/internal/matchers/*.go" + deny: + - pkg: "testing" + - pkg: "github.com/stretchr/testify" + - pkg: "crypto/md5" + - pkg: "crypto/sha1" + - pkg: "crypto/**/pkix" + otlp-internal: + files: + - "!**/exporters/otlp/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - "!**/exporters/otlp/otlptrace/*.go" + - "!**/exporters/otlp/otlptrace/internal/**.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - "!**/exporters/otlp/otlpmetric/internal/*.go" + - "!**/exporters/otlp/otlpmetric/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" + desc: Do not use cross-module internal packages. + otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. 
+ # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 000000000..40d62fa2e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 000000000..3202496c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md 
b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 000000000..4b361d026 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,3212 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + + + +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics SDK. (#5747, #5862) +- Add `WithExportBufferSize` option to the log batch processor. (#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support the `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21].
(#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. 
(#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. (#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. 
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice which makes it possible to have a zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. 
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when the `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter no longer prints the `AttributeValueLengthLimit` and `AttributeCountLimit` fields; instead, it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + +## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 + +### Added + +- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134) +- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) +- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. + This new module contains the Go implementation of the OpenTelemetry Logs SDK. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. + This new module contains an OTLP exporter that transmits log telemetry using HTTP. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. + This new module contains an exporter that prints log records to STDOUT.
+ This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. + The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) + +### Changed + +- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) +- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses Docker Compose to bring up services instead of Kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) + +## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 + +### Added + +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906) +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp`. (#4906) +- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) +- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. + This method is used to notify users if a log record will be emitted or not. (#5071) +- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. + This value represents an unset severity level. (#5072) +- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) +- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`. + This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. + At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085) +- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
+
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+  This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+  This module is in an alpha state; it is subject to breaking changes.
+  See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation, instead of registering just the last one multiple times, in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+  This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+  Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+  It is up to the user to decide if they want to use the returned `Resource` or not.
+  It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+  A sketch of handling this conflict follows this section.
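+
+A sketch of handling the conflict (the second schema URL and the attribute choices are hypothetical):
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+)
+
+func main() {
+	a := resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("svc-a"))
+	b := resource.NewWithAttributes("https://example.com/schema", semconv.ServiceVersion("1.0.0"))
+
+	merged, err := resource.Merge(a, b)
+	if errors.Is(err, resource.ErrSchemaURLConflict) {
+		// merged is still a usable partial result; it carries the combined
+		// attributes but no schema URL, and may mix convention versions.
+		log.Println("schema URL conflict:", err)
+	}
+	_ = merged
+}
+```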
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when the cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+  The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+  The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+  The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+  The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow the `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g., `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate the member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix the observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix the baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. A sketch of an implementation embedding these types follows the "Changed" section below.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments (see the sketch after this list). (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
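+
+A sketch of defining default bucket boundaries at instrument creation (the scope and instrument names are hypothetical):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+
+	// Default boundaries for this histogram; a view can still override them.
+	hist, err := meter.Float64Histogram(
+		"request.duration",
+		metric.WithUnit("s"),
+		metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.05, 0.1, 0.5, 1, 5),
+	)
+	if err != nil {
+		panic(err)
+	}
+	hist.Record(context.Background(), 0.042)
+}
+```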
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
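+
+A sketch of a custom implementation embedding the new types (the wrapper is hypothetical; it delegates to a no-op Tracer purely for illustration):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+// wrappedTracer embeds embedded.Tracer so it keeps satisfying trace.Tracer
+// even if methods are added to the interface in future releases.
+type wrappedTracer struct {
+	embedded.Tracer
+	inner trace.Tracer
+}
+
+func (t wrappedTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	// Custom behavior would go here; delegate to the wrapped Tracer.
+	return t.inner.Start(ctx, name, opts...)
+}
+
+func main() {
+	var tr trace.Tracer = wrappedTracer{inner: noop.NewTracerProvider().Tracer("example")}
+	_, span := tr.Start(context.Background(), "operation")
+	defer span.End()
+}
+```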
+
+### Fixed
+
+- Fix improper parsing of characters such as `+` and `/` by `Parse` in `go.opentelemetry.io/otel/baggage`, as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer` and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of the `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".AggregationBase2ExponentialHistogram` as the aggregation (see the sketch after this list). (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable the addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+  The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP status codes as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present, in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
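+
+A sketch of selecting the exponential histogram aggregation through a view, using the names that live in `go.opentelemetry.io/otel/sdk/metric` after the #4435 move (the instrument name and limits are hypothetical):
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Switch a histogram instrument to the base-2 exponential aggregation.
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "request.duration"},
+		sdkmetric.Stream{
+			Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
+				MaxSize:  160,
+				MaxScale: 20,
+			},
+		},
+	)
+	mp := sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
+	_ = mp
+}
+```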
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, the `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflicts at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments they created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase the instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation`, the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in the batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in the batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+  or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+  Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+  The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+  The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- The OTLP metrics exporter now supports the Exponential Histogram data type. (#4222)
+- Fix serialization of `time.Time` zero values in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+  Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix the build for BSD-based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support the global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+  This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+  Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go`, which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+  - The `AddConfig` used to hold configuration for addition measurements
+  - `NewAddConfig` used to create a new `AddConfig`
+  - `AddOption` used to configure an `AddConfig`
+  - The `RecordConfig` used to hold configuration for recorded measurements
+  - `NewRecordConfig` used to create a new `RecordConfig`
+  - `RecordOption` used to configure a `RecordConfig`
+  - The `ObserveConfig` used to hold configuration for observed measurements
+  - `NewObserveConfig` used to create a new `ObserveConfig`
+  - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute `Set` associated with the measurement. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+  This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move the No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+  - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap the `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve the error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue` (see the sketch after this section). (#3971)
+  - The `Int64Counter.Add` method now accepts `...AddOption`
+  - The `Float64Counter.Add` method now accepts `...AddOption`
+  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Int64Histogram.Record` method now accepts `...RecordOption`
+  - The `Float64Histogram.Record` method now accepts `...RecordOption`
+  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move the global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
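+
+A sketch of the option-based measurement call, written against the current names in `go.opentelemetry.io/otel/metric` (the `instrument` package variants from this release were later folded into `metric`; the scope, instrument, and attribute names are hypothetical):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+
+	counter, err := meter.Int64Counter("requests")
+	if err != nil {
+		panic(err)
+	}
+
+	// Attributes are passed as a measurement option rather than as a
+	// variadic list of attribute.KeyValue.
+	counter.Add(context.Background(), 1,
+		metric.WithAttributes(attribute.String("route", "/home")),
+	)
+}
+```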
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+  It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new `Exemplars` field containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in the OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)` and the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric` is removed; use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support the global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+  The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to the metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+  Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeaders` in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove the use of the deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in the OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead. (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages (see the sketch after this list). (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
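+
+A sketch of where these attributes are produced (the helper is hypothetical; `httpconv.ServerRequest` is shown from the `v1.17.0` semconv package):
+
+```go
+package instr
+
+import (
+	"net/http"
+
+	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// annotate adds server request attributes to a span; after this fix the
+// returned set no longer includes the http.target attribute.
+func annotate(span trace.Span, req *http.Request) {
+	span.SetAttributes(httpconv.ServerRequest("", req)...)
+}
+```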
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- The global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- The global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead, it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as an info metric and label in the Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` assertion allows instrument authors to test that returned datapoints have appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
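+
+A minimal sketch of the `WithRegisterer` option from the entry above, pairing the exporter with a non-default registry:
+
+```go
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func newPrometheusProvider() (*sdkmetric.MeterProvider, error) {
+	registry := prometheus.NewRegistry()
+	exporter, err := otelprom.New(otelprom.WithRegisterer(registry))
+	if err != nil {
+		return nil, err
+	}
+	// The exporter is a Reader; register it with the MeterProvider.
+	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)), nil
+}
+```
+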
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otltracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
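+
+Where the updated defaults do not fit, bucket boundaries can still be overridden per instrument; a sketch that pairs the `NewView` API from the `1.11.2/0.34.0` section above with this era's `go.opentelemetry.io/otel/sdk/metric/aggregation` package (the instrument name and boundaries are illustrative):
+
+```go
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/aggregation"
+)
+
+func durationBucketsView() sdkmetric.View {
+	return sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "request.duration"},
+		sdkmetric.Stream{
+			// Seconds-based boundaries instead of the spec defaults.
+			Aggregation: aggregation.ExplicitBucketHistogram{
+				Boundaries: []float64{0.005, 0.01, 0.05, 0.1, 0.5, 1, 5},
+			},
+		},
+	)
+}
+```
+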
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use the default view if an instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g., description, unit, data type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. This removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
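+
+Each of the removals above points to the refactored SDK noted under "Changed"; a minimal sketch of its reader-based wiring, assuming an exporter `exp` that satisfies this package's `Exporter` interface:
+
+```go
+import sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+func newMeterProvider(exp sdkmetric.Exporter) *sdkmetric.MeterProvider {
+	// A periodic reader collects and exports on a fixed interval.
+	reader := sdkmetric.NewPeriodicReader(exp)
+	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+}
+```
+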
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.18.0` to `v0.19.0`. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
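+
+A small sketch of the replacement struct named above, with the `Library` alias from the "Changed" entry (the field values are illustrative):
+
+```go
+import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+var scope = instrumentation.Scope{
+	Name:      "example.com/mylib", // instrumentation scope name
+	Version:   "v0.1.0",
+	SchemaURL: "https://opentelemetry.io/schemas/1.11.0",
+}
+
+// Library is aliased to Scope, so existing code keeps compiling.
+var lib instrumentation.Library = scope
+```
+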
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of the `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
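+
+A one-line sketch of the `Meter` convenience function restored in this release's `metric/global` package (later folded into the `go.opentelemetry.io/otel` package):
+
+```go
+import "go.opentelemetry.io/otel/metric/global"
+
+var meter = global.Meter("example.com/instrumentation")
+
+// Equivalent to:
+// global.MeterProvider().Meter("example.com/instrumentation")
+```
+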
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the exporters' configuration in the TracerProviders message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits, and setting it to a negative value will mean that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
+
+### Changed
+
+- Drop the oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` clients. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits; this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
+  To enable, use a logger with verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in the Prometheus example, fixing an issue where the example only has a `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not also when adding them to the baggage itself. (#2522)
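+
+A minimal sketch of where that validation now happens, using the public `go.opentelemetry.io/otel/baggage` API:
+
+```go
+import "go.opentelemetry.io/otel/baggage"
+
+func buildBaggage() (baggage.Baggage, error) {
+	// Validation (and value decoding) happens in NewMember...
+	member, err := baggage.NewMember("user_id", "42")
+	if err != nil {
+		return baggage.Baggage{}, err
+	}
+	// ...so constructing the Baggage does not re-validate the member.
+	return baggage.New(member)
+}
+```
+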
+- The order in which attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to comply with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback of the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<SIGNAL>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- The Metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with the current specification and protocol along with the built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The no-op implementations of sync and async instruments are no longer exported; new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package, note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a no-op `MeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has the `WithSchemaURL` option set. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added the `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when a panic is handled in `span.End`. (#2163)
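+
+A short sketch of the option from the entry above (`span` and the error value are illustrative):
+
+```go
+import "go.opentelemetry.io/otel/trace"
+
+func record(span trace.Span, err error) {
+	// Attach a stack trace to the recorded error event.
+	span.RecordError(err, trace.WithStackTrace(true))
+}
+```
+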
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - `ValueRecorder` becomes `Histogram`
+  - `ValueObserver` becomes `Gauge`
+  - `SumObserver` becomes `CounterObserver`
+  - `UpDownSumObserver` becomes `UpDownCounterObserver`
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is now harmonized such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier, the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed the metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
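+
+A quick sketch of the typed slice helpers named in the Added and Deprecated entries above:
+
+```go
+import "go.opentelemetry.io/otel/attribute"
+
+// Typed slice constructors replace the deprecated Array function.
+var attrs = []attribute.KeyValue{
+	attribute.StringSlice("retry.delays", []string{"1s", "2s", "4s"}),
+	attribute.Int64Slice("shard.ids", []int64{1, 4, 9}),
+	attribute.BoolSlice("health.checks", []bool{true, false}),
+}
+```
+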
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in `resources.go`. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added the `WithOSDescription` resource configuration option to set the OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added the `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added the `LinkFromContext` API to return a `Link` that encapsulates the `SpanContext` from the provided context along with attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- The OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
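+
+A minimal sketch of the `SpanRecorder` testing flow from the "Added" entries above:
+
+```go
+import (
+	"context"
+	"testing"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+func TestSpanEnds(t *testing.T) {
+	recorder := tracetest.NewSpanRecorder()
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(recorder))
+
+	_, span := tp.Tracer("test").Start(context.Background(), "operation")
+	span.End()
+
+	if got := len(recorder.Ended()); got != 1 {
+		t.Errorf("expected 1 ended span, got %d", got)
+	}
+}
+```
+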
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as the default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1 | Cancelled |
+    | 4 | Deadline Exceeded |
+    | 8 | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added the `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added the `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added the `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
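+
+A short sketch of the `ParseTraceState` and `Len` additions listed above (the header value is illustrative):
+
+```go
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func inspect(header string) error {
+	ts, err := trace.ParseTraceState(header)
+	if err != nil {
+		return err // malformed tracestate header value
+	}
+	fmt.Println(ts.Len())       // number of list-members
+	fmt.Println(ts.Get("rojo")) // value for one list-member key
+	return nil
+}
+```
+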
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to the spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated the `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
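+
+A small sketch of the schema-URL requirement from the `NewWithAttributes` entry above (the service name is illustrative):
+
+```go
+import (
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
+)
+
+// The first argument is now a schema URL;
+// resource.NewSchemaless keeps the old attribute-only behavior.
+var res = resource.NewWithAttributes(
+	semconv.SchemaURL,
+	semconv.ServiceNameKey.String("checkout"),
+)
+```
+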
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` so it now only accepts Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated the `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated the `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated the `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated the `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
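+
+A minimal migration sketch for the deprecations above, using the replacement Zipkin package and its renamed `New` constructor:
+
+```go
+// Deprecated import path: "go.opentelemetry.io/otel/exporters/trace/zipkin"
+import "go.opentelemetry.io/otel/exporters/zipkin"
+
+func newZipkinExporter() (*zipkin.Exporter, error) {
+	// NewExporter was renamed to New as part of this release.
+	return zipkin.New("http://localhost:9411/api/v2/spans")
+}
+```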
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK` types. Use the corresponding `With*()` options to use them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns if the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as the default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
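+
+A minimal sketch of pointing the Jaeger exporter at a collector after this fix (the explicit endpoint below simply matches the new default):
+
+```go
+import "go.opentelemetry.io/otel/exporters/jaeger"
+
+func newJaegerExporter() (*jaeger.Exporter, error) {
+	return jaeger.New(
+		jaeger.WithCollectorEndpoint(
+			jaeger.WithEndpoint("http://localhost:14268/api/traces"),
+		),
+	)
+}
+```
+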
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if a channel is closed, fixing a Jaeger exporter test panic caused by closing an already-closed channel. (#1870, #1973)
+- Avoid transport security when the OTLP endpoint is a Unix socket. (#2001)
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, which set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to the batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds a test to check that BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added the `WithOSType` resource configuration option to set the OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds the `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC endpoints, TLS certificates, headers, compression, and timeout via environment variables. (#1758, #1769, #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky reconnection of the OTLP client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This makes `SamplingParameters` conform to the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, and `JAEGER_PASSWORD` are renamed
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, and `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with the OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It is no longer a conglomerate of dropped span, event, and link attributes. (#1771)
+- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773)
+- Modify the Zipkin Exporter's default service name to use the default resource's `serviceName` instead of an empty value. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The `prometheus.InstallNewPipeline` example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter.
(#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shut down and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, and `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Removed the setting of an error status while recording an error with a Span from the oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant with the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it sends telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobufs. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to the tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added a non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter; it uses the resource instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+  These are now returned as a `SpanProcessor` interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in the global trace delegate implementation. (#1686)
+- Reduced excess memory usage by the global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
+- Added `code` attributes to the `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+  | OS      | Go Version | Architecture |
+  | ------- | ---------- | ------------ |
+  | Ubuntu  | 1.15       | amd64        |
+  | Ubuntu  | 1.14       | amd64        |
+  | Ubuntu  | 1.15       | 386          |
+  | Ubuntu  | 1.14       | 386          |
+  | MacOS   | 1.15       | amd64        |
+  | MacOS   | 1.14       | amd64        |
+  | Windows | 1.15       | amd64        |
+  | Windows | 1.14       | amd64        |
+  | Windows | 1.15       | 386          |
+  | Windows | 1.14       | 386          |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces (#1575); the sealing pattern is sketched after the list below.
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
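+
+The change above relies on the common Go "sealed interface" idiom: an unexported method keeps outside packages from implementing the interface. A minimal sketch of the pattern, with illustrative names rather than the actual SDK types:
+
+```go
+package sealed
+
+// Option is sealed: the unexported apply method prevents
+// implementations outside this package.
+type Option interface {
+	apply(*config)
+}
+
+type config struct{ limit int }
+
+// optionFunc is the only in-package implementation of Option.
+type optionFunc func(*config)
+
+func (f optionFunc) apply(c *config) { f(c) }
+
+// WithLimit returns an Option that sets the limit.
+func WithLimit(n int) Option {
+	return optionFunc(func(c *config) { c.limit = n })
+}
+```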
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` target is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric. (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added CodeQL workflow to GitHub Actions. (#1428)
+- Added Gosec workflow to GitHub Actions. (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently, it only supports binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions. (#1382)
+- Remove duplicate checkout from GitHub Actions workflow. (#1407)
+- The metric `array` aggregator is renamed to `exact` to match its `aggregation.Kind`. (#1412)
+- The metric `exact` aggregator includes per-point timestamps. (#1412)
+- The metric stdout exporter uses the MinMaxSumCount aggregator for ValueRecorder instruments. (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext`. (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified. (#1412)
+- Remove the DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released. (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into the `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4. (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools`. (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns the `SpanContext` of the current Span in a context (see the sketch after this list). (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to the `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
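+
+A minimal sketch of reading the current span context, written against the present-day API (in this release `SpanContext` still had exported fields rather than accessor methods):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	ctx := context.Background()
+	// With no span in the context, an invalid (zero) SpanContext is returned.
+	sc := trace.SpanContextFromContext(ctx)
+	fmt.Println(sc.IsValid(), sc.TraceID(), sc.SpanID())
+}
+```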
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+  This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move the test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000. (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option, as sketched after this list. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
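+
+A minimal sketch of the replacement pattern, written against the present-day package layout; `trace.WithTimestamp` supplies the explicit event time:
+
+```go
+package main
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	_, span := otel.Tracer("example").Start(context.Background(), "op")
+	defer span.End()
+
+	// Instead of the removed AddEventWithTimestamp:
+	span.AddEvent("cache.miss", trace.WithTimestamp(time.Now()))
+}
+```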
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global `TextMapPropagator` now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer. (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add a `Shutdown` method to the `sdk/trace` provider that shuts down processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with a unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+  They now are `Unset`, `Error`, and `Ok`.
+  They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the `codes` package from this project instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+  The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+  This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add a reconnecting UDP connection type to the Jaeger exporter.
+  This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+  It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+  This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` propagators from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+  This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in the OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move the `tools` package under `internal`. (#1141)
+- Move the `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+  The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename the `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on the parent span. (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+  recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+  Style Guide to reflect this styling rule. (#1172)
+- Don't consider an unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use the LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename the `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename the `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+  `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey`, from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor` (see the sketch after this list). (#1078)
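+
+A minimal sketch of registering a processor at construction time, written against the present-day names (`NewTracerProvider`); in this release the constructor was still called `NewProvider`:
+
+```go
+package main
+
+import (
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+func main() {
+	// An in-memory exporter stands in for a real one.
+	exp := tracetest.NewInMemoryExporter()
+	bsp := sdktrace.NewBatchSpanProcessor(exp)
+
+	// The processor is registered as the provider is built.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
+	_ = tp
+}
+```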
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+  This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed the `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` packages into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify callback function naming.
+  Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against the last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification-compliant `Exporter` interface.
+  This interface still supports the export of `SpanData`, but only as a slice.
+  Implementations are also now required to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+  If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error; it returns only a `*Provider`.
+  This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+  Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+  This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+  The functionality this method provided was limited compared to what a user can provide themselves.
+  It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage (see the sketch after this list for the replacement pattern). (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+  These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
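+
+The usual replacement for the removed `WithSpan` is an explicit start/end pair. A minimal sketch against the present-day API:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+)
+
+func doWork(ctx context.Context) {
+	// Start a span explicitly and end it when the work returns,
+	// instead of wrapping the work in the removed WithSpan helper.
+	ctx, span := otel.Tracer("example").Start(ctx, "doWork")
+	defer span.End()
+
+	_ = ctx // pass ctx on to downstream calls
+}
+
+func main() { doWork(context.Background()) }
+```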
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+  These functions build a new exporter with default SDK options and register the exporter with the `global` package, respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+  This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+  This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename the `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, with which it was synonymous. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+  References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+  Values for the HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+  This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+  If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+  Preference is given to Single Header encoding, with Multiple Header being the fallback if Single Header is not found or is invalid.
+  This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+  The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+  If B3 header keys are needed, [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+  This removes the behavior of changing the debug flag into a set sampling bit.
+  Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided, as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+  This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+  This replaces the prior use of the `Record` `struct` for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+  This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for the Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename the `simple` integrator to the `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+  With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+  All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+  Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling for any one-off error handlers, replacing them with the `global.Handle` function (see the sketch after this list). (#791)
+- Rename the `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order to the Histogram and DDSketch `New()` functions consistent. (#781)
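+
+A minimal sketch of the unified error-handling pattern, written against the present-day `otel` package (in this release the function lived in `api/global`):
+
+```go
+package main
+
+import (
+	"errors"
+
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	// Route one-off errors through the globally registered handler
+	// instead of ad hoc logging in each exporter or processor.
+	otel.Handle(errors.New("exporter: connection lost")) // illustrative error
+}
+```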
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change the OTLP example to use the exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks. (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename the `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace the `Measure` instrument with the `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify custom http client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to the `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename the SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to the `api/metric` package. (#706)
+- Move `SpanContext` from `core` to the `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
+- An `Iter` method to the `Resource` to return an `AttributeIterator` over its attributes. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+  This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+  This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in CircleCI to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on 3rd party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequently, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function that returns `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorder the Makefile targets within the `precommit` target so that files are generated and the code is built before linting, giving much clearer compiler errors about syntax issues. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing a regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+  This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+  This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics; instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Rename the `circle-ci` target to the more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+  Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+  This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+  This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the OpenTelemetry Go library.
+It contains the API and SDK for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
+[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
+[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
+[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
+[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
+[1.16.0-rc.1/0.39.0-rc.1]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + + +[Go 1.23]: https://go.dev/doc/go1.23 +[Go 1.22]: https://go.dev/doc/go1.22 +[Go 1.21]: https://go.dev/doc/go1.21 +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 000000000..945a07d2b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @MrAlias @XSAM @dashpole @pellared @dmathieu + +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 000000000..bb3396557 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,660 @@ +# Contributing to opentelemetry-go + 
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean`, then everything is
+up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print a warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote
+(`<YOUR_FORK>` and `<YOUR_GITHUB_USERNAME>` are placeholders for your
+remote name and GitHub username):
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company count as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed and consensus
+    reached only need one qualified approval. The discussion and resolution
+    needs to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting, the objections to the
+    PR may be dismissed or resolved, or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require that existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* An urgent fix can be an exception as long as it has been actively communicated
+  among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+  status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+  changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conform to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
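+To make the idea concrete, here is a minimal, hypothetical sketch (the names
+are illustrative, not taken from the specification or this repository): where
+the specification describes an operation with a list of optional parameters,
+an idiomatic Go API expresses them with the variadic functional-options
+pattern described under Configuration below, rather than transcribing the
+spec's parameter list literally.
+
+```go
+// NewExporter is a hypothetical constructor: required parameters are
+// positional, while the spec's optional parameters become variadic options.
+func NewExporter(endpoint string, options ...Option) (*Exporter, error) {
+	cfg := newConfig(options...)
+	return &Exporter{cfg: cfg}, nil
+}
+```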
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+  ```sh
+  go install golang.org/x/pkgsite/cmd/pkgsite@latest
+  pkgsite
+  ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+### README files
+
+Each (non-internal, non-test, non-documentation) package must contain a
+`README.md` file containing at least a title and a `pkg.go.dev` badge.
+
+The README should not be a repetition of Go doc comments.
+
+You can verify the presence of all README files with the `make verify-readmes`
+command.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices,
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you
+plan to submit. This check will need to pass for your changes to be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, documentation explaining the reasons
+needs to be included.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the strong
+type system of Go restricts the function design options. There are a few ways
+to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config`s in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used outside of the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config`s are not shared across package boundaries,
+meaning a `config` from one package should not be directly used by another. The
+one exception is the API packages. The configs from the base API, e.g.
+`go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK; therefore it is expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods.
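+As a hedged sketch of this rule (the type and field here are illustrative,
+loosely modeled on the API configs mentioned above, not a definitive
+implementation):
+
+```go
+// TracerConfig is an exported configuration type. Its fields stay
+// unexported so they can change or grow without breaking users; callers
+// read values through exported accessor methods instead.
+type TracerConfig struct {
+	instrumentationVersion string
+}
+
+// InstrumentationVersion returns the configured instrumentation version.
+func (c TracerConfig) InstrumentationVersion() string {
+	return c.instrumentationVersion
+}
+```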
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function wraps setting any defaults and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If the `config` options are also validated, this function can additionally
+return an error, which is expected to be handled by the instantiation function
+or propagated to the user.
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or, in the
+special case of boolean options, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct`s that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is to name method parameters, making the code self-documenting.
+Therefore, where appropriate, methods of every exported interface type
+should have their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems happened with libraries that upgraded to v2 when an application did not,
+and they would not produce any telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface.
For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new interface that is the super-set of an `Exporter` can be created.
+
+```go
+// ClosingExporter embeds Exporter and adds a Close method, forming a
+// super-set of the original interface.
+type ClosingExporter interface {
+	Exporter
+	Close()
+}
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface and will duplicate the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests when this term
+is not in their root name.
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, the upgrade will fail[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+    be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+    recognized by `go.opentelemetry.io/otel/baggage` and
+    `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method.
In that case the context cancellation can be used for the timeout with the +restriction that this behavior is documented for the method. Otherwise, timeouts +are expected to be handled by the user calling the API, not the implementation. + +Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method +of a provider. It is assumed the context passed from a user is not used for this purpose. + +Outside of the direct recording of telemetry from the API (e.g. exporting telemetry, +force flushing telemetry, shutting down a signal provider) the context cancellation +should be honored. This means all work done on behalf of the user provided context +should be canceled. + +## Approvers and Maintainers + +### Approvers + +### Maintainers + +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google +- [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 000000000..a1228a212 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,297 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + +.PHONY: tools +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := python:3.11.3-slim-bullseye + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + +# Install python packages into the virtual environment. +$(PYTOOLS)/%: $(PYTOOLS) + @$(DOCKERPY) $(PIP) install -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... + +.PHONY: vanity-import-fix +vanity-import-fix: $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. 
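+# A usage sketch: run `make go-work` from the repository root; the generated
+# go.work file lets `go build` and `go test` resolve this repository's many
+# modules from the local checkout instead of from published versions.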
+.PHONY: go-work +go-work: $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +.PHONY: benchmark +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) +benchmark/%: + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ + && cd $* \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.21 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint govulncheck + +.PHONY: vanity-import-check +vanity-import-check: $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + +.PHONY: codespell +codespell: $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . 
-type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + +.PHONY: prerelease +prerelease: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 000000000..efec27890 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,111 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. 
+
+## Project Status
+
+| Signal  | Status   |
+|---------|----------|
+| Traces  | Stable   |
+| Metrics | Stable   |
+| Logs    | Beta[^1] |
+
+Progress and status specific to this repository are tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+[^1]: https://github.com/orgs/open-telemetry/projects/43
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+  supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+  testing for the oldest (now archived upstream) version of Go. This, and
+  future, releases of opentelemetry-go may include features only supported by
+  the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS       | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu   | 1.23       | amd64        |
+| Ubuntu   | 1.22       | amd64        |
+| Ubuntu   | 1.23       | 386          |
+| Ubuntu   | 1.22       | 386          |
+| Linux    | 1.23       | arm64        |
+| Linux    | 1.22       | arm64        |
+| macOS 13 | 1.23       | amd64        |
+| macOS 13 | 1.22       | amd64        |
+| macOS    | 1.23       | arm64        |
+| macOS    | 1.22       | arm64        |
+| Windows  | 1.23       | amd64        |
+| Windows  | 1.22       | amd64        |
+| Windows  | 1.23       | 386          |
+| Windows  | 1.22       | 386          |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application,
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides, or
+want to build your own instrumentation for your application directly, you will
+need to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
+are a good way to see some practical uses of this process.
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
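+For illustration only, a minimal sketch of both steps, wiring the stdout trace
+exporter into a tracer provider and creating a span (resource and sampler
+configuration omitted):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Exporter: writes finished spans to stdout.
+	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Pipeline: batch spans and hand them to the exporter.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(ctx) }()
+	otel.SetTracerProvider(tp)
+
+	// Instrumentation: create and end a span.
+	_, span := otel.Tracer("example").Start(ctx, "hello")
+	span.End()
+}
+```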
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                               | Logs | Metrics | Traces |
+|----------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/)              | ✓    | ✓       | ✓      |
+| [Prometheus](./exporters/prometheus/)  |      | ✓       |        |
+| [stdout](./exporters/stdout/)          | ✓    | ✓       | ✓      |
+| [Zipkin](./exporters/zipkin/)          |      |         | ✓      |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 000000000..ffa9b6125
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,135 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease), to ensure that there are no unwanted changes in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff main..prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`:
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Make sure the new section is under the comment for the released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies that are tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <sub-module tag>
+   ...
+   ```
+
+## Release
+
+Finally, create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 000000000..412f1e362
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+ +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). 
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules of the same major version with this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only change that will have been included is an update of that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable, a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules, the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules should be considered for a stable
+release. The `otel/metric` and `otel/sdk/metric` modules are still under
+active development and the `otel` module depends on both `otel/trace` and
+`otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done, the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable release.
+They are then released as version `v1.1.0` (the minor version is incremented to
+indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 000000000..5b3da8f14
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 000000000..eef51ebc2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,5 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 000000000..318e42fca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size at which most attribute encodings will not allocate
+		// new memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder, preferably in an init() function
+// or in a var definition.
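+//
+// A var-definition sketch (the variable name is hypothetical):
+//
+//	var myEncoderID = attribute.NewEncoderID()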
+func NewEncoderID() EncoderID { + return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} +} + +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. +// +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. +func DefaultEncoder() Encoder { + defaultEncoderOnce.Do(func() { + defaultEncoderInstance = &defaultAttrEncoder{ + pool: sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } + }) + return defaultEncoderInstance +} + +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { + buf := d.pool.Get().(*bytes.Buffer) + defer d.pool.Put(buf) + buf.Reset() + + for iter.Next() { + i, keyValue := iter.IndexedAttribute() + if i > 0 { + _, _ = buf.WriteRune(',') + } + copyAndEscape(buf, string(keyValue.Key)) + + _, _ = buf.WriteRune('=') + + if keyValue.Value.Type() == STRING { + copyAndEscape(buf, keyValue.Value.AsString()) + } else { + _, _ = buf.WriteString(keyValue.Value.Emit()) + } + } + return buf.String() +} + +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { + return defaultEncoderID +} + +// copyAndEscape escapes `=`, `,` and its own escape character (`\`), +// making the default encoding unique. +func copyAndEscape(buf *bytes.Buffer, val string) { + for _, ch := range val { + switch ch { + case '=', ',', escapeChar: + _, _ = buf.WriteRune(escapeChar) + } + _, _ = buf.WriteRune(ch) + } +} + +// Valid returns true if this encoder ID was allocated by +// `NewEncoderID`. Invalid encoder IDs will not be cached. +func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 000000000..be9cd922d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. 
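+//
+// A usage sketch (the keys shown are hypothetical):
+//
+//	f := attribute.NewDenyKeysFilter("password", "token")
+//	f(attribute.String("password", "hunter2")) // false: dropped
+//	f(attribute.String("user", "alice"))       // true: kept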
+func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 000000000..f2ba89ce4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. +func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. 
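+//
+// A typical loop (sketch; s1 and s2 are existing Sets):
+//
+//	mi := attribute.NewMergeIterator(&s1, &s2)
+//	for mi.Next() {
+//		kv := mi.Attribute() // s1 wins on duplicate keys.
+//		_ = kv
+//	}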
+func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 000000000..d9a22c650 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,123 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int(name, value). +func (k Key) Int(v int) KeyValue { + return KeyValue{ + Key: k, + Value: IntValue(v), + } +} + +// IntSlice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- IntSlice(name, value). +func (k Key) IntSlice(v []int) KeyValue { + return KeyValue{ + Key: k, + Value: IntSliceValue(v), + } +} + +// Int64 creates a KeyValue instance with an INT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64(name, value). +func (k Key) Int64(v int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64Value(v), + } +} + +// Int64Slice creates a KeyValue instance with an INT64SLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Int64Slice(name, value). +func (k Key) Int64Slice(v []int64) KeyValue { + return KeyValue{ + Key: k, + Value: Int64SliceValue(v), + } +} + +// Float64 creates a KeyValue instance with a FLOAT64 Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Float64(name, value). 
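+//
+// For example (sketch; the key name is hypothetical):
+//
+//	attribute.Key("http.request.duration_ms").Float64(12.5)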
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 000000000..3028f9a40
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+	Key   Key
+	Value Value
+}
+
+// Valid reports whether kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+	return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+	return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+	return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+	return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+	return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+	return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+	return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+	return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+	return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+	return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+	return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
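+//
+// For example (sketch; time.Duration implements fmt.Stringer):
+//
+//	attribute.Stringer("timeout", 5*time.Second)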
+func Stringer(k string, v fmt.Stringer) KeyValue {
+	return Key(k).String(v.String())
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 000000000..6cbefcead
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,411 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"cmp"
+	"encoding/json"
+	"reflect"
+	"slices"
+	"sort"
+)
+
+type (
+	// Set is the representation for a distinct attribute set. It manages an
+	// immutable set of attributes, with an internal cache for storing
+	// attribute encodings.
+	//
+	// This type will remain comparable for backwards compatibility. The
+	// equivalence of Sets across versions is not guaranteed to be stable.
+	// Prior versions may find two Sets to be equal or not when compared
+	// directly (i.e. ==), but subsequent versions may not. Users should use
+	// the Equals method to ensure stable equivalence checking.
+	//
+	// Users should also use the Distinct returned from Equivalent as a map key
+	// instead of a Set directly. In addition to that type providing guarantees
+	// on stable equivalence, it may also provide performance improvements.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct is a unique identifier of a Set.
+	//
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+	// should always be used as a map key instead of a Set.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Sortable implements sort.Interface, used for sorting KeyValue.
+	//
+	// Deprecated: This type is no longer used. It was added as a performance
+	// optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+	// longer supported by the module).
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided for optimized calling utility.
+func EmptySet() *Set {
+	return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (d Distinct) reflectValue() reflect.Value {
+	return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+	return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+	if l == nil || !l.equivalent.Valid() {
+		return 0
+	}
+	return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return KeyValue{}, false
+	}
+	value := l.equivalent.reflectValue()
+
+	if idx >= 0 && idx < value.Len() {
+		// Note: The Go compiler successfully avoids an allocation for
+		// the interface{} conversion here:
+		return value.Index(idx).Interface().(KeyValue), true
+	}
+
+	return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
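+//
+// A lookup sketch (set is an existing Set; the key is hypothetical):
+//
+//	if v, ok := set.Value("http.method"); ok {
+//		_ = v.AsString()
+//	}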
+func (l *Set) Value(k Key) (Value, bool) {
+	if l == nil || !l.equivalent.Valid() {
+		return Value{}, false
+	}
+	rValue := l.equivalent.reflectValue()
+	vlen := rValue.Len()
+
+	idx := sort.Search(vlen, func(idx int) bool {
+		return rValue.Index(idx).Interface().(KeyValue).Key >= k
+	})
+	if idx >= vlen {
+		return Value{}, false
+	}
+	keyValue := rValue.Index(idx).Interface().(KeyValue)
+	if k == keyValue.Key {
+		return keyValue.Value, true
+	}
+	return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+	if l == nil {
+		return false
+	}
+	_, ok := l.Value(k)
+	return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+	return Iterator{
+		storage: l,
+		idx:     -1,
+	}
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+	iter := l.Iter()
+	return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct type
+// guarantees that the result will equal the equivalent Distinct value of any
+// attribute set with the same elements as this, where sets are made unique by
+// choosing the last value in the input for any given key.
+func (l *Set) Equivalent() Distinct {
+	if l == nil || !l.equivalent.Valid() {
+		return emptySet.equivalent
+	}
+	return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+	return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+	if l == nil || encoder == nil {
+		return ""
+	}
+
+	return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+	return Set{
+		equivalent: emptySet.equivalent,
+	}
+}
+
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+	s, _ := NewSetWithFiltered(kvs, nil)
+	return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
+	s, _ := NewSetWithFiltered(kvs, nil)
+	return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attribute keys from the
+// return value. Excluded keys are returned as a slice of attribute values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+
+	// Stable sort so the following de-duplication can implement
+	// last-value-wins semantics.
+	slices.SortStableFunc(kvs, func(a, b KeyValue) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	position := len(kvs) - 1
+	offset := position - 1
+
+	// The requirements stated above require that the stable
+	// result be placed at the end of the input slice, while
+	// overwritten values are swapped to the beginning.
+	//
+	// De-duplicate with last-value-wins semantics. Preserve
+	// duplicate values at the beginning of the input slice.
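+	//
+	// Illustrative walk-through (hypothetical input): a stably sorted
+	// slice [a=1 a=3 b=2] de-duplicates below to kvs = [a=3 b=2], with
+	// the overwritten a=1 left at the front of the original backing
+	// array.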
+ for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. 
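+// For example, a 3-element input is stored as a comparable [3]KeyValue
+// array value, so two Distinct values built from equal inputs compare
+// equal with ==.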
+func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + return [1]KeyValue(kvs) + case 2: + return [2]KeyValue(kvs) + case 3: + return [3]KeyValue(kvs) + case 4: + return [4]KeyValue(kvs) + case 5: + return [5]KeyValue(kvs) + case 6: + return [6]KeyValue(kvs) + case 7: + return [7]KeyValue(kvs) + case 8: + return [8]KeyValue(kvs) + case 9: + return [9]KeyValue(kvs) + case 10: + return [10]KeyValue(kvs) + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Set. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. +func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 000000000..e584b2477 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 000000000..9ea0ecbbd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. +func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. 
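+//
+// A minimal sketch (the key and value are illustrative): StringValue
+// builds only the value half of an attribute; pair it with a Key to form
+// a KeyValue:
+//
+//	kv := attribute.KeyValue{Key: "k", Value: attribute.StringValue("v")}
+//	// same attribute as attribute.String("k", "v")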
+func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
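+//
+// Illustrative outputs, derived from the cases below:
+//
+//	attribute.IntValue(42).Emit()                    // "42"
+//	attribute.BoolValue(true).Emit()                 // "true"
+//	attribute.StringSliceValue([]string{"a"}).Emit() // `["a"]`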
+func (v Value) Emit() string {
+	switch v.Type() {
+	case BOOLSLICE:
+		return fmt.Sprint(v.asBoolSlice())
+	case BOOL:
+		return strconv.FormatBool(v.AsBool())
+	case INT64SLICE:
+		j, err := json.Marshal(v.asInt64Slice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+		}
+		return string(j)
+	case INT64:
+		return strconv.FormatInt(v.AsInt64(), 10)
+	case FLOAT64SLICE:
+		j, err := json.Marshal(v.asFloat64Slice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+		}
+		return string(j)
+	case FLOAT64:
+		return fmt.Sprint(v.AsFloat64())
+	case STRINGSLICE:
+		j, err := json.Marshal(v.asStringSlice())
+		if err != nil {
+			return fmt.Sprintf("invalid: %v", v.asStringSlice())
+		}
+		return string(j)
+	case STRING:
+		return v.stringly
+	default:
+		return "unknown"
+	}
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+	var jsonVal struct {
+		Type  string
+		Value interface{}
+	}
+	jsonVal.Type = v.Type().String()
+	jsonVal.Value = v.AsInterface()
+	return json.Marshal(jsonVal)
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 000000000..7d798435e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 000000000..36f536703
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,1018 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+	maxMembers               = 180
+	maxBytesPerMembers       = 4096
+	maxBytesPerBaggageString = 8192
+
+	listDelimiter     = ","
+	keyValueDelimiter = "="
+	propertyDelimiter = ";"
+)
+
+var (
+	errInvalidKey      = errors.New("invalid key")
+	errInvalidValue    = errors.New("invalid value")
+	errInvalidProperty = errors.New("invalid baggage list-member property")
+	errInvalidMember   = errors.New("invalid baggage list-member")
+	errMemberNumber    = errors.New("too many list-members in baggage-string")
+	errMemberBytes     = errors.New("list-member too large")
+	errBaggageBytes    = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+	key, value string
+
+	// hasValue distinguishes a property whose value was explicitly set to
+	// the zero-value ("") from a property that has no value at all.
+	hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// If key is invalid, an error will be returned.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property key.
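+//
+// A minimal sketch (the property key is hypothetical):
+//
+//	p, err := baggage.NewKeyProperty("isRedacted")
+//	if err != nil { /* the key was empty or contained invalid UTF-8 */ }
+//	_ = p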
+func NewKeyProperty(key string) (Property, error) {
+	if !validateBaggageName(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead
+// that does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	if !validateValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be a valid, non-empty UTF-8 string.
+// The passed value must be a valid UTF-8 string.
+// However, the specific Propagators that are used to transmit baggage entries across
+// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property key.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+	if !validateBaggageName(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+	if !validateBaggageValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	p, ok := parsePropertyInternal(property)
+	if !ok {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !validateBaggageName(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	if p.hasValue && !validateBaggageValue(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// indicating whether the value is set; this distinguishes a Property whose
+// value is the empty string from one whose value is not set at all.
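+//
+// For example, a property created with NewKeyProperty("k") returns
+// ("", false), while one created with NewKeyValuePropertyRaw("k", "")
+// returns ("", true).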
+func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a header string compliant with the W3C Baggage +// specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a header string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties + + // hasData indicates whether the created property contains data or not. + // Properties that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewMember returns a new Member from the passed arguments. +// +// The passed key must be compliant with W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. +// +// Notice: Consider using [NewMemberRaw] instead +// that does not require percent-encoding of the value. +func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + if !validateValue(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewMemberRaw(key, decodedValue, props...) +} + +// NewMemberRaw returns a new Member from the passed arguments. +// +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. 
+// For example, the W3C Baggage specification restricts the baggage keys to strings that
+// satisfy the token definition from RFC7230, Section 3.2.6.
+// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage key.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var props properties
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
+		for _, pStr := range strings.Split(properties, propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+	}
+	// Parse the member key/value pair.
+
+	// Take into account that a value can contain equal signs (=).
+	k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+	if !found {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+	}
+	// "Leading and trailing whitespaces are allowed but MUST be trimmed
+	// when converting the header into a data structure."
+	key := strings.TrimSpace(k)
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	rawVal := strings.TrimSpace(v)
+	if !validateValue(rawVal) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+	}
+
+	// Decode a percent-encoded value.
+	unescapeVal, err := url.PathUnescape(rawVal)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
+	}
+
+	value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string {
+	if utf8.ValidString(unescapeVal) {
+		return unescapeVal
+	}
+	// W3C baggage spec:
+	// https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+	var b strings.Builder
+	b.Grow(cap)
+	for i := 0; i < len(unescapeVal); {
+		r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+		if r == utf8.RuneError && size == 1 {
+			// Invalid UTF-8 sequence found, replace it with '�'
+			_, _ = b.WriteString("�")
+		} else {
+			_, _ = b.WriteRune(r)
+		}
+		i += size
+	}
+
+	return b.String()
+}
+
+// validate ensures m conforms to the W3C Baggage specification; the key
+// must be a valid, non-empty UTF-8 string. An error is returned otherwise.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !validateBaggageName(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	if !validateBaggageValue(m.value) {
+		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
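+//
+// Illustrative round-trip (the member name and value are hypothetical):
+//
+//	m, _ := baggage.NewMemberRaw("userID", "42")
+//	m.Key()   // "userID"
+//	m.Value() // "42"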
+func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a header string compliant with the W3C Baggage +// specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (m Member) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + + s := m.key + keyValueDelimiter + valueEscape(m.value) + if len(m.properties) > 0 { + s += propertyDelimiter + m.properties.String() + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. 
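+//
+// Illustrative lookup (the context and key are hypothetical):
+//
+//	m := baggage.FromContext(ctx).Member("userID")
+//	if m.Key() == "" { /* no such list-member */ }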
+func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members is not significant. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. +func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a header string compliant with the W3C Baggage +// specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + s := Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } + } + return strings.Join(members, listDelimiter) +} + +// parsePropertyInternal attempts to decode a Property from the passed string. +// It follows the spec at https://www.w3.org/TR/baggage/#definition. +func parsePropertyInternal(s string) (p Property, ok bool) { + // For the entire function we will use " key = value " as an example. + // Attempting to parse the key. + // First skip spaces at the beginning "< >key = value " (they could be empty). + index := skipSpace(s, 0) + + // Parse the key: " = value ". 
+ keyStart := index + keyEnd := index + for _, c := range s[keyStart:] { + if !validateKeyChar(c) { + break + } + keyEnd++ + } + + // If we couldn't find any valid key character, + // it means the key is either empty or invalid. + if keyStart == keyEnd { + return + } + + // Skip spaces after the key: " key< >= value ". + index = skipSpace(s, keyEnd) + + if index == len(s) { + // A key can have no value, like: " key ". + ok = true + p.key = s[keyStart:keyEnd] + return + } + + // If we have not reached the end and we can't find the '=' delimiter, + // it means the property is invalid. + if s[index] != keyValueDelimiter[0] { + return + } + + // Attempting to parse the value. + // Match: " key =< >value ". + index = skipSpace(s, index+1) + + // Match the value string: " key = ". + // A valid property can be: " key =". + // Therefore, we don't have to check if the value is empty. + valueStart := index + valueEnd := index + for _, c := range s[valueStart:] { + if !validateValueChar(c) { + break + } + valueEnd++ + } + + // Skip all trailing whitespaces: " key = value< >". + index = skipSpace(s, valueEnd) + + // If after looking for the value and skipping whitespaces + // we have not reached the end, it means the property is + // invalid, something like: " key = value value1". + if index != len(s) { + return + } + + // Decode a percent-encoded value. + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return + } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. 
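+// For example, validateKey("userID") is true, while validateKey("user id")
+// is false: a space is not in the token character set.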
+func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] +} + +// validateValue checks if the string is a valid W3C Baggage value. +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + +func validateValueChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. 
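+// For example, shouldEscape('%') and shouldEscape(' ') are true, while
+// shouldEscape('a') is false.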
+func shouldEscape(c byte) bool {
+	if c == '%' {
+		// The percent character must be encoded so that percent-encoding can work.
+		return true
+	}
+	return !validateValueChar(int32(c))
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 000000000..a572461a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+// ContextWithBaggage returns a copy of parent with baggage.
+func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return Baggage{list: baggage.ListFromContext(ctx)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 000000000..b51d87cab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md
new file mode 100644
index 000000000..24c52b387
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/README.md
@@ -0,0 +1,3 @@
+# Codes
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes)
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 000000000..2acbac354
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+
+	// Error indicates the operation contains an error.
+	//
+	// NOTE: The error code in OTLP is 2.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Error Code = 1
+
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain
+	// no error.
+	//
+	// NOTE: The Ok code in OTLP is 1.
+	// The value of this enum is only relevant to the internals
+	// of the Go SDK.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
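+//
+// Codes marshal to and from JSON as their string names (see MarshalJSON
+// and UnmarshalJSON below). Note that MarshalJSON is defined on *Code, so
+// marshal through a pointer (illustrative):
+//
+//	c := codes.Ok
+//	b, _ := json.Marshal(&c) // `"Ok"`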
+type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 000000000..ee8db448b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 000000000..921f85961 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about logs, see go.opentelemetry.io/otel/log. 
+ +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 000000000..67414c71e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
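Editor's note: as a concrete illustration of the appendix above (a sketch, not part of the vendored LICENSE file), the boilerplate applied to a Go source file becomes a comment header; the year and owner below are placeholder values. The OpenTelemetry files added later in this patch use the equivalent short form "// SPDX-License-Identifier: Apache-2.0" instead.

// Copyright 2024 Example Copyright Owner
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package example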
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md
new file mode 100644
index 000000000..9184068d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Metric gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
new file mode 100644
index 000000000..428cfea23
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/genproto/googleapis/rpc/errdetails"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+	colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+	metadata      metadata.MD
+	exportTimeout time.Duration
+	requestFunc   retry.RequestFunc
+
+	// ourConn keeps track of where conn was created: true if created here in
+	// newClient, or false if passed with an option. This is important on
+	// Shutdown as the conn should only be closed if we created it. Otherwise,
+	// it is up to the processes that passed the conn to close it.
+	ourConn bool
+	conn    *grpc.ClientConn
+	msc     colmetricpb.MetricsServiceClient
+}
+
+// newClient creates a new gRPC metric client.
+func newClient(_ context.Context, cfg oconf.Config) (*client, error) {
+	c := &client{
+		exportTimeout: cfg.Metrics.Timeout,
+		requestFunc:   cfg.RetryConfig.RequestFunc(retryable),
+		conn:          cfg.GRPCConn,
+	}
+
+	if len(cfg.Metrics.Headers) > 0 {
+		c.metadata = metadata.New(cfg.Metrics.Headers)
+	}
+
+	if c.conn == nil {
+		// If the caller did not provide a ClientConn when the client was
+		// created, create one using the configuration they did provide.
+		userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
+		dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+		dialOpts = append(dialOpts, cfg.DialOptions...)
+
+		conn, err := grpc.NewClient(cfg.Metrics.Endpoint, dialOpts...)
+		if err != nil {
+			return nil, err
+		}
+		// Keep track that we own the lifecycle of this conn and need to close
+		// it on Shutdown.
+		c.ourConn = true
+		c.conn = conn
+	}
+
+	c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
+
+	return c, nil
+}
+
+// Shutdown shuts down the client, freeing all resources.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+func (c *client) Shutdown(ctx context.Context) error {
+	// The otlpmetric.Exporter synchronizes access to client methods and
+	// ensures this is called only once. The only thing that needs to be done
+	// here is to release any computational resources the client holds.
+
+	c.metadata = nil
+	c.requestFunc = nil
+	c.msc = nil
+
+	err := ctx.Err()
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	c.conn = nil
+	return err
+}
+
+// UploadMetrics sends protoMetrics to the connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+	// The otlpmetric.Exporter synchronizes access to client methods, and
+	// ensures this is not called after the Exporter is shut down. The only
+	// thing to do here is send data.
+
+	select {
+	case <-ctx.Done():
+		// Do not upload if the context is already expired.
+		return ctx.Err()
+	default:
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
+			ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedDataPoints()
+			if n != 0 || msg != "" {
+				err := internal.MetricPartialSuccessError(n, msg)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the client's configured export timeout.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	return ctx, cancel
+}
+
+// retryable returns whether err identifies a request that can be retried and
+// a duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		// Additionally, handle RetryInfo.
+		_, d := throttleDelay(s)
+		return true, d
+	case codes.ResourceExhausted:
+		// Retry only if the server signals that the recovery from resource exhaustion is possible.
+		return throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns whether the status includes RetryInfo details
+// and the duration to wait for if an explicit throttle time is included.
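+//
+// Editor's sketch (not part of the upstream file): a server that wants the
+// client to back off for one second can return a ResourceExhausted status
+// carrying a RetryInfo detail, for example:
+//
+//	st, _ := status.New(codes.ResourceExhausted, "slow down").WithDetails(
+//		&errdetails.RetryInfo{RetryDelay: durationpb.New(time.Second)})
+//
+// For such a status, throttleDelay returns (true, time.Second). The durationpb
+// package here is assumed to be google.golang.org/protobuf/types/known/durationpb.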
+func throttleDelay(s *status.Status) (bool, time.Duration) { + for _, detail := range s.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return true, t.RetryDelay.AsDuration() + } + } + return false, 0 +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go new file mode 100644 index 000000000..db6e3714b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -0,0 +1,266 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" + "go.opentelemetry.io/otel/sdk/metric" +) + +// Option applies a configuration option to the Exporter. +type Option interface { + applyGRPCOption(oconf.Config) oconf.Config +} + +func asGRPCOptions(opts []Option) []oconf.GRPCOption { + converted := make([]oconf.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = oconf.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying the export of metric data +// that failed. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + oconf.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables client transport security for the Exporter's gRPC +// connection, just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used to determine client security. If the endpoint has a +// scheme of "http" or "unix" client security will be disabled. If both are +// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, client security will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithInsecure() Option { + return wrappedOption{oconf.WithInsecure()} +} + +// WithEndpoint sets the target endpoint the Exporter will connect to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. 
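+//
+// For example (editor's sketch with a hypothetical endpoint, not part of the
+// upstream file):
+//
+//	exp, err := New(ctx, WithEndpoint("collector.example.com:4317"))
+//
+// takes precedence over both environment variables and the default
+// "localhost:4317".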
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+	return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.ReconnectionPeriod = rp
+		return cfg
+	})}
+}
+
+func compressorToCompression(compressor string) oconf.Compression {
+	if compressor == "gzip" {
+		return oconf.GzipCompression
+	}
+
+	otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+	return oconf.NoCompression
+}
+
+// WithCompressor sets the compressor the gRPC client uses.
+// Supported compressor values: "gzip".
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compressor will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+	return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use.
If both are +// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no TLS credentials will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(creds credentials.TransportCredentials) Option { + return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { + cfg.Metrics.GRPCCredentials = creds + return cfg + })} +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { + cfg.ServiceConfig = serviceConfig + return cfg + })} +} + +// WithDialOption sets explicit grpc.DialOptions to use when establishing a +// gRPC connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { + cfg.DialOptions = opts + return cfg + })} +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the callers responsibility to close the passed conn. The Exporter +// Shutdown method will not close this connection. +func WithGRPCConn(conn *grpc.ClientConn) Option { + return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { + cfg.GRPCConn = conn + return cfg + })} +} + +// WithTimeout sets the max amount of time an Exporter will attempt an export. +// +// This takes precedence over any retry settings defined by WithRetry. Once +// this time limit has been reached the export is abandoned and the metric +// data is dropped. +// +// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as an integer representing the +// timeout in milliseconds. If both are set, +// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, a timeout of 10 seconds will be used. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{oconf.WithTimeout(duration)} +} + +// WithRetry sets the retry policy for transient retryable errors that are +// returned by the target endpoint. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response, that time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. 
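+//
+// A minimal sketch (editor's illustration, not part of the upstream file;
+// the durations are arbitrary examples, and the field names assume the
+// underlying retry.Config layout):
+//
+//	exp, err := New(ctx, WithRetry(RetryConfig{
+//		Enabled:         true,
+//		InitialInterval: 5 * time.Second,
+//		MaxInterval:     30 * time.Second,
+//		MaxElapsedTime:  time.Minute,
+//	}))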
+func WithRetry(settings RetryConfig) Option { + return wrappedOption{oconf.WithRetry(retry.Config(settings))} +} + +// WithTemporalitySelector sets the TemporalitySelector the client will use to +// determine the Temporality of an instrument based on its kind. If this option +// is not used, the client will use the DefaultTemporalitySelector from the +// go.opentelemetry.io/otel/sdk/metric package. +func WithTemporalitySelector(selector metric.TemporalitySelector) Option { + return wrappedOption{oconf.WithTemporalitySelector(selector)} +} + +// WithAggregationSelector sets the AggregationSelector the client will use to +// determine the aggregation to use for an instrument based on its kind. If +// this option is not used, the reader will use the DefaultAggregationSelector +// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation +// explicitly passed for a view matching an instrument. +func WithAggregationSelector(selector metric.AggregationSelector) Option { + return wrappedOption{oconf.WithAggregationSelector(selector)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go new file mode 100644 index 000000000..dcd8de5df --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC. +By default the telemetry is sent to https://localhost:4317. + +Exporter should be created using [New] and used with a [metric.PeriodicReader]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") - +target to which the exporter sends telemetry. +The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. +The value should not contain a query string or fragment. +OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's gRPC connection. +You can use this only when an endpoint is provided without the http or https scheme. +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides +the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT. +OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - +key-value pairs used as gRPC metadata associated with gRPC requests. +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by [WithHeaders] option. 
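+
+As an editor's sketch (not part of the upstream documentation), the same
+headers can be set in code, which overrides the environment variables:
+
+	exp, err := otlpmetricgrpc.New(ctx,
+		otlpmetricgrpc.WithHeaders(map[string]string{
+			"key1": "value1",
+			"key2": "value2",
+		}))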
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate chain, paired with the client's private key, to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+  - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+  - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+  - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+  - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+  - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
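+
+As an editor's sketch (not part of the upstream documentation), the "delta"
+temporality preference described above corresponds roughly to a selector like
+the following, which could be installed with [WithTemporalitySelector]:
+
+	func deltaPreference(ik metric.InstrumentKind) metricdata.Temporality {
+		switch ik {
+		case metric.InstrumentKindCounter,
+			metric.InstrumentKindHistogram,
+			metric.InstrumentKindObservableCounter:
+			return metricdata.DeltaTemporality
+		default:
+			return metricdata.CumulativeTemporality
+		}
+	}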
+ +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation +[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation +*/ +package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go new file mode 100644 index 000000000..98afc0b1e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -0,0 +1,156 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" +) + +// Exporter is a OpenTelemetry metric Exporter using gRPC. +type Exporter struct { + // Ensure synchronous access to the client across all functionality. + clientMu sync.Mutex + client interface { + UploadMetrics(context.Context, *metricpb.ResourceMetrics) error + Shutdown(context.Context) error + } + + temporalitySelector metric.TemporalitySelector + aggregationSelector metric.AggregationSelector + + shutdownOnce sync.Once +} + +func newExporter(c *client, cfg oconf.Config) (*Exporter, error) { + ts := cfg.Metrics.TemporalitySelector + if ts == nil { + ts = func(metric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality + } + } + + as := cfg.Metrics.AggregationSelector + if as == nil { + as = metric.DefaultAggregationSelector + } + + return &Exporter{ + client: c, + + temporalitySelector: ts, + aggregationSelector: as, + }, nil +} + +// Temporality returns the Temporality to use for an instrument kind. +func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality { + return e.temporalitySelector(k) +} + +// Aggregation returns the Aggregation to use for an instrument kind. +func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation { + return e.aggregationSelector(k) +} + +// Export transforms and transmits metric data to an OTLP receiver. +// +// This method returns an error if called after Shutdown. +// This method returns an error if the method is canceled by the passed context. +func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + defer global.Debug("OTLP/gRPC exporter export", "Data", rm) + + otlpRm, err := transform.ResourceMetrics(rm) + // Best effort upload of transformable metrics. + e.clientMu.Lock() + upErr := e.client.UploadMetrics(ctx, otlpRm) + e.clientMu.Unlock() + if upErr != nil { + if err == nil { + return fmt.Errorf("failed to upload metrics: %w", upErr) + } + // Merge the two errors. 
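+		// Editor's note: wrapping two errors with multiple %w verbs in a
+		// single fmt.Errorf call requires Go 1.20 or later.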
+		return fmt.Errorf("failed to upload incomplete metrics (%w): %w", err, upErr)
+	}
+	return err
+}
+
+// ForceFlush flushes any metric data held by an exporter.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+	// The exporter and client hold no state, nothing to flush.
+	return ctx.Err()
+}
+
+// Shutdown flushes all metric data held by an exporter and releases any held
+// computational resources.
+//
+// This method returns an error if called after Shutdown.
+// This method returns an error if the method is canceled by the passed context.
+//
+// This method is safe to call concurrently.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	err := errShutdown
+	e.shutdownOnce.Do(func() {
+		e.clientMu.Lock()
+		client := e.client
+		e.client = shutdownClient{}
+		e.clientMu.Unlock()
+		err = client.Shutdown(ctx)
+	})
+	return err
+}
+
+var errShutdown = fmt.Errorf("gRPC exporter is shutdown")
+
+type shutdownClient struct{}
+
+func (c shutdownClient) err(ctx context.Context) error {
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return errShutdown
+}
+
+func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
+	return c.err(ctx)
+}
+
+func (c shutdownClient) Shutdown(ctx context.Context) error {
+	return c.err(ctx)
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+	return struct{ Type string }{Type: "OTLP/gRPC"}
+}
+
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using gRPC.
+//
+// If an already established gRPC ClientConn is not passed in options using
+// WithGRPCConn, a connection to the OTLP endpoint will be established based
+// on options. If a connection cannot be established in the lifetime of ctx,
+// an error will be returned.
+func New(ctx context.Context, options ...Option) (*Exporter, error) {
+	cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
+	c, err := newClient(ctx, cfg)
+	if err != nil {
+		return nil, err
+	}
+	return newExporter(c, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
new file mode 100644
index 000000000..261f55026
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,215 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+	GetEnv    func(string) string
+	ReadFile  func(string) ([]byte, error)
+	Namespace string
+}
+
+// Apply runs every ConfigFn.
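+//
+// A usage sketch (editor's illustration, not part of the upstream file):
+//
+//	r := EnvOptionsReader{
+//		GetEnv:    os.Getenv,
+//		ReadFile:  os.ReadFile,
+//		Namespace: "OTEL_EXPORTER_OTLP",
+//	}
+//	r.Apply(
+//		WithString("ENDPOINT", func(v string) { endpoint = v }),
+//		WithDuration("TIMEOUT", func(d time.Duration) { timeout = d }),
+//	)
+//
+// where endpoint and timeout are hypothetical variables owned by the caller.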
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+	for _, o := range opts {
+		o(e)
+	}
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+	return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(v)
+		}
+	}
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and, if it exists, passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
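+//
+// For example (editor's note), the oconf package later in this patch uses it
+// as:
+//
+//	WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
+//		tlsConf.Certificates = []tls.Certificate{c}
+//	})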
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + n, v, found := strings.Cut(header, "=") + if !found { + global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) + continue + } + + // Only decode the value. + value, err := url.PathUnescape(v) + if err != nil { + global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go new file mode 100644 index 000000000..95e2f4ba3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go +//go:generate gotmpl 
--body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go new file mode 100644 index 000000000..7ae53f2d1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go @@ -0,0 +1,210 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Metrics.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Metrics.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. 
+ path := u.Path + if path == "" { + path = "/" + } + cfg.Metrics.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), + withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + ) + + return opts +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Metrics.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
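+//
+// For example (editor's note): with OTEL_EXPORTER_OTLP_COMPRESSION=gzip set,
+// WithEnvCompression("COMPRESSION", fn) calls fn(GzipCompression); any other
+// non-empty value, including "none", results in fn(NoCompression).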
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} + +func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if s, ok := e.GetEnvValue(n); ok { + switch strings.ToLower(s) { + case "cumulative": + fn(cumulativeTemporality) + case "delta": + fn(deltaTemporality) + case "lowmemory": + fn(lowMemory) + default: + global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + } + } + } +} + +func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { + switch ik { + case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { + switch ik { + case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if s, ok := e.GetEnvValue(n); ok { + switch strings.ToLower(s) { + case "explicit_bucket_histogram": + fn(metric.DefaultAggregationSelector) + case "base2_exponential_bucket_histogram": + fn(func(kind metric.InstrumentKind) metric.Aggregation { + if kind == metric.InstrumentKindHistogram { + return metric.AggregationBase2ExponentialHistogram{ + MaxSize: 160, + MaxScale: 20, + NoMinMax: false, + } + } + return metric.DefaultAggregationSelector(kind) + }) + default: + global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + } + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go new file mode 100644 index 000000000..b6ed9a2bb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -0,0 +1,376 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "path" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric" +) + +const ( + // DefaultMaxAttempts describes how many times the driver + // should retry the sending of the payload in case of a + // retryable error. + DefaultMaxAttempts int = 5 + // DefaultMetricsPath is a default URL path for endpoint that + // receives metrics. + DefaultMetricsPath string = "/v1/metrics" + // DefaultBackoff is a default base backoff time used in the + // exponential backoff strategy. + DefaultBackoff time.Duration = 300 * time.Millisecond + // DefaultTimeout is a default max waiting time for the backend to process + // each span or metrics batch. + DefaultTimeout time.Duration = 10 * time.Second +) + +type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + + SignalConfig struct { + Endpoint string + Insecure bool + TLSCfg *tls.Config + Headers map[string]string + Compression Compression + Timeout time.Duration + URLPath string + + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + TemporalitySelector metric.TemporalitySelector + AggregationSelector metric.AggregationSelector + + Proxy HTTPTransportProxyFunc + } + + Config struct { + // Signal specific configurations + Metrics SignalConfig + + RetryConfig retry.Config + + // gRPC configurations + ReconnectionPeriod time.Duration + ServiceConfig string + DialOptions []grpc.DialOption + GRPCConn *grpc.ClientConn + } +) + +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. +func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ + Metrics: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), + URLPath: DefaultMetricsPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + + TemporalitySelector: metric.DefaultTemporalitySelector, + AggregationSelector: metric.DefaultAggregationSelector, + }, + RetryConfig: retry.DefaultConfig, + } + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath) + return cfg +} + +// cleanPath returns a path with all spaces trimmed and all redundancies +// removed. If urlPath is empty or cleaning it results in an empty string, +// defaultPath is returned instead. +func cleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." 
{
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Metrics.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+	} else if cfg.Metrics.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Metrics.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Metrics.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
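+//
+// WithTLSClientConfig below is one example (editor's note): its HTTP branch
+// stores the *tls.Config directly, while its gRPC branch converts the config
+// into transport credentials.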
+type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. +type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Endpoint = endpoint + return cfg + }) +} + +func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlpmetric: parse endpoint url", "url", v) + return cfg + } + + cfg.Metrics.Endpoint = u.Host + cfg.Metrics.URLPath = u.Path + if u.Scheme != "https" { + cfg.Metrics.Insecure = true + } + + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Metrics.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Timeout = duration + return cfg + }) +} + +func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.TemporalitySelector = selector + return cfg + }) +} + +func WithAggregationSelector(selector metric.AggregationSelector) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.AggregationSelector = selector + return cfg + }) +} + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return 
newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Proxy = pf
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
new file mode 100644
index 000000000..83f6d7fd1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
@@ -0,0 +1,47 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import "time"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt
+	// to connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether to retry sending batches in case of export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
+	// consecutive retries will always be `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+	// Once this value is reached, the data is discarded.
+	MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
new file mode 100644
index 000000000..0229ac80b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
@@ -0,0 +1,38 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTLSConfig(b)
+}
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
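+//
+// A minimal illustrative use (the PEM path is a made-up example, not part of
+// the upstream file):
+//
+//	pem, err := os.ReadFile("/etc/pki/ca.pem")
+//	if err != nil { /* handle the read error */ }
+//	tlsCfg, err := CreateTLSConfig(pem)
+//	// On success, tlsCfg.RootCAs holds the parsed certificate(s).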
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
new file mode 100644
index 000000000..50e25fdbc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go

+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
new file mode 100644
index 000000000..cc3a77055
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig is the recommended default configuration to use.
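+//
+// Callers needing different limits can copy and adjust it, for example
+// (illustrative, not part of the upstream file):
+//
+//	cfg := retry.DefaultConfig
+//	cfg.MaxElapsedTime = 5 * time.Minute // allow a longer total retry window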
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch. Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retry-able and, if the error
+// includes an explicit throttle duration that should be honored, that
+// duration.
+//
+// The function must return true as its first return value if the error
+// argument is retry-able, otherwise it must return false.
+//
+// The function must return a non-zero time.Duration if the error contains
+// an explicit throttle duration that should be honored, otherwise it must
+// return a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried, based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %w", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
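+//
+// A sketch of the boundary case it resolves (illustrative, not part of the
+// upstream file):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	cancel()
+//	_ = wait(ctx, 0) // may still return nil: a fired timer wins over ctx.Err(),
+//	                 // permitting one final retry attempt.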
+func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go new file mode 100644 index 000000000..2605c74d0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go @@ -0,0 +1,144 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" + +import ( + "go.opentelemetry.io/otel/attribute" + cpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// AttrIter transforms an attribute iterator into OTLP key-values. +func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. +func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. +func KeyValue(kv attribute.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue. 
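+//
+// For instance (illustrative), a string attribute becomes a string AnyValue:
+//
+//	v := Value(attribute.StringValue("collector"))
+//	// v.GetStringValue() == "collector"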
+func Value(v attribute.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go new file mode 100644 index 000000000..d31652b4d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go @@ -0,0 +1,103 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" + +import ( + "errors" + "fmt" + "strings" + + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" +) + +var ( + errUnknownAggregation = errors.New("unknown aggregation") + errUnknownTemporality = errors.New("unknown temporality") +) + +type errMetric struct { + m *mpb.Metric + err error +} + +func (e errMetric) Unwrap() error { + return e.err +} + +func (e errMetric) Error() string { + format := "invalid metric (name: %q, description: %q, unit: %q): %s" + return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err) +} + +func (e errMetric) Is(target error) bool { + return errors.Is(e.err, target) +} + +// multiErr is used by the data-type transform functions to wrap multiple +// errors into a single return value. The error message will show all errors +// as a list and scope them by the datatype name that is returning them. +type multiErr struct { + datatype string + errs []error +} + +// errOrNil returns nil if e contains no errors, otherwise it returns e. +func (e *multiErr) errOrNil() error { + if len(e.errs) == 0 { + return nil + } + return e +} + +// append adds err to e. If err is a multiErr, its errs are flattened into e. +func (e *multiErr) append(err error) { + // Do not use errors.As here, this should only be flattened one layer. If + // there is a *multiErr several steps down the chain, all the errors above + // it will be discarded if errors.As is used instead. + switch other := err.(type) { //nolint:errorlint + case *multiErr: + // Flatten err errors into e. + e.errs = append(e.errs, other.errs...) + default: + e.errs = append(e.errs, err) + } +} + +func (e *multiErr) Error() string { + es := make([]string, len(e.errs)) + for i, err := range e.errs { + es[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred transforming %s:\n\t%s" + return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t")) +} + +func (e *multiErr) Unwrap() error { + switch len(e.errs) { + case 0: + return nil + case 1: + return e.errs[0] + } + + // Return a multiErr without the leading error. + cp := &multiErr{ + datatype: e.datatype, + errs: make([]error, len(e.errs)-1), + } + copy(cp.errs, e.errs[1:]) + return cp +} + +func (e *multiErr) Is(target error) bool { + if len(e.errs) == 0 { + return false + } + // Check if the first error is target. + return errors.Is(e.errs[0], target) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go new file mode 100644 index 000000000..975e3b7aa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go @@ -0,0 +1,352 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package transform provides transformation functionality from the +// sdk/metric/metricdata data-types into OTLP data-types. 
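+//
+// The typical entry point is ResourceMetrics; an exporter is expected to call
+// it roughly like this (illustrative sketch, not part of the upstream file):
+//
+//	var rm metricdata.ResourceMetrics // produced by a metric reader
+//	protoRM, err := ResourceMetrics(&rm)
+//	// protoRM is an OTLP *mpb.ResourceMetrics; err aggregates any invalid data.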
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" + +import ( + "fmt" + "time" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" + cpb "go.opentelemetry.io/proto/otlp/common/v1" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm +// contains invalid ScopeMetrics, an error will be returned along with an OTLP +// ResourceMetrics that contains partial OTLP ScopeMetrics. +func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) { + sms, err := ScopeMetrics(rm.ScopeMetrics) + return &mpb.ResourceMetrics{ + Resource: &rpb.Resource{ + Attributes: AttrIter(rm.Resource.Iter()), + }, + ScopeMetrics: sms, + SchemaUrl: rm.Resource.SchemaURL(), + }, err +} + +// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If +// sms contains invalid metric values, an error will be returned along with a +// slice that contains partial OTLP ScopeMetrics. +func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { + errs := &multiErr{datatype: "ScopeMetrics"} + out := make([]*mpb.ScopeMetrics, 0, len(sms)) + for _, sm := range sms { + ms, err := Metrics(sm.Metrics) + if err != nil { + errs.append(err) + } + + out = append(out, &mpb.ScopeMetrics{ + Scope: &cpb.InstrumentationScope{ + Name: sm.Scope.Name, + Version: sm.Scope.Version, + }, + Metrics: ms, + SchemaUrl: sm.Scope.SchemaURL, + }) + } + return out, errs.errOrNil() +} + +// Metrics returns a slice of OTLP Metric generated from ms. If ms contains +// invalid metric values, an error will be returned along with a slice that +// contains partial OTLP Metrics. +func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) { + errs := &multiErr{datatype: "Metrics"} + out := make([]*mpb.Metric, 0, len(ms)) + for _, m := range ms { + o, err := metric(m) + if err != nil { + // Do not include invalid data. Drop the metric, report the error. + errs.append(errMetric{m: o, err: err}) + continue + } + out = append(out, o) + } + return out, errs.errOrNil() +} + +func metric(m metricdata.Metrics) (*mpb.Metric, error) { + var err error + out := &mpb.Metric{ + Name: m.Name, + Description: m.Description, + Unit: m.Unit, + } + switch a := m.Data.(type) { + case metricdata.Gauge[int64]: + out.Data = Gauge[int64](a) + case metricdata.Gauge[float64]: + out.Data = Gauge[float64](a) + case metricdata.Sum[int64]: + out.Data, err = Sum[int64](a) + case metricdata.Sum[float64]: + out.Data, err = Sum[float64](a) + case metricdata.Histogram[int64]: + out.Data, err = Histogram(a) + case metricdata.Histogram[float64]: + out.Data, err = Histogram(a) + case metricdata.ExponentialHistogram[int64]: + out.Data, err = ExponentialHistogram(a) + case metricdata.ExponentialHistogram[float64]: + out.Data, err = ExponentialHistogram(a) + case metricdata.Summary: + out.Data = Summary(a) + default: + return out, fmt.Errorf("%w: %T", errUnknownAggregation, a) + } + return out, err +} + +// Gauge returns an OTLP Metric_Gauge generated from g. +func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge { + return &mpb.Metric_Gauge{ + Gauge: &mpb.Gauge{ + DataPoints: DataPoints(g.DataPoints), + }, + } +} + +// Sum returns an OTLP Metric_Sum generated from s. An error is returned +// if the temporality of s is unknown. 
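+//
+// For example (illustrative), a monotonic delta sum converts as:
+//
+//	s := metricdata.Sum[int64]{
+//		Temporality: metricdata.DeltaTemporality,
+//		IsMonotonic: true,
+//	}
+//	pb, err := Sum(s) // err is non-nil only for an unknown temporality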
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) { + t, err := Temporality(s.Temporality) + if err != nil { + return nil, err + } + return &mpb.Metric_Sum{ + Sum: &mpb.Sum{ + AggregationTemporality: t, + IsMonotonic: s.IsMonotonic, + DataPoints: DataPoints(s.DataPoints), + }, + }, nil +} + +// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts. +func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint { + out := make([]*mpb.NumberDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + ndp := &mpb.NumberDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Exemplars: Exemplars(dPt.Exemplars), + } + switch v := any(dPt.Value).(type) { + case int64: + ndp.Value = &mpb.NumberDataPoint_AsInt{ + AsInt: v, + } + case float64: + ndp.Value = &mpb.NumberDataPoint_AsDouble{ + AsDouble: v, + } + } + out = append(out, ndp) + } + return out +} + +// Histogram returns an OTLP Metric_Histogram generated from h. An error is +// returned if the temporality of h is unknown. +func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) { + t, err := Temporality(h.Temporality) + if err != nil { + return nil, err + } + return &mpb.Metric_Histogram{ + Histogram: &mpb.Histogram{ + AggregationTemporality: t, + DataPoints: HistogramDataPoints(h.DataPoints), + }, + }, nil +} + +// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated +// from dPts. +func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint { + out := make([]*mpb.HistogramDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sum := float64(dPt.Sum) + hdp := &mpb.HistogramDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: &sum, + BucketCounts: dPt.BucketCounts, + ExplicitBounds: dPt.Bounds, + Exemplars: Exemplars(dPt.Exemplars), + } + if v, ok := dPt.Min.Value(); ok { + vF64 := float64(v) + hdp.Min = &vF64 + } + if v, ok := dPt.Max.Value(); ok { + vF64 := float64(v) + hdp.Max = &vF64 + } + out = append(out, hdp) + } + return out +} + +// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is +// returned if the temporality of h is unknown. +func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { + t, err := Temporality(h.Temporality) + if err != nil { + return nil, err + } + return &mpb.Metric_ExponentialHistogram{ + ExponentialHistogram: &mpb.ExponentialHistogram{ + AggregationTemporality: t, + DataPoints: ExponentialHistogramDataPoints(h.DataPoints), + }, + }, nil +} + +// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated +// from dPts. 
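+//
+// Illustrative bucket mapping (per the OTLP exponential histogram encoding,
+// not part of the upstream file): with scale 0 the bucket at index i covers
+// (2^i, 2^(i+1)], so
+//
+//	metricdata.ExponentialBucket{Offset: -1, Counts: []uint64{3, 5}}
+//
+// describes (0.5, 1] with count 3 and (1, 2] with count 5.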
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { + out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sum := float64(dPt.Sum) + ehdp := &mpb.ExponentialHistogramDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: &sum, + Scale: dPt.Scale, + ZeroCount: dPt.ZeroCount, + Exemplars: Exemplars(dPt.Exemplars), + + Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket), + Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket), + } + if v, ok := dPt.Min.Value(); ok { + vF64 := float64(v) + ehdp.Min = &vF64 + } + if v, ok := dPt.Max.Value(); ok { + vF64 := float64(v) + ehdp.Max = &vF64 + } + out = append(out, ehdp) + } + return out +} + +// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated +// from bucket. +func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { + return &mpb.ExponentialHistogramDataPoint_Buckets{ + Offset: bucket.Offset, + BucketCounts: bucket.Counts, + } +} + +// Temporality returns an OTLP AggregationTemporality generated from t. If t +// is unknown, an error is returned along with the invalid +// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED. +func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { + switch t { + case metricdata.DeltaTemporality: + return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil + case metricdata.CumulativeTemporality: + return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil + default: + err := fmt.Errorf("%w: %s", errUnknownTemporality, t) + return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err + } +} + +// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC as uint64. +// The result is undefined if the Unix time +// in nanoseconds cannot be represented by an int64 +// (a date before the year 1678 or after 2262). +// timeUnixNano on the zero Time returns 0. +// The result does not depend on the location associated with t. +func timeUnixNano(t time.Time) uint64 { + if t.IsZero() { + return 0 + } + return uint64(t.UnixNano()) +} + +// Exemplars returns a slice of OTLP Exemplars generated from exemplars. +func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar { + out := make([]*mpb.Exemplar, 0, len(exemplars)) + for _, exemplar := range exemplars { + e := &mpb.Exemplar{ + FilteredAttributes: KeyValues(exemplar.FilteredAttributes), + TimeUnixNano: timeUnixNano(exemplar.Time), + SpanId: exemplar.SpanID, + TraceId: exemplar.TraceID, + } + switch v := any(exemplar.Value).(type) { + case int64: + e.Value = &mpb.Exemplar_AsInt{ + AsInt: v, + } + case float64: + e.Value = &mpb.Exemplar_AsDouble{ + AsDouble: v, + } + } + out = append(out, e) + } + return out +} + +// Summary returns an OTLP Metric_Summary generated from s. +func Summary(s metricdata.Summary) *mpb.Metric_Summary { + return &mpb.Metric_Summary{ + Summary: &mpb.Summary{ + DataPoints: SummaryDataPoints(s.DataPoints), + }, + } +} + +// SummaryDataPoints returns a slice of OTLP SummaryDataPoint generated from +// dPts. 
+func SummaryDataPoints(dPts []metricdata.SummaryDataPoint) []*mpb.SummaryDataPoint { + out := make([]*mpb.SummaryDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sdp := &mpb.SummaryDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: dPt.Sum, + QuantileValues: QuantileValues(dPt.QuantileValues), + } + out = append(out, sdp) + } + return out +} + +// QuantileValues returns a slice of OTLP SummaryDataPoint_ValueAtQuantile +// generated from quantiles. +func QuantileValues(quantiles []metricdata.QuantileValue) []*mpb.SummaryDataPoint_ValueAtQuantile { + out := make([]*mpb.SummaryDataPoint_ValueAtQuantile, 0, len(quantiles)) + for _, q := range quantiles { + quantile := &mpb.SummaryDataPoint_ValueAtQuantile{ + Quantile: q.Quantile, + Value: q.Value, + } + out = append(out, quantile) + } + return out +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go new file mode 100644 index 000000000..1046eb593 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + +// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. +func Version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md new file mode 100644 index 000000000..b02cdcbbe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/README.md @@ -0,0 +1,3 @@ +# OTLP Metric HTTP Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go new file mode 100644 index 000000000..7ef295e59 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -0,0 +1,345 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + +import ( + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" + colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" +) + +type client struct { + // req is cloned for every upload the client makes. + req *http.Request + compression Compression + requestFunc retry.RequestFunc + httpClient *http.Client +} + +// Keep it in sync with golang's DefaultTransport from net/http! We +// have our own copy to avoid handling a situation where the +// DefaultTransport is overwritten with some different implementation +// of http.RoundTripper or it's modified by another package. +var ourTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, +} + +// newClient creates a new HTTP metric client. +func newClient(cfg oconf.Config) (*client, error) { + httpClient := &http.Client{ + Transport: ourTransport, + Timeout: cfg.Metrics.Timeout, + } + + if cfg.Metrics.TLSCfg != nil || cfg.Metrics.Proxy != nil { + clonedTransport := ourTransport.Clone() + httpClient.Transport = clonedTransport + + if cfg.Metrics.TLSCfg != nil { + clonedTransport.TLSClientConfig = cfg.Metrics.TLSCfg + } + if cfg.Metrics.Proxy != nil { + clonedTransport.Proxy = cfg.Metrics.Proxy + } + } + + u := &url.URL{ + Scheme: "https", + Host: cfg.Metrics.Endpoint, + Path: cfg.Metrics.URLPath, + } + if cfg.Metrics.Insecure { + u.Scheme = "http" + } + // Body is set when this is cloned during upload. 
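+	// Note (descriptive, added for clarity): the request built here is a
+	// template. newRequest clones it per upload and attaches the (possibly
+	// gzip-compressed) payload through a resettable body reader, so retries
+	// can replay the same body.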
+ req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) + if err != nil { + return nil, err + } + + userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version() + req.Header.Set("User-Agent", userAgent) + + if n := len(cfg.Metrics.Headers); n > 0 { + for k, v := range cfg.Metrics.Headers { + req.Header.Set(k, v) + } + } + req.Header.Set("Content-Type", "application/x-protobuf") + + return &client{ + compression: Compression(cfg.Metrics.Compression), + req: req, + requestFunc: cfg.RetryConfig.RequestFunc(evaluate), + httpClient: httpClient, + }, nil +} + +// Shutdown shuts down the client, freeing all resources. +func (c *client) Shutdown(ctx context.Context) error { + // The otlpmetric.Exporter synchronizes access to client methods and + // ensures this is called only once. The only thing that needs to be done + // here is to release any computational resources the client holds. + + c.requestFunc = nil + c.httpClient = nil + return ctx.Err() +} + +// UploadMetrics sends protoMetrics to the connected endpoint. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the client was created with. +func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { + // The otlpmetric.Exporter synchronizes access to client methods, and + // ensures this is not called after the Exporter is shutdown. Only thing + // to do here is send data. + + pbRequest := &colmetricpb.ExportMetricsServiceRequest{ + ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, + } + body, err := proto.Marshal(pbRequest) + if err != nil { + return err + } + request, err := c.newRequest(ctx, body) + if err != nil { + return err + } + + return c.requestFunc(ctx, func(iCtx context.Context) error { + select { + case <-iCtx.Done(): + return iCtx.Err() + default: + } + + request.reset(iCtx) + resp, err := c.httpClient.Do(request.Request) + var urlErr *url.Error + if errors.As(err, &urlErr) && urlErr.Temporary() { + return newResponseError(http.Header{}, err) + } + if err != nil { + return err + } + + var rErr error + switch sc := resp.StatusCode; { + case sc >= 200 && sc <= 299: + // Success, do not retry. + + // Read the partial success message, if any. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { + return err + } + if respData.Len() == 0 { + return nil + } + + if resp.Header.Get("Content-Type") == "application/x-protobuf" { + var respProto colmetricpb.ExportMetricsServiceResponse + if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { + return err + } + + if respProto.PartialSuccess != nil { + msg := respProto.PartialSuccess.GetErrorMessage() + n := respProto.PartialSuccess.GetRejectedDataPoints() + if n != 0 || msg != "" { + err := internal.MetricPartialSuccessError(n, msg) + otel.Handle(err) + } + } + } + return nil + case sc == http.StatusTooManyRequests, + sc == http.StatusBadGateway, + sc == http.StatusServiceUnavailable, + sc == http.StatusGatewayTimeout: + // Retry-able failure. + rErr = newResponseError(resp.Header, nil) + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. 
+ var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { + _ = resp.Body.Close() + return err + } + + // overwrite the error message with the response body + // if it is not empty + if respStr := strings.TrimSpace(respData.String()); respStr != "" { + // Include response for context. + e := errors.New(respStr) + rErr = newResponseError(resp.Header, e) + } + default: + rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status) + } + + if err := resp.Body.Close(); err != nil { + return err + } + return rErr + }) +} + +var gzPool = sync.Pool{ + New: func() interface{} { + w := gzip.NewWriter(io.Discard) + return w + }, +} + +func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { + r := c.req.Clone(ctx) + req := request{Request: r} + + switch c.compression { + case NoCompression: + r.ContentLength = (int64)(len(body)) + req.bodyReader = bodyReader(body) + case GzipCompression: + // Ensure the content length is not used. + r.ContentLength = -1 + r.Header.Set("Content-Encoding", "gzip") + + gz := gzPool.Get().(*gzip.Writer) + defer gzPool.Put(gz) + + var b bytes.Buffer + gz.Reset(&b) + + if _, err := gz.Write(body); err != nil { + return req, err + } + // Close needs to be called to ensure body is fully written. + if err := gz.Close(); err != nil { + return req, err + } + + req.bodyReader = bodyReader(b.Bytes()) + } + + return req, nil +} + +// bodyReader returns a closure returning a new reader for buf. +func bodyReader(buf []byte) func() io.ReadCloser { + return func() io.ReadCloser { + return io.NopCloser(bytes.NewReader(buf)) + } +} + +// request wraps an http.Request with a resettable body reader. +type request struct { + *http.Request + + // bodyReader allows the same body to be used for multiple requests. + bodyReader func() io.ReadCloser +} + +// reset reinitializes the request Body and uses ctx for the request. +func (r *request) reset(ctx context.Context) { + r.Body = r.bodyReader() + r.Request = r.Request.WithContext(ctx) +} + +// retryableError represents a request failure that can be retried. +type retryableError struct { + throttle int64 + err error +} + +// newResponseError returns a retryableError and will extract any explicit +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. +func newResponseError(header http.Header, wrapped error) error { + var rErr retryableError + if v := header.Get("Retry-After"); v != "" { + if t, err := strconv.ParseInt(v, 10, 64); err == nil { + rErr.throttle = t + } + } + + rErr.err = wrapped + return rErr +} + +func (e retryableError) Error() string { + if e.err != nil { + return fmt.Sprintf("retry-able request failure: %s", e.err.Error()) + } + + return "retry-able request failure" +} + +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + +// evaluate returns if err is retry-able. If it is and it includes an explicit +// throttling delay, that delay is also returned. +func evaluate(err error) (bool, time.Duration) { + if err == nil { + return false, 0 + } + + // Do not use errors.As here, this should only be flattened one layer. If + // there are several chained errors, all the errors above it will be + // discarded if errors.As is used instead. 
+ rErr, ok := err.(retryableError) //nolint:errorlint + if !ok { + return false, 0 + } + + return true, time.Duration(rErr.throttle) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go new file mode 100644 index 000000000..bf05adcf1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go @@ -0,0 +1,224 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + +import ( + "crypto/tls" + "net/http" + "net/url" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" + "go.opentelemetry.io/otel/sdk/metric" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression oconf.Compression + +// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. +// This type is compatible with http.Transport.Proxy and can be used to set a custom proxy function +// to the OTLP HTTP client. +type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression = Compression(oconf.NoCompression) + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression = Compression(oconf.GzipCompression) +) + +// Option applies an option to the Exporter. +type Option interface { + applyHTTPOption(oconf.Config) oconf.Config +} + +func asHTTPOptions(opts []Option) []oconf.HTTPOption { + converted := make([]oconf.HTTPOption, len(opts)) + for i, o := range opts { + converted[i] = oconf.NewHTTPOption(o.applyHTTPOption) + } + return converted +} + +// RetryConfig defines configuration for retrying the export of metric data +// that failed. +type RetryConfig retry.Config + +type wrappedOption struct { + oconf.HTTPOption +} + +func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config { + return w.ApplyHTTPOption(cfg) +} + +// WithEndpoint sets the target endpoint the Exporter will connect to. This +// endpoint is specified as a host and optional port, no path or scheme should +// be included (see WithInsecure and WithURLPath). +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4318" will be used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{oconf.WithEndpoint(endpoint)} +} + +// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. 
+// variable is set, and this option is passed, this option will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+	return wrappedOption{oconf.WithEndpointURL(u)}
+}
+
+// WithCompression sets the compression strategy the Exporter will use to
+// compress the HTTP body.
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compression strategy will be used.
+func WithCompression(compression Compression) Option {
+	return wrappedOption{oconf.WithCompression(oconf.Compression(compression))}
+}
+
+// WithURLPath sets the URL path the Exporter will send requests to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, the path
+// contained in that variable value will be used. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "/v1/metrics" will be used.
+func WithURLPath(urlPath string) Option {
+	return wrappedOption{oconf.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig sets the TLS configuration the Exporter will use for
+// HTTP requests.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, the system default configuration is used.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure disables client transport security for the Exporter's HTTP
+// connection.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used to determine client security. If the endpoint has a
+// scheme of "http" or "unix", client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+func WithInsecure() Option {
+	return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithHeaders will send the provided headers with each HTTP request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached, the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	return wrappedOption{oconf.WithRetry(retry.Config(rc))}
+}
+
+// WithTemporalitySelector sets the TemporalitySelector the client will use to
+// determine the Temporality of an instrument based on its kind. If this option
+// is not used, the client will use the DefaultTemporalitySelector from the
+// go.opentelemetry.io/otel/sdk/metric package.
+func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
+	return wrappedOption{oconf.WithTemporalitySelector(selector)}
+}
+
+// WithAggregationSelector sets the AggregationSelector the client will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
+// explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector metric.AggregationSelector) Option {
+	return wrappedOption{oconf.WithAggregationSelector(selector)}
+}
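+
+// Editor's note: a minimal usage sketch (not part of the upstream file)
+// showing the options above wired into a periodic reader; the endpoint,
+// header, and timeout values are placeholders.
+//
+//	exp, err := otlpmetrichttp.New(ctx,
+//		otlpmetrichttp.WithEndpointURL("https://collector.example:4318/v1/metrics"),
+//		otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression),
+//		otlpmetrichttp.WithHeaders(map[string]string{"authorization": "Bearer <token>"}),
+//		otlpmetrichttp.WithTimeout(10*time.Second),
+//	)
+//	if err != nil {
+//		// handle the construction error
+//	}
+//	mp := sdkmetric.NewMeterProvider(
+//		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
+//	)
+//	defer func() { _ = mp.Shutdown(ctx) }()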
+// WithProxy sets the Proxy function the client will use to determine the
+// proxy to use for an HTTP request. If this option is not used, the client
+// will use [http.ProxyFromEnvironment].
+func WithProxy(pf HTTPTransportProxyFunc) Option {
+	return wrappedOption{oconf.WithProxy(oconf.HTTPTransportProxyFunc(pf))}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
new file mode 100644
index 000000000..eabb82b98
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads.
+By default, the telemetry is sent to https://localhost:4318/v1/metrics.
+
+The Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+environment variable and by [WithEndpoint], [WithEndpointURL], and [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+compression strategy the exporter uses to compress the HTTP body.
+Supported values: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+filepath to the client certificate (or certificate chain), paired with the client's private key, to use for mTLS communication, in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+  - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+  - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+  - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+  - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+  - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
new file mode 100644
index 000000000..701deb6d3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
@@ -0,0 +1,151 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Exporter is an OpenTelemetry metric Exporter using protobufs over HTTP.
+type Exporter struct {
+	// Ensure synchronous access to the client across all functionality.
+ clientMu sync.Mutex + client interface { + UploadMetrics(context.Context, *metricpb.ResourceMetrics) error + Shutdown(context.Context) error + } + + temporalitySelector metric.TemporalitySelector + aggregationSelector metric.AggregationSelector + + shutdownOnce sync.Once +} + +func newExporter(c *client, cfg oconf.Config) (*Exporter, error) { + ts := cfg.Metrics.TemporalitySelector + if ts == nil { + ts = func(metric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality + } + } + + as := cfg.Metrics.AggregationSelector + if as == nil { + as = metric.DefaultAggregationSelector + } + + return &Exporter{ + client: c, + + temporalitySelector: ts, + aggregationSelector: as, + }, nil +} + +// Temporality returns the Temporality to use for an instrument kind. +func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality { + return e.temporalitySelector(k) +} + +// Aggregation returns the Aggregation to use for an instrument kind. +func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation { + return e.aggregationSelector(k) +} + +// Export transforms and transmits metric data to an OTLP receiver. +// +// This method returns an error if called after Shutdown. +// This method returns an error if the method is canceled by the passed context. +func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { + defer global.Debug("OTLP/HTTP exporter export", "Data", rm) + + otlpRm, err := transform.ResourceMetrics(rm) + // Best effort upload of transformable metrics. + e.clientMu.Lock() + upErr := e.client.UploadMetrics(ctx, otlpRm) + e.clientMu.Unlock() + if upErr != nil { + if err == nil { + return fmt.Errorf("failed to upload metrics: %w", upErr) + } + // Merge the two errors. + return fmt.Errorf("failed to upload incomplete metrics (%w): %w", err, upErr) + } + return err +} + +// ForceFlush flushes any metric data held by an exporter. +// +// This method returns an error if called after Shutdown. +// This method returns an error if the method is canceled by the passed context. +// +// This method is safe to call concurrently. +func (e *Exporter) ForceFlush(ctx context.Context) error { + // The exporter and client hold no state, nothing to flush. + return ctx.Err() +} + +// Shutdown flushes all metric data held by an exporter and releases any held +// computational resources. +// +// This method returns an error if called after Shutdown. +// This method returns an error if the method is canceled by the passed context. +// +// This method is safe to call concurrently. +func (e *Exporter) Shutdown(ctx context.Context) error { + err := errShutdown + e.shutdownOnce.Do(func() { + e.clientMu.Lock() + client := e.client + e.client = shutdownClient{} + e.clientMu.Unlock() + err = client.Shutdown(ctx) + }) + return err +} + +var errShutdown = fmt.Errorf("HTTP exporter is shutdown") + +type shutdownClient struct{} + +func (c shutdownClient) err(ctx context.Context) error { + if err := ctx.Err(); err != nil { + return err + } + return errShutdown +} + +func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error { + return c.err(ctx) +} + +func (c shutdownClient) Shutdown(ctx context.Context) error { + return c.err(ctx) +} + +// MarshalLog returns logging data about the Exporter. +func (e *Exporter) MarshalLog() interface{} { + return struct{ Type string }{Type: "OTLP/HTTP"} +} + +// New returns an OpenTelemetry metric Exporter. 
The Exporter can be used with +// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving +// endpoint using protobufs over HTTP. +func New(_ context.Context, opts ...Option) (*Exporter, error) { + cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) + c, err := newClient(cfg) + if err != nil { + return nil, err + } + return newExporter(c, cfg) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go new file mode 100644 index 000000000..7ac42759f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go @@ -0,0 +1,215 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/envconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + "unicode" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. +func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. 
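+
+// Editor's note: an illustrative sketch (not part of the upstream file) of
+// how these option readers surface to users: plain environment variables,
+// read once when the exporter is constructed. The values are placeholders.
+//
+//	_ = os.Setenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "https://collector.example:4318/v1/metrics")
+//	_ = os.Setenv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT", "5000") // milliseconds, parsed by WithDuration
+//	_ = os.Setenv("OTEL_EXPORTER_OTLP_METRICS_HEADERS", "api-key=secret") // parsed by stringToHeader
+//	exp, err := otlpmetrichttp.New(context.Background())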
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool, which is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate, which is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		vc, okc := e.GetEnvValue(nc)
+		vk, okk := e.GetEnvValue(nk)
+		if !okc || !okk {
+			return
+		}
+		cert, err := e.ReadFile(vc)
+		if err != nil {
+			global.Error(err, "read tls client cert", "file", vc)
+			return
+		}
+		key, err := e.ReadFile(vk)
+		if err != nil {
+			global.Error(err, "read tls client key", "file", vk)
+			return
+		}
+		crt, err := tls.X509KeyPair(cert, key)
+		if err != nil {
+			global.Error(err, "create tls client key pair")
+			return
+		}
+		fn(crt)
+	}
+}
+
+func keyWithNamespace(ns, key string) string {
+	if ns == "" {
+		return key
+	}
+	return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+	headersPairs := strings.Split(value, ",")
+	headers := make(map[string]string)
+
+	for _, header := range headersPairs {
+		n, v, found := strings.Cut(header, "=")
+		if !found {
+			global.Error(errors.New("missing '='"), "parse headers", "input", header)
+			continue
+		}
+
+		trimmedName := strings.TrimSpace(n)
+
+		// Validate the key.
+		if !isValidHeaderKey(trimmedName) {
+			global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
+			continue
+		}
+
+		// Only decode the value.
+		value, err := url.PathUnescape(v)
+		if err != nil {
+			global.Error(err, "escape header value", "value", v)
+			continue
+		}
+		trimmedValue := strings.TrimSpace(value)
+
+		headers[trimmedName] = trimmedValue
+	}
+
+	return headers
+}
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+	return cp, nil
+}
+
+func isValidHeaderKey(key string) bool {
+	if key == "" {
+		return false
+	}
+	for _, c := range key {
+		if !isTokenChar(c) {
+			return false
+		}
+	}
+	return true
+}
+
+func isTokenChar(c rune) bool {
+	return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+		unicode.IsDigit(c) ||
+		c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+		c == '+' || c == '-' || c == '.'
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go new file mode 100644 index 000000000..1b379f10c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry\"}" --out=oconf/options.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/options_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go +//go:generate gotmpl 
--body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go new file mode 100644 index 000000000..89b134a39 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go @@ -0,0 +1,210 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Metrics.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Metrics.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. 
+ path := u.Path + if path == "" { + path = "/" + } + cfg.Metrics.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), + withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), + ) + + return opts +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Metrics.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
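+
+// Editor's note: a worked example (not part of the upstream file) of the two
+// endpoint rules implemented above, using placeholder URLs:
+//
+//	OTEL_EXPORTER_OTLP_ENDPOINT=https://collector.example:4318/base
+//	    -> metrics path becomes "/base/v1/metrics" (base URL plus signal path)
+//	OTEL_EXPORTER_OTLP_METRICS_ENDPOINT=https://collector.example:4318/custom
+//	    -> metrics path stays "/custom" (per-signal URL is used as-is)
+//	OTEL_EXPORTER_OTLP_METRICS_ENDPOINT=https://collector.example:4318
+//	    -> metrics path becomes "/" (an empty path falls back to the root)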
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} + +func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if s, ok := e.GetEnvValue(n); ok { + switch strings.ToLower(s) { + case "cumulative": + fn(cumulativeTemporality) + case "delta": + fn(deltaTemporality) + case "lowmemory": + fn(lowMemory) + default: + global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) + } + } + } +} + +func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { + switch ik { + case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { + switch ik { + case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if s, ok := e.GetEnvValue(n); ok { + switch strings.ToLower(s) { + case "explicit_bucket_histogram": + fn(metric.DefaultAggregationSelector) + case "base2_exponential_bucket_histogram": + fn(func(kind metric.InstrumentKind) metric.Aggregation { + if kind == metric.InstrumentKindHistogram { + return metric.AggregationBase2ExponentialHistogram{ + MaxSize: 160, + MaxScale: 20, + NoMinMax: false, + } + } + return metric.DefaultAggregationSelector(kind) + }) + default: + global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) + } + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go new file mode 100644 index 000000000..9bbf0941f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -0,0 +1,376 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+const (
+	// DefaultMaxAttempts describes how many times the driver
+	// should retry the sending of the payload in case of a
+	// retryable error.
+	DefaultMaxAttempts int = 5
+	// DefaultMetricsPath is the default URL path for the endpoint that
+	// receives metrics.
+	DefaultMetricsPath string = "/v1/metrics"
+	// DefaultBackoff is a default base backoff time used in the
+	// exponential backoff strategy.
+	DefaultBackoff time.Duration = 300 * time.Millisecond
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span or metrics batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+	// This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
+	HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+
+		TemporalitySelector metric.TemporalitySelector
+		AggregationSelector metric.AggregationSelector
+
+		Proxy HTTPTransportProxyFunc
+	}
+
+	Config struct {
+		// Signal-specific configurations
+		Metrics SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
+	return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := Config{
+		Metrics: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultMetricsPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+
+			TemporalitySelector: metric.DefaultTemporalitySelector,
+			AggregationSelector: metric.DefaultAggregationSelector,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Metrics.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
+	} else if cfg.Metrics.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Metrics.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Metrics.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so that future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so that future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users from implementing the
+		// interface, so that future additions to it will not
+		// violate compatibility.
+		private()
+	}
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+	fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+	return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+	return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. +type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Endpoint = endpoint + return cfg + }) +} + +func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlpmetric: parse endpoint url", "url", v) + return cfg + } + + cfg.Metrics.Endpoint = u.Host + cfg.Metrics.URLPath = u.Path + if u.Scheme != "https" { + cfg.Metrics.Insecure = true + } + + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Metrics.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.Timeout = duration + return cfg + }) +} + +func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.TemporalitySelector = selector + return cfg + }) +} + +func WithAggregationSelector(selector metric.AggregationSelector) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Metrics.AggregationSelector = selector + return cfg + }) +} + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return 
newGenericOption(func(cfg Config) Config {
+		cfg.Metrics.Proxy = pf
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
new file mode 100644
index 000000000..ae3d09787
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go
@@ -0,0 +1,47 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import "time"
+
+const (
+	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
+	DefaultCollectorGRPCPort uint16 = 4317
+	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
+	DefaultCollectorHTTPPort uint16 = 4318
+	// DefaultCollectorHost is the host address the Exporter will attempt
+	// to connect to if no collector address is provided.
+	DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression Compression = iota
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression
+)
+
+// RetrySettings defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type RetrySettings struct {
+	// Enabled indicates whether to retry sending batches in case of export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is reached, the delay between
+	// consecutive retries will always be `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
+	// Once this value is reached, the data is discarded.
+	MaxElapsedTime time.Duration
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
new file mode 100644
index 000000000..ae09ad57e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go
@@ -0,0 +1,38 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"os"
+)
+
+// ReadTLSConfigFromFile reads a PEM certificate file and creates
+// a tls.Config that will use this certificate to verify a server certificate.
+func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTLSConfig(b)
+}
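+
+// Editor's note: a minimal sketch (not part of the upstream file) of the
+// equivalent public-API path: loading a CA bundle by hand and passing it to
+// the exporter through WithTLSClientConfig. The file path is a placeholder.
+//
+//	pem, err := os.ReadFile("/etc/otel/ca.pem")
+//	if err != nil {
+//		// handle the read error
+//	}
+//	pool := x509.NewCertPool()
+//	if !pool.AppendCertsFromPEM(pem) {
+//		// handle invalid certificate data
+//	}
+//	exp, err := otlpmetrichttp.New(ctx,
+//		otlpmetrichttp.WithTLSClientConfig(&tls.Config{RootCAs: pool}),
+//	)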
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+	cp := x509.NewCertPool()
+	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+		return nil, errors.New("failed to append certificate to the cert pool")
+	}
+
+	return &tls.Config{
+		RootCAs: cp,
+	}, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
new file mode 100644
index 000000000..ed93844a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+	ErrorMessage  string
+	RejectedItems int64
+	RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+	msg := ps.ErrorMessage
+	if msg == "" {
+		msg = "empty message"
+	}
+	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+	_, ok := err.(PartialSuccess)
+	return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "spans",
+	}
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+	return PartialSuccess{
+		ErrorMessage:  errorMessage,
+		RejectedItems: itemsRejected,
+		RejectedKind:  "metric data points",
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
new file mode 100644
index 000000000..a9a08ffe6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
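+
+// Editor's note: a usage sketch (not part of the upstream file) showing how
+// this policy is tuned through the exporter's public WithRetry option. The
+// durations are placeholders.
+//
+//	exp, err := otlpmetrichttp.New(ctx, otlpmetrichttp.WithRetry(otlpmetrichttp.RetryConfig{
+//		Enabled:         true,
+//		InitialInterval: 1 * time.Second,  // first wait after a retryable failure
+//		MaxInterval:     10 * time.Second, // cap on the exponential backoff
+//		MaxElapsedTime:  30 * time.Second, // give up and drop the data after this
+//	}))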
+// DefaultConfig is the recommended default configuration to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached, the delay between consecutive retries will always be
+	// `MaxInterval`.
+	MaxInterval time.Duration
+	// MaxElapsedTime is the maximum amount of time (including retries) spent
+	// trying to send a request/batch. Once this value is reached, the data
+	// is discarded.
+	MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retry-able and, if so, any
+// explicit throttle duration included in the error that should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc using the evaluate function to determine
+// if requests can be retried, based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+	if !c.Enabled {
+		return func(ctx context.Context, fn func(context.Context) error) error {
+			return fn(ctx)
+		}
+	}
+
+	return func(ctx context.Context, fn func(context.Context) error) error {
+		// Do not use NewExponentialBackOff since it calls Reset and the code here
+		// must call Reset after changing the InitialInterval (this saves an
+		// unnecessary call to Now).
+		b := &backoff.ExponentialBackOff{
+			InitialInterval:     c.InitialInterval,
+			RandomizationFactor: backoff.DefaultRandomizationFactor,
+			Multiplier:          backoff.DefaultMultiplier,
+			MaxInterval:         c.MaxInterval,
+			MaxElapsedTime:      c.MaxElapsedTime,
+			Stop:                backoff.Stop,
+			Clock:               backoff.SystemClock,
+		}
+		b.Reset()
+
+		for {
+			err := fn(ctx)
+			if err == nil {
+				return nil
+			}
+
+			retryable, throttle := evaluate(err)
+			if !retryable {
+				return err
+			}
+
+			bOff := b.NextBackOff()
+			if bOff == backoff.Stop {
+				return fmt.Errorf("max retry time elapsed: %w", err)
+			}
+
+			// Wait for the greater of the backoff or throttle delay.
+			var delay time.Duration
+			if bOff > throttle {
+				delay = bOff
+			} else {
+				elapsed := b.GetElapsedTime()
+				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+					return fmt.Errorf("max retry time would elapse: %w", err)
+				}
+				delay = throttle
+			}
+
+			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+				return fmt.Errorf("%w: %w", ctxErr, err)
+			}
+		}
+	}
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go new file mode 100644 index 000000000..d607da78e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go @@ -0,0 +1,144 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" + +import ( + "go.opentelemetry.io/otel/attribute" + cpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// AttrIter transforms an attribute iterator into OTLP key-values. +func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. +func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. +func KeyValue(kv attribute.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue. 
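+
+// Editor's note: a small sketch (not part of the upstream file) of these
+// helpers in use, converting an SDK attribute set into OTLP key-values:
+//
+//	set := attribute.NewSet(
+//		attribute.String("service.name", "demo"),
+//		attribute.Int("replicas", 3),
+//	)
+//	iter := set.Iter()
+//	kvs := AttrIter(iter)
+//	// kvs now holds one *cpb.KeyValue per attribute ("replicas" and
+//	// "service.name"), with values wrapped as AnyValue int and string kinds.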
+func Value(v attribute.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go new file mode 100644 index 000000000..bb6d21f0b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go @@ -0,0 +1,103 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" + +import ( + "errors" + "fmt" + "strings" + + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" +) + +var ( + errUnknownAggregation = errors.New("unknown aggregation") + errUnknownTemporality = errors.New("unknown temporality") +) + +type errMetric struct { + m *mpb.Metric + err error +} + +func (e errMetric) Unwrap() error { + return e.err +} + +func (e errMetric) Error() string { + format := "invalid metric (name: %q, description: %q, unit: %q): %s" + return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err) +} + +func (e errMetric) Is(target error) bool { + return errors.Is(e.err, target) +} + +// multiErr is used by the data-type transform functions to wrap multiple +// errors into a single return value. The error message will show all errors +// as a list and scope them by the datatype name that is returning them. +type multiErr struct { + datatype string + errs []error +} + +// errOrNil returns nil if e contains no errors, otherwise it returns e. +func (e *multiErr) errOrNil() error { + if len(e.errs) == 0 { + return nil + } + return e +} + +// append adds err to e. If err is a multiErr, its errs are flattened into e. +func (e *multiErr) append(err error) { + // Do not use errors.As here, this should only be flattened one layer. If + // there is a *multiErr several steps down the chain, all the errors above + // it will be discarded if errors.As is used instead. + switch other := err.(type) { //nolint:errorlint + case *multiErr: + // Flatten err errors into e. + e.errs = append(e.errs, other.errs...) + default: + e.errs = append(e.errs, err) + } +} + +func (e *multiErr) Error() string { + es := make([]string, len(e.errs)) + for i, err := range e.errs { + es[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred transforming %s:\n\t%s" + return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t")) +} + +func (e *multiErr) Unwrap() error { + switch len(e.errs) { + case 0: + return nil + case 1: + return e.errs[0] + } + + // Return a multiErr without the leading error. + cp := &multiErr{ + datatype: e.datatype, + errs: make([]error, len(e.errs)-1), + } + copy(cp.errs, e.errs[1:]) + return cp +} + +func (e *multiErr) Is(target error) bool { + if len(e.errs) == 0 { + return false + } + // Check if the first error is target. + return errors.Is(e.errs[0], target) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go new file mode 100644 index 000000000..0a1a65c44 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go @@ -0,0 +1,352 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package transform provides transformation functionality from the +// sdk/metric/metricdata data-types into OTLP data-types. 
+package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" + +import ( + "fmt" + "time" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" + cpb "go.opentelemetry.io/proto/otlp/common/v1" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm +// contains invalid ScopeMetrics, an error will be returned along with an OTLP +// ResourceMetrics that contains partial OTLP ScopeMetrics. +func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) { + sms, err := ScopeMetrics(rm.ScopeMetrics) + return &mpb.ResourceMetrics{ + Resource: &rpb.Resource{ + Attributes: AttrIter(rm.Resource.Iter()), + }, + ScopeMetrics: sms, + SchemaUrl: rm.Resource.SchemaURL(), + }, err +} + +// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If +// sms contains invalid metric values, an error will be returned along with a +// slice that contains partial OTLP ScopeMetrics. +func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { + errs := &multiErr{datatype: "ScopeMetrics"} + out := make([]*mpb.ScopeMetrics, 0, len(sms)) + for _, sm := range sms { + ms, err := Metrics(sm.Metrics) + if err != nil { + errs.append(err) + } + + out = append(out, &mpb.ScopeMetrics{ + Scope: &cpb.InstrumentationScope{ + Name: sm.Scope.Name, + Version: sm.Scope.Version, + }, + Metrics: ms, + SchemaUrl: sm.Scope.SchemaURL, + }) + } + return out, errs.errOrNil() +} + +// Metrics returns a slice of OTLP Metric generated from ms. If ms contains +// invalid metric values, an error will be returned along with a slice that +// contains partial OTLP Metrics. +func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) { + errs := &multiErr{datatype: "Metrics"} + out := make([]*mpb.Metric, 0, len(ms)) + for _, m := range ms { + o, err := metric(m) + if err != nil { + // Do not include invalid data. Drop the metric, report the error. + errs.append(errMetric{m: o, err: err}) + continue + } + out = append(out, o) + } + return out, errs.errOrNil() +} + +func metric(m metricdata.Metrics) (*mpb.Metric, error) { + var err error + out := &mpb.Metric{ + Name: m.Name, + Description: m.Description, + Unit: m.Unit, + } + switch a := m.Data.(type) { + case metricdata.Gauge[int64]: + out.Data = Gauge[int64](a) + case metricdata.Gauge[float64]: + out.Data = Gauge[float64](a) + case metricdata.Sum[int64]: + out.Data, err = Sum[int64](a) + case metricdata.Sum[float64]: + out.Data, err = Sum[float64](a) + case metricdata.Histogram[int64]: + out.Data, err = Histogram(a) + case metricdata.Histogram[float64]: + out.Data, err = Histogram(a) + case metricdata.ExponentialHistogram[int64]: + out.Data, err = ExponentialHistogram(a) + case metricdata.ExponentialHistogram[float64]: + out.Data, err = ExponentialHistogram(a) + case metricdata.Summary: + out.Data = Summary(a) + default: + return out, fmt.Errorf("%w: %T", errUnknownAggregation, a) + } + return out, err +} + +// Gauge returns an OTLP Metric_Gauge generated from g. +func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge { + return &mpb.Metric_Gauge{ + Gauge: &mpb.Gauge{ + DataPoints: DataPoints(g.DataPoints), + }, + } +} + +// Sum returns an OTLP Metric_Sum generated from s. An error is returned +// if the temporality of s is unknown. 
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) { + t, err := Temporality(s.Temporality) + if err != nil { + return nil, err + } + return &mpb.Metric_Sum{ + Sum: &mpb.Sum{ + AggregationTemporality: t, + IsMonotonic: s.IsMonotonic, + DataPoints: DataPoints(s.DataPoints), + }, + }, nil +} + +// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts. +func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint { + out := make([]*mpb.NumberDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + ndp := &mpb.NumberDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Exemplars: Exemplars(dPt.Exemplars), + } + switch v := any(dPt.Value).(type) { + case int64: + ndp.Value = &mpb.NumberDataPoint_AsInt{ + AsInt: v, + } + case float64: + ndp.Value = &mpb.NumberDataPoint_AsDouble{ + AsDouble: v, + } + } + out = append(out, ndp) + } + return out +} + +// Histogram returns an OTLP Metric_Histogram generated from h. An error is +// returned if the temporality of h is unknown. +func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) { + t, err := Temporality(h.Temporality) + if err != nil { + return nil, err + } + return &mpb.Metric_Histogram{ + Histogram: &mpb.Histogram{ + AggregationTemporality: t, + DataPoints: HistogramDataPoints(h.DataPoints), + }, + }, nil +} + +// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated +// from dPts. +func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint { + out := make([]*mpb.HistogramDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sum := float64(dPt.Sum) + hdp := &mpb.HistogramDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: &sum, + BucketCounts: dPt.BucketCounts, + ExplicitBounds: dPt.Bounds, + Exemplars: Exemplars(dPt.Exemplars), + } + if v, ok := dPt.Min.Value(); ok { + vF64 := float64(v) + hdp.Min = &vF64 + } + if v, ok := dPt.Max.Value(); ok { + vF64 := float64(v) + hdp.Max = &vF64 + } + out = append(out, hdp) + } + return out +} + +// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is +// returned if the temporality of h is unknown. +func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { + t, err := Temporality(h.Temporality) + if err != nil { + return nil, err + } + return &mpb.Metric_ExponentialHistogram{ + ExponentialHistogram: &mpb.ExponentialHistogram{ + AggregationTemporality: t, + DataPoints: ExponentialHistogramDataPoints(h.DataPoints), + }, + }, nil +} + +// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated +// from dPts. 
+func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { + out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sum := float64(dPt.Sum) + ehdp := &mpb.ExponentialHistogramDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: &sum, + Scale: dPt.Scale, + ZeroCount: dPt.ZeroCount, + Exemplars: Exemplars(dPt.Exemplars), + + Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket), + Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket), + } + if v, ok := dPt.Min.Value(); ok { + vF64 := float64(v) + ehdp.Min = &vF64 + } + if v, ok := dPt.Max.Value(); ok { + vF64 := float64(v) + ehdp.Max = &vF64 + } + out = append(out, ehdp) + } + return out +} + +// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated +// from bucket. +func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { + return &mpb.ExponentialHistogramDataPoint_Buckets{ + Offset: bucket.Offset, + BucketCounts: bucket.Counts, + } +} + +// Temporality returns an OTLP AggregationTemporality generated from t. If t +// is unknown, an error is returned along with the invalid +// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED. +func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { + switch t { + case metricdata.DeltaTemporality: + return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil + case metricdata.CumulativeTemporality: + return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil + default: + err := fmt.Errorf("%w: %s", errUnknownTemporality, t) + return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err + } +} + +// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC as uint64. +// The result is undefined if the Unix time +// in nanoseconds cannot be represented by an int64 +// (a date before the year 1678 or after 2262). +// timeUnixNano on the zero Time returns 0. +// The result does not depend on the location associated with t. +func timeUnixNano(t time.Time) uint64 { + if t.IsZero() { + return 0 + } + return uint64(t.UnixNano()) +} + +// Exemplars returns a slice of OTLP Exemplars generated from exemplars. +func Exemplars[N int64 | float64](exemplars []metricdata.Exemplar[N]) []*mpb.Exemplar { + out := make([]*mpb.Exemplar, 0, len(exemplars)) + for _, exemplar := range exemplars { + e := &mpb.Exemplar{ + FilteredAttributes: KeyValues(exemplar.FilteredAttributes), + TimeUnixNano: timeUnixNano(exemplar.Time), + SpanId: exemplar.SpanID, + TraceId: exemplar.TraceID, + } + switch v := any(exemplar.Value).(type) { + case int64: + e.Value = &mpb.Exemplar_AsInt{ + AsInt: v, + } + case float64: + e.Value = &mpb.Exemplar_AsDouble{ + AsDouble: v, + } + } + out = append(out, e) + } + return out +} + +// Summary returns an OTLP Metric_Summary generated from s. +func Summary(s metricdata.Summary) *mpb.Metric_Summary { + return &mpb.Metric_Summary{ + Summary: &mpb.Summary{ + DataPoints: SummaryDataPoints(s.DataPoints), + }, + } +} + +// SummaryDataPoints returns a slice of OTLP SummaryDataPoint generated from +// dPts. 
+func SummaryDataPoints(dPts []metricdata.SummaryDataPoint) []*mpb.SummaryDataPoint { + out := make([]*mpb.SummaryDataPoint, 0, len(dPts)) + for _, dPt := range dPts { + sdp := &mpb.SummaryDataPoint{ + Attributes: AttrIter(dPt.Attributes.Iter()), + StartTimeUnixNano: timeUnixNano(dPt.StartTime), + TimeUnixNano: timeUnixNano(dPt.Time), + Count: dPt.Count, + Sum: dPt.Sum, + QuantileValues: QuantileValues(dPt.QuantileValues), + } + out = append(out, sdp) + } + return out +} + +// QuantileValues returns a slice of OTLP SummaryDataPoint_ValueAtQuantile +// generated from quantiles. +func QuantileValues(quantiles []metricdata.QuantileValue) []*mpb.SummaryDataPoint_ValueAtQuantile { + out := make([]*mpb.SummaryDataPoint_ValueAtQuantile, 0, len(quantiles)) + for _, q := range quantiles { + quantile := &mpb.SummaryDataPoint_ValueAtQuantile{ + Quantile: q.Quantile, + Value: q.Value, + } + out = append(out, quantile) + } + return out +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go new file mode 100644 index 000000000..b758b7bb1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + +// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. +func Version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
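// Illustrative usage sketch for the retry Config defined in the vendored
// retry code earlier in this diff: it builds a RequestFunc from a Config and
// an EvaluateFunc and retries a transient failure. This is not part of the
// diff; the real package is vendored under internal/ and cannot be imported,
// so the "retry" import path below is a hypothetical local copy of the same
// API, and the interval values are arbitrary.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"example.com/local/retry" // hypothetical stand-in for the internal package
)

func main() {
	cfg := retry.Config{
		Enabled:         true,
		InitialInterval: 100 * time.Millisecond,
		MaxInterval:     time.Second,
		MaxElapsedTime:  5 * time.Second,
	}

	// Treat every error as retryable, with no explicit throttle hint.
	evaluate := func(err error) (bool, time.Duration) { return true, 0 }

	send := cfg.RequestFunc(evaluate)

	attempts := 0
	err := send(context.Background(), func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("transient export failure")
		}
		return nil
	})
	// The two failures back off (with jitter) before retrying, and the third
	// attempt succeeds, so err is nil and attempts is 3.
	fmt.Println(attempts, err)
}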
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md new file mode 100644 index 000000000..50802d5ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -0,0 +1,3 @@ +# OTLP Trace Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go new file mode 100644 index 000000000..3c1a625c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Client manages connections to the collector, handles the +// transformation of data into wire format, and the transmission of that +// data to the collector. +type Client interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Start should establish connection(s) to endpoint(s). It is + // called just once by the exporter, so the implementation + // does not need to worry about idempotence and locking. + Start(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Stop should close the connections. The function is called + // only once by the exporter, so the implementation does not + // need to worry about idempotence, but it may be called + // concurrently with UploadTraces, so proper + // locking is required. The function serves as a + // synchronization point - after the function returns, the + // process of closing connections is assumed to be finished. + Stop(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // UploadTraces should transform the passed traces to the wire + // format and send it to the collector. May be called + // concurrently. + UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go new file mode 100644 index 000000000..09ad5eadb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlptrace contains abstractions for OTLP span exporters. +See the official OTLP span exporter implementations: + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc], + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]. 
+*/ +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go new file mode 100644 index 000000000..3f0a518ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + tracesdk "go.opentelemetry.io/otel/sdk/trace" +) + +var errAlreadyStarted = errors.New("already started") + +// Exporter exports trace data in the OTLP wire format. +type Exporter struct { + client Client + + mu sync.RWMutex + started bool + + startOnce sync.Once + stopOnce sync.Once +} + +// ExportSpans exports a batch of spans. +func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error { + protoSpans := tracetransform.Spans(ss) + if len(protoSpans) == 0 { + return nil + } + + err := e.client.UploadTraces(ctx, protoSpans) + if err != nil { + return fmt.Errorf("traces export: %w", err) + } + return nil +} + +// Start establishes a connection to the receiving endpoint. +func (e *Exporter) Start(ctx context.Context) error { + err := errAlreadyStarted + e.startOnce.Do(func() { + e.mu.Lock() + e.started = true + e.mu.Unlock() + err = e.client.Start(ctx) + }) + + return err +} + +// Shutdown flushes all exports and closes all connections to the receiving endpoint. +func (e *Exporter) Shutdown(ctx context.Context) error { + e.mu.RLock() + started := e.started + e.mu.RUnlock() + + if !started { + return nil + } + + var err error + + e.stopOnce.Do(func() { + err = e.client.Stop(ctx) + e.mu.Lock() + e.started = false + e.mu.Unlock() + }) + + return err +} + +var _ tracesdk.SpanExporter = (*Exporter)(nil) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, client Client) (*Exporter, error) { + exp := NewUnstarted(client) + if err := exp.Start(ctx); err != nil { + return nil, err + } + return exp, nil +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(client Client) *Exporter { + return &Exporter{ + client: client, + } +} + +// MarshalLog is the marshaling function used by the logging system to represent this Exporter. +func (e *Exporter) MarshalLog() interface{} { + return struct { + Type string + Client Client + }{ + Type: "otlptrace", + Client: e.client, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go new file mode 100644 index 000000000..4571a5ca3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -0,0 +1,147 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. 
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// Iterator transforms an attribute iterator into OTLP key-values. +func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// ResourceAttributes transforms a Resource OTLP key-values. +func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { + return Iterator(res.Iter()) +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. +func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue { + return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue. +func Value(v attribute.Value) *commonpb.AnyValue { + av := new(commonpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &commonpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &commonpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &commonpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go 
b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go new file mode 100644 index 000000000..f6dd3decc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { + if il == (instrumentation.Scope{}) { + return nil + } + return &commonpb.InstrumentationScope{ + Name: il.Name, + Version: il.Version, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go new file mode 100644 index 000000000..db7b698a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/resource" + resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// Resource transforms a Resource into an OTLP Resource. +func Resource(r *resource.Resource) *resourcepb.Resource { + if r == nil { + return nil + } + return &resourcepb.Resource{Attributes: ResourceAttributes(r)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go new file mode 100644 index 000000000..c3c69c5a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP +// ResourceSpans. +func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { + if len(sdl) == 0 { + return nil + } + + rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + ssm := make(map[key]*tracepb.ScopeSpans) + + var resources int + for _, sd := range sdl { + if sd == nil { + continue + } + + rKey := sd.Resource().Equivalent() + k := key{ + r: rKey, + is: sd.InstrumentationScope(), + } + scopeSpan, iOk := ssm[k] + if !iOk { + // Either the resource or instrumentation scope were unknown. 
+			scopeSpan = &tracepb.ScopeSpans{
+				Scope:     InstrumentationScope(sd.InstrumentationScope()),
+				Spans:     []*tracepb.Span{},
+				SchemaUrl: sd.InstrumentationScope().SchemaURL,
+			}
+		}
+		scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+		ssm[k] = scopeSpan
+
+		rs, rOk := rsm[rKey]
+		if !rOk {
+			resources++
+			// The resource was unknown.
+			rs = &tracepb.ResourceSpans{
+				Resource:   Resource(sd.Resource()),
+				ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+				SchemaUrl:  sd.Resource().SchemaURL(),
+			}
+			rsm[rKey] = rs
+			continue
+		}
+
+		// The resource has been seen before. Check if the instrumentation
+		// library lookup also missed, because if so the new scopeSpan needs
+		// to be added to the ResourceSpans. Otherwise, the instrumentation
+		// library has already been seen and the span appended above is
+		// already included through the existing ScopeSpans reference.
+		if !iOk {
+			rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+		}
+	}
+
+	// Transform the categorized map into a slice.
+	rss := make([]*tracepb.ResourceSpans, 0, resources)
+	for _, rs := range rsm {
+		rss = append(rss, rs)
+	}
+	return rss
+}
+
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+	if sd == nil {
+		return nil
+	}
+
+	tid := sd.SpanContext().TraceID()
+	sid := sd.SpanContext().SpanID()
+
+	s := &tracepb.Span{
+		TraceId:                tid[:],
+		SpanId:                 sid[:],
+		TraceState:             sd.SpanContext().TraceState().String(),
+		Status:                 status(sd.Status().Code, sd.Status().Description),
+		StartTimeUnixNano:      uint64(sd.StartTime().UnixNano()),
+		EndTimeUnixNano:        uint64(sd.EndTime().UnixNano()),
+		Links:                  links(sd.Links()),
+		Kind:                   spanKind(sd.SpanKind()),
+		Name:                   sd.Name(),
+		Attributes:             KeyValues(sd.Attributes()),
+		Events:                 spanEvents(sd.Events()),
+		DroppedAttributesCount: uint32(sd.DroppedAttributes()),
+		DroppedEventsCount:     uint32(sd.DroppedEvents()),
+		DroppedLinksCount:      uint32(sd.DroppedLinks()),
+	}
+
+	if psid := sd.Parent().SpanID(); psid.IsValid() {
+		s.ParentSpanId = psid[:]
+	}
+	s.Flags = buildSpanFlags(sd.Parent())
+
+	return s
+}
+
+// status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+	var c tracepb.Status_StatusCode
+	switch status {
+	case codes.Ok:
+		c = tracepb.Status_STATUS_CODE_OK
+	case codes.Error:
+		c = tracepb.Status_STATUS_CODE_ERROR
+	default:
+		c = tracepb.Status_STATUS_CODE_UNSET
+	}
+	return &tracepb.Status{
+		Code:    c,
+		Message: message,
+	}
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+	if len(links) == 0 {
+		return nil
+	}
+
+	sl := make([]*tracepb.Span_Link, 0, len(links))
+	for _, otLink := range links {
+		// This redefinition is necessary to prevent otLink.*ID[:] copies
+		// being reused -- in short we need a new otLink per iteration.
+		otLink := otLink
+
+		tid := otLink.SpanContext.TraceID()
+		sid := otLink.SpanContext.SpanID()
+
+		flags := buildSpanFlags(otLink.SpanContext)
+
+		sl = append(sl, &tracepb.Span_Link{
+			TraceId:                tid[:],
+			SpanId:                 sid[:],
+			Attributes:             KeyValues(otLink.Attributes),
+			DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+			Flags:                  flags,
+		})
+	}
+	return sl
+}
+
+func buildSpanFlags(sc trace.SpanContext) uint32 {
+	flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+	if sc.IsRemote() {
+		flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+	}
+
+	return uint32(flags)
+}
+
+// spanEvents transforms span Events to OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { + if len(es) == 0 { + return nil + } + + events := make([]*tracepb.Span_Event, len(es)) + // Transform message events + for i := 0; i < len(es); i++ { + events[i] = &tracepb.Span_Event{ + Name: es[i].Name, + TimeUnixNano: uint64(es[i].Time.UnixNano()), + Attributes: KeyValues(es[i].Attributes), + DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + } + } + return events +} + +// spanKind transforms a SpanKind to an OTLP span kind. +func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindInternal: + return tracepb.Span_SPAN_KIND_INTERNAL + case trace.SpanKindClient: + return tracepb.Span_SPAN_KIND_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SPAN_KIND_SERVER + case trace.SpanKindProducer: + return tracepb.Span_SPAN_KIND_PRODUCER + case trace.SpanKindConsumer: + return tracepb.Span_SPAN_KIND_CONSUMER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
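// Illustrative usage sketch (not part of this diff): wiring the otlptrace
// Exporter from exporter.go above to the gRPC client added in the files that
// follow. These are the public APIs of the vendored packages; the endpoint
// is an assumed placeholder for a local OpenTelemetry collector.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// NewClient only configures the client; otlptrace.New calls Start, which
	// dials the collector (see client.Start below).
	client := otlptracegrpc.NewClient(
		otlptracegrpc.WithEndpoint("localhost:4317"), // assumed local collector
		otlptracegrpc.WithInsecure(),
	)
	exp, err := otlptrace.New(ctx, client)
	if err != nil {
		log.Fatalf("creating OTLP trace exporter: %v", err)
	}

	// Register the exporter behind a batching span processor so spans reach
	// Exporter.ExportSpans in batches.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() {
		// TracerProvider.Shutdown flushes the batcher and shuts down the
		// exporter, which in turn stops the client (Exporter.Shutdown above).
		if err := tp.Shutdown(ctx); err != nil {
			log.Printf("shutting down tracer provider: %v", err)
		}
	}()
}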
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md new file mode 100644 index 000000000..5309bb7cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Trace gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go new file mode 100644 index 000000000..3993df927 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -0,0 +1,295 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + "errors" + "sync" + "time" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +type client struct { + endpoint string + dialOpts []grpc.DialOption + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // stopCtx is used as a parent context for all exports. Therefore, when it + // is canceled with the stopFunc all exports are canceled. + stopCtx context.Context + // stopFunc cancels stopCtx, stopping any active exports. + stopFunc context.CancelFunc + + // ourConn keeps track of where conn was created: true if created here on + // Start, or false if passed with an option. This is important on Shutdown + // as the conn should only be closed if created here on start. Otherwise, + // it is up to the processes that passed the conn to close it. + ourConn bool + conn *grpc.ClientConn + tscMu sync.RWMutex + tsc coltracepb.TraceServiceClient +} + +// Compile time check *client implements otlptrace.Client. +var _ otlptrace.Client = (*client)(nil) + +// NewClient creates a new gRPC trace client. +func NewClient(opts ...Option) otlptrace.Client { + return newClient(opts...) +} + +func newClient(opts ...Option) *client { + cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) + + ctx, cancel := context.WithCancel(context.Background()) + + c := &client{ + endpoint: cfg.Traces.Endpoint, + exportTimeout: cfg.Traces.Timeout, + requestFunc: cfg.RetryConfig.RequestFunc(retryable), + dialOpts: cfg.DialOptions, + stopCtx: ctx, + stopFunc: cancel, + conn: cfg.GRPCConn, + } + + if len(cfg.Traces.Headers) > 0 { + c.metadata = metadata.New(cfg.Traces.Headers) + } + + return c +} + +// Start establishes a gRPC connection to the collector. 
+func (c *client) Start(context.Context) error { + if c.conn == nil { + // If the caller did not provide a ClientConn when the client was + // created, create one using the configuration they did provide. + conn, err := grpc.NewClient(c.endpoint, c.dialOpts...) + if err != nil { + return err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn + } + + // The otlptrace.Client interface states this method is called just once, + // so no need to check if already started. + c.tscMu.Lock() + c.tsc = coltracepb.NewTraceServiceClient(c.conn) + c.tscMu.Unlock() + + return nil +} + +var errAlreadyStopped = errors.New("the client is already stopped") + +// Stop shuts down the client. +// +// Any active connections to a remote endpoint are closed if they were created +// by the client. Any gRPC connection passed during creation using +// WithGRPCConn will not be closed. It is the caller's responsibility to +// handle cleanup of that resource. +// +// This method synchronizes with the UploadTraces method of the client. It +// will wait for any active calls to that method to complete unimpeded, or it +// will cancel any active calls if ctx expires. If ctx expires, the context +// error will be forwarded as the returned error. All client held resources +// will still be released in this situation. +// +// If the client has already stopped, an error will be returned describing +// this. +func (c *client) Stop(ctx context.Context) error { + // Make sure to return context error if the context is done when calling this method. + err := ctx.Err() + + // Acquire the c.tscMu lock within the ctx lifetime. + acquired := make(chan struct{}) + go func() { + c.tscMu.Lock() + close(acquired) + }() + + select { + case <-ctx.Done(): + // The Stop timeout is reached. Kill any remaining exports to force + // the clear of the lock and save the timeout error to return and + // signal the shutdown timed out before cleanly stopping. + c.stopFunc() + err = ctx.Err() + + // To ensure the client is not left in a dirty state c.tsc needs to be + // set to nil. To avoid the race condition when doing this, ensure + // that all the exports are killed (initiated by c.stopFunc). + <-acquired + case <-acquired: + } + // Hold the tscMu lock for the rest of the function to ensure no new + // exports are started. + defer c.tscMu.Unlock() + + // The otlptrace.Client interface states this method is called only + // once, but there is no guarantee it is called after Start. Ensure the + // client is started before doing anything and let the caller know if they + // made a mistake. + if c.tsc == nil { + return errAlreadyStopped + } + + // Clear c.tsc to signal the client is stopped. + c.tsc = nil + + if c.ourConn { + closeErr := c.conn.Close() + // A context timeout error takes precedence over this error. + if err == nil && closeErr != nil { + err = closeErr + } + } + return err +} + +var errShutdown = errors.New("the client is shutdown") + +// UploadTraces sends a batch of spans. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the client was created with. +func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { + // Hold a read lock to ensure a shut down initiated after this starts does + // not abandon the export. This read lock acquire has less priority than a + // write lock acquire (i.e. Stop), meaning if the client is shutting down + // this will come after the shut down.
+ c.tscMu.RLock() + defer c.tscMu.RUnlock() + + if c.tsc == nil { + return errShutdown + } + + ctx, cancel := c.exportContext(ctx) + defer cancel() + + return c.requestFunc(ctx, func(iCtx context.Context) error { + resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ + ResourceSpans: protoSpans, + }) + if resp != nil && resp.PartialSuccess != nil { + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } + // nil is converted to OK. + if status.Code(err) == codes.OK { + // Success. + return nil + } + return err + }) +} + +// exportContext returns a copy of parent with an appropriate deadline and +// cancellation function. +// +// It is the caller's responsibility to cancel the returned context once its +// use is complete, via the parent or directly with the returned CancelFunc, to +// ensure all resources are correctly released. +func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { + var ( + ctx context.Context + cancel context.CancelFunc + ) + + if c.exportTimeout > 0 { + ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + } else { + ctx, cancel = context.WithCancel(parent) + } + + if c.metadata.Len() > 0 { + ctx = metadata.NewOutgoingContext(ctx, c.metadata) + } + + // Unify the client stopCtx with the parent. + go func() { + select { + case <-ctx.Done(): + case <-c.stopCtx.Done(): + // Cancel the export as the shutdown has timed out. + cancel() + } + }() + + return ctx, cancel +} + +// retryable returns if err identifies a request that can be retried and a +// duration to wait for if an explicit throttle time is included in err. +func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + return retryableGRPCStatus(s) +} + +func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { + switch s.Code() { + case codes.Canceled, + codes.DeadlineExceeded, + codes.Aborted, + codes.OutOfRange, + codes.Unavailable, + codes.DataLoss: + // Additionally handle RetryInfo. + _, d := throttleDelay(s) + return true, d + case codes.ResourceExhausted: + // Retry only if the server signals that the recovery from resource exhaustion is possible. + return throttleDelay(s) + } + + // Not a retry-able error. + return false, 0 +} + +// throttleDelay reports whether the status carries a RetryInfo detail +// and, if so, the explicit throttle duration to wait for. +func throttleDelay(s *status.Status) (bool, time.Duration) { + for _, detail := range s.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return true, t.RetryDelay.AsDuration() + } + } + return false, 0 +} + +// MarshalLog is the marshaling function used by the logging system to represent this Client. +func (c *client) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + }{ + Type: "otlphttpgrpc", + Endpoint: c.endpoint, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go new file mode 100644 index 000000000..e783b57ac --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlptracegrpc provides an OTLP span exporter using gRPC. +By default the telemetry is sent to https://localhost:4317.
+ +Exporter should be created using [New]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - +target to which the exporter sends telemetry. +The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +The value must contain a host. +The value may additionally contain a port, a scheme, and a path. +The value accepts the "http" and "https" schemes. +The value should not contain a query string or fragment. +OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's gRPC connection. +You can use this only when an endpoint is provided without the http or https scheme. +The scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +takes precedence over this setting. +OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - +key-value pairs used as gRPC metadata associated with gRPC requests. +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by [WithHeaders] option. + +OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") - +maximum time in milliseconds the OTLP exporter waits for each batch export. +OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. +The configuration can be overridden by [WithTimeout] option. + +OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) - +the gRPC compressor the exporter uses. +Supported value: "gzip". +OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) - +the filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - +the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - +the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +*/ +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go new file mode 100644 index 000000000..b826b8424 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { + return otlptrace.New(ctx, NewClient(opts...)) +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(opts ...Option) *otlptrace.Exporter { + return otlptrace.NewUnstarted(NewClient(opts...)) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..9513c0a57 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -0,0 +1,191 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/envconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
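To ground the doc.go and exporter.go entries above, here is a minimal usage sketch (not part of the vendored sources), assuming the standard go.opentelemetry.io/otel and go.opentelemetry.io/otel/sdk/trace packages and a plaintext collector on the default localhost:4317:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// New constructs the gRPC client and starts it immediately.
	// WithInsecure matches a plaintext local collector; drop it when the
	// endpoint serves TLS.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatalf("create exporter: %v", err)
	}

	// Batch spans before export and install the provider globally.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	// Spans created through otel.Tracer(...) now flow out over OTLP/gRPC.
	_, span := otel.Tracer("example").Start(ctx, "demo-span")
	span.End()
}

NewUnstarted builds the same exporter without opening anything, leaving the Start call to the caller.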
+func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. +func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return + } + fn(u) + } + } +} + +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. +func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return + } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + n, v, found := strings.Cut(header, "=") + if !found { + global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } + name, err := url.PathUnescape(n) + if err != nil { + global.Error(err, "escape header key", "key", n) + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.PathUnescape(v) + if err != nil { + global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go new file mode 100644 index 000000000..97cd6c54f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go +//go:generate gotmpl 
--body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go new file mode 100644 index 000000000..7bb189a94 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -0,0 +1,142 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. 
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. + path := u.Path + if path == "" { + path = "/" + } + cfg.Traces.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) + + return opts +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
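As a standalone illustration (not part of the diff) of the behavior encoded in withEndpointScheme and withEndpointForGRPC above: the URL scheme decides transport security, and for gRPC the host and path are joined into the dial target. Standard library only:

package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func main() {
	for _, raw := range []string{
		"http://collector:4317",          // insecure: plain http
		"https://collector:4317/private", // secure; path kept in the gRPC target
		"unix:///var/run/otlp.sock",      // insecure: unix socket
	} {
		u, err := url.Parse(raw)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		// Mirrors withEndpointScheme: "http" and "unix" imply WithInsecure.
		insecure := false
		switch strings.ToLower(u.Scheme) {
		case "http", "unix":
			insecure = true
		}
		// Mirrors withEndpointForGRPC: the dial target is host joined with path.
		target := path.Join(u.Host, u.Path)
		fmt.Printf("%-34s insecure=%-5v target=%s\n", raw, insecure, target)
	}
}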
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go new file mode 100644 index 000000000..8f84a7996 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -0,0 +1,353 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "path" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" +) + +const ( + // DefaultTracesPath is a default URL path for endpoint that + // receives spans. + DefaultTracesPath string = "/v1/traces" + // DefaultTimeout is a default max waiting time for the backend to process + // each span batch. + DefaultTimeout time.Duration = 10 * time.Second +) + +type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + + SignalConfig struct { + Endpoint string + Insecure bool + TLSCfg *tls.Config + Headers map[string]string + Compression Compression + Timeout time.Duration + URLPath string + + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + Proxy HTTPTransportProxyFunc + } + + Config struct { + // Signal specific configurations + Traces SignalConfig + + RetryConfig retry.Config + + // gRPC configurations + ReconnectionPeriod time.Duration + ServiceConfig string + DialOptions []grpc.DialOption + GRPCConn *grpc.ClientConn + } +) + +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. 
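A usage sketch of the environment-driven path above (the collector host is a made-up example): with these variables set, the exporter needs no options in code, and the traces-specific forms take precedence over the generic ones:

package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Normally these come from the deployment environment; set here only
	// for illustration.
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "https://collector.example.com:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_HEADERS", "api-key=secret,tenant=dev")
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_TIMEOUT", "5000") // milliseconds
	os.Setenv("OTEL_EXPORTER_OTLP_TRACES_COMPRESSION", "gzip")

	// No options needed: NewGRPCConfig applies the env configuration.
	exp, err := otlptracegrpc.New(context.Background())
	if err != nil {
		log.Fatalf("create exporter: %v", err)
	}
	_ = exp
}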
+func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + } + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg +} + +// cleanPath returns a path with all spaces trimmed and all redundancies +// removed. If urlPath is empty or cleaning it results in an empty string, +// defaultPath is returned instead. +func cleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." { + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} + +// NewGRPCConfig returns a new Config with all settings applied from opts and +// any unset setting using the default gRPC config values. +func NewGRPCConfig(opts ...GRPCOption) Config { + userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, + } + cfg = ApplyGRPCEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + + if cfg.ServiceConfig != "" { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) + } + // Prioritize GRPCCredentials over Insecure (passing both is an error). + if cfg.Traces.GRPCCredentials != nil { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) + } else if cfg.Traces.Insecure { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + // Default to using the host's root CA. + creds := credentials.NewTLS(nil) + cfg.Traces.GRPCCredentials = creds + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) + } + if cfg.Traces.Compression == GzipCompression { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) + } + if cfg.ReconnectionPeriod != 0 { + p := grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: cfg.ReconnectionPeriod, + } + cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) + } + + return cfg +} + +type ( + // GenericOption applies an option to the HTTP or gRPC driver. + GenericOption interface { + ApplyHTTPOption(Config) Config + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // HTTPOption applies an option to the HTTP driver. + HTTPOption interface { + ApplyHTTPOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // GRPCOption applies an option to the gRPC driver. + GRPCOption interface { + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility.
+ private() + } +) + +// genericOption is an option that applies the same logic +// for both gRPC and HTTP. +type genericOption struct { + fn func(Config) Config +} + +func (g *genericOption) ApplyGRPCOption(cfg Config) Config { + return g.fn(cfg) +} + +func (g *genericOption) ApplyHTTPOption(cfg Config) Config { + return g.fn(cfg) +} + +func (genericOption) private() {} + +func newGenericOption(fn func(cfg Config) Config) GenericOption { + return &genericOption{fn: fn} +} + +// splitOption is an option that applies different logics +// for gRPC and HTTP. +type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. +type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Endpoint = endpoint + return cfg + }) +} + +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". 
+func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlptrace: parse endpoint url", "url", v) + return cfg + } + + cfg.Traces.Endpoint = u.Host + cfg.Traces.URLPath = u.Path + if u.Scheme != "https" { + cfg.Traces.Insecure = true + } + + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Traces.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Timeout = duration + return cfg + }) +} + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Proxy = pf + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go new file mode 100644 index 000000000..3d4f699d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -0,0 +1,40 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +const ( + // DefaultCollectorGRPCPort is the default gRPC port of the collector. + DefaultCollectorGRPCPort uint16 = 4317 + // DefaultCollectorHTTPPort is the default HTTP port of the collector. + DefaultCollectorHTTPPort uint16 = 4318 + // DefaultCollectorHost is the host address the Exporter will attempt + // connect to if no collector address is provided. + DefaultCollectorHost string = "localhost" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression int + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression Compression = iota + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression +) + +// Marshaler describes the kind of message format sent to the collector. 
+type Marshaler int + +const ( + // MarshalProto tells the driver to send using the protobuf binary format. + MarshalProto Marshaler = iota + // MarshalJSON tells the driver to send using json format. + MarshalJSON +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go new file mode 100644 index 000000000..38b97a013 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -0,0 +1,26 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" +) + +// CreateTLSConfig creates a tls.Config from a raw certificate bytes +// to verify a server certificate. +func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + + return &tls.Config{ + RootCAs: cp, + }, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go new file mode 100644 index 000000000..a12ea4c48 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -0,0 +1,56 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/partialsuccess.go + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +import "fmt" + +// PartialSuccess represents the underlying error for all handling +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +var _ error = PartialSuccess{} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +// Is supports the errors.Is() interface. +func (ps PartialSuccess) Is(err error) bool { + _, ok := err.(PartialSuccess) + return ok +} + +// TracePartialSuccessError returns an error describing a partial success +// response for the trace signal. +func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "spans", + } +} + +// MetricPartialSuccessError returns an error describing a partial success +// response for the metric signal. 
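The Is method above is what lets errors.Is(err, PartialSuccess{}) match any partial-success error regardless of its field values. Since the type lives in an internal package, the same pattern is reproduced standalone below; the lower-case names are illustrative, not the vendored identifiers:

package main

import (
	"errors"
	"fmt"
)

// partialSuccess mirrors the shape of the vendored PartialSuccess type.
type partialSuccess struct {
	ErrorMessage  string
	RejectedItems int64
	RejectedKind  string
}

func (ps partialSuccess) Error() string {
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)",
		ps.ErrorMessage, ps.RejectedItems, ps.RejectedKind)
}

// Is matches on the type, not the field values, so errors.Is treats every
// partialSuccess value as belonging to the same error category.
func (ps partialSuccess) Is(err error) bool {
	_, ok := err.(partialSuccess)
	return ok
}

func main() {
	err := fmt.Errorf("export: %w", partialSuccess{
		ErrorMessage:  "spans over limit",
		RejectedItems: 17,
		RejectedKind:  "spans",
	})

	// Matches despite differing field values, because Is compares types.
	if errors.Is(err, partialSuccess{}) {
		fmt.Println("partial success detected:", err)
	}
}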
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "metric data points", + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go new file mode 100644 index 000000000..4f2113ae2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -0,0 +1,145 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/retry/retry.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig are the recommended defaults to use. +var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to retry sending batches in case of + // export failure. + Enabled bool + // InitialInterval is the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`. + MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. +type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go new file mode 100644 index 000000000..bbad0e6d0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -0,0 +1,208 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" +) + +// Option applies an option to the gRPC driver. +type Option interface { + applyGRPCOption(otlpconfig.Config) otlpconfig.Config +} + +func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { + converted := make([]otlpconfig.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying export of span batches that +// failed to be received by the target endpoint. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + otlpconfig.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables client transport security for the exporter's gRPC +// connection just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. 
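To make the retry loop above concrete, here is a standalone sketch of the backoff progression it drives, built with the same github.com/cenkalti/backoff/v4 fields and the DefaultConfig values, folding in a hypothetical server throttle the way the exporter does:

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Same construction as the vendored RequestFunc: fields set explicitly,
	// then Reset so the elapsed-time clock starts now.
	b := &backoff.ExponentialBackOff{
		InitialInterval:     5 * time.Second,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         30 * time.Second,
		MaxElapsedTime:      time.Minute,
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	b.Reset()

	throttle := 10 * time.Second // e.g. from a gRPC RetryInfo detail
	for i := 0; i < 5; i++ {
		d := b.NextBackOff()
		if d == backoff.Stop {
			fmt.Println("max retry time elapsed; give up")
			return
		}
		// The exporter waits for the greater of the backoff and the throttle.
		if throttle > d {
			d = throttle
		}
		fmt.Printf("attempt %d: wait %v\n", i+1, d)
	}
}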
Note, by +// default, client security is required unless WithInsecure is used. +// +// This option has no effect if WithGRPCConn is used. +func WithInsecure() Option { + return wrappedOption{otlpconfig.WithInsecure()} +} + +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4317" (no +// scheme or path). +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{otlpconfig.WithEndpoint(endpoint)} +} + +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) +// the Exporter will connect to. The provided endpoint URL should resemble +// "https://example.com:4318/v1/traces". +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpoint are used, the last used option will +// take precedence. +// +// If an invalid URL is provided, the default value will be kept. +// +// By default, if an environment variable is not set, and this option is not +// passed, "https://localhost:4317/v1/traces" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpointURL(u string) Option { + return wrappedOption{otlpconfig.WithEndpointURL(u)} +} + +// WithReconnectionPeriod sets the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. +func WithReconnectionPeriod(rp time.Duration) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ReconnectionPeriod = rp + return cfg + })} +} + +func compressorToCompression(compressor string) otlpconfig.Compression { + if compressor == "gzip" { + return otlpconfig.GzipCompression + } + + otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) + return otlpconfig.NoCompression +} + +// WithCompressor sets the compressor for the gRPC client to use when sending +// requests. Supported compressor values: "gzip". +func WithCompressor(compressor string) Option { + return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} +} + +// WithHeaders will send the provided headers with each gRPC request. +func WithHeaders(headers map[string]string) Option { + return wrappedOption{otlpconfig.WithHeaders(headers)} +} + +// WithTLSCredentials allows the connection to use TLS credentials when +// talking to the server. It takes in grpc.TransportCredentials instead of say +// a Certificate file or a tls.Certificate, because the retrieving of these +// credentials can be done in many ways e.g. plain file, in code tls.Config or +// by certificate rotation, so it is up to the caller to decide what to use.
+ +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(creds credentials.TransportCredentials) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.Traces.GRPCCredentials = creds + return cfg + })} +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ServiceConfig = serviceConfig + return cfg + })} +} + +// WithDialOption sets explicit grpc.DialOptions to use when making a +// connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.DialOptions = opts + return cfg + })} +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the caller's responsibility to close the passed conn. The client +// Shutdown method will not close this connection. +func WithGRPCConn(conn *grpc.ClientConn) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.GRPCConn = conn + return cfg + })} +} + +// WithTimeout sets the max amount of time a client will attempt to export a +// batch of spans. This takes precedence over any retry settings defined with +// WithRetry, once this time limit has been reached the export is abandoned +// and the batch of spans is dropped. +// +// If unset, the default timeout will be set to 10 seconds. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{otlpconfig.WithTimeout(duration)} +} + +// WithRetry sets the retry policy for transient retryable errors that may be +// returned by the target endpoint when exporting a batch of spans. +// +// If the target endpoint responds with not only a retryable error, but also +// explicitly returns a backoff time in the response, that time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute.
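A short sketch of the WithGRPCConn ownership rule documented above: when the caller supplies the connection, the exporter's Shutdown leaves it open, so closing it remains the caller's job (the target address is a placeholder):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// The caller owns this connection; it could be shared with other OTLP
	// signal exporters over the same channel.
	conn, err := grpc.NewClient("collector.example.com:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	// Shutdown below will NOT close conn, so we must.
	defer conn.Close()

	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
	if err != nil {
		log.Fatalf("create exporter: %v", err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}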
+func WithRetry(settings RetryConfig) Option { + return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md new file mode 100644 index 000000000..365264009 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/README.md @@ -0,0 +1,3 @@ +# OTLP Trace HTTP Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go new file mode 100644 index 000000000..1c487a096 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -0,0 +1,348 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + +import ( + "bytes" + "compress/gzip" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +const contentTypeProto = "application/x-protobuf" + +var gzPool = sync.Pool{ + New: func() interface{} { + w := gzip.NewWriter(io.Discard) + return w + }, +} + +// Keep it in sync with golang's DefaultTransport from net/http! We +// have our own copy to avoid handling a situation where the +// DefaultTransport is overwritten with some different implementation +// of http.RoundTripper or is modified by another package.
+var ourTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, +} + +type client struct { + name string + cfg otlpconfig.SignalConfig + generalCfg otlpconfig.Config + requestFunc retry.RequestFunc + client *http.Client + stopCh chan struct{} + stopOnce sync.Once +} + +var _ otlptrace.Client = (*client)(nil) + +// NewClient creates a new HTTP trace client. +func NewClient(opts ...Option) otlptrace.Client { + cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) + + httpClient := &http.Client{ + Transport: ourTransport, + Timeout: cfg.Traces.Timeout, + } + + if cfg.Traces.TLSCfg != nil || cfg.Traces.Proxy != nil { + clonedTransport := ourTransport.Clone() + httpClient.Transport = clonedTransport + + if cfg.Traces.TLSCfg != nil { + clonedTransport.TLSClientConfig = cfg.Traces.TLSCfg + } + if cfg.Traces.Proxy != nil { + clonedTransport.Proxy = cfg.Traces.Proxy + } + } + + stopCh := make(chan struct{}) + return &client{ + name: "traces", + cfg: cfg.Traces, + generalCfg: cfg, + requestFunc: cfg.RetryConfig.RequestFunc(evaluate), + stopCh: stopCh, + client: httpClient, + } +} + +// Start does nothing in an HTTP client. +func (d *client) Start(ctx context.Context) error { + // nothing to do + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil +} + +// Stop shuts down the client and interrupts any in-flight requests. +func (d *client) Stop(ctx context.Context) error { + d.stopOnce.Do(func() { + close(d.stopCh) + }) + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + return nil +} + +// UploadTraces sends a batch of spans to the collector. +func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { + pbRequest := &coltracepb.ExportTraceServiceRequest{ + ResourceSpans: protoSpans, + } + rawRequest, err := proto.Marshal(pbRequest) + if err != nil { + return err + } + + ctx, cancel := d.contextWithStop(ctx) + defer cancel() + + request, err := d.newRequest(rawRequest) + if err != nil { + return err + } + + return d.requestFunc(ctx, func(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + request.reset(ctx) + resp, err := d.client.Do(request.Request) + var urlErr *url.Error + if errors.As(err, &urlErr) && urlErr.Temporary() { + return newResponseError(http.Header{}) + } + if err != nil { + return err + } + + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } + + switch sc := resp.StatusCode; { + case sc >= 200 && sc <= 299: + // Success, do not retry. + // Read the partial success message, if any.
+ var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { + return err + } + if respData.Len() == 0 { + return nil + } + + if resp.Header.Get("Content-Type") == "application/x-protobuf" { + var respProto coltracepb.ExportTraceServiceResponse + if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { + return err + } + + if respProto.PartialSuccess != nil { + msg := respProto.PartialSuccess.GetErrorMessage() + n := respProto.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } + } + return nil + + case sc == http.StatusTooManyRequests, + sc == http.StatusBadGateway, + sc == http.StatusServiceUnavailable, + sc == http.StatusGatewayTimeout: + // Retry-able failures. Drain the body to reuse the connection. + if _, err := io.Copy(io.Discard, resp.Body); err != nil { + otel.Handle(err) + } + return newResponseError(resp.Header) + default: + return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) + } + }) +} + +func (d *client) newRequest(body []byte) (request, error) { + u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} + r, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return request{Request: r}, err + } + + userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() + r.Header.Set("User-Agent", userAgent) + + for k, v := range d.cfg.Headers { + r.Header.Set(k, v) + } + r.Header.Set("Content-Type", contentTypeProto) + + req := request{Request: r} + switch Compression(d.cfg.Compression) { + case NoCompression: + r.ContentLength = (int64)(len(body)) + req.bodyReader = bodyReader(body) + case GzipCompression: + // Ensure the content length is not used. + r.ContentLength = -1 + r.Header.Set("Content-Encoding", "gzip") + + gz := gzPool.Get().(*gzip.Writer) + defer gzPool.Put(gz) + + var b bytes.Buffer + gz.Reset(&b) + + if _, err := gz.Write(body); err != nil { + return req, err + } + // Close needs to be called to ensure body is fully written. + if err := gz.Close(); err != nil { + return req, err + } + + req.bodyReader = bodyReader(b.Bytes()) + } + + return req, nil +} + +// MarshalLog is the marshaling function used by the logging system to represent this Client. +func (d *client) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + Insecure bool + }{ + Type: "otlptracehttp", + Endpoint: d.cfg.Endpoint, + Insecure: d.cfg.Insecure, + } +} + +// bodyReader returns a closure returning a new reader for buf. +func bodyReader(buf []byte) func() io.ReadCloser { + return func() io.ReadCloser { + return io.NopCloser(bytes.NewReader(buf)) + } +} + +// request wraps an http.Request with a resettable body reader. +type request struct { + *http.Request + + // bodyReader allows the same body to be used for multiple requests. + bodyReader func() io.ReadCloser +} + +// reset reinitializes the request Body and uses ctx for the request. +func (r *request) reset(ctx context.Context) { + r.Body = r.bodyReader() + r.Request = r.Request.WithContext(ctx) +} + +// retryableError represents a request failure that can be retried. +type retryableError struct { + throttle int64 +} + +// newResponseError returns a retryableError and will extract any explicit +// throttle delay contained in headers.
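To make the throttle signalling concrete, a same-package sketch of how newResponseError and evaluate (defined next) interpret a throttling response; the header value is illustrative, and Retry-After carries whole seconds:

// Illustration only (lives in the same package as the client).
func exampleRetryAfter() {
	header := http.Header{}
	header.Set("Retry-After", "15") // delay-seconds per RFC 9110

	err := newResponseError(header)
	retryable, throttle := evaluate(err)
	fmt.Println(retryable, throttle) // true 15s
}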
+func newResponseError(header http.Header) error { + var rErr retryableError + if s, ok := header["Retry-After"]; ok { + if t, err := strconv.ParseInt(s[0], 10, 64); err == nil { + rErr.throttle = t + } + } + return rErr +} + +func (e retryableError) Error() string { + return "retry-able request failure" +} + +// evaluate returns if err is retry-able. If it is and it includes an explicit +// throttling delay, that delay is also returned. +func evaluate(err error) (bool, time.Duration) { + if err == nil { + return false, 0 + } + + rErr, ok := err.(retryableError) + if !ok { + return false, 0 + } + + // Retry-After carries delay-seconds (RFC 9110), so convert the parsed + // value into a time.Duration accordingly. + return true, time.Duration(rErr.throttle) * time.Second +} + +func (d *client) getScheme() string { + if d.cfg.Insecure { + return "http" + } + return "https" +} + +func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) { + // Unify the parent context Done signal with the client's stop + // channel. + ctx, cancel := context.WithCancel(ctx) + go func(ctx context.Context, cancel context.CancelFunc) { + select { + case <-ctx.Done(): + // Nothing to do, either cancelled or deadline + // happened. + case <-d.stopCh: + cancel() + } + }(ctx, cancel) + return ctx, cancel +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go new file mode 100644 index 000000000..43534cbfb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlptracehttp provides an OTLP span exporter using HTTP with protobuf payloads. +By default, the telemetry is sent to https://localhost:4318/v1/traces. + +The Exporter should be created using [New]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") - +target base URL ("/v1/traces" is appended) to which the exporter sends telemetry. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port and a path. +The value should not contain a query string or fragment. +The configuration can be overridden by the OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +environment variable and by the [WithEndpoint], [WithEndpointURL], and [WithInsecure] options. + +OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") - +target URL to which the exporter sends telemetry. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port and a path. +The value should not contain a query string or fragment. +The configuration can be overridden by the [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. + +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - +key-value pairs used as headers associated with HTTP requests. +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by the [WithHeaders] option. + +OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") - +maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. +The configuration can be overridden by the [WithTimeout] option. + +OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) - +the compression strategy the exporter uses to compress the HTTP body. +Supported value: "gzip". +OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by the [WithCompression] option. + +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) - +the filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by the [WithTLSClientConfig] option. + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - +the filepath to the client certificate (or certificate chain), in PEM format, matching the client's private key, to use in mTLS communication. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by the [WithTLSClientConfig] option. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - +the filepath to the client's private key, in PEM format, to use in mTLS communication. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by the [WithTLSClientConfig] option. + +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +*/ +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go new file mode 100644 index 000000000..fae89ea4f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + +import ( + "context" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { + return otlptrace.New(ctx, NewClient(opts...)) +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(opts ...Option) *otlptrace.Exporter { + return otlptrace.NewUnstarted(NewClient(opts...)) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go new file mode 100644 index 000000000..26a316d00 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go @@ -0,0 +1,191 @@ +// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. +func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. +func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return + } + fn(u) + } + } +} + +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. +func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return + } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. 
If they exist, they are parsed as a crypto/tls.Certificate and passed to fn. +func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + n, v, found := strings.Cut(header, "=") + if !found { + global.Error(errors.New("missing '='"), "parse headers", "input", header) + continue + } + name, err := url.PathUnescape(n) + if err != nil { + global.Error(err, "escape header key", "key", n) + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.PathUnescape(v) + if err != nil { + global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go new file mode 100644 index 000000000..e4142b9d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\":
\"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry\"}" --out=otlpconfig/options.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/options_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go new file mode 100644 index 000000000..ff4141b6d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go @@ -0,0 +1,142 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. 
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. + path := u.Path + if path == "" { + path = "/" + } + cfg.Traces.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) + + return opts +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
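Returning to the endpoint handling above, the two path rules can be checked with a short, self-contained sketch (the URLs are hypothetical):

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Base endpoint (OTEL_EXPORTER_OTLP_ENDPOINT): the signal path is
	// appended relative to any path already present in the URL.
	base, _ := url.Parse("https://collector.example.com:4318/base")
	fmt.Println(path.Join(base.Path, "/v1/traces")) // /base/v1/traces

	// Per-signal endpoint (OTEL_EXPORTER_OTLP_TRACES_ENDPOINT): the path is
	// used as-is, with "/" substituted when it is empty.
	perSignal, _ := url.Parse("https://collector.example.com:4318")
	p := perSignal.Path
	if p == "" {
		p = "/"
	}
	fmt.Println(p) // /
}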
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go new file mode 100644 index 000000000..45fefc4dd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -0,0 +1,348 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "path" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" + "go.opentelemetry.io/otel/internal/global" +) + +const ( + // DefaultTracesPath is a default URL path for endpoint that + // receives spans. + DefaultTracesPath string = "/v1/traces" + // DefaultTimeout is a default max waiting time for the backend to process + // each span batch. + DefaultTimeout time.Duration = 10 * time.Second +) + +type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + + SignalConfig struct { + Endpoint string + Insecure bool + TLSCfg *tls.Config + Headers map[string]string + Compression Compression + Timeout time.Duration + URLPath string + + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + Proxy HTTPTransportProxyFunc + } + + Config struct { + // Signal specific configurations + Traces SignalConfig + + RetryConfig retry.Config + + // gRPC configurations + ReconnectionPeriod time.Duration + ServiceConfig string + DialOptions []grpc.DialOption + GRPCConn *grpc.ClientConn + } +) + +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. 
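Note the ordering inside NewHTTPConfig below: environment configuration is applied first and explicit options afterwards, so programmatic options win. A hedged sketch of the observable effect (the environment value is hypothetical):

package main

import (
	"context"
	"log"
	"os"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	// Hypothetical: the environment asks for a 5s timeout...
	_ = os.Setenv("OTEL_EXPORTER_OTLP_TRACES_TIMEOUT", "5000")

	// ...but explicit options are applied after env configs, so the
	// exporter ends up with a 30s timeout.
	exp, err := otlptracehttp.New(context.Background(),
		otlptracehttp.WithTimeout(30*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}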
+func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + } + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg +} + +// cleanPath returns a path with all spaces trimmed and all redundancies +// removed. If urlPath is empty or cleaning it results in an empty string, +// defaultPath is returned instead. +func cleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." { + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} + +// NewGRPCConfig returns a new Config with all settings applied from opts and +// any unset setting using the default gRPC config values. +func NewGRPCConfig(opts ...GRPCOption) Config { + userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, + } + cfg = ApplyGRPCEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + + if cfg.ServiceConfig != "" { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) + } + // Prioritize GRPCCredentials over Insecure (passing both is an error). + if cfg.Traces.GRPCCredentials != nil { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) + } else if cfg.Traces.Insecure { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + // Default to using the host's root CA. + creds := credentials.NewTLS(nil) + cfg.Traces.GRPCCredentials = creds + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) + } + if cfg.Traces.Compression == GzipCompression { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) + } + if cfg.ReconnectionPeriod != 0 { + p := grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: cfg.ReconnectionPeriod, + } + cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) + } + + return cfg +} + +type ( + // GenericOption applies an option to the HTTP or gRPC driver. + GenericOption interface { + ApplyHTTPOption(Config) Config + ApplyGRPCOption(Config) Config + + // A private method to prevent users from implementing the + // interface, so that future additions to it will not + // violate compatibility. + private() + } + + // HTTPOption applies an option to the HTTP driver. + HTTPOption interface { + ApplyHTTPOption(Config) Config + + // A private method to prevent users from implementing the + // interface, so that future additions to it will not + // violate compatibility. + private() + } + + // GRPCOption applies an option to the gRPC driver. + GRPCOption interface { + ApplyGRPCOption(Config) Config + + // A private method to prevent users from implementing the + // interface, so that future additions to it will not + // violate compatibility.
+ private() + } +) + +// genericOption is an option that applies the same logic +// for both gRPC and HTTP. +type genericOption struct { + fn func(Config) Config +} + +func (g *genericOption) ApplyGRPCOption(cfg Config) Config { + return g.fn(cfg) +} + +func (g *genericOption) ApplyHTTPOption(cfg Config) Config { + return g.fn(cfg) +} + +func (genericOption) private() {} + +func newGenericOption(fn func(cfg Config) Config) GenericOption { + return &genericOption{fn: fn} +} + +// splitOption is an option that applies different logic +// for gRPC and HTTP. +type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. +type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Endpoint = endpoint + return cfg + }) +} + +func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlptrace: parse endpoint url", "url", v) + return cfg + } + + cfg.Traces.Endpoint = u.Host + cfg.Traces.URLPath = u.Path + if u.Scheme != "https" { + cfg.Traces.Insecure = true + } + + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Traces.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config)
Config { + cfg.Traces.Timeout = duration + return cfg + }) +} + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Proxy = pf + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go new file mode 100644 index 000000000..bc4db0595 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go @@ -0,0 +1,40 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" + +const ( + // DefaultCollectorGRPCPort is the default gRPC port of the collector. + DefaultCollectorGRPCPort uint16 = 4317 + // DefaultCollectorHTTPPort is the default HTTP port of the collector. + DefaultCollectorHTTPPort uint16 = 4318 + // DefaultCollectorHost is the host address the Exporter will attempt to + // connect to if no collector address is provided. + DefaultCollectorHost string = "localhost" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression int + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression Compression = iota + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression +) + +// Marshaler describes the kind of message format sent to the collector. +type Marshaler int + +const ( + // MarshalProto tells the driver to send using the protobuf binary format. + MarshalProto Marshaler = iota + // MarshalJSON tells the driver to send using JSON format. + MarshalJSON +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go new file mode 100644 index 000000000..dd6f12b22 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go @@ -0,0 +1,26 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" +) + +// CreateTLSConfig creates a tls.Config from raw certificate bytes +// to verify a server certificate.
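At the public API level, the PEM-to-tls.Config conversion that CreateTLSConfig performs below might be replicated like this sketch (the CA path is hypothetical), with the result handed to WithTLSClientConfig:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"log"
	"os"
)

func caTLSConfig(caFile string) (*tls.Config, error) {
	pem, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		return nil, errors.New("no certificates could be parsed from PEM")
	}
	return &tls.Config{RootCAs: pool}, nil
}

func main() {
	cfg, err := caTLSConfig("/etc/otel/ca.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // pass via otlptracehttp.WithTLSClientConfig(cfg)
}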
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + + return &tls.Config{ + RootCAs: cp, + }, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go new file mode 100644 index 000000000..9e04a9bc1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go @@ -0,0 +1,56 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/partialsuccess.go + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" + +import "fmt" + +// PartialSuccess represents the underlying error for all handling of +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +var _ error = PartialSuccess{} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +// Is supports the errors.Is() interface. +func (ps PartialSuccess) Is(err error) bool { + _, ok := err.(PartialSuccess) + return ok +} + +// TracePartialSuccessError returns an error describing a partial success +// response for the trace signal. +func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "spans", + } +} + +// MetricPartialSuccessError returns an error describing a partial success +// response for the metric signal. +func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "metric data points", + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go new file mode 100644 index 000000000..e3e647783 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go @@ -0,0 +1,145 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/retry/retry.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig is the recommended default configuration to use.
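Stepping back to the partial-success type above: rejected-span reports reach applications through the global OTel error handler, and errors.Is with a zero PartialSuccess value matches them. A sketch, assuming code that can import this internal package (only possible inside this module; external code would install the same handler and match on the formatted message instead):

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
)

func main() {
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		if errors.Is(err, internal.PartialSuccess{}) {
			log.Printf("collector rejected part of the batch: %v", err)
			return
		}
		log.Printf("otel error: %v", err)
	}))
}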
+var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to retry sending batches in case of export + // failure. + Enabled bool + // InitialInterval is the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`. + MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. +type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns whether an error is retry-able and whether an explicit +// throttle duration contained in the error should be honored. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains an +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried, based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). + b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried.
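For intuition about the retry loop above: with the defaults (5s initial interval, 1.5 multiplier, 30s cap) and jitter disabled, the deterministic backoff schedule can be printed directly with the same backoff package; the six-iteration cap only keeps the sketch bounded:

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := &backoff.ExponentialBackOff{
		InitialInterval:     5 * time.Second,
		RandomizationFactor: 0, // disable jitter so the output is deterministic
		Multiplier:          backoff.DefaultMultiplier, // 1.5
		MaxInterval:         30 * time.Second,
		MaxElapsedTime:      time.Minute,
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	b.Reset()
	for i := 0; i < 6; i++ {
		fmt.Println(b.NextBackOff()) // 5s, 7.5s, 11.25s, 16.875s, 25.3125s, 30s
	}
}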
+func wait(ctx context.Context, delay time.Duration) error {
+	timer := time.NewTimer(delay)
+	defer timer.Stop()
+
+	select {
+	case <-ctx.Done():
+		// Handle the case where the timer and context deadline end
+		// simultaneously by prioritizing the timer expiration nil value
+		// response.
+		select {
+		case <-timer.C:
+		default:
+			return ctx.Err()
+		}
+	case <-timer.C:
+	}
+
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
new file mode 100644
index 000000000..4b2758e4e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go
@@ -0,0 +1,148 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/url"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression otlpconfig.Compression
+
+// HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+// This type is compatible with http.Transport.Proxy and can be used to set a custom proxy function
+// to the OTLP HTTP client.
+type HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+const (
+	// NoCompression tells the driver to send payloads without
+	// compression.
+	NoCompression = Compression(otlpconfig.NoCompression)
+	// GzipCompression tells the driver to send payloads after
+	// compressing them with gzip.
+	GzipCompression = Compression(otlpconfig.GzipCompression)
+)
+
+// Option applies an option to the HTTP client.
+type Option interface {
+	applyHTTPOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
+	converted := make([]otlpconfig.HTTPOption, len(opts))
+	for i, o := range opts {
+		converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
+	}
+	return converted
+}
+
+// RetryConfig defines configuration for retrying batches in case of export
+// failure using an exponential backoff.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4318" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+	return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
+// WithCompression tells the driver to compress the sent data.
+func WithCompression(compression Compression) Option {
+	return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
+}
+
+// WithURLPath allows one to override the default URL path used
+// for sending traces. If unset, default ("/v1/traces") will be used.
+func WithURLPath(urlPath string) Option {
+	return wrappedOption{otlpconfig.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig can be used to set up a custom TLS
+// configuration for the client used to send payloads to the
+// collector. Use it if you want to use a custom certificate.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure tells the driver to connect to the collector using the
+// HTTP scheme, instead of HTTPS.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithHeaders allows one to tell the driver to send additional HTTP
+// headers with the payloads. Specifying headers like Content-Length,
+// Content-Encoding and Content-Type may result in a broken driver.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTimeout tells the driver the max waiting time for the backend to process
+// each batch of spans. If unset, the default will be 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry configures the retry policy for transient errors that may occur
+// when exporting traces. An exponential back-off algorithm is used to ensure
+// endpoints are not overwhelmed with retries. If unset, the default retry
+// policy will retry after 5 seconds and increase exponentially after each
+// error for a total of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
+}
+
+// WithProxy sets the Proxy function the client will use to determine the
+// proxy to use for an HTTP request. If this option is not used, the client
+// will use [http.ProxyFromEnvironment].
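Taken together, these options are consumed through the package's public New constructor. A minimal usage sketch; the endpoint value and retry settings are illustrative, assuming a collector reachable over plain HTTP at localhost:4318:

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()
	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("localhost:4318"), // assumed local collector
		otlptracehttp.WithInsecure(),                 // plain HTTP for local testing only
		otlptracehttp.WithTimeout(10*time.Second),
		otlptracehttp.WithRetry(otlptracehttp.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}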
+func WithProxy(pf HTTPTransportProxyFunc) Option { + return wrappedOption{otlpconfig.WithProxy(otlpconfig.HTTPTransportProxyFunc(pf))} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go new file mode 100644 index 000000000..14ad8c33b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +// Version is the current release version of the OpenTelemetry OTLP trace exporter in use. +func Version() string { + return "1.28.0" +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 000000000..93e80ea30 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 000000000..07623b679 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" +) + +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + +// Handle is a convenience function for GetErrorHandler().Handle(err). 
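A typical way to exercise these hooks from application code is otel.ErrorHandlerFunc, the function adapter from the public otel package (not shown in this diff), which satisfies the ErrorHandler interface. A short sketch that routes OTel-internal errors to the standard logger:

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Route all errors reported by OpenTelemetry components through the
	// standard logger instead of the default STDERR logging.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))

	// Anything passed to otel.Handle now reaches the handler above.
	otel.Handle(errors.New("example: collector unreachable"))
}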
+func Handle(err error) { global.GetErrorHandler().Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
new file mode 100644
index 000000000..822d84794
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package attribute provides several helper functions for some commonly used
+logic of processing attributes.
+*/
+package attribute // import "go.opentelemetry.io/otel/internal/attribute"
+
+import (
+	"reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+	reflect.Copy(cp, reflect.ValueOf(v))
+	return cp.Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
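The reason for this slice-to-array round trip is that arrays, unlike slices, are comparable in Go, so slice-valued attributes can be compared and deduplicated. The effect is observable through the public attribute package, as in this sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Slice-valued attributes are stored internally as comparable arrays...
	kv := attribute.StringSlice("hosts", []string{"a", "b"})

	// ...and converted back to slices on access.
	fmt.Println(kv.Value.AsStringSlice()) // [a b]

	// Comparability means two equal slice attributes compare equal.
	other := attribute.StringSlice("hosts", []string{"a", "b"})
	fmt.Println(kv.Value == other.Value) // true
}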
+func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 000000000..b4f85f44a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 000000000..3aea9c491 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. 
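Application code does not touch this internal list directly; it goes through the public go.opentelemetry.io/otel/baggage package, which keeps the internal state in sync via the hooks defined here. A typical round trip through the public API (the member names and values are illustrative):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Parse a W3C baggage header into structured members.
	b, err := baggage.Parse("user.id=42,region=eu-west")
	if err != nil {
		panic(err)
	}

	// Store it in the context and read it back elsewhere.
	ctx := baggage.ContextWithBaggage(context.Background(), b)
	fmt.Println(baggage.FromContext(ctx).Member("user.id").Value()) // 42
}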
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 000000000..4259f0320 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go new file mode 100644 index 000000000..c657ff8e7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "sync/atomic" +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. 
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) + +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 000000000..3a0cc42f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,412 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value // metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value // metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) + } +} + +type siHistogram struct { + embedded.Int64Histogram + + name string + opts []metric.Int64HistogramOption + + delegate atomic.Value // metric.Int64Histogram +} + +var _ metric.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
+ } +} + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 000000000..adbca7d34 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) + + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() + +// SetLogger sets the global Logger to l. +// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + globalLogger.Store(&l) +} + +// GetLogger returns the global logger. +func GetLogger() logr.Logger { + return *globalLogger.Load() +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + GetLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + GetLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + GetLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + GetLogger().V(1).Info(msg, keysAndValues...) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 000000000..e3db438a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,549 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "reflect" + "sync" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. 
+// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments map[instID]delegatedInstrument + + registry list.List + + delegate metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// instID are the identifying properties of a instrument. +type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + m.mtx.Lock() + defer m.mtx.Unlock() + + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + var n *list.Element + for e := m.registry.Front(); e != nil; e = n { + r := e.Value.(*registration) + r.setDelegate(meter) + n = e.Next() + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } + i := &siCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } + i := &siUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } + i := &siHistogram{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } + i := &aiCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } + i := &aiUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } + i := &aiGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } + i := &sfCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } + i := &sfUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } + i := &sfHistogram{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } + i := &afCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } + i := &afUpDownCounter{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } + i := &afGauge{name: name, opts: options} + m.instruments[id] = i + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + m.mtx.Lock() + defer m.mtx.Unlock() + + if m.delegate != nil { + insts = unwrapInstruments(insts) + return m.delegate.RegisterCallback(f, insts...) 
+	}
+
+	reg := &registration{instruments: insts, function: f}
+	e := m.registry.PushBack(reg)
+	reg.unreg = func() error {
+		m.mtx.Lock()
+		_ = m.registry.Remove(e)
+		m.mtx.Unlock()
+		return nil
+	}
+	return reg, nil
+}
+
+type wrapped interface {
+	unwrap() metric.Observable
+}
+
+func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
+	out := make([]metric.Observable, 0, len(instruments))
+
+	for _, inst := range instruments {
+		if in, ok := inst.(wrapped); ok {
+			out = append(out, in.unwrap())
+		} else {
+			out = append(out, inst)
+		}
+	}
+
+	return out
+}
+
+type registration struct {
+	embedded.Registration
+
+	instruments []metric.Observable
+	function    metric.Callback
+
+	unreg   func() error
+	unregMu sync.Mutex
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+	insts := unwrapInstruments(c.instruments)
+
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+
+	if c.unreg == nil {
+		// Unregister already called.
+		return
+	}
+
+	reg, err := m.RegisterCallback(c.function, insts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+
+	c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+	c.unregMu.Lock()
+	defer c.unregMu.Unlock()
+	if c.unreg == nil {
+		// Unregister already called.
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 000000000..38560ff99
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,71 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
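Once a concrete propagator is registered globally, this delegator forwards to it. A typical setup from the public API, injecting W3C trace context and baggage into outgoing HTTP headers:

package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register the W3C trace context and baggage propagators globally.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, propagation.Baggage{},
	))

	// Inject the current context into an outgoing request's headers. With no
	// active span or baggage in ctx nothing is written; with a recording
	// span, a traceparent header is added.
	req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
	otel.GetTextMapPropagator().Inject(context.Background(), propagation.HeaderCarrier(req.Header))
	fmt.Println(req.Header)
}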
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + p.effectiveDelegate().Inject(ctx, carrier) +} + +// Extract reads cross-cutting concerns from the carrier into a Context. +func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + return p.effectiveDelegate().Extract(ctx, carrier) +} + +// Fields returns the keys whose values are set with Inject. +func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 000000000..204ea142a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + errorHandlerHolder struct { + eh ErrorHandler + } + + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } + + meterProviderHolder struct { + mp metric.MeterProvider + } +) + +var ( + globalErrorHandler = defaultErrorHandler() + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + globalMeterProvider = defaultMeterProvider() + + delegateErrorHandlerOnce sync.Once + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once + delegateMeterOnce sync.Once +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + +// TracerProvider is the internal implementation for global.TracerProvider. +func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. 
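This delegation is what makes late SDK installation safe: a tracer obtained before the SDK is configured starts as a no-op and is upgraded in place. A sketch of that lifecycle, assuming the separate SDK module go.opentelemetry.io/otel/sdk is available:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// tracer is created before any SDK is installed, so it starts as a no-op.
var tracer = otel.Tracer("example/app")

func main() {
	ctx := context.Background()

	_, early := tracer.Start(ctx, "before-sdk")
	early.End() // no-op span: nothing is recorded

	// Installing a real provider upgrades the tracer above in place.
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)

	_, late := tracer.Start(ctx, "after-sdk")
	late.End() // recorded by the SDK provider
}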
+func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to its current value. No delegate will be configured", + ) + return + } + } + + delegateTraceOnce.Do(func() { + if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to its current value. No delegate will be configured", + ) + return + } + } + + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. + delegateTextMapPropagatorOnce.Do(func() { + if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. + globalPropagators.Store(propagatorsHolder{tm: p}) +} + +// MeterProvider is the internal implementation for global.MeterProvider. +func MeterProvider() metric.MeterProvider { + return globalMeterProvider.Load().(meterProviderHolder).mp +} + +// SetMeterProvider is the internal implementation for global.SetMeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to its current value. 
No delegate will be configured", + ) + return + } + } + + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { + def.setDelegate(mp) + } + }) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) +} + +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} + +func defaultMeterProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(meterProviderHolder{mp: &meterProvider{}}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 000000000..e31f442b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +/* +This file contains the forwarding implementation of the TracerProvider used as +the default global instance. Prior to initialization of an SDK, Tracers +returned by the global TracerProvider will provide no-op functionality. This +means that all Span created prior to initialization are no-op Spans. + +Once an SDK has been initialized, all provided no-op Tracers are swapped for +Tracers provided by the SDK defined TracerProvider. However, any Span started +prior to this initialization does not change its behavior. Meaning, the Span +remains a no-op Span. + +The implementation to track and swap Tracers locks all new Tracer creation +until the swap is complete. This assumes that this operation is not +performance-critical. If that assumption is incorrect, be sure to configure an +SDK prior to any Tracer creation. +*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + embedded.TracerProvider + + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) 
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the tracer in the tracers map.
+
+	c := trace.NewTracerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+		schema:  c.SchemaURL(),
+	}
+
+	if p.tracers == nil {
+		p.tracers = make(map[il]*tracer)
+	}
+
+	if val, ok := p.tracers[key]; ok {
+		return val
+	}
+
+	t := &tracer{name: name, opts: opts, provider: p}
+	p.tracers[key] = t
+	return t
+}
+
+type il struct{ name, version, schema string }
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+	embedded.Tracer
+
+	name     string
+	opts     []trace.TracerOption
+	provider *tracerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+	t.delegate.Store(provider.Tracer(t.name, t.opts...))
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set; otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	delegate := t.delegate.Load()
+	if delegate != nil {
+		return delegate.(trace.Tracer).Start(ctx, name, opts...)
+	}
+
+	s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
+	ctx = trace.ContextWithSpan(ctx, s)
+	return ctx, s
+}
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+	embedded.Span
+
+	sc     trace.SpanContext
+	tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
new file mode 100644
index 000000000..b2fe3e41d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+import (
+	"math"
+	"unsafe"
+)
+
+func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
+	if b {
+		return 1
+	}
+	return 0
+}
+
+func RawToBool(r uint64) bool {
+	return r != 0
+}
+
+func Int64ToRaw(i int64) uint64 {
+	// Assumes original was a valid int64 (overflow not checked).
+	return uint64(i) // nolint: gosec
+}
+
+func RawToInt64(r uint64) int64 {
+	// Assumes original was a valid int64 (overflow not checked).
+	return int64(r) // nolint: gosec
+}
+
+func Float64ToRaw(f float64) uint64 {
+	return math.Float64bits(f)
+}
+
+func RawToFloat64(r uint64) float64 {
+	return math.Float64frombits(r)
+}
+
+func RawPtrToFloat64Ptr(r *uint64) *float64 {
+	// Assumes original was a valid *float64 (overflow not checked).
+	return (*float64)(unsafe.Pointer(r)) // nolint: gosec
+}
+
+func RawPtrToInt64Ptr(r *uint64) *int64 {
+	// Assumes original was a valid *int64 (overflow not checked).
+	return (*int64)(unsafe.Pointer(r)) // nolint: gosec
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 000000000..6de7f2e4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"github.com/go-logr/logr"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally by OpenTelemetry.
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 000000000..1e6473b32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, an implementation-defined default name is used
+// instead.
+//
+// If this is called before a global MeterProvider is registered, the returned
+// Meter will be a No-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+	return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a No-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+	return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	global.SetMeterProvider(mp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
new file mode 100644
index 000000000..0cf902e01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/README.md
@@ -0,0 +1,3 @@
+# Metric API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 000000000..f8435d8f2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,260 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Observable
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableCounter
+
+	Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+	var config Float64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+	applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableUpDownCounter
+
+	Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
+	var config Float64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+	applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableGauge
+
+	Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+	var config Float64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+	applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. That is, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithFloat64Callback adds callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 000000000..e079aaef1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,258 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. That is, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface { + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption +} + +type int64CallbackOpt struct { + cback Int64Callback +} + +func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 000000000..d9e3b13e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. 
+func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 000000000..f153745b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. 
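+
+As an illustrative sketch only (the meter name, instrument name, unit, and
+attribute below are hypothetical, and ctx is assumed to be an existing
+context.Context), a synchronous counter could be created and used like so:
+
+	meter := otel.Meter("example.com/instrumentation")
+	requests, err := meter.Int64Counter(
+		"requests.processed",
+		metric.WithDescription("Number of processed requests."),
+		metric.WithUnit("{request}"),
+	)
+	if err != nil {
+		// Handle the instrument creation error.
+	}
+	// Record one processed request, attributed with its status.
+	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("status", "ok")))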
+
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+  - Not be empty.
+  - Have an alphabetic character as their first letter.
+  - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
+    ‘-’, or ‘/’.
+  - Have a maximum length of 255 letters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will
+validate these names; it is the caller's responsibility to ensure compliance.
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter, which ensures the callback is called
+once per collection cycle. A callback can be registered in two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter]
+that created the instrument to register a [Callback].
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a
+panic, they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 000000000..1f6e0efa7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 000000000..1a9dc6809
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,243 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 000000000..a535782e1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64GaugeOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64GaugeOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) 
Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. +func WithDescription(desc string) InstrumentOption { return descOpt(desc) } + +type unitOpt string + +func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.unit = string(o) + return c +} + +func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.unit = string(o) + return c +} + +// WithUnit sets the instrument unit. +// +// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. +func WithUnit(u string) InstrumentOption { return unitOpt(u) } + +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +// AddOption applies options to an addition measurement. See +// [MeasurementOption] for other options that can be used as an AddOption. +type AddOption interface { + applyAdd(AddConfig) AddConfig +} + +// AddConfig contains options for an addition measurement. +type AddConfig struct { + attrs attribute.Set +} + +// NewAddConfig returns a new [AddConfig] with all opts applied. 
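The WithDescription and WithUnit options above fold into the per-instrument configs defined later in this file. A minimal usage sketch, assuming the global otel.Meter accessor; the scope and instrument names ("github.com/example/app", "app.request.count") are illustrative, not part of this patch:

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// newRequestCounter is a hypothetical helper showing how instrument
// options are passed at creation time.
func newRequestCounter() (metric.Int64Counter, error) {
	meter := otel.Meter("github.com/example/app")
	return meter.Int64Counter(
		"app.request.count",
		metric.WithDescription("Number of requests handled."),
		metric.WithUnit("{request}"), // UCUM case-sensitive code, per WithUnit's doc
	)
}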
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
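Given the merge semantics just described (later duplicate keys win), a short sketch of how measurement attributes combine; ctx and counter are assumed to exist in the caller:

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func addWithAttrs(ctx context.Context, counter metric.Int64Counter) {
	// Duplicate keys across options resolve to the last value passed: peer=b.
	counter.Add(ctx, 1,
		metric.WithAttributes(attribute.String("peer", "a")),
		metric.WithAttributes(attribute.String("peer", "b")),
	)

	// For hot paths, build the Set once and reuse it via WithAttributeSet,
	// which avoids the defensive copy WithAttributes makes.
	attrs := attribute.NewSet(attribute.String("peer", "b"))
	counter.Add(ctx, 1, metric.WithAttributeSet(attrs))
}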
+func WithAttributeSet(attributes attribute.Set) MeasurementOption { + return attrOpt{set: attributes} +} + +// WithAttributes converts attributes into an attribute Set and sets the Set to +// be associated with a measurement. This is shorthand for: +// +// cp := make([]attribute.KeyValue, len(attributes)) +// copy(cp, attributes) +// WithAttributeSet(attribute.NewSet(cp...)) +// +// [attribute.NewSet] may modify the passed attributes so this will make a copy +// of attributes before creating a set in order to ensure this function is +// concurrent safe. This makes this option function less optimized in +// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be +// preferred for performance sensitive code. +// +// See [WithAttributeSet] for information about how multiple WithAttributes are +// merged. +func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { + cp := make([]attribute.KeyValue, len(attributes)) + copy(cp, attributes) + return attrOpt{set: attribute.NewSet(cp...)} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 000000000..14e08c24a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,278 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or package. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type MeterProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.MeterProvider + + // Meter returns a new Meter with the provided name and configuration. + // + // A Meter should be scoped at most to a single package. The name needs to + // be unique so it does not collide with other names used by + // an application, nor other applications. To achieve this, the import path + // of the instrumentation package is recommended to be used as name. + // + // If the name is empty, then an implementation defined default name will + // be used instead. + Meter(name string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Meter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Meter + + // Int64Counter returns a new Int64Counter instrument identified by name + // and configured with options. The instrument is used to synchronously + // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. 
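A sketch of the naming guidance above: scope a Meter to a single package and use that package's import path as its name. The provider variable and the path are illustrative:

import "go.opentelemetry.io/otel/metric"

// newStoreMeter is a hypothetical helper; mp is whatever MeterProvider the
// application configured (an SDK provider, or the noop provider below).
func newStoreMeter(mp metric.MeterProvider) metric.Meter {
	// The instrumented package's import path is the recommended unique name.
	return mp.Meter("github.com/example/app/internal/store")
}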
+	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
+	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record int64 measurements during a computational
+	// operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
+	// Int64Gauge returns a new Int64Gauge instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// instantaneous int64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
+	// Float64Gauge returns a new Float64Gauge instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// instantaneous float64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
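The observable instruments above only record through callbacks. A hedged sketch pairing an Int64ObservableGauge with the RegisterCallback method documented further down in this interface; the instrument name is illustrative. Calling Unregister on the returned Registration stops the observations:

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric"
)

func observeGoroutines(meter metric.Meter) (metric.Registration, error) {
	g, err := meter.Int64ObservableGauge("process.runtime.go.goroutines")
	if err != nil {
		return nil, err
	}
	// The callback runs once per collection cycle and may only observe
	// the instruments it is registered with (g here).
	return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(g, int64(runtime.NumGoroutine()))
		return nil
	}, g)
}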
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it must not report measurements for an instrument with
+// the same attributes as another Callback reports.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 000000000..bb8969435
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 000000000..ca6fcbdc0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ metric.MeterProvider                  = MeterProvider{}
+	_ metric.Meter                          = Meter{}
+	_ metric.Observer                       = Observer{}
+	_ metric.Registration                   = Registration{}
+	_ metric.Int64Counter                   = Int64Counter{}
+	_ metric.Float64Counter                 = Float64Counter{}
+	_ metric.Int64UpDownCounter             = Int64UpDownCounter{}
+	_ metric.Float64UpDownCounter           = Float64UpDownCounter{}
+	_ metric.Int64Histogram                 = Int64Histogram{}
+	_ metric.Float64Histogram               = Float64Histogram{}
+	_ metric.Int64Gauge                     = Int64Gauge{}
+	_ metric.Float64Gauge                   = Float64Gauge{}
+	_ metric.Int64ObservableCounter         = Int64ObservableCounter{}
+	_ metric.Float64ObservableCounter       = Float64ObservableCounter{}
+	_ metric.Int64ObservableGauge           = Int64ObservableGauge{}
+	_ metric.Float64ObservableGauge         = Float64ObservableGauge{}
+	_ metric.Int64ObservableUpDownCounter   = Int64ObservableUpDownCounter{}
+	_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+	_ metric.Int64Observer                  = Int64Observer{}
+	_ metric.Float64Observer                = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+	return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+	return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+	return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
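Per the package comment above, installing this provider effectively disables metrics. A minimal sketch using the global setter from the otel package:

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func disableMetrics() {
	// Every Meter obtained through otel.Meter afterwards is a no-op.
	otel.SetMeterProvider(noop.NewMeterProvider())
}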
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements
+// that produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+	return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+	return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. 
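Because each noop type embeds the corresponding embedded.* interface, the types in this file can themselves be embedded to build partial implementations that keep compiling as the API grows, which is the compile-error mechanism the embedded package describes. A hedged sketch; loggingMeter is hypothetical:

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// loggingMeter overrides a single method and inherits no-op behavior for
// everything else from the embedded noop.Meter.
type loggingMeter struct {
	noop.Meter
}

func (m loggingMeter) Int64Counter(name string, opts ...metric.Int64CounterOption) (metric.Int64Counter, error) {
	fmt.Println("creating counter:", name)
	return m.Meter.Int64Counter(name, opts...)
}

var _ metric.Meter = loggingMeter{}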
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go new file mode 100644 index 000000000..8403a4bad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64CounterConfig contains options for synchronous counter instruments that +// record float64 values. +type Float64CounterConfig struct { + description string + unit string +} + +// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts +// applied. +func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { + var config Float64CounterConfig + for _, o := range opts { + config = o.applyFloat64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64CounterConfig) Unit() string { + return c.unit +} + +// Float64CounterOption applies options to a [Float64CounterConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64CounterOption. +type Float64CounterOption interface { + applyFloat64Counter(Float64CounterConfig) Float64CounterConfig +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64UpDownCounterConfig contains options for synchronous counter +// instruments that record float64 values. +type Float64UpDownCounterConfig struct { + description string + unit string +} + +// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] +// with all opts applied. 
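The Config/Option pairs in this file follow one pattern: callers pass options to the Meter, and an implementation resolves them through the New*Config constructors. A hedged sketch of the implementation side; newCounterMetadata is a hypothetical helper:

import "go.opentelemetry.io/otel/metric"

// newCounterMetadata folds raw options into an immutable config and reads
// the resolved description and unit back out.
func newCounterMetadata(opts []metric.Float64CounterOption) (desc, unit string) {
	cfg := metric.NewFloat64CounterConfig(opts...)
	return cfg.Description(), cfg.Unit()
}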
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { + var config Float64UpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Float64UpDownCounterOption applies options to a +// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that +// can be used as a Float64UpDownCounterOption. +type Float64UpDownCounterOption interface { + applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr float64, options ...RecordOption) +} + +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. +type Float64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all +// opts applied. +func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { + var config Float64HistogramConfig + for _, o := range opts { + config = o.applyFloat64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Float64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Float64HistogramOption applies options to a [Float64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64HistogramOption. +type Float64HistogramOption interface { + applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig +} + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. 
+ // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go new file mode 100644 index 000000000..783fdfba7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Counter is an instrument that records increasing int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64CounterConfig contains options for synchronous counter instruments that +// record int64 values. +type Int64CounterConfig struct { + description string + unit string +} + +// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts +// applied. +func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { + var config Int64CounterConfig + for _, o := range opts { + config = o.applyInt64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64CounterConfig) Unit() string { + return c.unit +} + +// Int64CounterOption applies options to a [Int64CounterConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64CounterOption. +type Int64CounterOption interface { + applyInt64Counter(Int64CounterConfig) Int64CounterConfig +} + +// Int64UpDownCounter is an instrument that records increasing or decreasing +// int64 values. 
+// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr int64, options ...AddOption) +} + +// Int64UpDownCounterConfig contains options for synchronous counter +// instruments that record int64 values. +type Int64UpDownCounterConfig struct { + description string + unit string +} + +// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with +// all opts applied. +func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { + var config Int64UpDownCounterConfig + for _, o := range opts { + config = o.applyInt64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. +// See [InstrumentOption] for other options that can be used as an +// Int64UpDownCounterOption. +type Int64UpDownCounterOption interface { + applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig +} + +// Int64Histogram is an instrument that records a distribution of int64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr int64, options ...RecordOption) +} + +// Int64HistogramConfig contains options for synchronous histogram instruments +// that record int64 values. +type Int64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts +// applied. +func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { + var config Int64HistogramConfig + for _, o := range opts { + config = o.applyInt64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Int64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. 
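Note that explicit bucket boundaries are float64 values even for int64 histograms, and they are advisory per WithExplicitBucketBoundaries. A hedged usage sketch; the instrument name and boundaries are illustrative, and in practice the instrument would be created once and reused rather than per call:

import (
	"context"

	"go.opentelemetry.io/otel/metric"
)

func recordPayloadSize(ctx context.Context, meter metric.Meter, n int64) error {
	h, err := meter.Int64Histogram(
		"app.payload.size",
		metric.WithUnit("By"),
		metric.WithExplicitBucketBoundaries(1024, 4096, 16384, 65536),
	)
	if err != nil {
		return err
	}
	h.Record(ctx, n)
	return nil
}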
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Int64HistogramOption applies options to a [Int64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64HistogramOption. +type Int64HistogramOption interface { + applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig +} + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 000000000..2fd949733 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/propagation" +) + +// GetTextMapPropagator returns the global TextMapPropagator. If none has been +// set, a No-Op TextMapPropagator is returned. +func GetTextMapPropagator() propagation.TextMapPropagator { + return global.TextMapPropagator() +} + +// SetTextMapPropagator sets propagator as the global TextMapPropagator. 
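A common setup sketch for these global accessors: combine W3C Trace Context and Baggage into one composite propagator. NewCompositeTextMapPropagator and TraceContext are part of the propagation package vendored below, though not shown in this excerpt:

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func installPropagators() {
	// Trace headers and baggage then propagate together through the one
	// global propagator.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))
}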
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 000000000..e2959ac74
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 000000000..552263ba7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 000000000..33a3baf15
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 000000000..8c8286aab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,142 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 000000000..6870e316d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+	delimiter         = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/).
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject injects the trace context from ctx into carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+	if !extractPart(scc.TraceID[:], &h, 32) {
+		return trace.SpanContext{}
+	}
+	if !extractPart(scc.SpanID[:], &h, 16) {
+		return trace.SpanContext{}
+	}
+
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow extra data after the flags field, nor
+		// flag values other than the supported ones.
+		return trace.SpanContext{}
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode decodes unsupported upper-case characters, so exclude explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
+
+// Fields returns the keys whose values are set with Inject.
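To make the parsing rules above concrete, a hedged sketch (with illustrative IDs) showing a version 00 traceparent being extracted, and upper-case hex being rejected as extractPart requires:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// traceparent layout: version-traceID-spanID-flags, lower-case hex only.
	carrier := propagation.MapCarrier{
		"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
	}
	ctx := propagation.TraceContext{}.Extract(context.Background(), carrier)
	sc := trace.SpanContextFromContext(ctx)
	fmt.Println(sc.TraceID(), sc.SpanID(), sc.IsSampled()) // valid remote context, sampled

	// Upper-case hex fails extraction; the returned context carries no span.
	bad := propagation.MapCarrier{
		"traceparent": "00-4BF92F3577B34DA6A3CE929D0E0E4736-00f067aa0ba902b7-01",
	}
	ctx = propagation.TraceContext{}.Extract(context.Background(), bad)
	fmt.Println(trace.SpanContextFromContext(ctx).IsValid()) // false
}
```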
+func (tc TraceContext) Fields() []string { + return []string{traceparentHeader, tracestateHeader} +} diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 000000000..0a29a2f13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,32 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt new file mode 100644 index 000000000..ab09daf9d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -0,0 +1 @@ +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
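The files that follow vendor the SDK's instrumentation package. As a hedged sketch of the Scope type it introduces, the name, version, and schema URL below are illustrative values, not part of this change:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/instrumentation"
)

func main() {
	// Scope identifies the instrumentation library that produced telemetry;
	// the SDK attaches it to every exported record.
	scope := instrumentation.Scope{
		Name:      "example.com/mylib", // hypothetical Go package name
		Version:   "v0.1.0",
		SchemaURL: "https://opentelemetry.io/schemas/1.26.0",
	}
	fmt.Printf("%+v\n", scope)
}
```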
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 000000000..f81b1576a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 000000000..06e6d8685 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 000000000..a4faa6a03 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 000000000..f2cdf3c65 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// +// Deprecated: use [Scope] instead. +type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 000000000..728115045 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. 
+ SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go new file mode 100644 index 000000000..07923ed8d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -0,0 +1,166 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package env // import "go.opentelemetry.io/otel/sdk/internal/env" + +import ( + "os" + "strconv" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). + BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). + BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). + BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // BatchSpanProcessorMaxQueueSize. + BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute per span + // event count. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute per span + // link count. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" +) + +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. +func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value := os.Getenv(key) + if value == "" { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. 
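Since this env package is internal and cannot be imported, here is a hedged sketch that mirrors the firstInt precedence used by SpanAttributeCount further below: the first key that is set wins, and an unparsable value falls back to the default:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstInt mirrors the vendored helper: the first set key wins; a value that
// does not parse as an int immediately falls back to defaultValue.
func firstInt(defaultValue int, keys ...string) int {
	for _, key := range keys {
		v := os.Getenv(key)
		if v == "" {
			continue
		}
		n, err := strconv.Atoi(v)
		if err != nil {
			return defaultValue
		}
		return n
	}
	return defaultValue
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "64")
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "32")

	// The span-specific limit shadows the generic attribute limit.
	fmt.Println(firstInt(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")) // 32
}
```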
+func IntEnvOr(key string, defaultValue int) int { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchSpanProcessorScheduleDelay returns the environment variable value for +// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) +} + +// BatchSpanProcessorExportTimeout returns the environment variable value for +// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) +} + +// BatchSpanProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. +func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) +} + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. +func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. 
+func SpanLinkAttributeCount(defaultValue int) int {
+	return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 000000000..fab61647c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backward-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions be added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+The value set must be the case-insensitive string of `"true"` to enable the feature.
+All other values are ignored.
+
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied by a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 000000000..68d296cbe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+	"os"
+	"strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
+	if strings.ToLower(v) == "true" {
+		return v, true
+	}
+	return "", false
+})
+
+// Feature is an experimental feature control flag.
It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md new file mode 100644 index 000000000..017f072a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md @@ -0,0 +1,3 @@ +# Metric SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go new file mode 100644 index 000000000..e6f5cfb2a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "errors" + "fmt" + "slices" +) + +// errAgg is wrapped by misconfigured aggregations. 
+var errAgg = errors.New("aggregation")
+
+// Aggregation is the aggregation used to summarize recorded measurements.
+type Aggregation interface {
+	// copy returns a deep copy of the Aggregation.
+	copy() Aggregation
+
+	// err returns an error for any misconfigured Aggregation.
+	err() error
+}
+
+// AggregationDrop is an Aggregation that drops all recorded data.
+type AggregationDrop struct{} // AggregationDrop has no parameters.
+
+var _ Aggregation = AggregationDrop{}
+
+// copy returns a deep copy of d.
+func (d AggregationDrop) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A drop aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDrop) err() error { return nil }
+
+// AggregationDefault is an Aggregation that uses the default instrument kind selection
+// mapping to select another Aggregation. A metric reader can be configured to
+// make an aggregation selection based on instrument kind that differs from
+// the default. This Aggregation ensures the default is used.
+//
+// See the [DefaultAggregationSelector] for information about the default
+// instrument kind selection mapping.
+type AggregationDefault struct{} // AggregationDefault has no parameters.
+
+var _ Aggregation = AggregationDefault{}
+
+// copy returns a deep copy of d.
+func (d AggregationDefault) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A default aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDefault) err() error { return nil }
+
+// AggregationSum is an Aggregation that summarizes a set of measurements as their
+// arithmetic sum.
+type AggregationSum struct{} // AggregationSum has no parameters.
+
+var _ Aggregation = AggregationSum{}
+
+// copy returns a deep copy of s.
+func (s AggregationSum) copy() Aggregation { return s }
+
+// err returns an error for any misconfiguration. A sum aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationSum) err() error { return nil }
+
+// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
+// last one made.
+type AggregationLastValue struct{} // AggregationLastValue has no parameters.
+
+var _ Aggregation = AggregationLastValue{}
+
+// copy returns a deep copy of l.
+func (l AggregationLastValue) copy() Aggregation { return l }
+
+// err returns an error for any misconfiguration. A last-value aggregation has
+// no parameters and cannot be misconfigured, therefore this always returns
+// nil.
+func (AggregationLastValue) err() error { return nil }
+
+// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of
+// measurements as a histogram with explicitly defined buckets.
+type AggregationExplicitBucketHistogram struct {
+	// Boundaries are the increasing bucket boundary values. Boundary values
+	// define bucket upper bounds. Buckets are exclusive of their lower
+	// boundary and inclusive of their upper bound (except at positive
+	// infinity). A measurement is defined to fall into the greatest-numbered
+	// bucket with a boundary that is greater than or equal to the
+	// measurement.
As an example, boundaries defined as:
+	//
+	//	[]float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
+	//
+	// Will define these buckets:
+	//
+	//	(-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
+	//	(50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
+	//	(500.0, 1000.0], (1000.0, +∞)
+	Boundaries []float64
+	// NoMinMax indicates whether to skip recording the min and max of the
+	// distribution. By default, these extrema are recorded.
+	//
+	// Recording these extrema for cumulative data is expected to have little
+	// value; they will represent the entire life of the instrument instead of
+	// just the current collection cycle. It is recommended to set this to true
+	// for that type of data to avoid computing the low-value extrema.
+	NoMinMax bool
+}
+
+var _ Aggregation = AggregationExplicitBucketHistogram{}
+
+// errHist is returned by misconfigured ExplicitBucketHistograms.
+var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
+
+// err returns an error for any misconfiguration.
+func (h AggregationExplicitBucketHistogram) err() error {
+	if len(h.Boundaries) <= 1 {
+		return nil
+	}
+
+	// Check boundaries are monotonic.
+	i := h.Boundaries[0]
+	for _, j := range h.Boundaries[1:] {
+		if i >= j {
+			return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
+		}
+		i = j
+	}
+
+	return nil
+}
+
+// copy returns a deep copy of h.
+func (h AggregationExplicitBucketHistogram) copy() Aggregation {
+	return AggregationExplicitBucketHistogram{
+		Boundaries: slices.Clone(h.Boundaries),
+		NoMinMax:   h.NoMinMax,
+	}
+}
+
+// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of
+// measurements as a histogram with bucket widths that grow exponentially.
+type AggregationBase2ExponentialHistogram struct {
+	// MaxSize is the maximum number of buckets to use for the histogram.
+	MaxSize int32
+	// MaxScale is the maximum resolution scale to use for the histogram.
+	//
+	// MaxScale has a maximum value of 20. Using a value of 20 means the
+	// maximum number of buckets that can fit within the range of a
+	// signed 32-bit integer index could be used.
+	//
+	// MaxScale has a minimum value of -10. Using a value of -10 means only
+	// two buckets will be used.
+	MaxScale int32
+
+	// NoMinMax indicates whether to skip recording the min and max of the
+	// distribution. By default, these extrema are recorded.
+	//
+	// Recording these extrema for cumulative data is expected to have little
+	// value; they will represent the entire life of the instrument instead of
+	// just the current collection cycle. It is recommended to set this to true
+	// for that type of data to avoid computing the low-value extrema.
+	NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+	return e
+}
+
+const (
+	expoMaxScale = 20
+	expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
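A hedged sketch of how these aggregations are typically handed to the SDK, routing all histogram instruments through explicit buckets via a View (the bucket boundaries here are illustrative):

```go
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Reroute every histogram instrument to millisecond-oriented buckets;
	// NoMinMax skips the extrema, as recommended for cumulative data.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Kind: sdkmetric.InstrumentKindHistogram},
		sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
			Boundaries: []float64{1, 5, 10, 25, 50, 100, 250},
			NoMinMax:   true,
		}},
	)

	_ = sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
}
```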
+// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of
+// measurements as a histogram with explicitly defined buckets.
+type AggregationExplicitBucketHistogram struct {
+    // Boundaries are the increasing bucket boundary values. Boundary values
+    // define bucket upper bounds. Buckets are exclusive of their lower
+    // boundary and inclusive of their upper bound (except at positive
+    // infinity). A measurement is defined to fall into the greatest-numbered
+    // bucket with a boundary that is greater than or equal to the
+    // measurement. As an example, boundaries defined as:
+    //
+    // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
+    //
+    // Will define these buckets:
+    //
+    // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
+    // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
+    // (500.0, 1000.0], (1000.0, +∞)
+    Boundaries []float64
+    // NoMinMax indicates whether to not record the min and max of the
+    // distribution. By default, these extrema are recorded.
+    //
+    // Recording these extrema for cumulative data is expected to have little
+    // value; they will represent the entire life of the instrument instead of
+    // just the current collection cycle. It is recommended to set this to true
+    // for that type of data to avoid computing the low-value extrema.
+    NoMinMax bool
+}
+
+var _ Aggregation = AggregationExplicitBucketHistogram{}
+
+// errHist is returned by misconfigured ExplicitBucketHistograms.
+var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
+
+// err returns an error for any misconfiguration.
+func (h AggregationExplicitBucketHistogram) err() error {
+    if len(h.Boundaries) <= 1 {
+        return nil
+    }
+
+    // Check boundaries are monotonic.
+    i := h.Boundaries[0]
+    for _, j := range h.Boundaries[1:] {
+        if i >= j {
+            return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
+        }
+        i = j
+    }
+
+    return nil
+}
+
+// copy returns a deep copy of h.
+func (h AggregationExplicitBucketHistogram) copy() Aggregation {
+    return AggregationExplicitBucketHistogram{
+        Boundaries: slices.Clone(h.Boundaries),
+        NoMinMax:   h.NoMinMax,
+    }
+}
+
+// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of
+// measurements as a histogram with bucket widths that grow exponentially.
+type AggregationBase2ExponentialHistogram struct {
+    // MaxSize is the maximum number of buckets to use for the histogram.
+    MaxSize int32
+    // MaxScale is the maximum resolution scale to use for the histogram.
+    //
+    // MaxScale has a maximum value of 20. Using a value of 20 means the
+    // maximum number of buckets that can fit within the range of a
+    // signed 32-bit integer index could be used.
+    //
+    // MaxScale has a minimum value of -10. Using a value of -10 means only
+    // two buckets will be used.
+    MaxScale int32
+
+    // NoMinMax indicates whether to not record the min and max of the
+    // distribution. By default, these extrema are recorded.
+    //
+    // Recording these extrema for cumulative data is expected to have little
+    // value; they will represent the entire life of the instrument instead of
+    // just the current collection cycle. It is recommended to set this to true
+    // for that type of data to avoid computing the low-value extrema.
+    NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+    return e
+}
+
+const (
+    expoMaxScale = 20
+    expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
+func (e AggregationBase2ExponentialHistogram) err() error {
+    if e.MaxScale > expoMaxScale {
+        return fmt.Errorf("%w: max scale %d is greater than maximum scale %d", errExpoHist, e.MaxScale, expoMaxScale)
+    }
+    if e.MaxSize <= 0 {
+        return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
+    }
+    return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
new file mode 100644
index 000000000..63b88f086
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+    "sync"
+)
+
+// cache is a locking storage used to quickly return already computed values.
+//
+// The zero value of a cache is empty and ready to use.
+//
+// A cache must not be copied after first use.
+//
+// All methods of a cache are safe to call concurrently.
+type cache[K comparable, V any] struct {
+    sync.Mutex
+    data map[K]V
+}
+
+// Lookup returns the value stored in the cache with the associated key if it
+// exists. Otherwise, f is called and its returned value is set in the cache
+// for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cache lock, so f
+// should not block excessively.
+func (c *cache[K, V]) Lookup(key K, f func() V) V {
+    c.Lock()
+    defer c.Unlock()
+
+    if c.data == nil {
+        val := f()
+        c.data = map[K]V{key: val}
+        return val
+    }
+    if v, ok := c.data[key]; ok {
+        return v
+    }
+    val := f()
+    c.data[key] = val
+    return val
+}
+
+// HasKey returns true if Lookup has previously been called with that key.
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+    c.Lock()
+    defer c.Unlock()
+    _, ok := c.data[key]
+    return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+    cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+    val V
+    err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
+    combined := c.cache.Lookup(key, func() valAndErr[V] {
+        val, err := f()
+        return valAndErr[V]{val: val, err: err}
+    })
+    return combined.val, combined.err
+}
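[Editor's note: a small sketch of the Lookup contract above (in-package, hypothetical usage) — the compute function runs at most once per key, and its result is memoized:]

    var c cache[string, int]

    calls := 0
    v := c.Lookup("meter-a", func() int { calls++; return 42 })
    v = c.Lookup("meter-a", func() int { calls++; return 7 }) // f not called; cached 42 returned
    // v == 42, calls == 1, c.HasKey("meter-a") == true
    _ = v
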
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
new file mode 100644
index 000000000..bbe7bf671
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -0,0 +1,137 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+    "context"
+    "fmt"
+    "sync"
+
+    "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// config contains configuration options for a MeterProvider.
+type config struct {
+    res     *resource.Resource
+    readers []Reader
+    views   []View
+}
+
+// readerSignals returns a force-flush and shutdown function for a
+// MeterProvider to call in their respective options. All Readers c contains
+// will have their force-flush and shutdown methods unified into single
+// returned functions.
+func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
+    var fFuncs, sFuncs []func(context.Context) error
+    for _, r := range c.readers {
+        sFuncs = append(sFuncs, r.Shutdown)
+        if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
+            fFuncs = append(fFuncs, f.ForceFlush)
+        }
+    }
+
+    return unify(fFuncs), unifyShutdown(sFuncs)
+}
+
+// unify unifies calling all of funcs into a single function call. All errors
+// returned from calls to funcs will be unified into a single error return
+// value.
+func unify(funcs []func(context.Context) error) func(context.Context) error {
+    return func(ctx context.Context) error {
+        var errs []error
+        for _, f := range funcs {
+            if err := f(ctx); err != nil {
+                errs = append(errs, err)
+            }
+        }
+        return unifyErrors(errs)
+    }
+}
+
+// unifyErrors combines multiple errors into a single error.
+func unifyErrors(errs []error) error {
+    switch len(errs) {
+    case 0:
+        return nil
+    case 1:
+        return errs[0]
+    default:
+        return fmt.Errorf("%v", errs)
+    }
+}
+
+// unifyShutdown unifies calling all of funcs once for a shutdown. If called
+// more than once, an ErrReaderShutdown error is returned.
+func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
+    f := unify(funcs)
+    var once sync.Once
+    return func(ctx context.Context) error {
+        err := ErrReaderShutdown
+        once.Do(func() { err = f(ctx) })
+        return err
+    }
+}
+
+// newConfig returns a config configured with options.
+func newConfig(options []Option) config {
+    conf := config{res: resource.Default()}
+    for _, o := range options {
+        conf = o.apply(conf)
+    }
+    return conf
+}
+
+// Option applies a configuration option value to a MeterProvider.
+type Option interface {
+    apply(config) config
+}
+
+// optionFunc applies a set of options to a config.
+type optionFunc func(config) config
+
+// apply returns a config with option(s) applied.
+func (o optionFunc) apply(conf config) config {
+    return o(conf)
+}
+
+// WithResource associates a Resource with a MeterProvider. This Resource
+// represents the entity producing telemetry and is associated with all Meters
+// the MeterProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) Option {
+    return optionFunc(func(conf config) config {
+        conf.res = res
+        return conf
+    })
+}
+
+// WithReader associates Reader r with a MeterProvider.
+//
+// By default, if this option is not used, the MeterProvider will perform no
+// operations; no data will be exported without a Reader.
+func WithReader(r Reader) Option {
+    return optionFunc(func(cfg config) config {
+        if r == nil {
+            return cfg
+        }
+        cfg.readers = append(cfg.readers, r)
+        return cfg
+    })
+}
+
+// WithView associates views with a MeterProvider.
+//
+// Views are appended to existing ones in a MeterProvider if this option is
+// used multiple times.
+//
+// By default, if this option is not used, the MeterProvider will use the
+// default view.
+func WithView(views ...View) Option {
+    return optionFunc(func(cfg config) config {
+        cfg.views = append(cfg.views, views...)
+        return cfg
+    })
+}
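[Editor's note: a hedged sketch connecting WithView to the Aggregation types defined earlier in this diff; the instrument name and bucket boundaries are illustrative, not part of the vendored code:]

    import sdkmetric "go.opentelemetry.io/otel/sdk/metric"

    // Override the bucket boundaries of a hypothetical duration histogram.
    histView := sdkmetric.NewView(
        sdkmetric.Instrument{Name: "http.server.duration"},
        sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
            Boundaries: []float64{0, 5, 10, 25, 50, 100, 250, 500, 1000},
        }},
    )
    mp := sdkmetric.NewMeterProvider(sdkmetric.WithView(histView))
    _ = mp
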
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
new file mode 100644
index 000000000..90a4ae16c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package metric provides an implementation of the OpenTelemetry metrics SDK.
+//
+// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
+// about the concept of OpenTelemetry metrics and
+// https://opentelemetry.io/docs/concepts/components/ for more information
+// about OpenTelemetry SDKs.
+//
+// The entry point for the metric package is the MeterProvider. It is the
+// object that all API calls use to create Meters, instruments, and ultimately
+// make metric measurements. Also, it is an object that should be used to
+// control the life-cycle (start, flush, and shutdown) of the SDK.
+//
+// A MeterProvider needs to be configured to export the measured data; this is
+// done by configuring it with a Reader implementation (using the WithReader
+// MeterProviderOption). Readers take two forms: ones that push to an endpoint
+// (NewPeriodicReader), and ones that an endpoint pulls from. See
+// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
+// or with these Readers.
+//
+// Each Reader, when registered with the MeterProvider, can be augmented with a
+// View. Views allow users that run OpenTelemetry instrumented code to modify
+// the generated data of that instrumentation.
+//
+// The data generated by a MeterProvider needs to include information about its
+// origin. A MeterProvider needs to be configured with a Resource, using the
+// WithResource MeterProviderOption, to include this information. This Resource
+// should be used to describe the unique runtime environment instrumented code
+// is being run on. That way when multiple instances of the code are collected
+// at a single endpoint their origin is decipherable.
+//
+// To avoid leaking memory, the SDK returns the same instrument for calls to
+// create new instruments with the same Name, Unit, and Description.
+// Importantly, callbacks provided using metric.WithFloat64Callback or
+// metric.WithInt64Callback will only apply for the first instrument created
+// with a given Name, Unit, and Description. Instead, use
+// Meter.RegisterCallback and Registration.Unregister to add and remove
+// callbacks without leaking memory.
+//
+// See [go.opentelemetry.io/otel/metric] for more information about
+// the metric API.
+//
+// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
+// the experimental features.
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
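[Editor's note: the life-cycle described above, as a minimal runnable sketch. Only this SDK package and the resource package are assumed; a production setup would usually pair an exporter with NewPeriodicReader instead of the pull-based ManualReader used here:]

    package main

    import (
        "context"

        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
        "go.opentelemetry.io/otel/sdk/resource"
    )

    func main() {
        ctx := context.Background()

        // A pull-based reader; push-based setups wrap an Exporter in
        // sdkmetric.NewPeriodicReader instead.
        reader := sdkmetric.NewManualReader()
        mp := sdkmetric.NewMeterProvider(
            sdkmetric.WithResource(resource.Default()),
            sdkmetric.WithReader(reader),
        )
        defer func() { _ = mp.Shutdown(ctx) }()

        meter := mp.Meter("example/doc")
        counter, err := meter.Int64Counter("requests")
        if err != nil {
            panic(err)
        }
        counter.Add(ctx, 1)
    }
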
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
new file mode 100644
index 000000000..a6c403797
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+    "os"
+    "strconv"
+    "time"
+
+    "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+    // The time interval (in milliseconds) between the start of two export attempts.
+    envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
+    // Maximum allowed time (in milliseconds) to export data.
+    envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
+)
+
+// envDuration returns an environment variable's value as a duration in milliseconds if it exists,
+// or the defaultValue if the environment variable is not defined or the value is not valid.
+func envDuration(key string, defaultValue time.Duration) time.Duration {
+    v := os.Getenv(key)
+    if v == "" {
+        return defaultValue
+    }
+    d, err := strconv.Atoi(v)
+    if err != nil {
+        global.Error(err, "parse duration", "environment variable", key, "value", v)
+        return defaultValue
+    }
+    if d <= 0 {
+        global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
+        return defaultValue
+    }
+    return time.Duration(d) * time.Millisecond
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
new file mode 100644
index 000000000..82619da78
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+    "os"
+    "runtime"
+    "slices"
+
+    "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+    "go.opentelemetry.io/otel/sdk/metric/internal/x"
+)
+
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed InstrumentKind and user defined
+// environment variables.
+//
+// Note: This will only return non-nil values when the experimental exemplar
+// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable
+// is not set to always_off.
+func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] {
+    if !x.Exemplars.Enabled() {
+        return nil
+    }
+    // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
+    const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
+
+    var filter exemplar.Filter
+
+    switch os.Getenv(filterEnvKey) {
+    case "always_on":
+        filter = exemplar.AlwaysOnFilter
+    case "always_off":
+        return exemplar.Drop
+    case "trace_based":
+        fallthrough
+    default:
+        filter = exemplar.SampledFilter
+    }
+
+    // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
+    // Explicit bucket histogram aggregation with more than 1 bucket will
+    // use AlignedHistogramBucketExemplarReservoir.
+    a, ok := agg.(AggregationExplicitBucketHistogram)
+    if ok && len(a.Boundaries) > 0 {
+        cp := slices.Clone(a.Boundaries)
+        return func() exemplar.FilteredReservoir[N] {
+            bounds := cp
+            return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds))
+        }
+    }
+
+    var n int
+    if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
+        // Base2 Exponential Histogram Aggregation SHOULD use a
+        // SimpleFixedSizeExemplarReservoir with a reservoir equal to the
+        // smaller of the maximum number of buckets configured on the
+        // aggregation or twenty (e.g. min(20, max_buckets)).
+        n = int(a.MaxSize)
+        if n > 20 {
+            n = 20
+        }
+    } else {
+        // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+        // This Exemplar reservoir MAY take a configuration parameter for
+        // the size of the reservoir.
If no size configuration is + // provided, the default size MAY be the number of possible + // concurrent threads (e.g. number of CPUs) to help reduce + // contention. Otherwise, a default size of 1 SHOULD be used. + n = runtime.NumCPU() + if n < 1 { + // Should never be the case, but be defensive. + n = 1 + } + } + + return func() exemplar.FilteredReservoir[N] { + return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go new file mode 100644 index 000000000..1a3cccb67 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// ErrExporterShutdown is returned if Export or Shutdown are called after an +// Exporter has been Shutdown. +var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") + +// Exporter handles the delivery of metric data to external receivers. This is +// the final component in the metric push pipeline. +type Exporter interface { + // Temporality returns the Temporality to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Temporality(InstrumentKind) metricdata.Temporality + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Aggregation returns the Aggregation to use for an instrument kind. + // + // This method needs to be concurrent safe with itself and all the other + // Exporter methods. + Aggregation(InstrumentKind) Aggregation + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Export serializes and transmits metric data to a receiver. + // + // This is called synchronously, there is no concurrency safety + // requirement. Because of this, it is critical that all timeouts and + // cancellations of the passed context be honored. + // + // All retry logic must be contained in this function. The SDK does not + // implement any retry logic. All errors returned by this function are + // considered unrecoverable and will be reported to a configured error + // Handler. + // + // The passed ResourceMetrics may be reused when the call completes. If an + // exporter needs to hold this data after it returns, it needs to make a + // copy. + Export(context.Context, *metricdata.ResourceMetrics) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ForceFlush flushes any metric data held by an exporter. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // This method needs to be concurrent safe. + ForceFlush(context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown flushes all metric data held by an exporter and releases any + // held computational resources. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. 
+    //
+    // After Shutdown is called, calls to Export will perform no operation and
+    // instead will return an error indicating the shutdown state.
+    //
+    // This method needs to be concurrent safe.
+    Shutdown(context.Context) error
+    // DO NOT CHANGE: any modification will not be backwards compatible and
+    // must never be done outside of a new major release.
+}
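[Editor's note: a hedged, no-op sketch of an Exporter satisfying the interface above; the type name and behavior are illustrative only — real exporters live under go.opentelemetry.io/otel/exporters:]

    import (
        "context"
        "fmt"

        sdkmetric "go.opentelemetry.io/otel/sdk/metric"
        "go.opentelemetry.io/otel/sdk/metric/metricdata"
    )

    type printExporter struct{}

    func (printExporter) Temporality(sdkmetric.InstrumentKind) metricdata.Temporality {
        return metricdata.CumulativeTemporality
    }

    func (printExporter) Aggregation(k sdkmetric.InstrumentKind) sdkmetric.Aggregation {
        return sdkmetric.DefaultAggregationSelector(k)
    }

    func (printExporter) Export(_ context.Context, rm *metricdata.ResourceMetrics) error {
        fmt.Printf("export: %d instrumentation scope(s)\n", len(rm.ScopeMetrics))
        return nil // no retry logic: returned errors are treated as unrecoverable
    }

    func (printExporter) ForceFlush(context.Context) error { return nil }
    func (printExporter) Shutdown(context.Context) error   { return nil }

    // Usage sketch:
    //   sdkmetric.NewMeterProvider(sdkmetric.WithReader(sdkmetric.NewPeriodicReader(printExporter{})))
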
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
new file mode 100644
index 000000000..b52a330b3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -0,0 +1,347 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "strings"
+
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/metric"
+    "go.opentelemetry.io/otel/metric/embedded"
+    "go.opentelemetry.io/otel/sdk/instrumentation"
+    "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+var zeroScope instrumentation.Scope
+
+// InstrumentKind is the identifier of a group of instruments that all
+// perform the same function.
+type InstrumentKind uint8
+
+const (
+    // instrumentKindUndefined is an undefined instrument kind, it should not
+    // be used by any initialized type.
+    instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused
+    // InstrumentKindCounter identifies a group of instruments that record
+    // increasing values synchronously with the code path they are measuring.
+    InstrumentKindCounter InstrumentKind = 1
+    // InstrumentKindUpDownCounter identifies a group of instruments that
+    // record increasing and decreasing values synchronously with the code path
+    // they are measuring.
+    InstrumentKindUpDownCounter InstrumentKind = 2
+    // InstrumentKindHistogram identifies a group of instruments that record a
+    // distribution of values synchronously with the code path they are
+    // measuring.
+    InstrumentKindHistogram InstrumentKind = 3
+    // InstrumentKindObservableCounter identifies a group of instruments that
+    // record increasing values in an asynchronous callback.
+    InstrumentKindObservableCounter InstrumentKind = 4
+    // InstrumentKindObservableUpDownCounter identifies a group of instruments
+    // that record increasing and decreasing values in an asynchronous
+    // callback.
+    InstrumentKindObservableUpDownCounter InstrumentKind = 5
+    // InstrumentKindObservableGauge identifies a group of instruments that
+    // record current values in an asynchronous callback.
+    InstrumentKindObservableGauge InstrumentKind = 6
+    // InstrumentKindGauge identifies a group of instruments that record
+    // instantaneous values synchronously with the code path they are
+    // measuring.
+    InstrumentKindGauge InstrumentKind = 7
+)
+
+type nonComparable [0]func() // nolint: unused // This is indeed used.
+
+// Instrument describes properties an instrument is created with.
+type Instrument struct {
+    // Name is the human-readable identifier of the instrument.
+    Name string
+    // Description describes the purpose of the instrument.
+    Description string
+    // Kind defines the functional group of the instrument.
+    Kind InstrumentKind
+    // Unit is the unit of measurement recorded by the instrument.
+    Unit string
+    // Scope identifies the instrumentation that created the instrument.
+    Scope instrumentation.Scope
+
+    // Ensure forward compatibility if non-comparable fields need to be added.
+    nonComparable // nolint: unused
+}
+
+// IsEmpty returns if all Instrument fields are their zero-value.
+func (i Instrument) IsEmpty() bool {
+    return i.Name == "" &&
+        i.Description == "" &&
+        i.Kind == instrumentKindUndefined &&
+        i.Unit == "" &&
+        i.Scope == zeroScope
+}
+
+// matches returns whether all the non-zero-value fields of i match the
+// corresponding fields of other. If i is empty, it will match all others, and
+// true will always be returned.
+func (i Instrument) matches(other Instrument) bool {
+    return i.matchesName(other) &&
+        i.matchesDescription(other) &&
+        i.matchesKind(other) &&
+        i.matchesUnit(other) &&
+        i.matchesScope(other)
+}
+
+// matchesName returns true if the Name of i is "" or it equals the Name of
+// other, otherwise false.
+func (i Instrument) matchesName(other Instrument) bool {
+    return i.Name == "" || i.Name == other.Name
+}
+
+// matchesDescription returns true if the Description of i is "" or it equals
+// the Description of other, otherwise false.
+func (i Instrument) matchesDescription(other Instrument) bool {
+    return i.Description == "" || i.Description == other.Description
+}
+
+// matchesKind returns true if the Kind of i is its zero-value or it equals the
+// Kind of other, otherwise false.
+func (i Instrument) matchesKind(other Instrument) bool {
+    return i.Kind == instrumentKindUndefined || i.Kind == other.Kind
+}
+
+// matchesUnit returns true if the Unit of i is its zero-value or it equals the
+// Unit of other, otherwise false.
+func (i Instrument) matchesUnit(other Instrument) bool {
+    return i.Unit == "" || i.Unit == other.Unit
+}
+
+// matchesScope returns true if the Scope of i is its zero-value or it equals
+// the Scope of other, otherwise false.
+func (i Instrument) matchesScope(other Instrument) bool {
+    return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
+        (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
+        (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
+}
+
+// Stream describes the stream of data an instrument produces.
+type Stream struct {
+    // Name is the human-readable identifier of the stream.
+    Name string
+    // Description describes the purpose of the data.
+    Description string
+    // Unit is the unit of measurement recorded.
+    Unit string
+    // Aggregation the stream uses for an instrument.
+    Aggregation Aggregation
+    // AttributeFilter is an attribute Filter applied to the attributes
+    // recorded for an instrument's measurement. If the filter returns false
+    // the attribute will not be recorded, otherwise, if it returns true, it
+    // will record the attribute.
+    //
+    // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
+    // provide an allow-list of attribute keys here.
+    AttributeFilter attribute.Filter
+}
+
+// instID are the identifying properties of an instrument.
+type instID struct {
+    // Name is the name of the stream.
+    Name string
+    // Description is the description of the stream.
+    Description string
+    // Kind defines the functional group of the instrument.
+    Kind InstrumentKind
+    // Unit is the unit of the stream.
+    Unit string
+    // Number is the number type of the stream.
+    Number string
+}
+
+// Returns a normalized copy of the instID i.
+//
+// Instrument names are considered case-insensitive.
Standardize the instrument +// name to always be lowercase for the returned instID so it can be compared +// without the name casing affecting the comparison. +func (i instID) normalize() instID { + i.Name = strings.ToLower(i.Name) + return i +} + +type int64Inst struct { + measures []aggregate.Measure[int64] + + embedded.Int64Counter + embedded.Int64UpDownCounter + embedded.Int64Histogram + embedded.Int64Gauge +} + +var ( + _ metric.Int64Counter = (*int64Inst)(nil) + _ metric.Int64UpDownCounter = (*int64Inst)(nil) + _ metric.Int64Histogram = (*int64Inst)(nil) + _ metric.Int64Gauge = (*int64Inst)(nil) +) + +func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. + for _, in := range i.measures { + in(ctx, val, s) + } +} + +type float64Inst struct { + measures []aggregate.Measure[float64] + + embedded.Float64Counter + embedded.Float64UpDownCounter + embedded.Float64Histogram + embedded.Float64Gauge +} + +var ( + _ metric.Float64Counter = (*float64Inst)(nil) + _ metric.Float64UpDownCounter = (*float64Inst)(nil) + _ metric.Float64Histogram = (*float64Inst)(nil) + _ metric.Float64Gauge = (*float64Inst)(nil) +) + +func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { + c := metric.NewAddConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) { + c := metric.NewRecordConfig(opts) + i.aggregate(ctx, val, c.Attributes()) +} + +func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { + for _, in := range i.measures { + in(ctx, val, s) + } +} + +// observablID is a comparable unique identifier of an observable. 
+type observablID[N int64 | float64] struct { + name string + description string + kind InstrumentKind + unit string + scope instrumentation.Scope +} + +type float64Observable struct { + metric.Float64Observable + *observable[float64] + + embedded.Float64ObservableCounter + embedded.Float64ObservableUpDownCounter + embedded.Float64ObservableGauge +} + +var ( + _ metric.Float64ObservableCounter = float64Observable{} + _ metric.Float64ObservableUpDownCounter = float64Observable{} + _ metric.Float64ObservableGauge = float64Observable{} +) + +func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable { + return float64Observable{ + observable: newObservable[float64](m, kind, name, desc, u), + } +} + +type int64Observable struct { + metric.Int64Observable + *observable[int64] + + embedded.Int64ObservableCounter + embedded.Int64ObservableUpDownCounter + embedded.Int64ObservableGauge +} + +var ( + _ metric.Int64ObservableCounter = int64Observable{} + _ metric.Int64ObservableUpDownCounter = int64Observable{} + _ metric.Int64ObservableGauge = int64Observable{} +) + +func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable { + return int64Observable{ + observable: newObservable[int64](m, kind, name, desc, u), + } +} + +type observable[N int64 | float64] struct { + metric.Observable + observablID[N] + + meter *meter + measures measures[N] + dropAggregation bool +} + +func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { + return &observable[N]{ + observablID: observablID[N]{ + name: name, + description: desc, + kind: kind, + unit: u, + scope: m.scope, + }, + meter: m, + } +} + +// observe records the val for the set of attrs. +func (o *observable[N]) observe(val N, s attribute.Set) { + o.measures.observe(val, s) +} + +func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) { + o.measures = append(o.measures, meas...) +} + +type measures[N int64 | float64] []aggregate.Measure[N] + +// observe records the val for the set of attrs. +func (m measures[N]) observe(val N, s attribute.Set) { + for _, in := range m { + in(context.Background(), val, s) + } +} + +var errEmptyAgg = errors.New("no aggregators for observable instrument") + +// registerable returns an error if the observable o should not be registered, +// and nil if it should. An errEmptyAgg error is returned if o is effectively a +// no-op because it does not have any aggregators. Also, an error is returned +// if scope defines a Meter other than the one o was created by. +func (o *observable[N]) registerable(m *meter) error { + if len(o.measures) == 0 { + return errEmptyAgg + } + if m != o.meter { + return fmt.Errorf( + "invalid registration: observable %q from Meter %q, registered with Meter %q", + o.name, + o.scope.Name, + m.scope.Name, + ) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go new file mode 100644 index 000000000..25ea6244e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT. + +package metric + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[instrumentKindUndefined-0] + _ = x[InstrumentKindCounter-1] + _ = x[InstrumentKindUpDownCounter-2] + _ = x[InstrumentKindHistogram-3] + _ = x[InstrumentKindObservableCounter-4] + _ = x[InstrumentKindObservableUpDownCounter-5] + _ = x[InstrumentKindObservableGauge-6] + _ = x[InstrumentKindGauge-7] +} + +const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge" + +var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} + +func (i InstrumentKind) String() string { + if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go new file mode 100644 index 000000000..b18ee719b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// now is used to return the current local time while allowing tests to +// override the default time.Now function. +var now = time.Now + +// Measure receives measurements to be aggregated. +type Measure[N int64 | float64] func(context.Context, N, attribute.Set) + +// ComputeAggregation stores the aggregate of measurements into dest and +// returns the number of aggregate data-points output. +type ComputeAggregation func(dest *metricdata.Aggregation) int + +// Builder builds an aggregate function. +type Builder[N int64 | float64] struct { + // Temporality is the temporality used for the returned aggregate function. + // + // If this is not provided a default of cumulative will be used (except for + // the last-value aggregate function where delta is the only appropriate + // temporality). + Temporality metricdata.Temporality + // Filter is the attribute filter the aggregate function will use on the + // input of measurements. + Filter attribute.Filter + // ReservoirFunc is the factory function used by aggregate functions to + // create new exemplar reservoirs for a new seen attribute set. + // + // If this is not provided a default factory function that returns an + // exemplar.Drop reservoir will be used. + ReservoirFunc func() exemplar.FilteredReservoir[N] + // AggregationLimit is the cardinality limit of measurement attributes. Any + // measurement for new attributes once the limit has been reached will be + // aggregated into a single aggregate for the "otel.metric.overflow" + // attribute. + // + // If AggregationLimit is less than or equal to zero there will not be an + // aggregation limit imposed (i.e. unlimited attribute sets). 
+    AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] {
+    if b.ReservoirFunc != nil {
+        return b.ReservoirFunc
+    }
+
+    return exemplar.Drop
+}
+
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
+    if b.Filter != nil {
+        fltr := b.Filter // Copy to make it immutable after assignment.
+        return func(ctx context.Context, n N, a attribute.Set) {
+            fAttr, dropped := a.Filter(fltr)
+            f(ctx, n, fAttr, dropped)
+        }
+    }
+    return func(ctx context.Context, n N, a attribute.Set) {
+        f(ctx, n, a, nil)
+    }
+}
+
+// LastValue returns a last-value aggregate function input and output.
+func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
+    lv := newLastValue[N](b.AggregationLimit, b.resFunc())
+    switch b.Temporality {
+    case metricdata.DeltaTemporality:
+        return b.filter(lv.measure), lv.delta
+    default:
+        return b.filter(lv.measure), lv.cumulative
+    }
+}
+
+// PrecomputedLastValue returns a last-value aggregate function input and
+// output. The aggregation returned from the returned ComputeAggregation
+// function will always only return values from the previous collection cycle.
+func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) {
+    lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc())
+    switch b.Temporality {
+    case metricdata.DeltaTemporality:
+        return b.filter(lv.measure), lv.delta
+    default:
+        return b.filter(lv.measure), lv.cumulative
+    }
+}
+
+// PrecomputedSum returns a sum aggregate function input and output. The
+// arguments passed to the input are expected to be the precomputed sum values.
+func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
+    s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
+    switch b.Temporality {
+    case metricdata.DeltaTemporality:
+        return b.filter(s.measure), s.delta
+    default:
+        return b.filter(s.measure), s.cumulative
+    }
+}
+
+// Sum returns a sum aggregate function input and output.
+func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
+    s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
+    switch b.Temporality {
+    case metricdata.DeltaTemporality:
+        return b.filter(s.measure), s.delta
+    default:
+        return b.filter(s.measure), s.cumulative
+    }
+}
+
+// ExplicitBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+    h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+    switch b.Temporality {
+    case metricdata.DeltaTemporality:
+        return b.filter(h.measure), h.delta
+    default:
+        return b.filter(h.measure), h.cumulative
+    }
+}
+
+// ExponentialBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
+    h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+    switch b.Temporality {
+    case metricdata.DeltaTemporality:
+        return b.filter(h.measure), h.delta
+    default:
+        return b.filter(h.measure), h.cumulative
+    }
+}
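[Editor's note: an in-package sketch of how a pipeline consumes Builder — the returned pair separates the hot measurement path from collection; the values are hypothetical:]

    b := Builder[int64]{
        Temporality:      metricdata.DeltaTemporality,
        AggregationLimit: 2000,
    }
    measure, collect := b.Sum(true) // monotonic sum

    measure(context.Background(), 5, attribute.NewSet(attribute.String("route", "/a")))
    measure(context.Background(), 3, attribute.NewSet(attribute.String("route", "/a")))

    var agg metricdata.Aggregation
    n := collect(&agg) // n == 1; agg is a metricdata.Sum[int64] with one 8-valued point
    _ = n
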
+// reset ensures s has capacity and sets its length. If the capacity of s is
+// too small, a new slice is returned with the specified capacity and length.
+func reset[T any](s []T, length, capacity int) []T {
+    if cap(s) < capacity {
+        return make([]T, length, capacity)
+    }
+    return s[:length]
+}
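[Editor's note: the reset contract in one illustrative snippet — the backing array is reused when capacity suffices, and reallocated otherwise:]

    s := make([]int, 2, 8)
    a := reset(s, 4, 4)   // cap(s) >= 4: same backing array, length now 4
    b := reset(a, 16, 16) // cap too small: freshly allocated slice, len == cap == 16
    _ = b
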
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
new file mode 100644
index 000000000..7b7225e6e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package aggregate provides aggregate types used to compute aggregations and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
new file mode 100644
index 000000000..170ae8e58
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+    "sync"
+
+    "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+    "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+var exemplarPool = sync.Pool{
+    New: func() any { return new([]exemplar.Exemplar) },
+}
+
+func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
+    dest := exemplarPool.Get().(*[]exemplar.Exemplar)
+    defer func() {
+        *dest = (*dest)[:0]
+        exemplarPool.Put(dest)
+    }()
+
+    *dest = reset(*dest, len(*out), cap(*out))
+
+    f(dest)
+
+    *out = reset(*out, len(*dest), cap(*dest))
+    for i, e := range *dest {
+        (*out)[i].FilteredAttributes = e.FilteredAttributes
+        (*out)[i].Time = e.Time
+        (*out)[i].SpanID = e.SpanID
+        (*out)[i].TraceID = e.TraceID
+
+        switch e.Value.Type() {
+        case exemplar.Int64ValueType:
+            (*out)[i].Value = N(e.Value.Int64())
+        case exemplar.Float64ValueType:
+            (*out)[i].Value = N(e.Value.Float64())
+        }
+    }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
new file mode 100644
index 000000000..707342408
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -0,0 +1,444 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+    "context"
+    "errors"
+    "math"
+    "sync"
+    "time"
+
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+    "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+const (
+    expoMaxScale = 20
+    expoMinScale = -10
+
+    smallestNonZeroNormalFloat64 = 0x1p-1022
+
+    // These redefine the Math constants with a type, so the compiler won't coerce
+    // them into an int on 32 bit platforms.
+    maxInt64 int64 = math.MaxInt64
+    minInt64 int64 = math.MinInt64
+)
+
+// expoHistogramDataPoint is a single data point in an exponential histogram.
+type expoHistogramDataPoint[N int64 | float64] struct {
+    attrs attribute.Set
+    res   exemplar.FilteredReservoir[N]
+
+    count uint64
+    min   N
+    max   N
+    sum   N
+
+    maxSize  int
+    noMinMax bool
+    noSum    bool
+
+    scale int32
+
+    posBuckets expoBuckets
+    negBuckets expoBuckets
+    zeroCount  uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
+    f := math.MaxFloat64
+    max := N(f) // if N is int64, max will overflow to -9223372036854775808
+    min := N(-f)
+    if N(maxInt64) > N(f) {
+        max = N(maxInt64)
+        min = N(minInt64)
+    }
+    return &expoHistogramDataPoint[N]{
+        attrs:    attrs,
+        min:      max,
+        max:      min,
+        maxSize:  maxSize,
+        noMinMax: noMinMax,
+        noSum:    noSum,
+        scale:    maxScale,
+    }
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+    p.count++
+
+    if !p.noMinMax {
+        if v < p.min {
+            p.min = v
+        }
+        if v > p.max {
+            p.max = v
+        }
+    }
+    if !p.noSum {
+        p.sum += v
+    }
+
+    absV := math.Abs(float64(v))
+
+    if float64(absV) == 0.0 {
+        p.zeroCount++
+        return
+    }
+
+    bin := p.getBin(absV)
+
+    bucket := &p.posBuckets
+    if v < 0 {
+        bucket = &p.negBuckets
+    }
+
+    // If the new bin would make the counts larger than maxSize, we need to
+    // downscale current measurements.
+    if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+        if p.scale-scaleDelta < expoMinScale {
+            // With a scale of -10 there is only two buckets for the whole range of float64 values.
+            // This can only happen if there is a max size of 1.
+            otel.Handle(errors.New("exponential histogram scale underflow"))
+            return
+        }
+        // Downscale
+        p.scale -= scaleDelta
+        p.posBuckets.downscale(scaleDelta)
+        p.negBuckets.downscale(scaleDelta)
+
+        bin = p.getBin(absV)
+    }
+
+    bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
+    frac, expInt := math.Frexp(v)
+    // 11-bit exponential.
+    exp := int32(expInt) // nolint: gosec
+    if p.scale <= 0 {
+        // Because of the choice of fraction, exp is always 1 power of two
+        // higher than we want.
+        var correction int32 = 1
+        if frac == .5 {
+            // If v is an exact power of two the frac will be .5 and the exp
+            // will be one higher than we want.
+            correction = 2
+        }
+        return (exp - correction) >> (-p.scale)
+    }
+    return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
+}
+
+// scaleFactors are constants used in calculating the logarithm index. They are
+// equivalent to 2^index/log(2).
+var scaleFactors = [21]float64{
+    math.Ldexp(math.Log2E, 0),
+    math.Ldexp(math.Log2E, 1),
+    math.Ldexp(math.Log2E, 2),
+    math.Ldexp(math.Log2E, 3),
+    math.Ldexp(math.Log2E, 4),
+    math.Ldexp(math.Log2E, 5),
+    math.Ldexp(math.Log2E, 6),
+    math.Ldexp(math.Log2E, 7),
+    math.Ldexp(math.Log2E, 8),
+    math.Ldexp(math.Log2E, 9),
+    math.Ldexp(math.Log2E, 10),
+    math.Ldexp(math.Log2E, 11),
+    math.Ldexp(math.Log2E, 12),
+    math.Ldexp(math.Log2E, 13),
+    math.Ldexp(math.Log2E, 14),
+    math.Ldexp(math.Log2E, 15),
+    math.Ldexp(math.Log2E, 16),
+    math.Ldexp(math.Log2E, 17),
+    math.Ldexp(math.Log2E, 18),
+    math.Ldexp(math.Log2E, 19),
+    math.Ldexp(math.Log2E, 20),
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
+    if length == 0 {
+        // No need to rescale if there are no buckets.
+        return 0
+    }
+
+    low := int(startBin)
+    high := int(bin)
+    if startBin >= bin {
+        low = int(bin)
+        high = int(startBin) + length - 1
+    }
+
+    var count int32
+    for high-low >= p.maxSize {
+        low = low >> 1
+        high = high >> 1
+        count++
+        if count > expoMaxScale-expoMinScale {
+            return count
+        }
+    }
+    return count
+}
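[Editor's note: a worked example of getBin at scale 0, where bucket i covers (2^i, 2^(i+1)]; the values are chosen for illustration:]

    // v = 6.0: math.Frexp(6) = (0.75, 3); frac != .5 so correction = 1,
    // giving bin (3-1)>>0 = 2, i.e. the (4, 8] bucket.
    // v = 4.0: math.Frexp(4) = (0.5, 3); an exact power of two, so correction = 2,
    // giving bin (3-2)>>0 = 1, i.e. the (2, 4] bucket.
    frac, exp := math.Frexp(6.0)
    fmt.Println(frac, exp) // 0.75 3
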
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+    startBin int32
+    counts   []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
+func (b *expoBuckets) record(bin int32) {
+    if len(b.counts) == 0 {
+        b.counts = []uint64{1}
+        b.startBin = bin
+        return
+    }
+
+    endBin := int(b.startBin) + len(b.counts) - 1
+
+    // if the new bin is inside the current range
+    if bin >= b.startBin && int(bin) <= endBin {
+        b.counts[bin-b.startBin]++
+        return
+    }
+    // if the new bin is before the current start, add spaces to the counts
+    if bin < b.startBin {
+        origLen := len(b.counts)
+        newLength := endBin - int(bin) + 1
+        shift := b.startBin - bin
+
+        if newLength > cap(b.counts) {
+            b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
+        }
+
+        copy(b.counts[shift:origLen+int(shift)], b.counts[:])
+        b.counts = b.counts[:newLength]
+        for i := 1; i < int(shift); i++ {
+            b.counts[i] = 0
+        }
+        b.startBin = bin
+        b.counts[0] = 1
+        return
+    }
+    // if the new bin is after the end, add spaces to the end
+    if int(bin) > endBin {
+        if int(bin-b.startBin) < cap(b.counts) {
+            b.counts = b.counts[:bin-b.startBin+1]
+            for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
+                b.counts[i] = 0
+            }
+            b.counts[bin-b.startBin] = 1
+            return
+        }
+
+        end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
+        b.counts = append(b.counts, end...)
+        b.counts[bin-b.startBin] = 1
+    }
+}
+
+// downscale shrinks a bucket by a factor of 2^delta. It will sum counts into
+// the correct lower resolution bucket.
+func (b *expoBuckets) downscale(delta int32) {
+    // Example
+    // delta = 2
+    // Original offset: -6
+    // Counts: [ 3,  1,  2,  3,  4,  5, 6, 7, 8, 9, 10]
+    // bins:    -6  -5, -4, -3, -2, -1, 0, 1, 2, 3,  4
+    // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0,  1
+    // new Offset: -2
+    // new Counts: [4, 14, 30, 10]
+
+    if len(b.counts) <= 1 || delta < 1 {
+        b.startBin = b.startBin >> delta
+        return
+    }
+
+    steps := int32(1) << delta
+    offset := b.startBin % steps
+    offset = (offset + steps) % steps // to make offset positive
+    for i := 1; i < len(b.counts); i++ {
+        idx := i + int(offset)
+        if idx%int(steps) == 0 {
+            b.counts[idx/int(steps)] = b.counts[i]
+            continue
+        }
+        b.counts[idx/int(steps)] += b.counts[i]
+    }
+
+    lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
+    b.counts = b.counts[:lastIdx+1]
+    b.startBin = b.startBin >> delta
+}
+
+// newExponentialHistogram returns an Aggregator that summarizes a set of
+// measurements as an exponential histogram. Each histogram is scoped by attributes
+// and the aggregation cycle the measurements were made in.
+func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] {
+    return &expoHistogram[N]{
+        noSum:    noSum,
+        noMinMax: noMinMax,
+        maxSize:  int(maxSize),
+        maxScale: maxScale,
+
+        newRes: r,
+        limit:  newLimiter[*expoHistogramDataPoint[N]](limit),
+        values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]),
+
+        start: now(),
+    }
+}
+
+// expoHistogram summarizes a set of measurements as a histogram with exponentially
+// defined buckets.
+type expoHistogram[N int64 | float64] struct {
+    noSum    bool
+    noMinMax bool
+    maxSize  int
+    maxScale int32
+
+    newRes   func() exemplar.FilteredReservoir[N]
+    limit    limiter[*expoHistogramDataPoint[N]]
+    values   map[attribute.Distinct]*expoHistogramDataPoint[N]
+    valuesMu sync.Mutex
+
+    start time.Time
+}
+
+func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+    // Ignore NaN and infinity.
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { + return + } + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + attr := e.limit.Attributes(fltrAttr, e.values) + v, ok := e.values[attr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes() + + e.values[attr.Equivalent()] = v + } + v.record(value) + v.res.Offer(ctx, value, droppedAttr) +} + +func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. + h, _ := (*dest).(metricdata.ExponentialHistogram[N]) + h.Temporality = metricdata.DeltaTemporality + + e.valuesMu.Lock() + defer e.valuesMu.Unlock() + + n := len(e.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range e.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = e.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Scale = val.scale + hDPts[i].ZeroCount = val.zeroCount + hDPts[i].ZeroThreshold = 0.0 + + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin + hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) + copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) + + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin + hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) + copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) + + if !e.noSum { + hDPts[i].Sum = val.sum + } + if !e.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(e.values) + + e.start = t + h.DataPoints = hDPts + *dest = h + return n +} + +func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + + // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. + // In that case, use the zero-value h and hope for better alignment next cycle. 
+    h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+    h.Temporality = metricdata.CumulativeTemporality
+
+    e.valuesMu.Lock()
+    defer e.valuesMu.Unlock()
+
+    n := len(e.values)
+    hDPts := reset(h.DataPoints, n, n)
+
+    var i int
+    for _, val := range e.values {
+        hDPts[i].Attributes = val.attrs
+        hDPts[i].StartTime = e.start
+        hDPts[i].Time = t
+        hDPts[i].Count = val.count
+        hDPts[i].Scale = val.scale
+        hDPts[i].ZeroCount = val.zeroCount
+        hDPts[i].ZeroThreshold = 0.0
+
+        hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
+        hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts))
+        copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+
+        hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
+        hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts))
+        copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+
+        if !e.noSum {
+            hDPts[i].Sum = val.sum
+        }
+        if !e.noMinMax {
+            hDPts[i].Min = metricdata.NewExtrema(val.min)
+            hDPts[i].Max = metricdata.NewExtrema(val.max)
+        }
+
+        collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+        i++
+        // TODO (#3006): This will use an unbounded amount of memory if there
+        // are unbounded number of attribute sets being aggregated. Attribute
+        // sets that become "stale" need to be forgotten so this will not
+        // overload the system.
+    }
+
+    h.DataPoints = hDPts
+    *dest = h
+    return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
new file mode 100644
index 000000000..ade0941f5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -0,0 +1,233 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+    "context"
+    "slices"
+    "sort"
+    "sync"
+    "time"
+
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+    "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type buckets[N int64 | float64] struct {
+    attrs attribute.Set
+    res   exemplar.FilteredReservoir[N]
+
+    counts   []uint64
+    count    uint64
+    total    N
+    min, max N
+}
+
+// newBuckets returns buckets with n bins.
+func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] {
+    return &buckets[N]{attrs: attrs, counts: make([]uint64, n)}
+}
+
+func (b *buckets[N]) sum(value N) { b.total += value }
+
+func (b *buckets[N]) bin(idx int, value N) {
+    b.counts[idx]++
+    b.count++
+    if value < b.min {
+        b.min = value
+    } else if value > b.max {
+        b.max = value
+    }
+}
+
+// histValues summarizes a set of measurements as histValues with
+// explicitly defined buckets.
+type histValues[N int64 | float64] struct {
+    noSum  bool
+    bounds []float64
+
+    newRes   func() exemplar.FilteredReservoir[N]
+    limit    limiter[*buckets[N]]
+    values   map[attribute.Distinct]*buckets[N]
+    valuesMu sync.Mutex
+}
+
+func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] {
+    // The responsibility of keeping all buckets correctly associated with the
+    // passed boundaries is ultimately this type's responsibility. Make a copy
+    // here so we can always guarantee this. Or, in the case of failure, have
+    // complete control over the fix.
+    b := slices.Clone(bounds)
+    slices.Sort(b)
+    return &histValues[N]{
+        noSum:  noSum,
+        bounds: b,
+        newRes: r,
+        limit:  newLimiter[*buckets[N]](limit),
+        values: make(map[attribute.Distinct]*buckets[N]),
+    }
+}
+
+// measure records the measurement value, scoped by attr, and aggregates it
+// into a histogram.
+func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+    // This search will return an index in the range [0, len(s.bounds)], where
+    // it will return len(s.bounds) if value is greater than the last element
+    // of s.bounds. This aligns with the buckets in that the length of buckets
+    // is len(s.bounds)+1, with the last bucket representing:
+    // (s.bounds[len(s.bounds)-1], +∞).
+    idx := sort.SearchFloat64s(s.bounds, float64(value))
+
+    s.valuesMu.Lock()
+    defer s.valuesMu.Unlock()
+
+    attr := s.limit.Attributes(fltrAttr, s.values)
+    b, ok := s.values[attr.Equivalent()]
+    if !ok {
+        // N+1 buckets. For example:
+        //
+        //   bounds = [0, 5, 10]
+        //
+        // Then,
+        //
+        //   buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+        b = newBuckets[N](attr, len(s.bounds)+1)
+        b.res = s.newRes()
+
+        // Ensure min and max are recorded values (not zero), for new buckets.
+        b.min, b.max = value, value
+        s.values[attr.Equivalent()] = b
+    }
+    b.bin(idx, value)
+    if !s.noSum {
+        b.sum(value)
+    }
+    b.res.Offer(ctx, value, droppedAttr)
+}
+
+// newHistogram returns an Aggregator that summarizes a set of measurements as
+// a histogram.
+func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] {
+    return &histogram[N]{
+        histValues: newHistValues[N](boundaries, noSum, limit, r),
+        noMinMax:   noMinMax,
+        start:      now(),
+    }
+}
+
+// histogram summarizes a set of measurements as a histogram with explicitly
+// defined buckets.
+type histogram[N int64 | float64] struct {
+    *histValues[N]
+
+    noMinMax bool
+    start    time.Time
+}
+
+func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
+    t := now()
+
+    // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+    // case, use the zero-value h and hope for better alignment next cycle.
+    h, _ := (*dest).(metricdata.Histogram[N])
+    h.Temporality = metricdata.DeltaTemporality
+
+    s.valuesMu.Lock()
+    defer s.valuesMu.Unlock()
+
+    // Do not allow modification of our copy of bounds.
+    bounds := slices.Clone(s.bounds)
+
+    n := len(s.values)
+    hDPts := reset(h.DataPoints, n, n)
+
+    var i int
+    for _, val := range s.values {
+        hDPts[i].Attributes = val.attrs
+        hDPts[i].StartTime = s.start
+        hDPts[i].Time = t
+        hDPts[i].Count = val.count
+        hDPts[i].Bounds = bounds
+        hDPts[i].BucketCounts = val.counts
+
+        if !s.noSum {
+            hDPts[i].Sum = val.total
+        }
+
+        if !s.noMinMax {
+            hDPts[i].Min = metricdata.NewExtrema(val.min)
+            hDPts[i].Max = metricdata.NewExtrema(val.max)
+        }
+
+        collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+        i++
+    }
+    // Unused attribute sets do not report.
+    clear(s.values)
+    // The delta collection cycle resets.
+    s.start = t
+
+    h.DataPoints = hDPts
+    *dest = h
+
+    return n
+}
+
+func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
+    t := now()
+
+    // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+    // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N]) + h.Temporality = metricdata.CumulativeTemporality + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + // Do not allow modification of our copy of bounds. + bounds := slices.Clone(s.bounds) + + n := len(s.values) + hDPts := reset(h.DataPoints, n, n) + + var i int + for _, val := range s.values { + hDPts[i].Attributes = val.attrs + hDPts[i].StartTime = s.start + hDPts[i].Time = t + hDPts[i].Count = val.count + hDPts[i].Bounds = bounds + + // The HistogramDataPoint field values returned need to be copies of + // the buckets value as we will keep updating them. + // + // TODO (#3047): Making copies for bounds and counts incurs a large + // memory allocation footprint. Alternatives should be explored. + hDPts[i].BucketCounts = slices.Clone(val.counts) + + if !s.noSum { + hDPts[i].Sum = val.total + } + + if !s.noMinMax { + hDPts[i].Min = metricdata.NewExtrema(val.min) + hDPts[i].Max = metricdata.NewExtrema(val.max) + } + + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) + + i++ + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + + h.DataPoints = hDPts + *dest = h + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go new file mode 100644 index 000000000..c35936840 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -0,0 +1,162 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// datapoint is timestamped measurement data. +type datapoint[N int64 | float64] struct { + attrs attribute.Set + value N + res exemplar.FilteredReservoir[N] +} + +func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { + return &lastValue[N]{ + newRes: r, + limit: newLimiter[datapoint[N]](limit), + values: make(map[attribute.Distinct]datapoint[N]), + start: now(), + } +} + +// lastValue summarizes a set of measurements as the last one made. +type lastValue[N int64 | float64] struct { + sync.Mutex + + newRes func() exemplar.FilteredReservoir[N] + limit limiter[datapoint[N]] + values map[attribute.Distinct]datapoint[N] + start time.Time +} + +func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + s.Lock() + defer s.Unlock() + + attr := s.limit.Attributes(fltrAttr, s.values) + d, ok := s.values[attr.Equivalent()] + if !ok { + d.res = s.newRes() + } + + d.attrs = attr + d.value = value + d.res.Offer(ctx, value, droppedAttr) + + s.values[attr.Equivalent()] = d +} + +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. 
+ s.start = t + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int { + n := len(s.values) + *dest = reset(*dest, n, n) + + var i int + for _, v := range s.values { + (*dest)[i].Attributes = v.attrs + (*dest)[i].StartTime = s.start + (*dest)[i].Time = t + (*dest)[i].Value = v.value + collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) + i++ + } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = t + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + t := now() + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints, t) + // Do not report stale values. + clear(s.values) + *dest = gData + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go new file mode 100644 index 000000000..9ea0251ed --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import "go.opentelemetry.io/otel/attribute" + +// overflowSet is the attribute set used to record a measurement when adding +// another distinct attribute set to the aggregate would exceed the aggregate +// limit. +var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true)) + +// limiter limits aggregate values. +type limiter[V any] struct { + // aggLimit is the maximum number of metric streams that can be aggregated. 
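+	//
+	// For example (an illustrative sketch, not part of the vendored source;
+	// a, b, and c are distinct attribute.Sets and m is the caller's
+	// measurement map, which stores each returned set):
+	//
+	//	lim := newLimiter[int64](3)
+	//	lim.Attributes(a, m) // a           (len(m) == 0)
+	//	lim.Attributes(b, m) // b           (len(m) == 1)
+	//	lim.Attributes(c, m) // overflowSet (len(m) == 2 >= aggLimit-1)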
+	//
+	// Any metric stream with attributes distinct from any set already
+	// aggregated, once the aggLimit has been met, will instead be aggregated
+	// into an "overflow" metric stream. That stream will only contain the
+	// "otel.metric.overflow"=true attribute.
+	aggLimit int
+}
+
+// newLimiter returns a new limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+	return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set {
+	if l.aggLimit > 0 {
+		_, exists := measurements[attrs.Equivalent()]
+		if !exists && len(measurements) >= l.aggLimit-1 {
+			return overflowSet
+		}
+	}
+
+	return attrs
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
new file mode 100644
index 000000000..891366922
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -0,0 +1,238 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type sumValue[N int64 | float64] struct {
+	n     N
+	res   exemplar.FilteredReservoir[N]
+	attrs attribute.Set
+}
+
+// valueMap is the storage for sums.
+type valueMap[N int64 | float64] struct {
+	sync.Mutex
+	newRes func() exemplar.FilteredReservoir[N]
+	limit  limiter[sumValue[N]]
+	values map[attribute.Distinct]sumValue[N]
+}
+
+func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] {
+	return &valueMap[N]{
+		newRes: r,
+		limit:  newLimiter[sumValue[N]](limit),
+		values: make(map[attribute.Distinct]sumValue[N]),
+	}
+}
+
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+	s.Lock()
+	defer s.Unlock()
+
+	attr := s.limit.Attributes(fltrAttr, s.values)
+	v, ok := s.values[attr.Equivalent()]
+	if !ok {
+		v.res = s.newRes()
+	}
+
+	v.attrs = attr
+	v.n += value
+	v.res.Offer(ctx, value, droppedAttr)
+
+	s.values[attr.Equivalent()] = v
+}
+
+// newSum returns an aggregator that summarizes a set of measurements as their
+// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
+// the measurements were made in.
+func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] {
+	return &sum[N]{
+		valueMap:  newValueMap[N](limit, r),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// sum summarizes a set of measurements made as their arithmetic sum.
+type sum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+}
+
+func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.DeltaTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for _, val := range s.values {
+		dPts[i].Attributes = val.attrs
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = val.n
+		collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+		i++
+	}
+	// Do not report stale values.
+	clear(s.values)
+	// The delta collection cycle resets.
+	s.start = t
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.CumulativeTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for _, value := range s.values {
+		dPts[i].Attributes = value.attrs
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = value.n
+		collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+		// TODO (#3006): This will use an unbounded amount of memory if there
+		// are unbounded number of attribute sets being aggregated. Attribute
+		// sets that become "stale" need to be forgotten so this will not
+		// overload the system.
+		i++
+	}
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+// newPrecomputedSum returns an aggregator that summarizes a set of
+// observations as their arithmetic sum. Each sum is scoped by attributes and
+// the aggregation cycle the measurements were made in.
+func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] {
+	return &precomputedSum[N]{
+		valueMap:  newValueMap[N](limit, r),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// precomputedSum summarizes a set of observations as their arithmetic sum.
+type precomputedSum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+
+	reported map[attribute.Distinct]N
+}
+
+func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
+	t := now()
+	newReported := make(map[attribute.Distinct]N)
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+	sData, _ := (*dest).(metricdata.Sum[N])
+	sData.Temporality = metricdata.DeltaTemporality
+	sData.IsMonotonic = s.monotonic
+
+	s.Lock()
+	defer s.Unlock()
+
+	n := len(s.values)
+	dPts := reset(sData.DataPoints, n, n)
+
+	var i int
+	for key, value := range s.values {
+		delta := value.n - s.reported[key]
+
+		dPts[i].Attributes = value.attrs
+		dPts[i].StartTime = s.start
+		dPts[i].Time = t
+		dPts[i].Value = delta
+		collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+
+		newReported[key] = value.n
+		i++
+	}
+	// Unused attribute sets do not report.
+	clear(s.values)
+	s.reported = newReported
+	// The delta collection cycle resets.
+	s.start = t
+
+	sData.DataPoints = dPts
+	*dest = sData
+
+	return n
+}
+
+func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
+	t := now()
+
+	// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+	// use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N]) + sData.Temporality = metricdata.CumulativeTemporality + sData.IsMonotonic = s.monotonic + + s.Lock() + defer s.Unlock() + + n := len(s.values) + dPts := reset(sData.DataPoints, n, n) + + var i int + for _, val := range s.values { + dPts[i].Attributes = val.attrs + dPts[i].StartTime = s.start + dPts[i].Time = t + dPts[i].Value = val.n + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + + i++ + } + // Unused attribute sets do not report. + clear(s.values) + + sData.DataPoints = dPts + *dest = sData + + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go new file mode 100644 index 000000000..5394f48e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go new file mode 100644 index 000000000..5a0f39ae1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// Drop returns a [FilteredReservoir] that drops all measurements it is offered. +func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]Exemplar) { + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 000000000..fcaa6a469 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. 
+	TraceID []byte `json:",omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
new file mode 100644
index 000000000..152a069a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Filter determines if a measurement should be offered.
+//
+// The passed ctx needs to contain any baggage or span that were active
+// when the measurement was made. This information may be used by the
+// Reservoir in making a sampling decision.
+type Filter func(context.Context) bool
+
+// SampledFilter is a [Filter] that will only offer measurements
+// if the passed context associated with the measurement contains a sampled
+// [go.opentelemetry.io/otel/trace.SpanContext].
+func SampledFilter(ctx context.Context) bool {
+	return trace.SpanContextFromContext(ctx).IsSampled()
+}
+
+// AlwaysOnFilter is a [Filter] that always offers measurements.
+func AlwaysOnFilter(ctx context.Context) bool {
+	return true
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go
new file mode 100644
index 000000000..9fedfa4be
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// FilteredReservoir wraps a [Reservoir] with a filter.
+type FilteredReservoir[N int64 | float64] interface {
+	// Offer accepts the parameters associated with a measurement. The
+	// parameters will be stored as an exemplar if the filter decides to
+	// sample the measurement.
+	//
+	// The passed ctx needs to contain any baggage or span that were active
+	// when the measurement was made. This information may be used by the
+	// Reservoir in making a sampling decision.
+	Offer(ctx context.Context, val N, attr []attribute.KeyValue)
+	// Collect returns all the held exemplars in the reservoir.
+	Collect(dest *[]Exemplar)
+}
+
+// filteredReservoir handles the pre-sampled exemplar of measurements made.
+type filteredReservoir[N int64 | float64] struct {
+	filter    Filter
+	reservoir Reservoir
+}
+
+// NewFilteredReservoir creates a [FilteredReservoir] which only offers values
+// that are allowed by the filter.
+func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] {
+	return &filteredReservoir[N]{
+		filter:    f,
+		reservoir: r,
+	}
+}
+
+func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
+	if f.filter(ctx) {
+		// Only record the current time if we are sampling this measurement.
+		f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr)
+	}
+}
+
+func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
new file mode 100644
index 000000000..a6ff86d02
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go
@@ -0,0 +1,46 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"slices"
+	"sort"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Histogram returns a [Reservoir] that samples the last measurement that falls
+// within a histogram bucket. The histogram bucket upper-boundaries are defined
+// by bounds.
+//
+// The passed bounds will be sorted by this function.
+func Histogram(bounds []float64) Reservoir {
+	slices.Sort(bounds)
+	return &histRes{
+		bounds:  bounds,
+		storage: newStorage(len(bounds) + 1),
+	}
+}
+
+type histRes struct {
+	*storage
+
+	// bounds are bucket bounds in ascending order.
+	bounds []float64
+}
+
+func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
+	var x float64
+	switch v.Type() {
+	case Int64ValueType:
+		x = float64(v.Int64())
+	case Float64ValueType:
+		x = v.Float64()
+	default:
+		panic("unknown value type")
+	}
+	r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
new file mode 100644
index 000000000..199a2608f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
@@ -0,0 +1,191 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import (
+	"context"
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+var (
+	// rng is used to make sampling decisions.
+	//
+	// Do not use crypto/rand. There is no reason for the decrease in performance
+	// given this is not a security sensitive decision.
+	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
+	// Ensure concurrent safe access to rng and its underlying source.
+	rngMu sync.Mutex
+)
+
+// random returns, as a float64, a uniform pseudo-random number in the open
+// interval (0.0,1.0).
+func random() float64 {
+	// TODO: This does not return a uniform number. rng.Float64 returns a
+	// uniformly random int in [0,2^53) that is divided by 2^53. Meaning it
+	// returns multiples of 2^-53, and not all floating point numbers between 0
+	// and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand
+	// are always going to be 0).
+	//
+	// An alternative algorithm should be considered that will actually return
+	// a uniform number in the interval (0,1). For example, since the default
+	// rand source provides a uniform distribution for Int63, this can be
+	// converted following the prototypical code of Mersenne Twister 64 (Takuji
+	// Nishimura and Makoto Matsumoto:
+	// http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c)
+	//
+	//   (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
+	//
+	// There are likely many other methods to explore here as well.
+
+	rngMu.Lock()
+	defer rngMu.Unlock()
+
+	f := rng.Float64()
+	for f == 0 {
+		f = rng.Float64()
+	}
+	return f
+}
+
+// FixedSize returns a [Reservoir] that samples at most k exemplars. If there
+// are k or fewer measurements made, the Reservoir will sample each one. If
+// there are more than k, the Reservoir will then randomly sample all
+// additional measurements with a decreasing probability.
+func FixedSize(k int) Reservoir {
+	r := &randRes{storage: newStorage(k)}
+	r.reset()
+	return r
+}
+
+type randRes struct {
+	*storage
+
+	// count is the number of measurements seen.
+	count int64
+	// next is the next count that will store a measurement at a random index
+	// once the reservoir has been filled.
+	next int64
+	// w is the largest random number in a distribution that is used to compute
+	// the next next.
+	w float64
+}
+
+func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
+	// The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+	// 1994). "Reservoir-Sampling Algorithms of Time Complexity
+	// O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+	// 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+	//
+	// A high-level overview of "Algorithm L":
+	//   0) Pre-calculate the random count greater than the storage size when
+	//      an exemplar will be replaced.
+	//   1) Accept all measurements offered until the configured storage size is
+	//      reached.
+	//   2) Loop:
+	//      a) When the pre-calculated count is reached, replace a random
+	//         existing exemplar with the offered measurement.
+	//      b) Calculate the next random count greater than the existing one
+	//         which will replace another exemplar.
+	//
+	// The way a "replacement" count is computed is by looking at `n` number of
+	// independent random numbers each corresponding to an offered measurement.
+	// Of these numbers, the smallest `k` (the same size as the storage
+	// capacity) are kept as a subset. The maximum value in this
+	// subset, called `w`, is used to weight another random number generation
+	// for the next count that will be considered.
+	//
+	// By weighting the next count computation as described, it is able to
+	// perform a uniformly-weighted sampling algorithm based on the number of
+	// samples the reservoir has seen so far. The sampling will "slow down" as
+	// more and more samples are offered so as to reduce a bias towards those
+	// offered just prior to the end of the collection.
+	//
+	// This algorithm is preferred because of its balance of simplicity and
+	// performance. It will compute three random numbers (the bulk of
+	// computation time) for each item that becomes part of the reservoir, but
+	// it does not spend any time on items that do not. In particular it has an
+	// asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+	// measurements offered and k is the reservoir size.
+	//
+	// See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+	// this and other reservoir sampling algorithms. See
+	// https://github.com/MrAlias/reservoir-sampling for a performance
+	// comparison of reservoir sampling algorithms.
+
+	if int(r.count) < cap(r.store) {
+		r.store[r.count] = newMeasurement(ctx, t, n, a)
+	} else {
+		if r.count == r.next {
+			// Overwrite a random existing measurement with the one offered.
+			idx := int(rng.Int63n(int64(cap(r.store))))
+			r.store[idx] = newMeasurement(ctx, t, n, a)
+			r.advance()
+		}
+	}
+	r.count++
+}
+
+// reset resets r to the initial state.
+func (r *randRes) reset() { + // This resets the number of exemplars known. + r.count = 0 + // Random index inserts should only happen after the storage is full. + r.next = int64(cap(r.store)) + + // Initial random number in the series used to generate r.next. + // + // This is set before r.advance to reset or initialize the random number + // series. Without doing so it would always be 0 or never restart a new + // random number series. + // + // This maps the uniform random number in (0,1) to a geometric distribution + // over the same interval. The mean of the distribution is inversely + // proportional to the storage capacity. + r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) + + r.advance() +} + +// advance updates the count at which the offered measurement will overwrite an +// existing exemplar. +func (r *randRes) advance() { + // Calculate the next value in the random number series. + // + // The current value of r.w is based on the max of a distribution of random + // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity + // of the storage and each `u` in the interval (0,w)). To calculate the + // next r.w we use the fact that when the next exemplar is selected to be + // included in the storage an existing one will be dropped, and the + // corresponding random number in the set used to calculate r.w will also + // be replaced. The replacement random number will also be within (0,w), + // therefore the next r.w will be based on the same distribution (i.e. + // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by + // computing the next random number `u` and take r.w as `w * u^(1/k)`. + r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) + // Use the new random number in the series to calculate the count of the + // next measurement that will be stored. + // + // Given 0 < r.w < 1, each iteration will result in subsequent r.w being + // smaller. This translates here into the next next being selected against + // a distribution with a higher mean (i.e. the expected value will increase + // and replacements become less likely) + // + // Important to note, the new r.next will always be at least 1 more than + // the last r.next. + r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 +} + +func (r *randRes) Collect(dest *[]Exemplar) { + r.storage.Collect(dest) + // Call reset here even though it will reset r.count and restart the random + // number series. This will persist any old exemplars as long as no new + // measurements are offered, but it will also prioritize those new + // measurements that are made over the older collection cycle ones. + r.reset() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go new file mode 100644 index 000000000..80fa59554 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. 
+ // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]Exemplar) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go new file mode 100644 index 000000000..10b2976f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement +} + +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *storage) Collect(dest *[]Exemplar) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.Exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value Value + // SpanContext is the SpanContext active when a measurement was made. + SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty Measurement. +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ + FilteredAttributes: droppedAttr, + Time: ts, + Value: v, + SpanContext: trace.SpanContextFromContext(ctx), + valid: true, + } +} + +// Exemplar returns m as an [Exemplar]. 
+func (m measurement) Exemplar(dest *Exemplar) {
+	dest.FilteredAttributes = m.FilteredAttributes
+	dest.Time = m.Time
+	dest.Value = m.Value
+
+	if m.SpanContext.HasTraceID() {
+		traceID := m.SpanContext.TraceID()
+		dest.TraceID = traceID[:]
+	} else {
+		dest.TraceID = dest.TraceID[:0]
+	}
+
+	if m.SpanContext.HasSpanID() {
+		spanID := m.SpanContext.SpanID()
+		dest.SpanID = spanID[:]
+	} else {
+		dest.SpanID = dest.SpanID[:0]
+	}
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+	if cap(s) < capacity {
+		return make([]T, length, capacity)
+	}
+	return s[:length]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
new file mode 100644
index 000000000..1957d6b1e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+
+import "math"
+
+// ValueType identifies the type of value used in exemplar data.
+type ValueType uint8
+
+const (
+	// UnknownValueType should not be used. It represents a misconfigured
+	// Value.
+	UnknownValueType ValueType = 0
+	// Int64ValueType represents a Value with int64 data.
+	Int64ValueType ValueType = 1
+	// Float64ValueType represents a Value with float64 data.
+	Float64ValueType ValueType = 2
+)
+
+// Value is the value of data held by an exemplar.
+type Value struct {
+	t   ValueType
+	val uint64
+}
+
+// NewValue returns a new [Value] for the provided value.
+func NewValue[N int64 | float64](value N) Value {
+	switch v := any(value).(type) {
+	case int64:
+		return Value{t: Int64ValueType, val: uint64(v)}
+	case float64:
+		return Value{t: Float64ValueType, val: math.Float64bits(v)}
+	}
+	return Value{}
+}
+
+// Type returns the [ValueType] of data held by v.
+func (v Value) Type() ValueType { return v.t }
+
+// Int64 returns the value of v as an int64. If the ValueType of v is not an
+// Int64ValueType, 0 is returned.
+func (v Value) Int64() int64 {
+	if v.t == Int64ValueType {
+		// Assumes the correct int64 was stored in v.val based on type.
+		return int64(v.val) // nolint: gosec
+	}
+	return 0
+}
+
+// Float64 returns the value of v as a float64. If the ValueType of v is not
+// a Float64ValueType, 0 is returned.
+func (v Value) Float64() float64 {
+	if v.t == Float64ValueType {
+		return math.Float64frombits(v.val)
+	}
+	return 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
new file mode 100644
index 000000000..19ec6806f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+// ReuseSlice returns a zeroed view of slice if its capacity is greater than or
+// equal to n. Otherwise, it returns a new []T with capacity equal to n.
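+//
+// Illustrative use (a sketch, not part of the vendored source):
+//
+//	buf := make([]int, 0, 4)
+//	buf = ReuseSlice(buf, 2) // reuses buf's backing array; len(buf) == 2
+//	buf = ReuseSlice(buf, 8) // cap(buf) < 8, so a new slice is allocated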
+func ReuseSlice[T any](slice []T, n int) []T {
+	if cap(slice) >= n {
+		return slice[:n]
+	}
+	return make([]T, n)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 000000000..aba69d654
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,112 @@
+# Experimental Features
+
+The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Cardinality Limit](#cardinality-limit)
+- [Exemplars](#exemplars)
+
+### Cardinality Limit
+
+The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment variable.
+The value must be an integer.
+All other values are ignored.
+
+If the value set is less than or equal to `0`, no limit will be applied.
+
+#### Examples
+
+Set the cardinality limit to 2000.
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=2000
+```
+
+Set an infinite cardinality limit (functionally equivalent to disabling the feature).
+
+```console
+export OTEL_GO_X_CARDINALITY_LIMIT=-1
+```
+
+Disable the cardinality limit.
+
+```console
+unset OTEL_GO_X_CARDINALITY_LIMIT
+```
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are also supported.
+The exemplar filter applies to all measurements made.
+It filters these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
+```
+
+Revert to the default exemplar filter (`"trace_based"`).
+
+```console
+unset OTEL_METRICS_EXEMPLAR_FILTER
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied by a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
new file mode 100644
index 000000000..8cd2f3741
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
+
+import (
+	"os"
+	"strconv"
+	"strings"
+)
+
+var (
+	// Exemplars is an experimental feature flag that defines if exemplars
+	// should be recorded for metric data-points.
+	//
+	// To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
+	// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+	// will also enable this).
+	Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
+		if strings.ToLower(v) == "true" {
+			return v, true
+		}
+		return "", false
+	})
+
+	// CardinalityLimit is an experimental feature flag that defines if
+	// cardinality limits should be applied to the recorded metric data-points.
+	//
+	// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
+	// variable to the integer limit value you want to use.
+	//
+	// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0
+	// will disable the cardinality limits.
+	CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
+		n, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, false
+		}
+		return n, true
+	})
+)
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+	key   string
+	parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+	const envKeyRoot = "OTEL_GO_X_"
+	return Feature[T]{
+		key:   envKeyRoot + suffix,
+		parse: parse,
+	}
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
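+//
+// For example (illustrative only):
+//
+//	if limit, ok := CardinalityLimit.Lookup(); ok {
+//		// OTEL_GO_X_CARDINALITY_LIMIT was set; limit holds the parsed value.
+//	}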
+func (f Feature[T]) Lookup() (v T, ok bool) {
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+	//
+	// > The SDK MUST interpret an empty value of an environment variable the
+	// > same way as when the variable is unset.
+	vRaw := os.Getenv(f.key)
+	if vRaw == "" {
+		return v, ok
+	}
+	return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+	_, ok := f.Lookup()
+	return ok
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
new file mode 100644
index 000000000..e0fd86ca7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -0,0 +1,203 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ManualReader is a simple Reader that allows an application to
+// read metrics on demand.
+type ManualReader struct {
+	sdkProducer  atomic.Value
+	shutdownOnce sync.Once
+
+	mu                sync.Mutex
+	isShutdown        bool
+	externalProducers atomic.Value
+
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+}
+
+// Compile-time check that ManualReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&ManualReader{}: {}}
+
+// NewManualReader returns a Reader which is directly called to collect metrics.
+func NewManualReader(opts ...ManualReaderOption) *ManualReader {
+	cfg := newManualReaderConfig(opts)
+	r := &ManualReader{
+		temporalitySelector: cfg.temporalitySelector,
+		aggregationSelector: cfg.aggregationSelector,
+	}
+	r.externalProducers.Store(cfg.producers)
+	return r
+}
+
+// register stores the sdkProducer which enables the caller
+// to read metrics from the SDK on demand.
+func (mr *ManualReader) register(p sdkProducer) {
+	// Only register once. If producer is already set, do nothing.
+	if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+		msg := "did not register manual reader"
+		global.Error(errDuplicateRegister, msg)
+	}
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
+	return mr.temporalitySelector(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+	return mr.aggregationSelector(kind)
+}
+
+// Shutdown closes any connections and frees any resources used by the reader.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Shutdown(context.Context) error {
+	err := ErrReaderShutdown
+	mr.shutdownOnce.Do(func() {
+		// Any future call to Collect will now return ErrReaderShutdown.
+		mr.sdkProducer.Store(produceHolder{
+			produce: shutdownProducer{}.produce,
+		})
+		mr.mu.Lock()
+		defer mr.mu.Unlock()
+		mr.isShutdown = true
+		// release references to Producer(s)
+		mr.externalProducers.Store([]Producer{})
+		err = nil
+	})
+	return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm.
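+//
+// A minimal sketch of typical use (illustrative only):
+//
+//	reader := metric.NewManualReader()
+//	provider := metric.NewMeterProvider(metric.WithReader(reader))
+//	// ... create instruments from provider and record measurements ...
+//
+//	var rm metricdata.ResourceMetrics
+//	err := reader.Collect(context.Background(), &rm)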
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	if rm == nil {
+		return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+	}
+	p := mr.sdkProducer.Load()
+	if p == nil {
+		return ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the ManualReader's control so
+		// this should never happen. In the unforeseen case that this does
+		// happen, return an error instead of panicking so a user's code does
+		// not halt in the process.
+		err := fmt.Errorf("manual reader: invalid producer: %T", p)
+		return err
+	}
+
+	err := ph.produce(ctx, rm)
+	if err != nil {
+		return err
+	}
+	var errs []error
+	for _, producer := range mr.externalProducers.Load().([]Producer) {
+		externalMetrics, err := producer.Produce(ctx)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+	}
+
+	global.Debug("ManualReader collection", "Data", rm)
+
+	return unifyErrors(errs)
+}
+
+// MarshalLog returns logging data about the ManualReader.
+func (r *ManualReader) MarshalLog() interface{} {
+	r.mu.Lock()
+	down := r.isShutdown
+	r.mu.Unlock()
+	return struct {
+		Type       string
+		Registered bool
+		Shutdown   bool
+	}{
+		Type:       "ManualReader",
+		Registered: r.sdkProducer.Load() != nil,
+		Shutdown:   down,
+	}
+}
+
+// manualReaderConfig contains configuration options for a ManualReader.
+type manualReaderConfig struct {
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+	producers           []Producer
+}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+	cfg := manualReaderConfig{
+		temporalitySelector: DefaultTemporalitySelector,
+		aggregationSelector: DefaultAggregationSelector,
+	}
+	for _, opt := range opts {
+		cfg = opt.applyManual(cfg)
+	}
+	return cfg
+}
+
+// ManualReaderOption applies a configuration option value to a ManualReader.
+type ManualReaderOption interface {
+	applyManual(manualReaderConfig) manualReaderConfig
+}
+
+// WithTemporalitySelector sets the TemporalitySelector a reader will use to
+// determine the Temporality of an instrument based on its kind. If this
+// option is not used, the reader will use the DefaultTemporalitySelector.
+func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
+	return temporalitySelectorOption{selector: selector}
+}
+
+type temporalitySelectorOption struct {
+	selector func(instrument InstrumentKind) metricdata.Temporality
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
+	mrc.temporalitySelector = t.selector
+	return mrc
+}
+
+// WithAggregationSelector sets the AggregationSelector a reader will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// or the aggregation explicitly passed for a view matching an instrument.
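+//
+// For example (illustrative only), to use exponential-bucket histograms
+// while keeping the defaults for every other instrument kind:
+//
+//	sel := func(ik metric.InstrumentKind) metric.Aggregation {
+//		if ik == metric.InstrumentKindHistogram {
+//			return metric.AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
+//		}
+//		return metric.DefaultAggregationSelector(ik)
+//	}
+//	reader := metric.NewManualReader(metric.WithAggregationSelector(sel))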
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption { + return aggregationSelectorOption{selector: selector} +} + +type aggregationSelectorOption struct { + selector AggregationSelector +} + +// applyManual returns a manualReaderConfig with option applied. +func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.aggregationSelector = t.selector + return c +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go new file mode 100644 index 000000000..2309e5b2b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -0,0 +1,729 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/sdk/instrumentation" + + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" +) + +// ErrInstrumentName indicates the created instrument has an invalid name. +// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter. +var ErrInstrumentName = errors.New("invalid instrument name") + +// meter handles the creation and coordination of all metric instruments. A +// meter represents a single instrumentation scope; all metric telemetry +// produced by an instrumentation scope will use metric instruments from a +// single meter. +type meter struct { + embedded.Meter + + scope instrumentation.Scope + pipes pipelines + + int64Insts *cacheWithErr[instID, *int64Inst] + float64Insts *cacheWithErr[instID, *float64Inst] + int64ObservableInsts *cacheWithErr[instID, int64Observable] + float64ObservableInsts *cacheWithErr[instID, float64Observable] + + int64Resolver resolver[int64] + float64Resolver resolver[float64] +} + +func newMeter(s instrumentation.Scope, p pipelines) *meter { + // viewCache ensures instrument conflicts, including number conflicts, this + // meter is asked to create are logged to the user. + var viewCache cache[string, instID] + + var int64Insts cacheWithErr[instID, *int64Inst] + var float64Insts cacheWithErr[instID, *float64Inst] + var int64ObservableInsts cacheWithErr[instID, int64Observable] + var float64ObservableInsts cacheWithErr[instID, float64Observable] + + return &meter{ + scope: s, + pipes: p, + int64Insts: &int64Insts, + float64Insts: &float64Insts, + int64ObservableInsts: &int64ObservableInsts, + float64ObservableInsts: &float64ObservableInsts, + int64Resolver: newResolver[int64](p, &viewCache), + float64Resolver: newResolver[float64](p, &viewCache), + } +} + +// Compile-time check meter implements metric.Meter. +var _ metric.Meter = (*meter)(nil) + +// Int64Counter returns a new instrument identified by name and configured with +// options. The instrument is used to synchronously record increasing int64 +// measurements during a computational operation. +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + cfg := metric.NewInt64CounterConfig(options...) 
+	const kind = InstrumentKindCounter
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// int64 measurements during a computational operation.
+func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	cfg := metric.NewInt64UpDownCounterConfig(options...)
+	const kind = InstrumentKindUpDownCounter
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of int64 measurements during a computational operation.
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	cfg := metric.NewInt64HistogramConfig(options...)
+	p := int64InstProvider{m}
+	i, err := p.lookupHistogram(name, cfg)
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Int64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous int64 measurements during a computational operation.
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+	cfg := metric.NewInt64GaugeConfig(options...)
+	const kind = InstrumentKindGauge
+	p := int64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// int64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
+	key := instID{
+		Name:        id.Name,
+		Description: id.Description,
+		Unit:        id.Unit,
+		Kind:        id.Kind,
+	}
+	if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+		warnRepeatedObservableCallbacks(id)
+	}
+	return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
+		inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+		for _, insert := range m.int64Resolver.inserters {
+			// Connect the measure functions for instruments in this pipeline with the
+			// callbacks for this pipeline.
+			in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+			if err != nil {
+				return inst, err
+			}
+			// Drop aggregation
+			if len(in) == 0 {
+				inst.dropAggregation = true
+				continue
+			}
+			inst.appendMeasures(in)
+			for _, cback := range callbacks {
+				inst := int64Observer{measures: in}
+				fn := cback
+				insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+			}
+		}
+		return inst, validateInstrumentName(id.Name)
+	})
+}
+
+// Int64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
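+//
+// A sketch of typical use (illustrative only; requestsServed is a
+// hypothetical atomic.Int64 the application increments per request):
+//
+//	_, err := meter.Int64ObservableCounter("app.requests.served",
+//		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+//			o.Observe(requestsServed.Load())
+//			return nil
+//		}))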
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	cfg := metric.NewInt64ObservableCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableCounter,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableUpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// int64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableUpDownCounter,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	cfg := metric.NewInt64ObservableGaugeConfig(options...)
+	id := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindObservableGauge,
+		Scope:       m.scope,
+	}
+	return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64Counter returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record increasing
+// float64 measurements during a computational operation.
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	cfg := metric.NewFloat64CounterConfig(options...)
+	const kind = InstrumentKindCounter
+	p := float64InstProvider{m}
+	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	if err != nil {
+		return i, err
+	}
+
+	return i, validateInstrumentName(name)
+}
+
+// Float64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
The instrument is used to synchronously record
+// float64 measurements during a computational operation.
+func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of float64 measurements during a computational operation.
+func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ p := float64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous float64 measurements during a computational operation.
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ cfg := metric.NewFloat64GaugeConfig(options...)
+ const kind = InstrumentKindGauge
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+ inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.float64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+ for _, cback := range callbacks {
+ inst := float64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
+// Float64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing float64 measurements once per a measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + cfg := metric.NewFloat64ObservableCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableCounter, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64ObservableUpDownCounter returns a new instrument identified by name +// and configured with options. The instrument is used to asynchronously record +// float64 measurements once per a measurement collection cycle. Only the +// measurements recorded during the collection cycle are exported. +// +// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableUpDownCounter, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +// Float64ObservableGauge returns a new instrument identified by name and +// configured with options. The instrument is used to asynchronously record +// instantaneous float64 measurements once per a measurement collection cycle. +// Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindObservableGauge, + Scope: m.scope, + } + return m.float64ObservableInstrument(id, cfg.Callbacks()) +} + +func validateInstrumentName(name string) error { + if len(name) == 0 { + return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) + } + if len(name) > 255 { + return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name) + } + if !isAlpha([]rune(name)[0]) { + return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name) + } + if len(name) == 1 { + return nil + } + for _, c := range name[1:] { + if !isAlphanumeric(c) && c != '_' && c != '.' 
&& c != '-' && c != '/' {
+ return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+ }
+ }
+ return nil
+}
+
+func isAlpha(c rune) bool {
+ return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+func isAlphanumeric(c rune) bool {
+ return isAlpha(c) || ('0' <= c && c <= '9')
+}
+
+func warnRepeatedObservableCallbacks(id Instrument) {
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ global.Warn("Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+ "instrument", inst,
+ )
+}
+
+// RegisterCallback registers f to be called each collection cycle so it will
+// make observations for insts during those cycles.
+//
+// The only instruments f can make observations for are insts. All other
+// observations will be dropped and an error will be logged.
+//
+// Only instruments from this meter can be registered with f; an error is
+// returned if other instruments are provided.
+//
+// Only observations made in the callback will be exported. Unlike synchronous
+// instruments, asynchronous callbacks can "forget" attribute sets that are no
+// longer relevant by omitting the observation during the callback.
+//
+// The returned Registration can be used to unregister f.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ if len(insts) == 0 {
+ // Don't allocate an observer if not needed.
+ return noopRegister{}, nil
+ }
+
+ reg := newObserver()
+ var errs multierror
+ for _, inst := range insts {
+ // Unwrap any global.
+ if u, ok := inst.(interface {
+ Unwrap() metric.Observable
+ }); ok {
+ inst = u.Unwrap()
+ }
+
+ switch o := inst.(type) {
+ case int64Observable:
+ if err := o.registerable(m); err != nil {
+ if !errors.Is(err, errEmptyAgg) {
+ errs.append(err)
+ }
+ continue
+ }
+ reg.registerInt64(o.observablID)
+ case float64Observable:
+ if err := o.registerable(m); err != nil {
+ if !errors.Is(err, errEmptyAgg) {
+ errs.append(err)
+ }
+ continue
+ }
+ reg.registerFloat64(o.observablID)
+ default:
+ // Instrument external to the SDK.
+ return nil, fmt.Errorf("invalid observable: from different implementation")
+ }
+ }
+
+ err := errs.errorOrNil()
+ if reg.len() == 0 {
+ // All insts use drop aggregation or are invalid.
+ return noopRegister{}, err
+ }
+
+ // Some or all instruments were valid.
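+ // Editor's note: wrap the user callback so each collection cycle invokes
+ // it with this meter's observer, which restricts observations to the
+ // instruments registered above.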
+ cback := func(ctx context.Context) error { return f(ctx, reg) } + return m.pipes.registerMultiCallback(cback), err +} + +type observer struct { + embedded.Observer + + float64 map[observablID[float64]]struct{} + int64 map[observablID[int64]]struct{} +} + +func newObserver() observer { + return observer{ + float64: make(map[observablID[float64]]struct{}), + int64: make(map[observablID[int64]]struct{}), + } +} + +func (r observer) len() int { + return len(r.float64) + len(r.int64) +} + +func (r observer) registerFloat64(id observablID[float64]) { + r.float64[id] = struct{}{} +} + +func (r observer) registerInt64(id observablID[int64]) { + r.int64[id] = struct{}{} +} + +var ( + errUnknownObserver = errors.New("unknown observable instrument") + errUnregObserver = errors.New("observable instrument not registered for callback") +) + +func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) { + var oImpl float64Observable + switch conv := o.(type) { + case float64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(float64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.float64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", float64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { + var oImpl int64Observable + switch conv := o.(type) { + case int64Observable: + oImpl = conv + case interface { + Unwrap() metric.Observable + }: + // Unwrap any global. + async := conv.Unwrap() + var ok bool + if oImpl, ok = async.(int64Observable); !ok { + global.Error(errUnknownObserver, "failed to record asynchronous") + return + } + default: + global.Error(errUnknownObserver, "failed to record") + return + } + + if _, registered := r.int64[oImpl.observablID]; !registered { + if !oImpl.dropAggregation { + global.Error(errUnregObserver, "failed to record", + "name", oImpl.name, + "description", oImpl.description, + "unit", oImpl.unit, + "number", fmt.Sprintf("%T", int64(0)), + ) + } + return + } + c := metric.NewObserveConfig(opts) + oImpl.observe(v, c.Attributes()) +} + +type noopRegister struct{ embedded.Registration } + +func (noopRegister) Unregister() error { + return nil +} + +// int64InstProvider provides int64 OpenTelemetry instruments. +type int64InstProvider struct{ *meter } + +func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.int64Resolver.Aggregators(inst) +} + +func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. 
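+ // For example (editor's note), boundaries passed out of ascending order
+ // via metric.WithExplicitBucketBoundaries are invalid; aggError records
+ // why, and the reader's default boundaries apply instead.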
+ boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*int64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &int64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. +func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) { + return p.meter.int64Insts.Lookup(instID{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + }, func() (*int64Inst, error) { + aggs, err := p.histogramAggs(name, cfg) + return &int64Inst{measures: aggs}, err + }) +} + +// float64InstProvider provides float64 OpenTelemetry instruments. +type float64InstProvider struct{ *meter } + +func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) { + inst := Instrument{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + Scope: p.scope, + } + return p.float64Resolver.Aggregators(inst) +} + +func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) { + boundaries := cfg.ExplicitBucketBoundaries() + aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() + if aggError != nil { + // If boundaries are invalid, ignore them. + boundaries = nil + } + inst := Instrument{ + Name: name, + Description: cfg.Description(), + Unit: cfg.Unit(), + Kind: InstrumentKindHistogram, + Scope: p.scope, + } + measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries) + return measures, errors.Join(aggError, err) +} + +// lookup returns the resolved instrumentImpl. +func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) { + return p.meter.float64Insts.Lookup(instID{ + Name: name, + Description: desc, + Unit: u, + Kind: kind, + }, func() (*float64Inst, error) { + aggs, err := p.aggs(kind, name, desc, u) + return &float64Inst{measures: aggs}, err + }) +} + +// lookupHistogram returns the resolved instrumentImpl. 
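+// Repeated creations of an identical histogram (same name, description, and
+// unit) share cached aggregators via the instID-keyed lookup below
+// (editor's note).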
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
+ return p.meter.float64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &float64Inst{measures: aggs}, err
+ })
+}
+
+type int64Observer struct {
+ embedded.Int64Observer
+ measures[int64]
+}
+
+func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
+ c := metric.NewObserveConfig(opts)
+ o.observe(val, c.Attributes())
+}
+
+type float64Observer struct {
+ embedded.Float64Observer
+ measures[float64]
+}
+
+func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
+ c := metric.NewObserveConfig(opts)
+ o.observe(val, c.Attributes())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
new file mode 100644
index 000000000..d1390df1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
@@ -0,0 +1,3 @@
+# SDK Metric data
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
new file mode 100644
index 000000000..d32cfc67d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+ // Resource represents the entity that collected the metrics.
+ Resource *resource.Resource
+ // ScopeMetrics are the collection of metrics with unique Scopes.
+ ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of metrics produced by a Meter.
+type ScopeMetrics struct {
+ // Scope is the Scope that the Meter was created with.
+ Scope instrumentation.Scope
+ // Metrics are a list of aggregations created by the Meter.
+ Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
+type Metrics struct {
+ // Name is the name of the Instrument that created this data.
+ Name string
+ // Description is the description of the Instrument, which can be used in documentation.
+ Description string
+ // Unit is the unit in which the Instrument reports.
+ Unit string
+ // Data is the aggregated data from an Instrument.
+ Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram, or Summary.
+type Aggregation interface {
+ privateAggregation()
+}
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
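+//
+// For example (editor's illustrative sketch), a cumulative, monotonic Sum as
+// it might appear in exported data or a test expectation:
+//
+//	want := metricdata.Sum[int64]{
+//		Temporality: metricdata.CumulativeTemporality,
+//		IsMonotonic: true,
+//		DataPoints:  []metricdata.DataPoint[int64]{{Value: 42}},
+//	}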
+type Sum[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []DataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality + // IsMonotonic represents if this aggregation only increases or decreases. + IsMonotonic bool +} + +func (Sum[N]) privateAggregation() {} + +// DataPoint is a single data point in a timeseries. +type DataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. (optional) + StartTime time.Time `json:",omitempty"` + // Time is the time when the timeseries was recorded. (optional) + Time time.Time `json:",omitempty"` + // Value is the value of this data point. + Value N + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// Histogram represents the histogram of all measurements of values from an instrument. +type Histogram[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // Attributes. + DataPoints []HistogramDataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality +} + +func (Histogram[N]) privateAggregation() {} + +// HistogramDataPoint is a single histogram data point in a timeseries. +type HistogramDataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. + Time time.Time + + // Count is the number of updates this histogram has been calculated with. + Count uint64 + // Bounds are the upper bounds of the buckets of the histogram. Because the + // last boundary is +infinity this one is implied. + Bounds []float64 + // BucketCounts is the count of each of the buckets. + BucketCounts []uint64 + + // Min is the minimum value recorded. (optional) + Min Extrema[N] + // Max is the maximum value recorded. (optional) + Max Extrema[N] + // Sum is the sum of the values recorded. + Sum N + + // Exemplars is the sampled Exemplars collected during the timeseries. + Exemplars []Exemplar[N] `json:",omitempty"` +} + +// ExponentialHistogram represents the histogram of all measurements of values from an instrument. +type ExponentialHistogram[N int64 | float64] struct { + // DataPoints are the individual aggregated measurements with unique + // attributes. + DataPoints []ExponentialHistogramDataPoint[N] + // Temporality describes if the aggregation is reported as the change from the + // last report time, or the cumulative changes since a fixed start time. + Temporality Temporality +} + +func (ExponentialHistogram[N]) privateAggregation() {} + +// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries. +type ExponentialHistogramDataPoint[N int64 | float64] struct { + // Attributes is the set of key value pairs that uniquely identify the + // timeseries. + Attributes attribute.Set + // StartTime is when the timeseries was started. + StartTime time.Time + // Time is the time when the timeseries was recorded. 
+ Time time.Time
+
+ // Count is the number of updates this histogram has been calculated with.
+ Count uint64
+ // Min is the minimum value recorded. (optional)
+ Min Extrema[N]
+ // Max is the maximum value recorded. (optional)
+ Max Extrema[N]
+ // Sum is the sum of the values recorded.
+ Sum N
+
+ // Scale describes the resolution of the histogram. Boundaries are
+ // located at powers of the base, where:
+ //
+ // base = 2 ^ (2 ^ -Scale)
+ Scale int32
+ // ZeroCount is the number of values whose absolute value
+ // is less than or equal to [ZeroThreshold].
+ // When ZeroThreshold is 0, this is the number of values that
+ // cannot be expressed using the standard exponential formula
+ // as well as values that have been rounded to zero.
+ // ZeroCount represents the special zero count bucket.
+ ZeroCount uint64
+
+ // PositiveBucket is the range of positive value bucket counts.
+ PositiveBucket ExponentialBucket
+ // NegativeBucket is the range of negative value bucket counts.
+ NegativeBucket ExponentialBucket
+
+ // ZeroThreshold is the width of the zero region, where the zero region is
+ // defined as the closed interval [-ZeroThreshold, ZeroThreshold].
+ ZeroThreshold float64
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialBucket is a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialBucket struct {
+ // Offset is the bucket index of the first entry in the Counts slice.
+ Offset int32
+ // Counts is a slice where Counts[i] carries the count of the bucket at
+ // index (Offset+i). Counts[i] is the count of values greater than
+ // base^(Offset+i) and less than or equal to base^(Offset+i+1).
+ Counts []uint64
+}
+
+// Extrema is the minimum or maximum value of a dataset.
+type Extrema[N int64 | float64] struct {
+ value N
+ valid bool
+}
+
+// MarshalText converts the Extrema value to text.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+ if !e.valid {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to a JSON number.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+ return e.MarshalText()
+}
+
+// NewExtrema returns an Extrema set to v.
+func NewExtrema[N int64 | float64](v N) Extrema[N] {
+ return Extrema[N]{value: v, valid: true}
+}
+
+// Value returns the Extrema value and true if the Extrema is defined.
+// Otherwise, if the Extrema is its zero-value, defined will be false.
+func (e Extrema[N]) Value() (v N, defined bool) {
+ return e.value, e.valid
+}
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar[N int64 | float64] struct {
+ // FilteredAttributes are the attributes recorded with the measurement but
+ // filtered out of the timeseries' aggregated data.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was recorded.
+ Time time.Time
+ // Value is the measured value.
+ Value N
+ // SpanID is the ID of the span that was active during the measurement. If
+ // no span was active or the span was not sampled this will be empty.
+ SpanID []byte `json:",omitempty"`
+ // TraceID is the ID of the trace the active span belonged to during the
+ // measurement. If no span was active or the span was not sampled this will
+ // be empty.
+ TraceID []byte `json:",omitempty"`
+}
+
+// Summary metric data are used to convey quantile summaries, a Prometheus
+// data type (see: https://prometheus.io/docs/concepts/metric_types/#summary).
+//
+// These data points cannot always be merged in a meaningful way. The Summary
+// type is only used by bridges from other metrics libraries, and cannot be
+// produced using OpenTelemetry instrumentation.
+type Summary struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // attributes.
+ DataPoints []SummaryDataPoint
+}
+
+func (Summary) privateAggregation() {}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this summary has been calculated with.
+ Count uint64
+
+ // Sum is the sum of the values recorded.
+ Sum float64
+
+ // (Optional) list of values at different quantiles of the distribution calculated
+ // from the current snapshot. The quantiles must be strictly increasing.
+ QuantileValues []QuantileValue
+}
+
+// QuantileValue is the value at a given quantile of a summary.
+type QuantileValue struct {
+ // Quantile is the quantile of this value.
+ //
+ // Must be in the interval [0.0, 1.0].
+ Quantile float64
+
+ // Value is the value at the given quantile of a summary.
+ //
+ // Quantile values must NOT be negative.
+ Value float64
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
new file mode 100644
index 000000000..187713dad
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=Temporality
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+// Temporality defines the window that an aggregation was calculated over.
+type Temporality uint8
+
+const (
+ // undefinedTemporality represents an unset Temporality.
+ //nolint:deadcode,unused,varcheck
+ undefinedTemporality Temporality = iota
+
+ // CumulativeTemporality defines a measurement interval that continues to
+ // expand forward in time from a starting point. New measurements are
+ // added to all previous measurements since a start time.
+ CumulativeTemporality
+
+ // DeltaTemporality defines a measurement interval that resets each cycle.
+ // Measurements from one cycle are recorded independently; measurements
+ // from other cycles do not affect them.
+ DeltaTemporality
+)
+
+// MarshalText returns the byte encoding of t.
+func (t Temporality) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
new file mode 100644
index 000000000..4da833cdc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+ +package metricdata + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[undefinedTemporality-0] + _ = x[CumulativeTemporality-1] + _ = x[DeltaTemporality-2] +} + +const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality" + +var _Temporality_index = [...]uint8{0, 20, 41, 57} + +func (i Temporality) String() string { + if i >= Temporality(len(_Temporality_index)-1) { + return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go new file mode 100644 index 000000000..67ee1b11a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -0,0 +1,370 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Default periodic reader timing. +const ( + defaultTimeout = time.Millisecond * 30000 + defaultInterval = time.Millisecond * 60000 +) + +// periodicReaderConfig contains configuration options for a PeriodicReader. +type periodicReaderConfig struct { + interval time.Duration + timeout time.Duration + producers []Producer +} + +// newPeriodicReaderConfig returns a periodicReaderConfig configured with +// options. +func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { + c := periodicReaderConfig{ + interval: envDuration(envInterval, defaultInterval), + timeout: envDuration(envTimeout, defaultTimeout), + } + for _, o := range options { + c = o.applyPeriodic(c) + } + return c +} + +// PeriodicReaderOption applies a configuration option value to a PeriodicReader. +type PeriodicReaderOption interface { + applyPeriodic(periodicReaderConfig) periodicReaderConfig +} + +// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig. +type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig + +// applyPeriodic returns a periodicReaderConfig with option(s) applied. +func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig { + return o(conf) +} + +// WithTimeout configures the time a PeriodicReader waits for an export to +// complete before canceling it. This includes an export which occurs as part +// of Shutdown or ForceFlush if the user passed context does not have a +// deadline. If the user passed context does have a deadline, it will be used +// instead. +// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_TIMEOUT environment variable. +// +// If this option is not used or d is less than or equal to zero, 30 seconds +// is used as the default. +func WithTimeout(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.timeout = d + return conf + }) +} + +// WithInterval configures the intervening time between exports for a +// PeriodicReader. 
+// +// This option overrides any value set for the +// OTEL_METRIC_EXPORT_INTERVAL environment variable. +// +// If this option is not used or d is less than or equal to zero, 60 seconds +// is used as the default. +func WithInterval(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.interval = d + return conf + }) +} + +// NewPeriodicReader returns a Reader that collects and exports metric data to +// the exporter at a defined interval. By default, the returned Reader will +// collect and export data every 60 seconds, and will cancel any attempts that +// exceed 30 seconds, collect and export combined. The collect and export time +// are not counted towards the interval between attempts. +// +// The Collect method of the returned Reader continues to gather and return +// metric data to the user. It will not automatically send that data to the +// exporter. That is left to the user to accomplish. +func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { + conf := newPeriodicReaderConfig(options) + ctx, cancel := context.WithCancel(context.Background()) + r := &PeriodicReader{ + interval: conf.interval, + timeout: conf.timeout, + exporter: exporter, + flushCh: make(chan chan error), + cancel: cancel, + done: make(chan struct{}), + rmPool: sync.Pool{ + New: func() interface{} { + return &metricdata.ResourceMetrics{} + }, + }, + } + r.externalProducers.Store(conf.producers) + + go func() { + defer func() { close(r.done) }() + r.run(ctx, conf.interval) + }() + + return r +} + +// PeriodicReader is a Reader that continuously collects and exports metric +// data at a set interval. +type PeriodicReader struct { + sdkProducer atomic.Value + + mu sync.Mutex + isShutdown bool + externalProducers atomic.Value + + interval time.Duration + timeout time.Duration + exporter Exporter + flushCh chan chan error + + done chan struct{} + cancel context.CancelFunc + shutdownOnce sync.Once + + rmPool sync.Pool +} + +// Compile time check the periodicReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&PeriodicReader{}: {}} + +// newTicker allows testing override. +var newTicker = time.NewTicker + +// run continuously collects and exports metric data at the specified +// interval. This will run until ctx is canceled or times out. +func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) { + ticker := newTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := r.collectAndExport(ctx) + if err != nil { + otel.Handle(err) + } + case errCh := <-r.flushCh: + errCh <- r.collectAndExport(ctx) + ticker.Reset(interval) + case <-ctx.Done(): + return + } + } +} + +// register registers p as the producer of this reader. +func (r *PeriodicReader) register(p sdkProducer) { + // Only register once. If producer is already set, do nothing. + if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register periodic reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. +func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality { + return r.exporter.Temporality(kind) +} + +// aggregation returns what Aggregation to use for kind. +func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. 
+ return r.exporter.Aggregation(kind)
+}
+
+// collectAndExport gathers all metric data related to the periodicReader r from
+// the SDK and exports it with r's exporter.
+func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(ctx, r.timeout)
+ defer cancel()
+
+ // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect.
+ rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
+ err := r.Collect(ctx, rm)
+ if err == nil {
+ err = r.export(ctx, rm)
+ }
+ r.rmPool.Put(rm)
+ return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm. The metric
+// data is not exported to the configured exporter; it is left to the caller to
+// handle that if desired.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if rm == nil {
+ return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
+ }
+ // TODO (#3047): When collect is updated to accept output as param, pass rm.
+ return r.collect(ctx, r.sdkProducer.Load(), rm)
+}
+
+// collect unwraps p as a produceHolder and returns its produce results.
+func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
+ if p == nil {
+ return ErrReaderNotRegistered
+ }
+
+ ph, ok := p.(produceHolder)
+ if !ok {
+ // The atomic.Value is entirely in the periodicReader's control so
+ // this should never happen. In the unforeseen case that this does
+ // happen, return an error instead of panicking so a user's code does
+ // not halt in the process.
+ err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+ return err
+ }
+
+ err := ph.produce(ctx, rm)
+ if err != nil {
+ return err
+ }
+ var errs []error
+ for _, producer := range r.externalProducers.Load().([]Producer) {
+ externalMetrics, err := producer.Produce(ctx)
+ if err != nil {
+ errs = append(errs, err)
+ }
+ rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+ }
+
+ global.Debug("PeriodicReader collection", "Data", rm)
+
+ return unifyErrors(errs)
+}
+
+// export exports metric data m using r's exporter.
+func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
+ return r.exporter.Export(ctx, m)
+}
+
+// ForceFlush flushes pending telemetry.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
+ // Prioritize the ctx timeout if it is set.
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, r.timeout)
+ defer cancel()
+ }
+
+ errCh := make(chan error, 1)
+ select {
+ case r.flushCh <- errCh:
+ select {
+ case err := <-errCh:
+ if err != nil {
+ return err
+ }
+ close(errCh)
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ case <-r.done:
+ return ErrReaderShutdown
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return r.exporter.ForceFlush(ctx)
+}
+
+// Shutdown flushes pending telemetry and then stops the export pipeline.
+//
+// This method is safe to call concurrently.
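+//
+// A typical lifecycle (editor's illustrative sketch, assuming exp is a
+// configured Exporter):
+//
+//	reader := metric.NewPeriodicReader(exp, metric.WithInterval(15*time.Second))
+//	provider := metric.NewMeterProvider(metric.WithReader(reader))
+//	defer func() { _ = provider.Shutdown(context.Background()) }()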
+func (r *PeriodicReader) Shutdown(ctx context.Context) error {
+ err := ErrReaderShutdown
+ r.shutdownOnce.Do(func() {
+ // Prioritize the ctx timeout if it is set.
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, r.timeout)
+ defer cancel()
+ }
+
+ // Stop the run loop.
+ r.cancel()
+ <-r.done
+
+ // Any future call to Collect will now return ErrReaderShutdown.
+ ph := r.sdkProducer.Swap(produceHolder{
+ produce: shutdownProducer{}.produce,
+ })
+
+ if ph != nil { // Reader was registered.
+ // Flush pending telemetry.
+ m := r.rmPool.Get().(*metricdata.ResourceMetrics)
+ err = r.collect(ctx, ph, m)
+ if err == nil {
+ err = r.export(ctx, m)
+ }
+ r.rmPool.Put(m)
+ }
+
+ sErr := r.exporter.Shutdown(ctx)
+ if err == nil || errors.Is(err, ErrReaderShutdown) {
+ err = sErr
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.isShutdown = true
+ // release references to Producer(s)
+ r.externalProducers.Store([]Producer{})
+ })
+ return err
+}
+
+// MarshalLog returns logging data about the PeriodicReader.
+func (r *PeriodicReader) MarshalLog() interface{} {
+ r.mu.Lock()
+ down := r.isShutdown
+ r.mu.Unlock()
+ return struct {
+ Type string
+ Exporter Exporter
+ Registered bool
+ Shutdown bool
+ Interval time.Duration
+ Timeout time.Duration
+ }{
+ Type: "PeriodicReader",
+ Exporter: r.exporter,
+ Registered: r.sdkProducer.Load() != nil,
+ Shutdown: down,
+ Interval: r.interval,
+ Timeout: r.timeout,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
new file mode 100644
index 000000000..823bf2fe3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -0,0 +1,655 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "container/list"
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+var (
+ errCreatingAggregators = errors.New("could not create all aggregators")
+ errIncompatibleAggregation = errors.New("incompatible aggregation")
+ errUnknownAggregation = errors.New("unrecognized aggregation")
+)
+
+// instrumentSync is a synchronization point between a pipeline and an
+// instrument's aggregate function.
+type instrumentSync struct {
+ name string
+ description string
+ unit string
+ compAgg aggregate.ComputeAggregation
+}
+
+func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
+ if res == nil {
+ res = resource.Empty()
+ }
+ return &pipeline{
+ resource: res,
+ reader: reader,
+ views: views,
+ // aggregations is lazy allocated when needed.
+ }
+}
+
+// pipeline connects all of the instruments created by a meter provider to a Reader.
+// This is the object that is registered with `Reader.register()` when a meter
+// provider is created.
+//
+// As instruments are created, they should be checked against the views of the
+// Reader, and if a view matches, each resulting aggregate function should be
+// added to the pipeline.
+type pipeline struct {
+ resource *resource.Resource
+
+ reader Reader
+ views []View
+
+ sync.Mutex
+ aggregations map[instrumentation.Scope][]instrumentSync
+ callbacks []func(context.Context) error
+ multiCallbacks list.List
+}
+
+// addSync adds the instrumentSync to pipeline p with scope. This method is not
+// idempotent. Duplicate calls will result in duplicate additions; it is the
+// caller's responsibility to ensure this is called with unique values.
+func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
+ p.Lock()
+ defer p.Unlock()
+ if p.aggregations == nil {
+ p.aggregations = map[instrumentation.Scope][]instrumentSync{
+ scope: {iSync},
+ }
+ return
+ }
+ p.aggregations[scope] = append(p.aggregations[scope], iSync)
+}
+
+type multiCallback func(context.Context) error
+
+// addMultiCallback registers a multi-instrument callback to be run when
+// `produce()` is called.
+func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
+ p.Lock()
+ defer p.Unlock()
+ e := p.multiCallbacks.PushBack(c)
+ return func() {
+ p.Lock()
+ p.multiCallbacks.Remove(e)
+ p.Unlock()
+ }
+}
+
+// produce returns aggregated metrics from a single collection.
+//
+// This method is safe to call concurrently.
+func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ p.Lock()
+ defer p.Unlock()
+
+ var errs multierror
+ for _, c := range p.callbacks {
+ // TODO make the callbacks parallel. ( #3034 )
+ if err := c(ctx); err != nil {
+ errs.append(err)
+ }
+ if err := ctx.Err(); err != nil {
+ rm.Resource = nil
+ rm.ScopeMetrics = rm.ScopeMetrics[:0]
+ return err
+ }
+ }
+ for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
+ // TODO make the callbacks parallel. ( #3034 )
+ f := e.Value.(multiCallback)
+ if err := f(ctx); err != nil {
+ errs.append(err)
+ }
+ if err := ctx.Err(); err != nil {
+ // This means the context expired before we finished running callbacks.
+ rm.Resource = nil
+ rm.ScopeMetrics = rm.ScopeMetrics[:0]
+ return err
+ }
+ }
+
+ rm.Resource = p.resource
+ rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
+
+ i := 0
+ for scope, instruments := range p.aggregations {
+ rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
+ j := 0
+ for _, inst := range instruments {
+ data := rm.ScopeMetrics[i].Metrics[j].Data
+ if n := inst.compAgg(&data); n > 0 {
+ rm.ScopeMetrics[i].Metrics[j].Name = inst.name
+ rm.ScopeMetrics[i].Metrics[j].Description = inst.description
+ rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
+ rm.ScopeMetrics[i].Metrics[j].Data = data
+ j++
+ }
+ }
+ rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
+ if len(rm.ScopeMetrics[i].Metrics) > 0 {
+ rm.ScopeMetrics[i].Scope = scope
+ i++
+ }
+ }
+
+ rm.ScopeMetrics = rm.ScopeMetrics[:i]
+
+ return errs.errorOrNil()
+}
+
+// inserter facilitates inserting new instruments from a single scope into a
+// pipeline.
+type inserter[N int64 | float64] struct {
+ // aggregators is a cache that holds aggregate function inputs whose
+ // outputs have been inserted into the underlying reader pipeline. This
+ // cache ensures no duplicate aggregate functions are inserted into the
+ // reader pipeline, and if a new request during instrument creation asks
+ // for the same aggregate function input, the same instance is returned.
+ aggregators *cache[instID, aggVal[N]]
+
+ // views is a cache that holds instrument identifiers for all the
+ // instruments a Meter has created; it is provided by the Meter that owns
+ // this inserter. This cache ensures that during the creation of instruments
+ // with the same name but different options (e.g. description, unit) a
+ // warning message is logged.
+ views *cache[string, instID]
+
+ pipeline *pipeline
+}
+
+func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
+ if vc == nil {
+ vc = &cache[string, instID]{}
+ }
+ return &inserter[N]{
+ aggregators: &cache[instID, aggVal[N]]{},
+ views: vc,
+ pipeline: p,
+ }
+}
+
+// Instrument inserts the instrument inst into a pipeline. All
+// views the pipeline contains are matched against, and any matching view that
+// creates a unique aggregate function will have its output inserted into the
+// pipeline and its input included in the returned slice.
+//
+// The returned aggregate function inputs are ensured to be deduplicated and
+// unique. If another view in another pipeline that is cached by this
+// inserter's cache has already inserted the same aggregate function for the
+// same instrument, that function's input instance is returned.
+//
+// If another instrument has already been inserted by this inserter, or any
+// other using the same cache, and it conflicts with the instrument being
+// inserted in this call, an aggregate function input matching the arguments
+// will still be returned but an Info level log message will also be logged to
+// the OTel global logger.
+//
+// If the passed instrument would result in an incompatible aggregate function,
+// an error is returned and that aggregate function output is not inserted nor
+// is its input returned.
+//
+// If an instrument is determined to use a Drop aggregation, that instrument is
+// not inserted nor returned.
+func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
+ var (
+ matched bool
+ measures []aggregate.Measure[N]
+ )
+
+ errs := &multierror{wrapped: errCreatingAggregators}
+ seen := make(map[uint64]struct{})
+ for _, v := range i.pipeline.views {
+ stream, match := v(inst)
+ if !match {
+ continue
+ }
+ matched = true
+ in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+ if err != nil {
+ errs.append(err)
+ }
+ if in == nil { // Drop aggregation.
+ continue
+ }
+ if _, ok := seen[id]; ok {
+ // This aggregate function has already been added.
+ continue
+ }
+ seen[id] = struct{}{}
+ measures = append(measures, in)
+ }
+
+ if matched {
+ return measures, errs.errorOrNil()
+ }
+
+ // Apply the implicit default view if no explicit view matched.
+ stream := Stream{
+ Name: inst.Name,
+ Description: inst.Description,
+ Unit: inst.Unit,
+ }
+ in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+ if err != nil {
+ errs.append(err)
+ }
+ if in != nil {
+ // Ensured to have not seen given matched was false.
+ measures = append(measures, in)
+ }
+ return measures, errs.errorOrNil()
+}
+
+// addCallback registers a single instrument callback to be run when
+// `produce()` is called.
+func (i *inserter[N]) addCallback(cback func(context.Context) error) {
+ i.pipeline.Lock()
+ defer i.pipeline.Unlock()
+ i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
+}
+
+var aggIDCount uint64
+
+// aggVal is the cached value in an aggregators cache.
+type aggVal[N int64 | float64] struct {
+ ID uint64
+ Measure aggregate.Measure[N]
+ Err error
+}
+
+// readerDefaultAggregation returns the default aggregation for the instrument
+// kind based on the reader's aggregation preferences. This is used unless the
+// aggregation is overridden with a view.
+func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
+ aggregation := i.pipeline.reader.aggregation(kind)
+ switch aggregation.(type) {
+ case nil, AggregationDefault:
+ // If the reader returns default or nil use the default selector.
+ aggregation = DefaultAggregationSelector(kind)
+ default:
+ // Deep copy and validate before using.
+ aggregation = aggregation.copy()
+ if err := aggregation.err(); err != nil {
+ orig := aggregation
+ aggregation = DefaultAggregationSelector(kind)
+ global.Error(
+ err, "using default aggregation instead",
+ "aggregation", orig,
+ "replacement", aggregation,
+ )
+ }
+ }
+ return aggregation
+}
+
+// cachedAggregator returns the appropriate aggregate input and output
+// functions for an instrument configuration. If the exact instrument has been
+// created within the inst.Scope, those aggregate function instances will be
+// returned. Otherwise, new computed aggregate functions will be cached and
+// returned.
+//
+// If the instrument configuration conflicts with an instrument that has
+// already been created (e.g. description, unit, data type) a warning will be
+// logged at the "Info" level with the global OTel logger. Valid new aggregate
+// functions for the instrument configuration will still be returned without an
+// error.
+//
+// If the instrument defines an unknown or incompatible aggregation, an error
+// is returned.
+func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
+ switch stream.Aggregation.(type) {
+ case nil:
+ // The aggregation was not overridden with a view. Use the aggregation
+ // provided by the reader.
+ stream.Aggregation = readerAggregation
+ case AggregationDefault:
+ // The view explicitly requested the default aggregation.
+ stream.Aggregation = DefaultAggregationSelector(kind)
+ }
+
+ if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
+ return nil, 0, fmt.Errorf(
+ "creating aggregator with instrumentKind: %d, aggregation %v: %w",
+ kind, stream.Aggregation, err,
+ )
+ }
+
+ id := i.instID(kind, stream)
+ // If there is a conflict, the specification says the view should
+ // still be applied and a warning should be logged.
+ i.logConflict(id)
+
+ // If there are requests for the same instrument with different name
+ // casing, the first-seen needs to be returned. Use a normalized ID for the
+ // cache lookup to ensure the correct comparison.
+ normID := id.normalize()
+ cv := i.aggregators.Lookup(normID, func() aggVal[N] {
+ b := aggregate.Builder[N]{
+ Temporality: i.pipeline.reader.temporality(kind),
+ ReservoirFunc: reservoirFunc[N](stream.Aggregation),
+ }
+ b.Filter = stream.AttributeFilter
+ // A value less than or equal to zero will disable the aggregation
+ // limits for the builder (and all the created aggregates).
+ // CardinalityLimit.Lookup returns 0 by default if unset (or
+ // unrecognized input). Use that value directly.
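+ // (Editor's note: at the time of this vendoring, the experimental limit
+ // appears to be read from the OTEL_GO_X_CARDINALITY_LIMIT environment
+ // variable; see internal/x.)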
+ b.AggregationLimit, _ = x.CardinalityLimit.Lookup() + + in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) + if err != nil { + return aggVal[N]{0, nil, err} + } + if in == nil { // Drop aggregator. + return aggVal[N]{0, nil, nil} + } + i.pipeline.addSync(scope, instrumentSync{ + // Use the first-seen name casing for this and all subsequent + // requests of this instrument. + name: stream.Name, + description: stream.Description, + unit: stream.Unit, + compAgg: out, + }) + id := atomic.AddUint64(&aggIDCount, 1) + return aggVal[N]{id, in, err} + }) + return cv.Measure, cv.ID, cv.Err +} + +// logConflict validates if an instrument with the same case-insensitive name +// as id has already been created. If that instrument conflicts with id, a +// warning is logged. +func (i *inserter[N]) logConflict(id instID) { + // The API specification defines names as case-insensitive. If there is a + // different casing of a name it needs to be a conflict. + name := id.normalize().Name + existing := i.views.Lookup(name, func() instID { return id }) + if id == existing { + return + } + + const msg = "duplicate metric stream definitions" + args := []interface{}{ + "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), + "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), + "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), + "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit), + "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number), + } + + // The specification recommends logging a suggested view to resolve + // conflicts if possible. + // + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration + if id.Unit != existing.Unit || id.Number != existing.Number { + // There is no view resolution for these, don't make a suggestion. + global.Warn(msg, args...) + return + } + + var stream string + if id.Name != existing.Name || id.Kind != existing.Kind { + stream = `Stream{Name: "{{NEW_NAME}}"}` + } else if id.Description != existing.Description { + stream = fmt.Sprintf("Stream{Description: %q}", existing.Description) + } + + inst := fmt.Sprintf( + "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", + id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, + ) + args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream)) + + global.Warn(msg, args...) +} + +func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { + var zero N + return instID{ + Name: stream.Name, + Description: stream.Description, + Unit: stream.Unit, + Kind: kind, + Number: fmt.Sprintf("%T", zero), + } +} + +// aggregateFunc returns new aggregate functions matching agg, kind, and +// monotonic. If the agg is unknown or temporality is invalid, an error is +// returned. +func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { + switch a := agg.(type) { + case AggregationDefault: + return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind) + case AggregationDrop: + // Return nil in and out to signify the drop aggregator. 
+ case AggregationLastValue: + switch kind { + case InstrumentKindGauge: + meas, comp = b.LastValue() + case InstrumentKindObservableGauge: + meas, comp = b.PrecomputedLastValue() + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter: + meas, comp = b.PrecomputedSum(true) + case InstrumentKindObservableUpDownCounter: + meas, comp = b.PrecomputedSum(false) + case InstrumentKindCounter, InstrumentKindHistogram: + meas, comp = b.Sum(true) + default: + // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and + // instrumentKindUndefined or other invalid instrument kinds. + meas, comp = b.Sum(false) + } + case AggregationExplicitBucketHistogram: + var noSum bool + switch kind { + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: + // The sum should not be collected for any instrument that can make + // negative measurements: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations + noSum = true + } + meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum) + case AggregationBase2ExponentialHistogram: + var noSum bool + switch kind { + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: + // The sum should not be collected for any instrument that can make + // negative measurements: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations + noSum = true + } + meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum) + + default: + err = errUnknownAggregation + } + + return meas, comp, err +} + +// isAggregatorCompatible checks if the aggregation can be used by the instrument. +// Current compatibility: +// +// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram | +// |--------------------------|------|-----------|-----|-----------|-----------------------| +// | Counter | ✓ | | ✓ | ✓ | ✓ | +// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +// | Histogram | ✓ | | ✓ | ✓ | ✓ | +// | Gauge | ✓ | ✓ | | ✓ | ✓ | +// | Observable Counter | ✓ | | ✓ | ✓ | ✓ | +// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | +// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |. 
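+//
+// For example (editor's illustrative sketch), a View selecting LastValue for
+// a Counter would be rejected with errIncompatibleAggregation when a matching
+// instrument is created:
+//
+//	view := metric.NewView(
+//		metric.Instrument{Name: "requests", Kind: metric.InstrumentKindCounter},
+//		metric.Stream{Aggregation: metric.AggregationLastValue{}},
+//	)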
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { + switch agg.(type) { + case AggregationDefault: + return nil + case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram: + switch kind { + case InstrumentKindCounter, + InstrumentKindUpDownCounter, + InstrumentKindHistogram, + InstrumentKindGauge, + InstrumentKindObservableCounter, + InstrumentKindObservableUpDownCounter, + InstrumentKindObservableGauge: + return nil + default: + return errIncompatibleAggregation + } + case AggregationSum: + switch kind { + case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter: + return nil + default: + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + return errIncompatibleAggregation + } + case AggregationLastValue: + switch kind { + case InstrumentKindObservableGauge, InstrumentKindGauge: + return nil + } + // TODO: review need for aggregation check after + // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 + return errIncompatibleAggregation + case AggregationDrop: + return nil + default: + // This is used passed checking for default, it should be an error at this point. + return fmt.Errorf("%w: %v", errUnknownAggregation, agg) + } +} + +// pipelines is the group of pipelines connecting Readers with instrument +// measurement. +type pipelines []*pipeline + +func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { + pipes := make([]*pipeline, 0, len(readers)) + for _, r := range readers { + p := newPipeline(res, r, views) + r.register(p) + pipes = append(pipes, p) + } + return pipes +} + +func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { + unregs := make([]func(), len(p)) + for i, pipe := range p { + unregs[i] = pipe.addMultiCallback(c) + } + return unregisterFuncs{f: unregs} +} + +type unregisterFuncs struct { + embedded.Registration + f []func() +} + +func (u unregisterFuncs) Unregister() error { + for _, f := range u.f { + f() + } + return nil +} + +// resolver facilitates resolving aggregate functions an instrument calls to +// aggregate measurements with while updating all pipelines that need to pull +// from those aggregations. +type resolver[N int64 | float64] struct { + inserters []*inserter[N] +} + +func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] { + in := make([]*inserter[N], len(p)) + for i := range in { + in[i] = newInserter[N](p[i], vc) + } + return resolver[N]{in} +} + +// Aggregators returns the Aggregators that must be updated by the instrument +// defined by key. +func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument +// defined by key. If boundaries were provided on instrument instantiation, those take precedence +// over boundaries provided by the reader. 
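+//
+// A minimal sketch of that precedence from the API side (illustrative; assumes
+// the go.opentelemetry.io/otel/metric package imported as api and a meter in
+// scope):
+//
+//	hist, _ := meter.Float64Histogram(
+//		"request.duration",
+//		api.WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
+//	)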
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { + var measures []aggregate.Measure[N] + + errs := &multierror{} + for _, i := range r.inserters { + agg := i.readerDefaultAggregation(id.Kind) + if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { + histAgg.Boundaries = boundaries + agg = histAgg + } + in, err := i.Instrument(id, agg) + if err != nil { + errs.append(err) + } + measures = append(measures, in...) + } + return measures, errs.errorOrNil() +} + +type multierror struct { + wrapped error + errors []string +} + +func (m *multierror) errorOrNil() error { + if len(m.errors) == 0 { + return nil + } + if m.wrapped == nil { + return errors.New(strings.Join(m.errors, "; ")) + } + return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) +} + +func (m *multierror) append(err error) { + m.errors = append(m.errors, err.Error()) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go new file mode 100644 index 000000000..a82af538e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" +) + +// MeterProvider handles the creation and coordination of Meters. All Meters +// created by a MeterProvider will be associated with the same Resource, have +// the same Views applied to them, and have their produced metric telemetry +// passed to the configured Readers. +type MeterProvider struct { + embedded.MeterProvider + + pipes pipelines + meters cache[instrumentation.Scope, *meter] + + forceFlush, shutdown func(context.Context) error + stopped atomic.Bool +} + +// Compile-time check MeterProvider implements metric.MeterProvider. +var _ metric.MeterProvider = (*MeterProvider)(nil) + +// NewMeterProvider returns a new and configured MeterProvider. +// +// By default, the returned MeterProvider is configured with the default +// Resource and no Readers. Readers cannot be added after a MeterProvider is +// created. This means the returned MeterProvider, one created with no +// Readers, will perform no operations. +func NewMeterProvider(options ...Option) *MeterProvider { + conf := newConfig(options) + flush, sdown := conf.readerSignals() + + mp := &MeterProvider{ + pipes: newPipelines(conf.res, conf.readers, conf.views), + forceFlush: flush, + shutdown: sdown, + } + // Log after creation so all readers show correctly they are registered. + global.Info("MeterProvider created", + "Resource", conf.res, + "Readers", conf.readers, + "Views", len(conf.views), + ) + return mp +} + +// Meter returns a Meter with the given name and configured with options. +// +// The name should be the name of the instrumentation scope creating +// telemetry. This name may be the same as the instrumented code only if that +// code provides built-in instrumentation. +// +// Calls to the Meter method after Shutdown has been called will return Meters +// that perform no operations. +// +// This method is safe to call concurrently. 
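+//
+// A minimal usage sketch (illustrative; assumes a ManualReader from this
+// package, the metricdata package, and a ctx in scope):
+//
+//	reader := NewManualReader()
+//	mp := NewMeterProvider(WithReader(reader))
+//	defer func() { _ = mp.Shutdown(ctx) }()
+//
+//	counter, _ := mp.Meter("example/app").Int64Counter("requests")
+//	counter.Add(ctx, 1)
+//
+//	var rm metricdata.ResourceMetrics
+//	_ = reader.Collect(ctx, &rm) // rm now holds the aggregated metrics.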
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+	if name == "" {
+		global.Warn("Invalid Meter name.", "name", name)
+	}
+
+	if mp.stopped.Load() {
+		return noop.Meter{}
+	}
+
+	c := metric.NewMeterConfig(options...)
+	s := instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	}
+
+	global.Info("Meter created",
+		"Name", s.Name,
+		"Version", s.Version,
+		"SchemaURL", s.SchemaURL,
+	)
+
+	return mp.meters.Lookup(s, func() *meter {
+		return newMeter(s, mp.pipes)
+	})
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources will have been released in these
+// situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+	if mp.forceFlush != nil {
+		return mp.forceFlush(ctx)
+	}
+	return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources will have been released in these
+// situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+	// Even though it may seem like there is a synchronization issue between the
+	// call to `Store` and checking `shutdown`, the Go concurrency model ensures
+	// that is not the case, as all the atomic operations executed in a program
+	// behave as though executed in some sequentially consistent order. This
+	// definition provides the same semantics as C++'s sequentially consistent
+	// atomics and Java's volatile variables.
+	// See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+	mp.stopped.Store(true)
+	if mp.shutdown != nil {
+		return mp.shutdown(ctx)
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 000000000..d94bdee75
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt is made to
+// register it more than once.
+var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value.
+var errNonPositiveDuration = fmt.Errorf("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter. Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection. The Register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Reader interface {
+	// register registers a Reader with a MeterProvider.
+	// The producer argument allows the Reader to signal the sdk to collect
+	// and send aggregated metric measurements.
+	register(sdkProducer)
+
+	// temporality reports the Temporality for the instrument kind provided.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	temporality(InstrumentKind) metricdata.Temporality
+
+	// aggregation returns what Aggregation to use for an instrument kind.
+	//
+	// This method needs to be concurrent safe with itself and all the other
+	// Reader methods.
+	aggregation(InstrumentKind) Aggregation // nolint:revive  // import-shadow for method scoped by type.
+
+	// Collect gathers and returns all metric data related to the Reader from
+	// the SDK and stores it in rm. An error is returned if this is called
+	// after Shutdown or if rm is nil.
+	//
+	// This method needs to be concurrent safe, and the cancellation of the
+	// passed context is expected to be honored.
+	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown flushes all metric measurements held in an export pipeline and releases any
+	// held computational resources.
+	//
+	// The deadline or cancellation of the passed context is honored. An appropriate
+	// error will be returned in these situations. There is no guarantee that all
+	// telemetry will be flushed or all resources will have been released in these
+	// situations.
+	//
+	// After Shutdown is called, calls to Collect will perform no operation and instead will return
+	// an error indicating the shutdown state.
+	//
+	// This method needs to be concurrent safe.
+	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+	// produce returns aggregated metrics from a single collection.
+	//
+	// This method is safe to call concurrently.
+	produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
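+//
+// A sketch of a bridge implementation (hypothetical type, for illustration),
+// registered with a reader through WithProducer defined below:
+//
+//	type bridge struct{}
+//
+//	func (bridge) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
+//		return []metricdata.ScopeMetrics{}, nil // externally produced metrics
+//	}
+//
+//	reader := NewManualReader(WithProducer(bridge{}))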
+type Producer interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Produce returns aggregated metrics from an external source.
+	//
+	// This method should be safe to call concurrently.
+	Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// produceHolder is used as an atomic.Value to wrap the non-concrete producer
+// type.
+type produceHolder struct {
+	produce func(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// shutdownProducer always produces an ErrReaderShutdown error.
+type shutdownProducer struct{}
+
+// produce returns an ErrReaderShutdown error.
+func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
+	return ErrReaderShutdown
+}
+
+// TemporalitySelector selects the temporality to use based on the InstrumentKind.
+type TemporalitySelector func(InstrumentKind) metricdata.Temporality
+
+// DefaultTemporalitySelector is the default TemporalitySelector used if
+// WithTemporalitySelector is not provided. CumulativeTemporality will be used
+// for all instrument kinds if this TemporalitySelector is used.
+func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+	return metricdata.CumulativeTemporality
+}
+
+// AggregationSelector selects the aggregation and the parameters to use for
+// that aggregation based on the InstrumentKind.
+//
+// If the Aggregation returned is nil or DefaultAggregation, the selection from
+// DefaultAggregationSelector will be used.
+type AggregationSelector func(InstrumentKind) Aggregation
+
+// DefaultAggregationSelector returns the default aggregation and parameters
+// that will be used to summarize measurements made from an instrument of
+// InstrumentKind. This AggregationSelector uses the following selection
+// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
+// Observable UpDownCounter ⇨ Sum, Gauge ⇨ LastValue,
+// Observable Gauge ⇨ LastValue, Histogram ⇨ ExplicitBucketHistogram.
+func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
+	switch ik {
+	case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
+		return AggregationSum{}
+	case InstrumentKindObservableGauge, InstrumentKindGauge:
+		return AggregationLastValue{}
+	case InstrumentKindHistogram:
+		return AggregationExplicitBucketHistogram{
+			Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+			NoMinMax:   false,
+		}
+	}
+	panic("unknown instrument kind")
+}
+
+// ReaderOption is an option which can be applied to manual or Periodic
+// readers.
+type ReaderOption interface {
+	PeriodicReaderOption
+	ManualReaderOption
+}
+
+// WithProducer registers p as an external Producer of metric data
+// for this Reader.
+func WithProducer(p Producer) ReaderOption {
+	return producerOption{p: p}
+}
+
+type producerOption struct {
+	p Producer
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied.
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+	c.producers = append(c.producers, o.p)
+	return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
new file mode 100644
index 000000000..44316caa1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+// version is the current release version of the metric SDK in use.
+func version() string {
+	return "1.29.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
new file mode 100644
index 000000000..cd08c6732
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"errors"
+	"regexp"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+	errMultiInst = errors.New("name replacement for multiple instruments")
+	errEmptyView = errors.New("no criteria provided for view")
+
+	emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
+)
+
+// View is an override to the default behavior of the SDK. It defines how data
+// should be collected for certain instruments. It returns true and the exact
+// Stream to use for matching Instruments. Otherwise, if the view does not
+// match, false is returned.
+type View func(Instrument) (Stream, bool)
+
+// NewView returns a View that applies the Stream mask for all instruments that
+// match criteria. The returned View will only apply mask if all non-zero-value
+// fields of criteria match the corresponding Instrument passed to the view. If
+// no criteria are provided (all fields of criteria are their zero-values), a
+// view that matches no instruments is returned. If you need to match a
+// zero-value field, create a View directly.
+//
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
+//
+// The Stream mask only applies updates for non-zero-value fields. By default,
+// the Instrument the View matches against will be used for the Name,
+// Description, and Unit of the returned Stream, and no Aggregation or
+// AttributeFilter is set. All non-zero-value fields of mask are used instead
+// of the default. If you need to zero out a Stream field returned from a
+// View, create a View directly.
+func NewView(criteria Instrument, mask Stream) View {
+	if criteria.IsEmpty() {
+		global.Error(
+			errEmptyView, "dropping view",
+			"mask", mask,
+		)
+		return emptyView
+	}
+
+	var matchFunc func(Instrument) bool
+	if strings.ContainsAny(criteria.Name, "*?") {
+		if mask.Name != "" {
+			global.Error(
+				errMultiInst, "dropping view",
+				"criteria", criteria,
+				"mask", mask,
+			)
+			return emptyView
+		}
+
+		// Handle branching here in NewView instead of criteria.matches so
+		// criteria.matches remains inlinable for the simple case.
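+		// For example, a criteria.Name of "http.*.duration" quotes to
+		// `http\.\*\.duration` and, after the replacements below, becomes the
+		// anchored pattern `^http\..*\.duration$`.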
+ pattern := regexp.QuoteMeta(criteria.Name) + pattern = "^" + pattern + "$" + pattern = strings.ReplaceAll(pattern, `\?`, ".") + pattern = strings.ReplaceAll(pattern, `\*`, ".*") + re := regexp.MustCompile(pattern) + matchFunc = func(i Instrument) bool { + return re.MatchString(i.Name) && + criteria.matchesDescription(i) && + criteria.matchesKind(i) && + criteria.matchesUnit(i) && + criteria.matchesScope(i) + } + } else { + matchFunc = criteria.matches + } + + var agg Aggregation + if mask.Aggregation != nil { + agg = mask.Aggregation.copy() + if err := agg.err(); err != nil { + global.Error( + err, "not using aggregation with view", + "criteria", criteria, + "mask", mask, + ) + agg = nil + } + } + + return func(i Instrument) (Stream, bool) { + if matchFunc(i) { + return Stream{ + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + }, true + } + return Stream{}, false + } +} + +// nonZero returns v if it is non-zero-valued, otherwise alt. +func nonZero[T comparable](v, alt T) T { + var zero T + if v != zero { + return v + } + return alt +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 000000000..4ad864d71 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 000000000..95a61d61d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "strings" +) + +// ErrPartialResource is returned by a detector when complete source +// information for a Resource is unavailable or the source information +// contains invalid values that are omitted from the returned Resource. +var ErrPartialResource = errors.New("partial resource") + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect returns a new [Resource] merged from all the Resources each of the +// detectors produces. Each of the detectors are called sequentially, in the +// order they are passed, merging the produced resource into the previous. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if that error is returned from a detector. 
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+	r := new(Resource)
+	return r, detect(ctx, r, detectors)
+}
+
+// detect runs all detectors using ctx and merges the result into res. This
+// assumes res is allocated and not nil; it will panic otherwise.
+//
+// If the detectors or merging resources produce any errors (for example,
+// [ErrPartialResource] or [ErrSchemaURLConflict]), a single error wrapping
+// all of these errors will be returned. Otherwise, nil is returned.
+func detect(ctx context.Context, res *Resource, detectors []Detector) error {
+	var (
+		r    *Resource
+		errs detectErrs
+		err  error
+	)
+
+	for _, detector := range detectors {
+		if detector == nil {
+			continue
+		}
+		r, err = detector.Detect(ctx)
+		if err != nil {
+			errs = append(errs, err)
+			if !errors.Is(err, ErrPartialResource) {
+				continue
+			}
+		}
+		r, err = Merge(res, r)
+		if err != nil {
+			errs = append(errs, err)
+		}
+		*res = *r
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+	if errors.Is(errs, ErrSchemaURLConflict) {
+		// If there has been a merge conflict, ensure the resource has no
+		// schema URL.
+		res.schemaURL = ""
+	}
+	return errs
+}
+
+type detectErrs []error
+
+func (e detectErrs) Error() string {
+	errStr := make([]string, len(e))
+	for i, err := range e {
+		errStr[i] = fmt.Sprintf("* %s", err)
+	}
+
+	format := "%d errors occurred detecting resource:\n\t%s"
+	return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t"))
+}
+
+func (e detectErrs) Unwrap() error {
+	switch len(e) {
+	case 0:
+		return nil
+	case 1:
+		return e[0]
+	}
+	return e[1:]
+}
+
+func (e detectErrs) Is(target error) bool {
+	return len(e) != 0 && errors.Is(e[0], target)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 000000000..6ac1cdbf7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/google/uuid"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type (
+	// telemetrySDK is a Detector that provides information about
+	// the OpenTelemetry SDK used. This Detector is included as a
+	// builtin. If these resource attributes are not wanted, use
+	// the WithTelemetrySDK(nil) or WithoutBuiltin() options to
+	// explicitly disable them.
+	telemetrySDK struct{}
+
+	// host is a Detector that provides information about the host
+	// being run on. This Detector is included as a builtin. If
+	// these resource attributes are not wanted, use the
+	// WithHost(nil) or WithoutBuiltin() options to explicitly
+	// disable them.
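+	//
+	// An opt-in sketch using the options from config.go (illustrative;
+	// assumes New from this package and a ctx in scope):
+	//
+	//	res, _ := New(ctx, WithHost(), WithTelemetrySDK())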
+ host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(sdk.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 000000000..0d6e213d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. 
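+//
+// For instance (illustrative; assumes a ctx in scope):
+//
+//	res, _ := New(ctx, WithAttributes(attribute.String("service.name", "checkout")))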
+func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithHostID adds host ID information to the configured resource. +func WithHostID() Option { + return WithDetectors(hostIDDetector{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. +func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. +func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. 
+func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 000000000..5ecd859a5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. 
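+//
+// For example (illustrative), a cgroup line such as
+//
+//	13:name=systemd:/pod/crio-e2cc29debdf85dde404998aa128997a819ff891c4d4c3766
+//
+// yields the trailing hexadecimal identifier as the container ID.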
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+	containerID, err := containerID()
+	if err != nil {
+		return nil, err
+	}
+
+	if containerID == "" {
+		return Empty(), nil
+	}
+	return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+	defaultOSStat = os.Stat
+	osStat        = defaultOSStat
+
+	defaultOSOpen = func(name string) (io.ReadCloser, error) {
+		return os.Open(name)
+	}
+	osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the ID of the container from the cgroup file.
+// If no container ID is found, an empty string is returned.
+func getContainerIDFromCGroup() (string, error) {
+	if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+		// File does not exist, skip
+		return "", nil
+	}
+
+	file, err := osOpen(cgroupPath)
+	if err != nil {
+		return "", err
+	}
+	defer file.Close()
+
+	return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the ID of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if id := getContainerIDFromLine(line); id != "" {
+			return id
+		}
+	}
+	return ""
+}
+
+// getContainerIDFromLine returns the ID of the container from one string line.
+func getContainerIDFromLine(line string) string {
+	matches := cgroupContainerIDRe.FindStringSubmatch(line)
+	if len(matches) <= 1 {
+		return ""
+	}
+	return matches[1]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 000000000..64939a271
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 000000000..813f05624
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const (
+	// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
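+	//
+	// For example (illustrative):
+	//
+	//	OTEL_RESOURCE_ATTRIBUTES="service.namespace=shop,deployment.environment=prod"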
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +// errMissingValue is returned when a resource value is missing. +var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + +// fromEnv is a Detector that implements the Detector and collects +// resources from environment. This Detector is included as a +// builtin. +type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { + k, v, found := strings.Cut(p, "=") + if !found { + invalid = append(invalid, p) + continue + } + key := strings.TrimSpace(k) + val, err := url.PathUnescape(strings.TrimSpace(v)) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + val = v + otel.Handle(err) + } + attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go new file mode 100644 index 000000000..2d0f65498 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type hostIDProvider func() (string, error) + +var defaultHostIDProvider hostIDProvider = platformHostIDReader.read + +var hostID = defaultHostIDProvider + +type hostIDReader interface { + read() (string, error) +} + +type fileReader func(string) (string, error) + +type commandExecutor func(string, ...string) (string, error) + +// hostIDReaderBSD implements hostIDReader. +type hostIDReaderBSD struct { + execCommand commandExecutor + readFile fileReader +} + +// read attempts to read the machine-id from /etc/hostid. If not found it will +// execute `kenv -q smbios.system.uuid`. If neither location yields an id an +// error will be returned. 
+func (r *hostIDReaderBSD) read() (string, error) { + if result, err := r.readFile("/etc/hostid"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/hostid or kenv") +} + +// hostIDReaderDarwin implements hostIDReader. +type hostIDReaderDarwin struct { + execCommand commandExecutor +} + +// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id +// from the IOPlatformUUID line. If the command fails or the uuid cannot be +// parsed an error will be returned. +func (r *hostIDReaderDarwin) read() (string, error) { + result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") + if err != nil { + return "", err + } + + lines := strings.Split(result, "\n") + for _, line := range lines { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.Split(line, " = ") + if len(parts) == 2 { + return strings.Trim(parts[1], "\""), nil + } + break + } + } + + return "", errors.New("could not parse IOPlatformUUID") +} + +type hostIDReaderLinux struct { + readFile fileReader +} + +// read attempts to read the machine-id from /etc/machine-id followed by +// /var/lib/dbus/machine-id. If neither location yields an ID an error will +// be returned. +func (r *hostIDReaderLinux) read() (string, error) { + if result, err := r.readFile("/etc/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") +} + +type hostIDDetector struct{} + +// Detect returns a *Resource containing the platform specific host id. 
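+//
+// Opt-in sketch (illustrative; assumes New from this package and a ctx in
+// scope):
+//
+//	res, _ := New(ctx, WithHostID())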
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { + hostID, err := hostID() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.HostID(hostID), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go new file mode 100644 index 000000000..cc8b8938e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris +// +build dragonfly freebsd netbsd openbsd solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderBSD{ + execCommand: execCommand, + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go new file mode 100644 index 000000000..b09fde3b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ + execCommand: execCommand, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go new file mode 100644 index 000000000..d9e5d1a8f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os/exec" + +func execCommand(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) 
+ b, err := cmd.Output() + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go new file mode 100644 index 000000000..f84f17324 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux +// +build linux + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderLinux{ + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go new file mode 100644 index 000000000..6354b3560 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os" + +func readFile(filename string) (string, error) { + b, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go new file mode 100644 index 000000000..df12c44c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// hostIDReaderUnsupported is a placeholder implementation for operating systems +// for which this project currently doesn't support host.id +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. 
+type hostIDReaderUnsupported struct{} + +func (*hostIDReaderUnsupported) read() (string, error) { + return "", nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go new file mode 100644 index 000000000..71386e2da --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "golang.org/x/sys/windows/registry" +) + +// implements hostIDReader +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the windows registry key: +// SOFTWARE\Microsoft\Cryptography +func (*hostIDReaderWindows) read() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, + registry.QUERY_VALUE|registry.WOW64_64KEY, + ) + + if err != nil { + return "", err + } + defer k.Close() + + guid, _, err := k.GetStringValue("MachineGuid") + if err != nil { + return "", err + } + + return guid, nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 000000000..8a48ab4fa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
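+//
+// For example (illustrative): "linux" maps to semconv.OSTypeLinux, while an
+// unmapped GOOS value such as "plan9" is returned as os.type "plan9".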
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+	// the elements in this map are the intersection between
+	// available GOOS values and defined semconv OS types
+	osTypeAttributeMap := map[string]attribute.KeyValue{
+		"aix":       semconv.OSTypeAIX,
+		"darwin":    semconv.OSTypeDarwin,
+		"dragonfly": semconv.OSTypeDragonflyBSD,
+		"freebsd":   semconv.OSTypeFreeBSD,
+		"linux":     semconv.OSTypeLinux,
+		"netbsd":    semconv.OSTypeNetBSD,
+		"openbsd":   semconv.OSTypeOpenBSD,
+		"solaris":   semconv.OSTypeSolaris,
+		"windows":   semconv.OSTypeWindows,
+		"zos":       semconv.OSTypeZOS,
+	}
+
+	var osTypeAttribute attribute.KeyValue
+
+	if attr, ok := osTypeAttributeMap[osType]; ok {
+		osTypeAttribute = attr
+	} else {
+		osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+	}
+
+	return osTypeAttribute
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 000000000..ce455dc54
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"os"
+)
+
+type plist struct {
+	XMLName xml.Name `xml:"plist"`
+	Dict    dict     `xml:"dict"`
+}
+
+type dict struct {
+	Key    []string `xml:"key"`
+	String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+	file, err := getPlistFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values, err := parsePlistFile(file)
+	if err != nil {
+		return ""
+	}
+
+	return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{
+		"/System/Library/CoreServices/SystemVersion.plist",
+		"/System/Library/CoreServices/ServerVersion.plist",
+	})
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and returns
+// a map with the key-values for each pair of correlated <key> and <string> elements
+// contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+	var v plist
+
+	err := xml.NewDecoder(file).Decode(&v)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(v.Dict.Key) != len(v.Dict.String) {
+		return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+	}
+
+	properties := make(map[string]string, len(v.Dict.Key))
+	for i, key := range v.Dict.Key {
+		properties[key] = v.Dict.String[i]
+	}
+
+	return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
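+//
+// For example (illustrative), the properties ProductName "macOS",
+// ProductVersion "14.4", and ProductBuildVersion "23E214" produce
+// "macOS 14.4 (23E214)".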
+func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 000000000..f537e5ca5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. +func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + k, v, found := strings.Cut(line, "=") + + if !found || len(k) == 0 { + return "", "", false + } + + key := strings.TrimSpace(k) + value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. 
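A compact sketch of the same os-release parsing flow, with a hard-coded sample and a simplified unquote step (an approximation of the vendored logic, not a copy of it):

package main

import (
	"bufio"
	"fmt"
	"strings"
)

const sample = `# sample /etc/os-release content
NAME="Ubuntu"
VERSION="22.04.4 LTS (Jammy Jellyfish)"
PRETTY_NAME="Ubuntu 22.04.4 LTS"
`

func main() {
	values := map[string]string{}
	sc := bufio.NewScanner(strings.NewReader(sample))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // same skip rule as the vendored parser
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok || k == "" {
			continue
		}
		// Simplified unquoting; the vendored parser also unescapes \$ \" etc.
		values[strings.TrimSpace(k)] = strings.Trim(strings.TrimSpace(v), `"'`)
	}
	fmt.Println(values["NAME"], values["VERSION"])
}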
+func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 000000000..a6ff26a4d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. 
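The NAME/VERSION precedence that buildOSRelease documents can be checked in isolation; the helper below mirrors its fallback chain for quick experimentation (the function and map values are illustrative):

package main

import "fmt"

// release applies the documented precedence: NAME+VERSION first,
// then NAME+VERSION_ID, and PRETTY_NAME only as a last resort.
func release(values map[string]string) string {
	name, version := values["NAME"], values["VERSION"]
	if version == "" {
		version = values["VERSION_ID"]
	}
	if name != "" && version != "" {
		return fmt.Sprintf("%s %s", name, version)
	}
	return values["PRETTY_NAME"]
}

func main() {
	fmt.Println(release(map[string]string{"NAME": "Debian GNU/Linux", "VERSION_ID": "12"}))
	fmt.Println(release(map[string]string{"PRETTY_NAME": "Alpine Linux v3.19"}))
}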
+func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. +func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 000000000..a77742b07 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 000000000..5e3d199d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
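For intuition about the winver-style string assembled below, here is a sketch with hard-coded stand-ins for the registry values; it prints something like Windows 10 Pro 22H2 (2009) [Version 10.0.19045.3930]:

package main

import "fmt"

func main() {
	// Stand-ins for the CurrentVersion registry values read below.
	productName, displayVersion, releaseID := "Windows 10 Pro", "22H2", "2009"
	major, minor, build, ubr := "10", "0", "19045", "3930"

	// Mirrors the vendored format string, including the trailing space that
	// is appended to a non-empty DisplayVersion.
	if displayVersion != "" {
		displayVersion += " "
	}
	fmt.Printf("%s %s(%s) [Version %s.%s.%s.%s]\n",
		productName, displayVersion, releaseID, major, minor, build, ubr)
}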
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 000000000..085fe68fd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = 
defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. 
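Because these providers are package-private variables, only tests inside this package can swap them; a hypothetical test sketch showing the pattern (not part of the vendored tree):

package resource

import (
	"context"
	"testing"
)

// TestRuntimeDescription pins the runtime providers to canned values so the
// detector output is deterministic, then restores the defaults.
func TestRuntimeDescription(t *testing.T) {
	setRuntimeProviders(
		func() string { return "go" },     // runtime name
		func() string { return "go1.22" }, // runtime version
		func() string { return "linux" },  // GOOS
		func() string { return "amd64" },  // GOARCH
	)
	defer setDefaultRuntimeProviders()

	res, err := processRuntimeDescriptionDetector{}.Detect(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	t.Log(res) // process.runtime.description=go version go1.22 linux/amd64
}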
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 000000000..ad4b50df4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,294 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + defaultResource *Resource + defaultResourceOnce sync.Once +) + +// ErrSchemaURLConflict is an error returned when two Resources are merged +// together that contain different, non-empty, schema URLs. +var ErrSchemaURLConflict = errors.New("conflicting Schema URL") + +// New returns a [Resource] built using opts. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if options that provide a [Detector] are used and that +// error is returned from one or more of the Detectors. It may also return a +// merge-conflict Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from the opts results in a +// schema URL conflict (see [Resource.Merge] for more information). It is up to +// the caller to determine if this returned Resource should be used or not +// based on these errors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + r := &Resource{schemaURL: cfg.schemaURL} + return r, detect(ctx, r, cfg.detectors) +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. 
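Typical construction through the two public constructors might look like the following sketch; the service name and attributes are made up:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Preferred when the attribute schema is known.
	r1 := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("checkout"),
		semconv.ServiceVersion("1.2.3"),
	)

	// Schemaless: duplicate keys keep the last value, invalid ones are dropped.
	r2 := resource.NewSchemaless(
		attribute.String("env", "dev"),
		attribute.String("env", "prod"), // wins
	)

	fmt.Println(r1.SchemaURL())
	fmt.Println(r1, r2)
}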
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &Resource{} + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &Resource{} + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Resource. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new [Resource] by merging a and b. +// +// If there are common keys between a and b, then the value from b will +// overwrite the value from a, even if b's value is empty. +// +// The SchemaURL of the resources will be merged according to the +// [OpenTelemetry specification rules]: +// +// - If a's schema URL is empty then the returned Resource's schema URL will +// be set to the schema URL of b, +// - Else if b's schema URL is empty then the returned Resource's schema URL +// will be set to the schema URL of a, +// - Else if the schema URLs of a and b are the same then that will be the +// schema URL of the returned Resource, +// - Else this is a merging error. If the resources have different, +// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will +// be returned with the merged Resource. The merged Resource will have an +// empty schema URL. It may be the case that some unintended attributes +// have been overwritten or old semantic conventions persisted in the +// returned Resource. It is up to the caller to determine if this returned +// Resource should be used or not. 
+// +// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + + switch { + case a.schemaURL == "": + return NewWithAttributes(b.schemaURL, combine...), nil + case b.schemaURL == "": + return NewWithAttributes(a.schemaURL, combine...), nil + case a.schemaURL == b.schemaURL: + return NewWithAttributes(a.schemaURL, combine...), nil + } + // Return the merged resource with an appropriate error. It is up to + // the user to decide if the returned resource can be used or not. + return NewSchemaless(combine...), fmt.Errorf( + "%w: %s and %s", + ErrSchemaURLConflict, + a.schemaURL, + b.schemaURL, + ) +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &Resource{} +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultDetectors := []Detector{ + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &Resource{} + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
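The conflict branch of Merge is worth seeing end to end; a short sketch with two made-up schema URLs:

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	a := resource.NewWithAttributes("https://example.com/schema/a", semconv.ServiceName("a"))
	b := resource.NewWithAttributes("https://example.com/schema/b", semconv.ServiceName("b"))

	merged, err := resource.Merge(a, b)
	if errors.Is(err, resource.ErrSchemaURLConflict) {
		// Attributes were still merged (b wins on common keys), but the
		// schema URL is empty; the caller decides whether to keep the result.
		fmt.Println("conflict:", err)
	}
	fmt.Println(merged.SchemaURL() == "", merged) // true service.name=b
}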
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md new file mode 100644 index 000000000..f2936e143 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md @@ -0,0 +1,3 @@ +# SDK Trace + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go new file mode 100644 index 000000000..1d399a75d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -0,0 +1,409 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/trace" +) + +// Defaults for BatchSpanProcessorOptions. +const ( + DefaultMaxQueueSize = 2048 + DefaultScheduleDelay = 5000 + DefaultExportTimeout = 30000 + DefaultMaxExportBatchSize = 512 +) + +// BatchSpanProcessorOption configures a BatchSpanProcessor. +type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) + +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. +type BatchSpanProcessorOptions struct { + // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the + // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. + // The default value of MaxQueueSize is 2048. + MaxQueueSize int + + // BatchTimeout is the maximum duration for constructing a batch. Processor + // forcefully sends available spans when timeout is reached. + // The default value of BatchTimeout is 5000 msec. + BatchTimeout time.Duration + + // ExportTimeout specifies the maximum duration for exporting spans. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + + // MaxExportBatchSize is the maximum number of spans to process in a single batch. + // If there are more than one batch worth of spans then it processes multiple batches + // of spans one batch after the other without any delay. + // The default value of MaxExportBatchSize is 512. + MaxExportBatchSize int + + // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full + // AND if BlockOnQueueFull is set to true. + // Blocking option should be used carefully as it can severely affect the performance of an + // application. + BlockOnQueueFull bool +} + +// batchSpanProcessor is a SpanProcessor that batches asynchronously-received +// spans and sends them to a trace.Exporter when complete. +type batchSpanProcessor struct { + e SpanExporter + o BatchSpanProcessorOptions + + queue chan ReadOnlySpan + dropped uint32 + + batch []ReadOnlySpan + batchMutex sync.Mutex + timer *time.Timer + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} + stopped atomic.Bool +} + +var _ SpanProcessor = (*batchSpanProcessor)(nil) + +// NewBatchSpanProcessor creates a new SpanProcessor that will send completed +// span batches to the exporter with the supplied options. 
+// +// If the exporter is nil, the span processor will perform no action. +func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) + + if maxExportBatchSize > maxQueueSize { + if DefaultMaxExportBatchSize > maxQueueSize { + maxExportBatchSize = maxQueueSize + } else { + maxExportBatchSize = DefaultMaxExportBatchSize + } + } + + o := BatchSpanProcessorOptions{ + BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, + ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, + MaxQueueSize: maxQueueSize, + MaxExportBatchSize: maxExportBatchSize, + } + for _, opt := range options { + opt(&o) + } + bsp := &batchSpanProcessor{ + e: exporter, + o: o, + batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), + timer: time.NewTimer(o.BatchTimeout), + queue: make(chan ReadOnlySpan, o.MaxQueueSize), + stopCh: make(chan struct{}), + } + + bsp.stopWait.Add(1) + go func() { + defer bsp.stopWait.Done() + bsp.processQueue() + bsp.drainQueue() + }() + + return bsp +} + +// OnStart method does nothing. +func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} + +// OnEnd method enqueues a ReadOnlySpan for later processing. +func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + // Do not enqueue spans after Shutdown. + if bsp.stopped.Load() { + return + } + + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return + } + bsp.enqueue(s) +} + +// Shutdown flushes the queue and waits until all spans are processed. +// It only executes once. Subsequent call does nothing. +func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { + bsp.stopped.Store(true) + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) + bsp.stopWait.Wait() + if bsp.e != nil { + if err := bsp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } + close(wait) + }() + // Wait until the wait group is done or the context is cancelled + select { + case <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err +} + +type forceFlushSpan struct { + ReadOnlySpan + flushed chan struct{} +} + +func (f forceFlushSpan) SpanContext() trace.SpanContext { + return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) +} + +// ForceFlush exports all ended spans that have not yet been exported. +func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { + // Interrupt if context is already canceled. + if err := ctx.Err(); err != nil { + return err + } + + // Do nothing after Shutdown. + if bsp.stopped.Load() { + return nil + } + + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { + case <-bsp.stopCh: + // The batchSpanProcessor is Shutdown. 
+ return nil + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): + return ctx.Err() + } + } + + wait := make(chan error) + go func() { + wait <- bsp.exportSpans(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err +} + +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. +func WithMaxQueueSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxQueueSize = size + } +} + +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. +func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxExportBatchSize = size + } +} + +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). +func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BatchTimeout = delay + } +} + +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. +func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.ExportTimeout = timeout + } +} + +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. +func WithBlocking() BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BlockOnQueueFull = true + } +} + +// exportSpans is a subroutine of processing and draining the queue. +func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { + bsp.timer.Reset(bsp.o.BatchTimeout) + + bsp.batchMutex.Lock() + defer bsp.batchMutex.Unlock() + + if bsp.o.ExportTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + defer cancel() + } + + if l := len(bsp.batch); l > 0 { + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + err := bsp.e.ExportSpans(ctx, bsp.batch) + + // A new batch is always created after exporting, even if the batch failed to be exported. + // + // It is up to the exporter to implement any type of retry logic if a batch is failing + // to be exported, since it is specific to the protocol and backend being sent to. + bsp.batch = bsp.batch[:0] + + if err != nil { + return err + } + } + return nil +} + +// processQueue removes spans from the `queue` channel until processor +// is shut down. It calls the exporter in batches of up to MaxExportBatchSize +// waiting up to BatchTimeout to form a batch. 
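Wiring the options above into a processor could look like the sketch below; the exporter is deliberately left nil, which the constructor documents as a no-op:

package main

import (
	"context"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	var exporter sdktrace.SpanExporter // plug in a real exporter here

	bsp := sdktrace.NewBatchSpanProcessor(
		exporter,
		sdktrace.WithMaxQueueSize(4096),          // buffer more spans before dropping
		sdktrace.WithMaxExportBatchSize(1024),    // larger batches per export
		sdktrace.WithBatchTimeout(2*time.Second), // flush partial batches sooner
		// sdktrace.WithBlocking() would trade throughput for losslessness.
	)
	defer bsp.Shutdown(context.Background())
}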
+func (bsp *batchSpanProcessor) processQueue() { + defer bsp.timer.Stop() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case <-bsp.stopCh: + return + case <-bsp.timer.C: + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + case sd := <-bsp.queue: + if ffs, ok := sd.(forceFlushSpan); ok { + close(ffs.flushed) + continue + } + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + if shouldExport { + if !bsp.timer.Stop() { + <-bsp.timer.C + } + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + } + } +} + +// drainQueue awaits the any caller that had added to bsp.stopWait +// to finish the enqueue, then exports the final batch. +func (bsp *batchSpanProcessor) drainQueue() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case sd := <-bsp.queue: + if _, ok := sd.(forceFlushSpan); ok { + // Ignore flush requests as they are not valid spans. + continue + } + + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + + if shouldExport { + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + default: + // There are no more enqueued spans. Make final export. + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + return + } + } +} + +func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + ctx := context.TODO() + if bsp.o.BlockOnQueueFull { + bsp.enqueueBlockOnQueueFull(ctx, sd) + } else { + bsp.enqueueDrop(ctx, sd) + } +} + +func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + select { + case bsp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + select { + case bsp.queue <- sd: + return true + default: + atomic.AddUint32(&bsp.dropped, 1) + } + return false +} + +// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. +func (bsp *batchSpanProcessor) MarshalLog() interface{} { + return struct { + Type string + SpanExporter SpanExporter + Config BatchSpanProcessorOptions + }{ + Type: "BatchSpanProcessor", + SpanExporter: bsp.e, + Config: bsp.o, + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go new file mode 100644 index 000000000..1f60524e3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package trace contains support for OpenTelemetry distributed tracing. + +The following assumes a basic familiarity with OpenTelemetry concepts. +See https://opentelemetry.io. 
+*/ +package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go new file mode 100644 index 000000000..60a7ed134 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string + + // Attributes describe the aspects of the event. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int + + // Time at which this event was recorded. + Time time.Time +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go new file mode 100644 index 000000000..821c83faa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + +// evictedQueue is a FIFO queue with a configurable capacity. +type evictedQueue[T any] struct { + queue []T + capacity int + droppedCount int + logDropped func() +} + +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Event]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + } +} + +// add adds value to the evictedQueue eq. If eq is at capacity, the oldest +// queued value will be discarded and the drop count incremented. +func (eq *evictedQueue[T]) add(value T) { + if eq.capacity == 0 { + eq.droppedCount++ + eq.logDropped() + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { + // Drop first-in while avoiding allocating more capacity to eq.queue. + copy(eq.queue[:eq.capacity-1], eq.queue[1:]) + eq.queue = eq.queue[:eq.capacity-1] + eq.droppedCount++ + eq.logDropped() + } + eq.queue = append(eq.queue, value) +} + +// copy returns a copy of the evictedQueue. +func (eq *evictedQueue[T]) copy() []T { + return slices.Clone(eq.queue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go new file mode 100644 index 000000000..925bcf993 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "math/rand" + "sync" + + "go.opentelemetry.io/otel/trace" +) + +// IDGenerator allows custom generators for TraceID and SpanID. 
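One way to satisfy the IDGenerator interface declared just below is to draw IDs from crypto/rand instead of the seeded math/rand source; a hedged sketch (the type name is made up):

package main

import (
	"context"
	crand "crypto/rand"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// cryptoIDGenerator draws trace and span IDs directly from crypto/rand,
// retrying until the IDs are valid (non-zero), like the default generator.
type cryptoIDGenerator struct{}

func (cryptoIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	for !tid.IsValid() {
		_, _ = crand.Read(tid[:])
	}
	var sid trace.SpanID
	for !sid.IsValid() {
		_, _ = crand.Read(sid[:])
	}
	return tid, sid
}

func (cryptoIDGenerator) NewSpanID(ctx context.Context, _ trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	for !sid.IsValid() {
		_, _ = crand.Read(sid[:])
	}
	return sid
}

func main() {
	tp := sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(cryptoIDGenerator{}))
	defer tp.Shutdown(context.Background())
}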
+type IDGenerator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewIDs returns a new trace and span ID. + NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewSpanID returns a ID for a new span in the trace with traceID. + NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +type randomIDGenerator struct { + sync.Mutex + randSource *rand.Rand +} + +var _ IDGenerator = &randomIDGenerator{} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { + gen.Lock() + defer gen.Unlock() + sid := trace.SpanID{} + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } + return sid +} + +// NewIDs returns a non-zero trace ID and a non-zero span ID from a +// randomly-chosen sequence. +func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { + gen.Lock() + defer gen.Unlock() + tid := trace.TraceID{} + sid := trace.SpanID{} + for { + _, _ = gen.randSource.Read(tid[:]) + if tid.IsValid() { + break + } + } + for { + _, _ = gen.randSource.Read(sid[:]) + if sid.IsValid() { + break + } + } + return tid, sid +} + +func defaultIDGenerator() IDGenerator { + gen := &randomIDGenerator{} + var rngSeed int64 + _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) + gen.randSource = rand.New(rand.NewSource(rngSeed)) + return gen +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go new file mode 100644 index 000000000..c03bdc90f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +type Link struct { + // SpanContext of the linked Span. + SpanContext trace.SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. 
+ DroppedAttributeCount int +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go new file mode 100644 index 000000000..14c2e5beb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -0,0 +1,493 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/noop" +) + +const ( + defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" +) + +// tracerProviderConfig. +type tracerProviderConfig struct { + // processors contains collection of SpanProcessors that are processing pipeline + // for spans in the trace signal. + // SpanProcessors registered with a TracerProvider and are called at the start + // and end of a Span's lifecycle, and are called in the order they are + // registered. + processors []SpanProcessor + + // sampler is the default sampler used when creating new spans. + sampler Sampler + + // idGenerator is used to generate all Span and Trace IDs when needed. + idGenerator IDGenerator + + // spanLimits defines the attribute, event, and link limits for spans. + spanLimits SpanLimits + + // resource contains attributes representing an entity that produces telemetry. + resource *resource.Resource +} + +// MarshalLog is the marshaling function used by the logging system to represent this Provider. +func (cfg tracerProviderConfig) MarshalLog() interface{} { + return struct { + SpanProcessors []SpanProcessor + SamplerType string + IDGeneratorType string + SpanLimits SpanLimits + Resource *resource.Resource + }{ + SpanProcessors: cfg.processors, + SamplerType: fmt.Sprintf("%T", cfg.sampler), + IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), + SpanLimits: cfg.spanLimits, + Resource: cfg.resource, + } +} + +// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to +// instrumentation so it can trace operational flow through a system. +type TracerProvider struct { + embedded.TracerProvider + + mu sync.Mutex + namedTracer map[instrumentation.Scope]*tracer + spanProcessors atomic.Pointer[spanProcessorStates] + + isShutdown atomic.Bool + + // These fields are not protected by the lock mu. They are assumed to be + // immutable after creation of the TracerProvider. + sampler Sampler + idGenerator IDGenerator + spanLimits SpanLimits + resource *resource.Resource +} + +var _ trace.TracerProvider = &TracerProvider{} + +// NewTracerProvider returns a new and configured TracerProvider. +// +// By default the returned TracerProvider is configured with: +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. +// +// The passed opts are used to override these default values and configure the +// returned TracerProvider appropriately. 
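Putting the defaults and options together, a typical provider setup might look like this sketch; the resource attributes and sampling ratio are illustrative:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	res := resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("demo"))

	tp := sdktrace.NewTracerProvider(
		sdktrace.WithResource(res),
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))),
	)
	defer func() {
		if err := tp.Shutdown(context.Background()); err != nil {
			log.Println("shutdown:", err)
		}
	}()

	tr := tp.Tracer("example/demo")
	_, span := tr.Start(context.Background(), "operation")
	span.End()
}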
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) + + for _, opt := range opts { + o = opt.apply(o) + } + + o = ensureValidTracerProviderConfig(o) + + tp := &TracerProvider{ + namedTracer: make(map[instrumentation.Scope]*tracer), + sampler: o.sampler, + idGenerator: o.idGenerator, + spanLimits: o.spanLimits, + resource: o.resource, + } + global.Info("TracerProvider created", "config", o) + + spss := make(spanProcessorStates, 0, len(o.processors)) + for _, sp := range o.processors { + spss = append(spss, newSpanProcessorState(sp)) + } + tp.spanProcessors.Store(&spss) + + return tp +} + +// Tracer returns a Tracer with the given name and options. If a Tracer for +// the given name and options does not exist it is created, otherwise the +// existing Tracer is returned. +// +// If name is empty, DefaultTracerName is used instead. +// +// This method is safe to be called concurrently. +func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). + if p.isShutdown.Load() { + return noop.NewTracerProvider().Tracer(name, opts...) + } + c := trace.NewTracerConfig(opts...) + if name == "" { + name = defaultTracerName + } + is := instrumentation.Scope{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + + t, ok := func() (trace.Tracer, bool) { + p.mu.Lock() + defer p.mu.Unlock() + // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran + // after the first check above but before we acquired the mutex. + if p.isShutdown.Load() { + return noop.NewTracerProvider().Tracer(name, opts...), true + } + t, ok := p.namedTracer[is] + if !ok { + t = &tracer{ + provider: p, + instrumentationScope: is, + } + p.namedTracer[is] = t + } + return t, ok + }() + if !ok { + // This code is outside the mutex to not hold the lock while calling third party logging code: + // - That code may do slow things like I/O, which would prolong the duration the lock is held, + // slowing down all tracing consumers. + // - Logging code may be instrumented with tracing and deadlock because it could try + // acquiring the same non-reentrant mutex. + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + } + return t +} + +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { + // This check prevents calls during a shutdown. + if p.isShutdown.Load() { + return + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown. + if p.isShutdown.Load() { + return + } + + current := p.getSpanProcessors() + newSPS := make(spanProcessorStates, 0, len(current)+1) + newSPS = append(newSPS, current...) + newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(&newSPS) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { + // This check prevents calls during a shutdown. + if p.isShutdown.Load() { + return + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown. 
+ if p.isShutdown.Load() { + return + } + old := p.getSpanProcessors() + if len(old) == 0 { + return + } + spss := make(spanProcessorStates, len(old)) + copy(spss, old) + + // stop the span processor if it is started and remove it from the list + var stopOnce *spanProcessorState + var idx int + for i, sps := range spss { + if sps.sp == sp { + stopOnce = sps + idx = i + } + } + if stopOnce != nil { + stopOnce.state.Do(func() { + if err := sp.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }) + } + if len(spss) > 1 { + copy(spss[idx:], spss[idx+1:]) + } + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] + + p.spanProcessors.Store(&spss) +} + +// ForceFlush immediately exports all spans that have not yet been exported for +// all the registered span processors. +func (p *TracerProvider) ForceFlush(ctx context.Context) error { + spss := p.getSpanProcessors() + if len(spss) == 0 { + return nil + } + + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if err := sps.sp.ForceFlush(ctx); err != nil { + return err + } + } + return nil +} + +// Shutdown shuts down TracerProvider. All registered span processors are shut down +// in the order they were registered and any held computational resources are released. +// After Shutdown is called, all methods are no-ops. +func (p *TracerProvider) Shutdown(ctx context.Context) error { + // This check prevents deadlocks in case of recursive shutdown. + if p.isShutdown.Load() { + return nil + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown has already been done concurrently. + if !p.isShutdown.CompareAndSwap(false, true) { // did toggle? + return nil + } + + var retErr error + for _, sps := range p.getSpanProcessors() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var err error + sps.state.Do(func() { + err = sps.sp.Shutdown(ctx) + }) + if err != nil { + if retErr == nil { + retErr = err + } else { + // Poor man's list of errors + retErr = fmt.Errorf("%w; %w", retErr, err) + } + } + } + p.spanProcessors.Store(&spanProcessorStates{}) + return retErr +} + +func (p *TracerProvider) getSpanProcessors() spanProcessorStates { + return *(p.spanProcessors.Load()) +} + +// TracerProviderOption configures a TracerProvider. +type TracerProviderOption interface { + apply(tracerProviderConfig) tracerProviderConfig +} + +type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig + +func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { + return fn(cfg) +} + +// WithSyncer registers the exporter with the TracerProvider using a +// SimpleSpanProcessor. +// +// This is not recommended for production use. The synchronous nature of the +// SimpleSpanProcessor that will wrap the exporter make it good for testing, +// debugging, or showing examples of other feature, but it will be slow and +// have a high computation resource usage overhead. The WithBatcher option is +// recommended for production use instead. +func WithSyncer(e SpanExporter) TracerProviderOption { + return WithSpanProcessor(NewSimpleSpanProcessor(e)) +} + +// WithBatcher registers the exporter with the TracerProvider using a +// BatchSpanProcessor configured with the passed opts. +func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { + return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) +} + +// WithSpanProcessor registers the SpanProcessor with a TracerProvider. 
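Since ForceFlush and Shutdown both honor context cancellation between processors, callers usually bound them with a deadline; a small sketch:

package main

import (
	"context"
	"log"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	tp := sdktrace.NewTracerProvider()

	// Bound how long flushing and shutdown may take in total.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := tp.ForceFlush(ctx); err != nil {
		log.Println("flush:", err)
	}
	if err := tp.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}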
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.processors = append(cfg.processors, sp) + return cfg + }) +} + +// WithResource returns a TracerProviderOption that will configure the +// Resource r as a TracerProvider's Resource. The configured Resource is +// referenced by all the Tracers the TracerProvider creates. It represents the +// entity producing telemetry. +// +// If this option is not used, the TracerProvider will use the +// resource.Default() Resource by default. +func WithResource(r *resource.Resource) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + var err error + cfg.resource, err = resource.Merge(resource.Environment(), r) + if err != nil { + otel.Handle(err) + } + return cfg + }) +} + +// WithIDGenerator returns a TracerProviderOption that will configure the +// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator +// is used by the Tracers the TracerProvider creates to generate new Span and +// Trace IDs. +// +// If this option is not used, the TracerProvider will use a random number +// IDGenerator by default. +func WithIDGenerator(g IDGenerator) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if g != nil { + cfg.idGenerator = g + } + return cfg + }) +} + +// WithSampler returns a TracerProviderOption that will configure the Sampler +// s as a TracerProvider's Sampler. The configured Sampler is used by the +// Tracers the TracerProvider creates to make their sampling decisions for the +// Spans they create. +// +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a +// ParentBased(AlwaysSample) Sampler by default. +func WithSampler(s Sampler) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if s != nil { + cfg.sampler = s + } + return cfg + }) +} + +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. +// +// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. +// +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. 
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+	if sl.AttributeValueLengthLimit <= 0 {
+		sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+	}
+	if sl.AttributeCountLimit <= 0 {
+		sl.AttributeCountLimit = DefaultAttributeCountLimit
+	}
+	if sl.EventCountLimit <= 0 {
+		sl.EventCountLimit = DefaultEventCountLimit
+	}
+	if sl.AttributePerEventCountLimit <= 0 {
+		sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+	}
+	if sl.LinkCountLimit <= 0 {
+		sl.LinkCountLimit = DefaultLinkCountLimit
+	}
+	if sl.AttributePerLinkCountLimit <= 0 {
+		sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+	}
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = sl
+		return cfg
+	})
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits, and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = limits
+		return cfg
+	})
+}
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+	for _, opt := range tracerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+	var opts []TracerProviderOption
+
+	sampler, err := samplerFromEnv()
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	if sampler != nil {
+		opts = append(opts, WithSampler(sampler))
+	}
+
+	return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig
+// is valid.
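+//
+// For illustration, a sketch of how the configuration sources compose: an
+// explicit WithSampler option wins, then the OTEL_TRACES_SAMPLER and
+// OTEL_TRACES_SAMPLER_ARG environment variables, then the fallbacks below.
+//
+//	// With OTEL_TRACES_SAMPLER=traceidratio and OTEL_TRACES_SAMPLER_ARG=0.25,
+//	// NewTracerProvider() samples with TraceIDRatioBased(0.25).
+//	// With no environment and no WithSampler option, the fallback is
+//	// ParentBased(AlwaysSample()).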
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+	if cfg.sampler == nil {
+		cfg.sampler = ParentBased(AlwaysSample())
+	}
+	if cfg.idGenerator == nil {
+		cfg.idGenerator = defaultIDGenerator()
+	}
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+	return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 000000000..d2d1f7246
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,97 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	tracesSamplerKey    = "OTEL_TRACES_SAMPLER"
+	tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+	samplerAlwaysOn                = "always_on"
+	samplerAlwaysOff               = "always_off"
+	samplerTraceIDRatio            = "traceidratio"
+	samplerParentBasedAlwaysOn     = "parentbased_always_on"
+	samplerParentBasedAlwaysOff    = "parentbased_always_off"
+	samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+	return fmt.Sprintf("unsupported sampler: %s", string(e))
+}
+
+var (
+	errNegativeTraceIDRatio       = errors.New("invalid trace ID ratio: less than 0.0")
+	errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+	parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+	return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error())
+}
+
+func (e samplerArgParseError) Unwrap() error {
+	return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+	sampler, ok := os.LookupEnv(tracesSamplerKey)
+	if !ok {
+		return nil, nil
+	}
+
+	sampler = strings.ToLower(strings.TrimSpace(sampler))
+	samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+	samplerArg = strings.TrimSpace(samplerArg)
+
+	switch sampler {
+	case samplerAlwaysOn:
+		return AlwaysSample(), nil
+	case samplerAlwaysOff:
+		return NeverSample(), nil
+	case samplerTraceIDRatio:
+		if !hasSamplerArg {
+			return TraceIDRatioBased(1.0), nil
+		}
+		return parseTraceIDRatio(samplerArg)
+	case samplerParentBasedAlwaysOn:
+		return ParentBased(AlwaysSample()), nil
+	case samplerParentBasedAlwaysOff:
+		return ParentBased(NeverSample()), nil
+	case samplerParentBasedTraceIDRatio:
+		if !hasSamplerArg {
+			return ParentBased(TraceIDRatioBased(1.0)), nil
+		}
+		ratio, err := parseTraceIDRatio(samplerArg)
+		return ParentBased(ratio), err
+	default:
+		return nil, errUnsupportedSampler(sampler)
+	}
+}
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+	v, err := strconv.ParseFloat(arg, 64)
+	if err != nil {
+		return TraceIDRatioBased(1.0), samplerArgParseError{err}
+	}
+	if v < 0.0 {
+		return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+	}
+	if v > 1.0 {
+		return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+	}
+
+	return TraceIDRatioBased(v), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 000000000..ebb6df6c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,282 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
"encoding/binary" + "fmt" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Sampler decides whether a trace should be sampled and exported. +type Sampler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ShouldSample returns a SamplingResult based on a decision made from the + // passed parameters. + ShouldSample(parameters SamplingParameters) SamplingResult + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Description returns information describing the Sampler. + Description() string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext context.Context + TraceID trace.TraceID + Name string + Kind trace.SpanKind + Attributes []attribute.KeyValue + Links []trace.Link +} + +// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. +type SamplingDecision uint8 + +// Valid sampling decisions. +const ( + // Drop will not record the span and all attributes/events will be dropped. + Drop SamplingDecision = iota + + // Record indicates the span's `IsRecording() == true`, but `Sampled` flag + // *must not* be set. + RecordOnly + + // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag + // *must* be set. + RecordAndSample +) + +// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. +type SamplingResult struct { + Decision SamplingDecision + Attributes []attribute.KeyValue + Tracestate trace.TraceState +} + +type traceIDRatioSampler struct { + traceIDUpperBound uint64 + description string +} + +func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: psc.TraceState(), + } + } + return SamplingResult{ + Decision: Drop, + Tracestate: psc.TraceState(), + } +} + +func (ts traceIDRatioSampler) Description() string { + return ts.description +} + +// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will +// always sample. Fractions < 0 are treated as zero. To respect the +// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used +// as a delegate of a `Parent` sampler. +// +//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` +func TraceIDRatioBased(fraction float64) Sampler { + if fraction >= 1 { + return AlwaysSample() + } + + if fraction <= 0 { + fraction = 0 + } + + return &traceIDRatioSampler{ + traceIDUpperBound: uint64(fraction * (1 << 63)), + description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), + } +} + +type alwaysOnSampler struct{} + +func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOnSampler) Description() string { + return "AlwaysOnSampler" +} + +// AlwaysSample returns a Sampler that samples every trace. 
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+	return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOffSampler) Description() string {
+	return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+	return alwaysOffSampler{}
+}
+
+// ParentBased returns a sampler decorator which behaves differently based on
+// the parent of the span. If the span has no parent, the decorated sampler
+// is used to make the sampling decision. If the span has a parent, depending
+// on whether the parent is remote and whether it is sampled, one of the
+// following samplers will apply:
+// - remoteParentSampled(Sampler) (default: AlwaysOn)
+// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+// - localParentSampled(Sampler) (default: AlwaysOn)
+// - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+	return parentBased{
+		root:   root,
+		config: configureSamplersForParentBased(samplers),
+	}
+}
+
+type parentBased struct {
+	root   Sampler
+	config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+	c := samplerConfig{
+		remoteParentSampled:    AlwaysSample(),
+		remoteParentNotSampled: NeverSample(),
+		localParentSampled:     AlwaysSample(),
+		localParentNotSampled:  NeverSample(),
+	}
+
+	for _, so := range samplers {
+		c = so.apply(c)
+	}
+
+	return c
+}
+
+// samplerConfig is a group of options for parentBased sampler.
+type samplerConfig struct {
+	remoteParentSampled, remoteParentNotSampled Sampler
+	localParentSampled, localParentNotSampled   Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+	apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentSampled = o.s
+	return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.remoteParentNotSampled = o.s
+	return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+	s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentSampled = o.s
+	return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
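+//
+// For illustration, a minimal sketch combining these options:
+//
+//	s := ParentBased(
+//		TraceIDRatioBased(0.1),                    // root spans: sample 10%
+//		WithRemoteParentNotSampled(NeverSample()), // the default, shown explicitly
+//		WithLocalParentNotSampled(NeverSample()),
+//	)
+//	tp := NewTracerProvider(WithSampler(s))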
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentNotSampled = o.s
+	return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	if psc.IsValid() {
+		if psc.IsRemote() {
+			if psc.IsSampled() {
+				return pb.config.remoteParentSampled.ShouldSample(p)
+			}
+			return pb.config.remoteParentNotSampled.ShouldSample(p)
+		}
+
+		if psc.IsSampled() {
+			return pb.config.localParentSampled.ShouldSample(p)
+		}
+		return pb.config.localParentNotSampled.ShouldSample(p)
+	}
+	return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+	return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+		"remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+		pb.root.Description(),
+		pb.config.remoteParentSampled.Description(),
+		pb.config.remoteParentNotSampled.Description(),
+		pb.config.localParentSampled.Description(),
+		pb.config.localParentNotSampled.Description(),
+	)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 000000000..554111bb4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,121 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a trace.Exporter immediately.
+type simpleSpanProcessor struct {
+	exporterMu sync.Mutex
+	exporter   SpanExporter
+	stopOnce   sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and have a high computation
+// resource usage overhead. The BatchSpanProcessor is recommended for production
+// use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+	ssp := &simpleSpanProcessor{
+		exporter: exporter,
+	}
+	global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+	return ssp
+}
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+	ssp.exporterMu.Lock()
+	defer ssp.exporterMu.Unlock()
+
+	if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+		if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
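+//
+// For illustration, a sketch of a bounded shutdown; `tp` is assumed to be a
+// TracerProvider built with WithSyncer(exp). The select below honors the
+// context deadline:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := tp.Shutdown(ctx); err != nil {
+//		otel.Handle(err)
+//	}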
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+		// A closure is used to keep a reference to the exporter and then the
+		// field is zeroed. This ensures the simpleSpanProcessor is shut down
+		// before the exporter. This order is important as it avoids a potential
+		// deadlock. If the exporter shut down operation generates a span, that
+		// span would need to be exported. Meaning, OnEnd would be called and
+		// try acquiring the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down and
+			// the context to be done simultaneously. In that case this outer
+			// select statement will randomly choose a case. This will result in
+			// a different returned error for similar scenarios. Instead, double
+			// check if the exporter shut down at the same time and return that
+			// error if so. This will ensure consistency as well as ensure
+			// the caller knows the exporter shut down successfully (they can
+			// already determine if the deadline is expired given they passed
+			// the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 000000000..d511d0f27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+	name                  string
+	spanContext           trace.SpanContext
+	parent                trace.SpanContext
+	spanKind              trace.SpanKind
+	startTime             time.Time
+	endTime               time.Time
+	attributes            []attribute.KeyValue
+	events                []Event
+	links                 []Link
+	status                Status
+	childSpanCount        int
+	droppedAttributeCount int
+	droppedEventCount     int
+	droppedLinkCount      int
+	resource              *resource.Resource
+	instrumentationScope  instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+	return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+	return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent, the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+	return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+	return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+	return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+	return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+	return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+	return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+	return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+	return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+	return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+	return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+	return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+	return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+	return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 000000000..4945f5083
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,846 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	rt "runtime/trace"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+	// Name returns the name of the span.
+	Name() string
+	// SpanContext returns the unique SpanContext that identifies the span.
+	SpanContext() trace.SpanContext
+	// Parent returns the unique SpanContext that identifies the parent of the
+	// span if one exists. If the span has no parent, the returned SpanContext
+	// will be invalid.
+	Parent() trace.SpanContext
+	// SpanKind returns the role the span plays in a Trace.
+	SpanKind() trace.SpanKind
+	// StartTime returns the time the span started recording.
+	StartTime() time.Time
+	// EndTime returns the time the span stopped recording. It will be zero if
+	// the span has not ended.
+	EndTime() time.Time
+	// Attributes returns the defining attributes of the span.
+	// The order of the returned attributes is not guaranteed to be stable across invocations.
+	Attributes() []attribute.KeyValue
+	// Links returns all the links the span has to other spans.
+	Links() []Link
+	// Events returns all the events that occurred within the span's
+	// lifetime.
+	Events() []Event
+	// Status returns the span's status.
+	Status() Status
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the span.
+	InstrumentationScope() instrumentation.Scope
+	// InstrumentationLibrary returns information about the instrumentation
+	// library that created the span.
+	// Deprecated: please use InstrumentationScope instead.
+	InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	// Resource returns information about the entity that produced the span.
+	Resource() *resource.Resource
+	// DroppedAttributes returns the number of attributes dropped by the span
+	// due to limits being reached.
+	DroppedAttributes() int
+	// DroppedLinks returns the number of links dropped by the span due to
+	// limits being reached.
+	DroppedLinks() int
+	// DroppedEvents returns the number of events dropped by the span due to
+	// limits being reached.
+	DroppedEvents() int
+	// ChildSpanCount returns the count of spans that consider the span a
+	// direct parent.
+	ChildSpanCount() int
+
+	// A private method to prevent users from implementing the interface, so
+	// future additions to it will not violate compatibility.
+	private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+	trace.Span
+	ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+	embedded.Span
+
+	// mu protects the contents of this span.
+	mu sync.Mutex
+
+	// parent holds the parent span of this span as a trace.SpanContext.
+	parent trace.SpanContext
+
+	// spanKind represents the kind of this span as a trace.SpanKind.
+	spanKind trace.SpanKind
+
+	// name is the name of this span.
+	name string
+
+	// startTime is the time at which this span was started.
+	startTime time.Time
+
+	// endTime is the time at which this span was ended. It contains the zero
+	// value of time.Time until the span is ended.
+	endTime time.Time
+
+	// status is the status of this span.
+	status Status
+
+	// childSpanCount holds the number of child spans created for this span.
+	childSpanCount int
+
+	// spanContext holds the SpanContext of this span.
+	spanContext trace.SpanContext
+
+	// attributes is a collection of user-provided key/values. The collection
+	// is constrained by a configurable maximum held by the parent
+	// TracerProvider. When additional attributes are added after this maximum
+	// is reached, they are dropped. The number of dropped attributes is
+	// tracked and reported in the ReadOnlySpan exported when the span ends.
+	attributes        []attribute.KeyValue
+	droppedAttributes int
+	logDropAttrsOnce  sync.Once
+
+	// events are stored in a FIFO queue capped by the configured limit.
+	events evictedQueue[Event]
+
+	// links are stored in a FIFO queue capped by the configured limit.
+	links evictedQueue[Link]
+
+	// executionTracerTaskEnd ends the execution tracer span.
+	executionTracerTaskEnd func()
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+}
+
+var (
+	_ ReadWriteSpan = (*recordingSpan)(nil)
+	_ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	return s.spanContext
+}
+
+// IsRecording reports whether this span is being recorded. If this span has
+// ended, this will return false.
+func (s *recordingSpan) IsRecording() bool {
+	if s == nil {
+		return false
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
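+//
+// For illustration, a sketch of how repeated updates rank (codes.Unset <
+// codes.Error < codes.Ok, per the comparison below); `span` and `err` are
+// assumed:
+//
+//	span.RecordError(err)                    // event only; status unchanged
+//	span.SetStatus(codes.Error, err.Error()) // recorded with description
+//	span.SetStatus(codes.Unset, "")          // ignored: Unset < Error
+//	span.SetStatus(codes.Ok, "")             // applied: Ok outranks Error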
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.status.Code > code {
+		return
+	}
+
+	status := Status{Code: code}
+	if code == codes.Error {
+		status.Description = description
+	}
+
+	s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists, the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	limit := s.tracer.provider.spanLimits.AttributeCountLimit
+	if limit == 0 {
+		// No attributes allowed.
+		s.addDroppedAttr(len(attributes))
+		return
+	}
+
+	// If adding these attributes could exceed the capacity of s, perform a
+	// de-duplication and truncation while adding to avoid over allocation.
+	if limit > 0 && len(s.attributes)+len(attributes) > limit {
+		s.addOverCapAttrs(limit, attributes)
+		return
+	}
+
+	// Otherwise, add without deduplication. When attributes are read they
+	// will be deduplicated, optimizing the operation.
+	s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
+	for _, a := range attributes {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+		s.attributes = append(s.attributes, a)
+	}
+}
+
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+	global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+	s.droppedAttributes += incr
+	s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+	// In order to not allocate more capacity to s.attributes than needed,
+	// prune and truncate this addition of attributes while adding.
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+
+	// Now that s.attributes is deduplicated, adding unique attributes up to
+	// the capacity of s will not over allocate s.attributes.
+	sum := len(attrs) + len(s.attributes)
+	s.attributes = slices.Grow(s.attributes, min(sum, limit))
+	for _, a := range attrs {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+
+		if idx, ok := exists[a.Key]; ok {
+			// Perform all updates before dropping, even when at capacity.
+			s.attributes[idx] = a
+			continue
+		}
+
+		if len(s.attributes) >= limit {
+			// Do not just drop all of the remaining attributes, make sure
+			// updates are checked and performed.
+			s.addDroppedAttr(1)
+		} else {
+			a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+			s.attributes = append(s.attributes, a)
+			exists[a.Key] = len(s.attributes) - 1
+		}
+	}
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+	if limit < 0 {
+		return attr
+	}
+	switch attr.Value.Type() {
+	case attribute.STRING:
+		if v := attr.Value.AsString(); len(v) > limit {
+			return attr.Key.String(safeTruncate(v, limit))
+		}
+	case attribute.STRINGSLICE:
+		v := attr.Value.AsStringSlice()
+		for i := range v {
+			if len(v[i]) > limit {
+				v[i] = safeTruncate(v[i], limit)
+			}
+		}
+		return attr.Key.StringSlice(v)
+	}
+	return attr
+}
+
+// safeTruncate truncates the string and guarantees valid UTF-8 is returned.
+func safeTruncate(input string, limit int) string {
+	if trunc, ok := safeTruncateValidUTF8(input, limit); ok {
+		return trunc
+	}
+	trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit)
+	return trunc
+}
+
+// safeTruncateValidUTF8 returns a copy of the input string safely truncated to
+// limit. The truncation is ensured to occur at the bounds of complete UTF-8
+// characters. If invalid encoding of UTF-8 is encountered, input is returned
+// with false, otherwise, the truncated input will be returned with true.
+func safeTruncateValidUTF8(input string, limit int) (string, bool) {
+	for cnt := 0; cnt <= limit; {
+		r, size := utf8.DecodeRuneInString(input[cnt:])
+		if r == utf8.RuneError {
+			return input, false
+		}
+
+		if cnt+size > limit {
+			return input[:cnt], true
+		}
+		cnt += size
+	}
+	return input, true
+}
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanOption currently supported is WithTimestamp, which will set the
+// end time for a Span's life-cycle.
+//
+// If this method is called while panicking, an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+	// Do not start by checking if the span is being recorded which requires
+	// acquiring a lock. Make a minimal check that the span is not nil.
+	if s == nil {
+		return
+	}
+
+	// Store the end time as soon as possible to avoid artificially increasing
+	// the span's duration in case some operation below takes a while.
+	et := monotonicEndTime(s.startTime)
+
+	// Do the relatively expensive check now that we have an end time and see
+	// if we need to do any more processing.
+	if !s.IsRecording() {
+		return
+	}
+
+	config := trace.NewSpanEndConfig(options...)
+	if recovered := recover(); recovered != nil {
+		// Record but don't stop the panic.
+		defer panic(recovered)
+		opts := []trace.EventOption{
+			trace.WithAttributes(
+				semconv.ExceptionType(typeStr(recovered)),
+				semconv.ExceptionMessage(fmt.Sprint(recovered)),
+			),
+		}
+
+		if config.StackTrace() {
+			opts = append(opts, trace.WithAttributes(
+				semconv.ExceptionStacktrace(recordStackTrace()),
+			))
+		}
+
+		s.addEvent(semconv.ExceptionEventName, opts...)
+	}
+
+	if s.executionTracerTaskEnd != nil {
+		s.executionTracerTaskEnd()
+	}
+
+	s.mu.Lock()
+	// Setting endTime to non-zero marks the span as ended and not recording.
+	if config.Timestamp().IsZero() {
+		s.endTime = et
+	} else {
+		s.endTime = config.Timestamp()
+	}
+	s.mu.Unlock()
+
+	sps := s.tracer.provider.getSpanProcessors()
+	if len(sps) == 0 {
+		return
+	}
+	snap := s.snapshot()
+	for _, sp := range sps {
+		sp.sp.OnEnd(snap)
+	}
+}
+
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error; this method
+// does not change the Span status. If this span is not being recorded or err is nil,
+// then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.IsRecording() {
+		return
+	}
+
+	opts = append(opts, trace.WithAttributes(
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	))
+
+	c := trace.NewEventConfig(opts...)
+	if c.StackTrace() {
+		opts = append(opts, trace.WithAttributes(
+			semconv.ExceptionStacktrace(recordStackTrace()),
+		))
+	}
+
+	s.addEvent(semconv.ExceptionEventName, opts...)
+}
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if !s.IsRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.events.add(e)
+	s.mu.Unlock()
+}
+
+// SetName sets the name of this span. If this span is not being recorded,
+// then this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dedupeAttrs()
+	return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+	// Use the fact that slices share the same backing array.
+	unique := s.attributes[:0]
+	for _, a := range s.attributes {
+		if idx, ok := (*record)[a.Key]; ok {
+			unique[idx] = a
+		} else {
+			unique = append(unique, a)
+			(*record)[a.Key] = len(unique) - 1
+		}
+	}
+	// s.attributes have element types of attribute.KeyValue. These types are
+	// not pointers and they themselves do not contain pointer fields,
+	// therefore the duplicate values do not need to be zeroed for them to be
+	// garbage collected.
+	s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.links.queue) == 0 {
+		return []Link{}
+	}
+	return s.links.copy()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.events.queue) == 0 {
+		return []Event{}
+	}
+	return s.events.copy()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) AddLink(link trace.Link) {
+	if !s.IsRecording() {
+		return
+	}
+	if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
+		link.SpanContext.TraceState().Len() == 0 {
+		return
+	}
+
+	l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		l.DroppedAttributeCount = len(l.Attributes)
+		l.Attributes = nil
+	} else if limit > 0 && len(l.Attributes) > limit {
+		l.DroppedAttributeCount = len(l.Attributes) - limit
+		l.Attributes = l.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.links.add(l)
+	s.mu.Unlock()
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+	return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+	var sd snapshot
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	sd.endTime = s.endTime
+	sd.instrumentationScope = s.tracer.instrumentationScope
+	sd.name = s.name
+	sd.parent = s.parent
+	sd.resource = s.tracer.provider.resource
+	sd.spanContext = s.spanContext
+	sd.spanKind = s.spanKind
+	sd.startTime = s.startTime
+	sd.status = s.status
+	sd.childSpanCount = s.childSpanCount
+
+	if len(s.attributes) > 0 {
+		s.dedupeAttrs()
+		sd.attributes = s.attributes
+	}
+	sd.droppedAttributeCount = s.droppedAttributes
+	if len(s.events.queue) > 0 {
+		sd.events = s.events.copy()
+		sd.droppedEventCount = s.events.droppedCount
+	}
+	if len(s.links.queue) > 0 {
+		sd.links = s.links.copy()
+		sd.droppedLinkCount = s.links.droppedCount
+	}
+	return &sd
+}
+
+func (s *recordingSpan) addChild() {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	s.childSpanCount++
+	s.mu.Unlock()
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
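+//
+// For illustration, a sketch of observing these tasks with the standard
+// runtime/trace tooling (file handling is assumed to succeed):
+//
+//	f, _ := os.Create("trace.out")
+//	defer f.Close()
+//	_ = rt.Start(f) // spans now start tasks named after the span
+//	defer rt.Stop()
+//	// ...create spans, then inspect with: go tool trace trace.out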
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+	if !rt.IsEnabled() {
+		// Avoid additional overhead if runtime/trace is not enabled.
+		return ctx
+	}
+	nctx, task := rt.NewTask(ctx, s.name)
+
+	s.mu.Lock()
+	s.executionTracerTaskEnd = task.End
+	s.mu.Unlock()
+
+	return nctx
+}
+
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+	embedded.Span
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+	sc     trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+	return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+	return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+	// Code is an identifier of a Span's state classification.
+	Code codes.Code
+	// Description is a user hint about why that status was set. It is only
+	// applicable when Code is Error.
+	Description string
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 000000000..6bdda3d94
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ExportSpans exports a batch of spans.
+	//
+	// This function is called synchronously, so there is no concurrency
+	// safety requirement. However, due to the synchronous calling pattern,
+	// it is critical that all timeouts and cancellations contained in the
+	// passed context must be honored.
+	//
+	// Any retry logic must be contained in this function. The SDK that
+	// calls this function will not implement any retry logic. All errors
+	// returned by this function are considered unrecoverable and will be
+	// reported to a configured error Handler.
+	ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown notifies the exporter of a pending halt to operations. The
+	// exporter is expected to perform any cleanup or synchronization it
+	// requires while honoring all timeouts and cancellations contained in
+	// the passed context.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 000000000..bec5e2097
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+	// DefaultAttributeValueLengthLimit is the default maximum allowed
+	// attribute value length, unlimited.
+	DefaultAttributeValueLengthLimit = -1
+
+	// DefaultAttributeCountLimit is the default maximum number of attributes
+	// a span can have.
+	DefaultAttributeCountLimit = 128
+
+	// DefaultEventCountLimit is the default maximum number of events a span
+	// can have.
+	DefaultEventCountLimit = 128
+
+	// DefaultLinkCountLimit is the default maximum number of links a span can
+	// have.
+	DefaultLinkCountLimit = 128
+
+	// DefaultAttributePerEventCountLimit is the default maximum number of
+	// attributes a span event can have.
+	DefaultAttributePerEventCountLimit = 128
+
+	// DefaultAttributePerLinkCountLimit is the default maximum number of
+	// attributes a span link can have.
+	DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+	// AttributeValueLengthLimit is the maximum allowed attribute value length.
+	//
+	// This limit only applies to string and string slice attribute values.
+	// Any string longer than this value will be truncated to this length.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeValueLengthLimit int
+
+	// AttributeCountLimit is the maximum allowed span attribute count. Any
+	// attribute added to a span once this limit is reached will be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count. Any event
+	// added to a span once this limit is reached will be added, but the
+	// oldest event will be dropped.
+	//
+	// Setting this to zero means no events will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count. Any link added
+	// to a span once this limit is reached will be added, but the oldest
+	// link will be dropped.
+	//
+	// Setting this to zero means no links will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum number of attributes allowed
+	// per span event. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for events.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum number of attributes allowed
+	// per span link. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for links.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+	return SpanLimits{
+		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
+		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
+		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
+		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 000000000..af7f9177f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnStart is called when a span is started. It is called synchronously
+	// and should not block.
+	OnStart(parent context.Context, s ReadWriteSpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEnd is called when a span is finished. It is called synchronously and
+	// hence must not block.
+	OnEnd(s ReadOnlySpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnStart, OnEnd, or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored; this
+	// call should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports to the configured Exporter all ended spans that have
+	// not yet been exported. It should only be called when absolutely
+	// necessary, such as when using a FaaS provider that may suspend the
+	// process after an invocation, but before the Processor can export the
+	// completed spans.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+	sp    SpanProcessor
+	state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+	return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 000000000..43419d3b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+	embedded.Tracer
+
+	provider             *TracerProvider
+	instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+	config := trace.NewSpanStartConfig(options...)
+
+	if ctx == nil {
+		// Prevent trace.ContextWithSpan from panicking.
+		ctx = context.Background()
+	}
+
+	// For local spans created by this SDK, track child span count.
+	if p := trace.SpanFromContext(ctx); p != nil {
+		if sdkSpan, ok := p.(*recordingSpan); ok {
+			sdkSpan.addChild()
+		}
+	}
+
+	s := tr.newSpan(ctx, name, &config)
+	if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+		sps := tr.provider.getSpanProcessors()
+		for _, sp := range sps {
+			sp.sp.OnStart(ctx, rw)
+		}
+	}
+	if rtt, ok := s.(runtimeTracer); ok {
+		ctx = rtt.runtimeTrace(ctx)
+	}
+
+	return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+	// runtimeTrace starts a "runtime/trace".Task for the span and
+	// returns a context containing the task.
+	runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+	// If told explicitly to make this a new root use a zero value SpanContext
+	// as a parent which contains an invalid trace ID and is not remote.
+	var psc trace.SpanContext
+	if config.NewRoot() {
+		ctx = trace.ContextWithSpanContext(ctx, psc)
+	} else {
+		psc = trace.SpanContextFromContext(ctx)
+	}
+
+	// If there is a valid parent trace ID, use it to ensure the continuity of
+	// the trace. Always generate a new span ID so other components can rely
+	// on a unique span ID, even if the Span is non-recording.
+	var tid trace.TraceID
+	var sid trace.SpanID
+	if !psc.TraceID().IsValid() {
+		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+	} else {
+		tid = psc.TraceID()
+		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+	}
+
+	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+		ParentContext: ctx,
+		TraceID:       tid,
+		Name:          name,
+		Kind:          config.SpanKind(),
+		Attributes:    config.Attributes(),
+		Links:         config.Links(),
+	})
+
+	scc := trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceState: samplingResult.Tracestate,
+	}
+	if isSampled(samplingResult) {
+		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+	} else {
+		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+	}
+	sc := trace.NewSpanContext(scc)
+
+	if !isRecording(samplingResult) {
+		return tr.newNonRecordingSpan(sc)
+	}
+	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+	startTime := config.Timestamp()
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	s := &recordingSpan{
+		// Do not pre-allocate the attributes slice here! Doing so will
+		// allocate memory that is likely never going to be used, or if used,
+		// will be over-sized. The default Go compiler has been tested to
+		// dynamically allocate needed space very well. Benchmarking has shown
+		// it to be more performant than what we can predetermine here,
+		// especially for the common use case of few to no added
+		// attributes.
+
+		parent:      psc,
+		spanContext: sc,
+		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
+		name:        name,
+		startTime:   startTime,
+		events:      newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+		links:       newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
+		tracer:      tr,
+	}
+
+	for _, l := range config.Links() {
+		s.AddLink(l)
+	}
+
+	s.SetAttributes(sr.Attributes...)
+	s.SetAttributes(config.Attributes()...)
+
+	return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+	return nonRecordingSpan{tracer: tr, sc: sc}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 000000000..b84dd2c5e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+	return "1.16.0-rc.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 000000000..b7cede891
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
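+//
+// For example (illustrative sketch):
+//
+//	fmt.Println(sdk.Version()) // prints "1.29.0"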
+func Version() string { + return "1.29.0" +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md new file mode 100644 index 000000000..bc60aa603 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.21.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.21.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.21.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go new file mode 100644 index 000000000..a9a15a1da --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go @@ -0,0 +1,1866 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API does not expose a +// clear notion of client and server). This also covers UDP network +// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) +// and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - unix domain + // socket name, IPv4 or IPv6 address. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/tmp/my.sock', '10.1.2.80' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent client address behind + // any intermediaries (e.g. proxies) if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent client port behind any + // intermediaries (e.g. proxies) if it's available. + ClientPortKey = attribute.Key("client.port") + + // ClientSocketAddressKey is the attribute Key conforming to the + // "client.socket.address" semantic conventions. It represents the + // immediate client peer address - unix domain socket name, IPv4 or IPv6 + // address. + // + // Type: string + // RequirementLevel: Recommended (If different than `client.address`.) + // Stability: stable + // Examples: '/tmp/my.sock', '127.0.0.1' + ClientSocketAddressKey = attribute.Key("client.socket.address") + + // ClientSocketPortKey is the attribute Key conforming to the + // "client.socket.port" semantic conventions. It represents the immediate + // client peer port number + // + // Type: int + // RequirementLevel: Recommended (If different than `client.port`.) 
+ // Stability: stable + // Examples: 35555 + ClientSocketPortKey = attribute.Key("client.socket.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// unix domain socket name, IPv4 or IPv6 address. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// ClientSocketAddress returns an attribute KeyValue conforming to the +// "client.socket.address" semantic conventions. It represents the immediate +// client peer address - unix domain socket name, IPv4 or IPv6 address. +func ClientSocketAddress(val string) attribute.KeyValue { + return ClientSocketAddressKey.String(val) +} + +// ClientSocketPort returns an attribute KeyValue conforming to the +// "client.socket.port" semantic conventions. It represents the immediate +// client peer port number +func ClientSocketPort(val int) attribute.KeyValue { + return ClientSocketPortKey.Int(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the deprecated, use + // `http.request.method` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the deprecated, + // use `http.response.status_code` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the deprecated, use `url.scheme` + // instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the deprecated, use `url.full` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + HTTPURLKey = attribute.Key("http.url") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the deprecated, use `url.path` and + // `url.query` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // deprecated, use `http.request.body.size` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // deprecated, use `http.response.body.size` instead. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the deprecated, use +// `http.request.method` instead. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the deprecated, use +// `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the deprecated, use `url.scheme` +// instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the deprecated, use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the deprecated, use `url.path` and +// `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the +// deprecated, use `http.request.body.size` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the +// deprecated, use `http.response.body.size` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the deprecated, + // use `server.socket.domain` on client spans. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the deprecated, + // use `server.socket.address` on client spans and `client.socket.address` + // on server spans. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the deprecated, + // use `server.socket.port` on client spans and `client.socket.port` on + // server spans. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. 
It represents the deprecated, use `server.address` + // on client spans and `client.address` on server spans. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the deprecated, use `server.port` on + // client spans and `client.port` on server spans. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the deprecated, use + // `server.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the deprecated, use `server.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the deprecated, + // use `server.socket.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the deprecated, + // use `server.socket.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the deprecated, use + // `network.transport`. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + NetTransportKey = attribute.Key("net.transport") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. It represents the deprecated, + // use `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. It represents the + // deprecated, use `network.protocol.version`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the deprecated, + // use `network.transport` and `network.type`. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + NetSockFamilyKey = attribute.Key("net.sock.family") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the deprecated, use +// `server.socket.domain` on client spans. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use +// `server.socket.address` on client spans and `client.socket.address` on +// server spans. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the deprecated, use +// `server.socket.port` on client spans and `client.socket.port` on server +// spans. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the deprecated, use +// `server.address` on client spans and `client.address` on server spans. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the deprecated, use +// `server.port` on client spans and `client.port` on server spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the deprecated, use +// `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the deprecated, use +// `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the deprecated, use +// `server.socket.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the deprecated, use +// `server.socket.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. 
It represents the deprecated, use
+// `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+	return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions. It represents the deprecated,
+// use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+	return NetProtocolVersionKey.String(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+	// DestinationDomainKey is the attribute Key conforming to the
+	// "destination.domain" semantic conventions. It represents the domain name
+	// of the destination system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'foo.example.com'
+	// Note: This value may be a host name, a fully qualified domain name, or
+	// another host naming format.
+	DestinationDomainKey = attribute.Key("destination.domain")
+
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the peer
+	// address, for example IP address or UNIX socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.5.3.2'
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the peer port
+	// number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationDomain returns an attribute KeyValue conforming to the
+// "destination.domain" semantic conventions. It represents the domain name of
+// the destination system.
+func DestinationDomain(val string) attribute.KeyValue {
+	return DestinationDomainKey.String(val)
+}
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the peer address,
+// for example IP address or UNIX socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the peer port number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// Describes HTTP attributes.
+const (
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER` and, except if reporting
+	// a metric, MUST
+	// set the exact method received in the request line as value of the
+	// `http.request.method_original` attribute.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods; it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and the `http.request.method`
+	// attribute value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case insensitive SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+)
+
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTP Server attributes
+const (
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have same `event.name`, yet be
+	// unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // TypeKey is the attribute Key conforming to the "type" semantic + // conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + TypeKey = attribute.Key("type") + + // PoolKey is the attribute Key conforming to the "pool" semantic + // conventions. It represents the name of the memory pool. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + PoolKey = attribute.Key("pool") +) + +var ( + // Heap memory + TypeHeap = TypeKey.String("heap") + // Non-heap memory + TypeNonHeap = TypeKey.String("non_heap") +) + +// Pool returns an attribute KeyValue conforming to the "pool" semantic +// conventions. It represents the name of the memory pool. +func Pool(val string) attribute.KeyValue { + return PoolKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API does not expose a +// clear notion of client and server). This also covers UDP network +// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) +// and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the logical server hostname, matches + // server FQDN if available, and IP or socket address if FQDN is not known. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the logical server port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + ServerPortKey = attribute.Key("server.port") + + // ServerSocketDomainKey is the attribute Key conforming to the + // "server.socket.domain" semantic conventions. It represents the domain + // name of an immediate peer. + // + // Type: string + // RequirementLevel: Recommended (If different than `server.address`.) + // Stability: stable + // Examples: 'proxy.example.com' + // Note: Typically observed from the client side, and represents a proxy or + // other intermediary domain name. + ServerSocketDomainKey = attribute.Key("server.socket.domain") + + // ServerSocketAddressKey is the attribute Key conforming to the + // "server.socket.address" semantic conventions. It represents the physical + // server IP address or Unix socket address. If set from the client, should + // simply use the socket's peer address, and not attempt to find any actual + // server IP (i.e., if set from client, this may represent some proxy + // server instead of the logical server). + // + // Type: string + // RequirementLevel: Recommended (If different than `server.address`.) + // Stability: stable + // Examples: '10.5.3.2' + ServerSocketAddressKey = attribute.Key("server.socket.address") + + // ServerSocketPortKey is the attribute Key conforming to the + // "server.socket.port" semantic conventions. It represents the physical + // server port. + // + // Type: int + // RequirementLevel: Recommended (If different than `server.port`.) 
+ // Stability: stable + // Examples: 16456 + ServerSocketPortKey = attribute.Key("server.socket.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the logical server +// hostname, matches server FQDN if available, and IP or socket address if FQDN +// is not known. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the logical server port number +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// ServerSocketDomain returns an attribute KeyValue conforming to the +// "server.socket.domain" semantic conventions. It represents the domain name +// of an immediate peer. +func ServerSocketDomain(val string) attribute.KeyValue { + return ServerSocketDomainKey.String(val) +} + +// ServerSocketAddress returns an attribute KeyValue conforming to the +// "server.socket.address" semantic conventions. It represents the physical +// server IP address or Unix socket address. If set from the client, should +// simply use the socket's peer address, and not attempt to find any actual +// server IP (i.e., if set from client, this may represent some proxy server +// instead of the logical server). +func ServerSocketAddress(val string) attribute.KeyValue { + return ServerSocketAddressKey.String(val) +} + +// ServerSocketPort returns an attribute KeyValue conforming to the +// "server.socket.port" semantic conventions. It represents the physical server +// port. +func ServerSocketPort(val int) attribute.KeyValue { + return ServerSocketPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API does not expose a clear notion +// of client and server. +const ( + // SourceDomainKey is the attribute Key conforming to the "source.domain" + // semantic conventions. It represents the domain name of the source + // system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'foo.example.com' + // Note: This value may be a host name, a fully qualified domain name, or + // another host naming format. + SourceDomainKey = attribute.Key("source.domain") + + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address, for example IP + // address or Unix socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.5.3.2' + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceDomain returns an attribute KeyValue conforming to the +// "source.domain" semantic conventions. It represents the domain name of the +// source system. 
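+//
+// For example, an instrumentation might attach it to a span (illustrative
+// sketch; span is assumed to be a trace.Span):
+//
+//	span.SetAttributes(semconv.SourceDomain("foo.example.com"))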
+func SourceDomain(val string) attribute.KeyValue { + return SourceDomainKey.String(val) +} + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address, for +// example IP address or Unix socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // Transport Layer](https://osi-model.com/transport-layer/) or + // [Inter-process Communication + // method](https://en.wikipedia.org/wiki/Inter-process_communication). The + // value SHOULD be normalized to lowercase. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI Network + // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The + // value SHOULD be normalized to lowercase. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + NetworkTypeKey = attribute.Key("network.type") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // Application Layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. The value SHOULD be normalized to lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `network.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. See note below + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// Application Layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. The value SHOULD be normalized to lowercase. 
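+//
+// For example (illustrative sketch; span is assumed to be a trace.Span):
+//
+//	span.SetAttributes(semconv.NetworkProtocolName("http"))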
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. 
It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's different + // than `http.request.method`.) + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") +) + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. 
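+//
+// For example, a server instrumentation could record the Content-Length of a
+// response (illustrative sketch; span is assumed to be a trace.Span):
+//
+//	span.SetAttributes(semconv.HTTPResponseBodySize(3495))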
+func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. 
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+	return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
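+
+// Editor's note: an illustrative, hypothetical sketch, not part of the
+// generated specification. It shows how the per-message attributes above
+// might be attached to a messaging span; the span variable and values are
+// examples only.
+//
+//	span.SetAttributes(
+//		MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//		MessagingMessagePayloadSizeBytes(2738),
+//	)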
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker does not have such notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have an auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+	return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
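+
+// Editor's note: an illustrative, hypothetical producer-span usage of the
+// destination and RabbitMQ attributes above; the span variable and values are
+// examples only, not part of the generated specification.
+//
+//	span.SetAttributes(
+//		MessagingDestinationName("MyQueue"),
+//		MessagingRabbitmqDestinationRoutingKey("myKey"),
+//	)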
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka that are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is
+	// `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents
+	// the name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming
+	// to the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents
+	// the offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to
+	// the "messaging.kafka.message.tombstone" semantic conventions. It
+	// represents a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: stable
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka that are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
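+
+// Editor's note: an illustrative, hypothetical use of the Kafka attributes
+// above on a consumer span; the span variable and values are examples only,
+// not part of the generated specification.
+//
+//	span.SetAttributes(
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaDestinationPartition(2),
+//		MessagingKafkaMessageOffset(42),
+//	)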
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of each other.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds at
+	// which the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: stable
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of a message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of a message, another way to mark the message besides the
+	// message ID.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds at which the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
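+
+// Editor's note: a hypothetical sketch combining the RocketMQ attributes
+// above on a producer span for a FIFO message; the span variable and values
+// are illustrative only, not part of the generated specification.
+//
+//	span.SetAttributes(
+//		MessagingRocketmqNamespace("myNamespace"),
+//		MessagingRocketmqClientGroup("myProducerGroup"),
+//		MessagingRocketmqMessageTypeFifo,
+//		MessagingRocketmqMessageGroup("myMessageGroup"),
+//	)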
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of a message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of a message, another way to mark the message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it should be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such a case, the
+	// username and password should be redacted and the attribute's value
+	// should be `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	// Note: When missing, the value is assumed to be `/`
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed
+	// when instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions.
It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") +) + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go new file mode 100644 index 000000000..461331a55 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.21.0 +// version of the OpenTelemetry semantic conventions. 
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
new file mode 100644
index 000000000..c09d9317e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version of
+	// the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant,
+	// provides a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
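+
+// Editor's note: a hypothetical sketch (not part of the generated
+// specification) of recording a feature flag evaluation as a span event with
+// the attributes above, assuming the go.opentelemetry.io/otel/trace package:
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"),
+//	))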
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`: one for sent messages and one for received
+	// messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`: one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
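+
+// Editor's note: an illustrative, hypothetical span event for a sent RPC
+// message using the attributes above, assuming the
+// go.opentelemetry.io/otel/trace package; names and values are examples only.
+//
+//	span.AddEvent("message", trace.WithAttributes(
+//		MessageTypeSent,
+//		MessageID(1),
+//		MessageUncompressedSize(1024),
+//	))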
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
new file mode 100644
index 000000000..5184ee339
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
new file mode 100644
index 000000000..f7aaa50b9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
@@ -0,0 +1,2299 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// separated by a space
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute
+	// SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider.
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability; it represents the availability zone where the resource is
+	// running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + 
CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability; it
+// represents the availability zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each writes to its own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). 
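+ // For example (editor's illustration), an instance named `my-vm` in
+ // project `my-project` typically resolves as
+ // `my-vm.c.my-project.internal` under the global default internal DNS.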
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") + + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") +) + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. 
It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // OCI defines a digest of manifest. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). 
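+ // For example (editor's illustration), for a container started with
+ // `docker run otelcontribcol --config config.yaml`, this would be
+ // `otelcontribcol`.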
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. 
It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. 
It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions.
It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. 
It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the vM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. 
See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the vM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S does not have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. 
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. 
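+//
+// Illustrative usage (editor's sketch, not part of the generated file;
+// assumes the `go.opentelemetry.io/otel/sdk/resource` package):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//	)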
+func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. 
+const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
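+//
+// Illustrative usage (editor's sketch; the values are the upstream
+// examples):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		K8SDaemonSetName("opentelemetry"),
+//		K8SDaemonSetUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//	)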
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. 
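+ // Editor's note: on Linux distributions this can often be taken from the
+ // `PRETTY_NAME` field of `/etc/os-release`.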
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. 
On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). 
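+//
+// Illustrative usage (editor's sketch, not part of the generated file;
+// assumes the standard library `os` package and the SDK resource package):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ProcessPID(os.Getpid()),
+//		ProcessExecutableName("otelcol"),
+//	)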
+func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. 
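+ // Editor's note: for a Go binary this is typically the value of
+ // `runtime.Compiler`, for example `gc`.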
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. 
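+//
+// Illustrative usage (editor's sketch; the values are the upstream
+// examples):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ServiceName("shoppingcart"),
+//		ServiceVersion("2.0.0"),
+//	)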
+func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. 
It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). 
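+//
+// A minimal sketch of how a non-OTLP exporter might assemble these scope
+// attributes (illustrative only):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.OTelScopeName("io.opentelemetry.contrib.mongodb"),
+//		semconv.OTelScopeVersion("1.0.0"),
+//	}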
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It is deprecated; use the
+	// `otel.scope.name` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It is deprecated; use the
+	// `otel.scope.version` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
new file mode 100644
index 000000000..be07217d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
new file mode 100644
index 000000000..55698cc44
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go
@@ -0,0 +1,2484 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). 
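+//
+// A minimal sketch (illustrative only), assuming a `span` obtained from a
+// tracer in `go.opentelemetry.io/otel/trace`:
+//
+//	span.SetAttributes(
+//		semconv.AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
+//	)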
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
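+
+// A minimal sketch of annotating a span for a CloudEvents operation
+// (illustrative only; assumes a `span` from `go.opentelemetry.io/otel/trace`):
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventType("com.github.pull_request.opened"),
+//	)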
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable.)
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (Should be collected by default only if
+	// there is sanitization that excludes sensitive information.)
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If `db.statement` is not
+	// applicable.)
+	// Stability: stable
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+	// Some other SQL database. Fallback only.
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBConnectionString returns an 
attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// connecting to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
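+
+// A minimal sketch of annotating a database client span with these
+// call-level attributes (illustrative only; assumes a `span` from
+// `go.opentelemetry.io/otel/trace`):
+//
+//	span.SetAttributes(
+//		semconv.DBSystemMySQL,
+//		semconv.DBName("customers"),
+//		semconv.DBStatement("SELECT * FROM wuser_table"),
+//		semconv.DBOperation("SELECT"),
+//	)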
+
+// Call-level attributes for Cassandra
+const (
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary table that the operation is acting upon, including the keyspace
+	// name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the db.sql.table attribute but references cassandra
+	// rather than sql. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether or
+// not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. 
+ // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU + // consumed for that operation + // + // Type: double + // RequirementLevel: ConditionallyRequired (when available) + // Stability: stable + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes the type
+	// of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions.
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
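+//
+// A minimal sketch for an incoming FaaS invocation span (illustrative only;
+// assumes a `span` from `go.opentelemetry.io/otel/trace`):
+//
+//	span.SetAttributes(
+//		semconv.FaaSColdstart(true),
+//		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)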
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
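+//
+// A hypothetical usage sketch (assuming this package is imported as semconv
+// and span is an active trace.Span; the scope string mirrors the example
+// above):
+//
+//	span.SetAttributes(semconv.EnduserScope("read:message, write:files"))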
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
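+//
+// A hypothetical usage sketch (assuming this package is imported as semconv,
+// span is an active trace.Span, and metricsJSON is a string holding the
+// JSON-serialized response field):
+//
+//	span.SetAttributes(semconv.AWSDynamoDBItemCollectionMetrics(metricsJSON))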
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the the +// number of items in the `TableNames` response parameter. 
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
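+//
+// A hypothetical usage sketch (assuming this package is imported as semconv
+// and span is an active trace.Span; the value 50 mirrors the example above):
+//
+//	span.SetAttributes(semconv.AWSDynamoDBScannedCount(50))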
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
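+//
+// A hypothetical usage sketch (assuming this package is imported as semconv
+// and span is an active trace.Span; the bucket/key value is illustrative):
+//
+//	span.SetAttributes(semconv.AWSS3CopySource("my-bucket/someFile.yml"))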
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended (If a client id is available) + // Stability: stable + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 000000000..2de1fc3c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 000000000..d8dc822b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It uniquely identifies
+ // the framework API revision offered by a version
+ // (`os.version`) of the Android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It uniquely identifies the
+// framework API revision offered by a version (`os.version`) of the Android
+// operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+	// AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.result" semantic conventions. It represents
+	// the rate-limiting result; it shows whether the lease was acquired or
+	// contains a rejection reason.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'acquired', 'request_canceled'
+	AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+	// AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+	// the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+	// represents the full type name of the
+	// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+	// implementation that handled the exception.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if and only if the exception
+	// was handled by this handler.)
+	// Stability: stable
+	// Examples: 'Contoso.MyHandler'
+	AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+	// AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+	// to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+	// It represents the ASP.NET Core exception middleware handling result.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'handled', 'unhandled'
+	AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+	// AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+	// "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+	// the rate limiting policy name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fixed', 'sliding', 'token'
+	AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+	// AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+	// "aspnetcore.request.is_unhandled" semantic conventions. It represents
+	// the flag indicating if request was handled by the application pipeline.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: True
+	AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+	// AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+	// "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+	// value that indicates whether the matched route is a fallback route.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. 
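+//
+// A minimal usage sketch (an illustration only; assumes a `span` of type
+// trace.Span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(semconv.AspnetcoreRoutingIsFallback(true))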
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. 
It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. 
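+//
+// A minimal usage sketch annotating a DynamoDB Query call (an illustration
+// only; assumes a `span` of type trace.Span from
+// go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBLimit(25),
+//		semconv.AWSDynamoDBScanForward(false),
+//	)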
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
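+	//
+	// A minimal usage sketch (an illustration only; assumes the ARN was read
+	// from the ECS task metadata endpoint and
+	// go.opentelemetry.io/otel/sdk/resource is imported as `resource`):
+	//
+	//	res := resource.NewSchemaless(
+	//		semconv.AWSECSContainerARN("arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"),
+	//	)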
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
+ // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. 
+ // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
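+	//
+	// A minimal usage sketch (an illustration only; assumes an ECS workload
+	// and go.opentelemetry.io/otel/sdk/resource imported as `resource`; the
+	// platform value is paired with the matching `cloud.provider`):
+	//
+	//	res := resource.NewSchemaless(
+	//		semconv.CloudProviderAWS,
+	//		semconv.CloudPlatformAWSECS,
+	//	)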
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. 
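+	//
+	// A minimal usage sketch for the AWS Lambda case described above (an
+	// illustration only; assumes a `span` of type trace.Span and an ARN whose
+	// alias suffix has already been resolved to a function version):
+	//
+	//	span.SetAttributes(semconv.CloudResourceID(
+	//		"arn:aws:lambda:us-east-1:123456:function:myfunction:42"))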
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP).
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
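Usage illustration (editorial, not part of the generated file): the cloud.* constructors and the provider/platform enum members above are typically attached to an OTel resource rather than to individual spans. A minimal sketch, assuming this package is imported as `semconv` (the exact import path and version depend on the vendored revision) and using made-up values:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed path/version
)

// newCloudResource describes where the workload runs. Enum members such as
// CloudProviderAWS are ready-made KeyValues and are passed as-is; the
// constructors wrap caller-supplied values.
func newCloudResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1a"),
		semconv.CloudAccountID("123456789012"), // hypothetical account ID
	)
}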
+
+// Attributes for CloudEvents.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
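Usage illustration (editorial): the code.* helpers above are span-level attributes that record where in the source an operation lives. A minimal sketch, assuming an import of this package as `semconv` (path/version are assumptions) and illustrative location values:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed path/version
	"go.opentelemetry.io/otel/trace"
)

// serveRequest annotates its span with the location of the instrumented
// operation; all names and values here are illustrative only.
func serveRequest(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "serveRequest",
		trace.WithAttributes(
			semconv.CodeFunction("serveRequest"),
			semconv.CodeNamespace("example/server"),
			semconv.CodeFilepath("/srv/app/server.go"),
			semconv.CodeLineNumber(42),
		))
	defer span.End()
}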
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container. [2]
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string. [2]
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCPUStateKey is the attribute Key conforming to the
+	// "container.cpu.state" semantic conventions. It represents the CPU state
+	// for this data point.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'user', 'kernel'
+	ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name, for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+	// When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+	ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+	// When CPU is used by the system (host OS)
+	ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+	// When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+	ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
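Usage illustration (editorial): enum members like ContainerCPUStateUser above are already complete KeyValues, so they can be passed anywhere an attribute is accepted, e.g. as a metric attribute. A minimal sketch; the `semconv` import and the metric name are assumptions, not prescribed by this file:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed path/version
)

// recordCPUTime records a CPU-time measurement tagged with one of the
// container.cpu.state enum members (error handling elided for brevity).
func recordCPUTime(ctx context.Context, seconds float64) {
	cpuTime, _ := otel.Meter("example").Float64Counter("container.cpu.time")
	cpuTime.Add(ctx, seconds, metric.WithAttributes(
		semconv.ContainerCPUStateUser, // enum member, already a KeyValue
	))
}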
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
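Usage illustration (editorial): the container.* constructors are resource-level attributes describing the container a process runs in. A minimal sketch reusing the documented example values, with the `semconv` import path assumed:

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed path/version
)

// newContainerResource combines several container.* attributes into a
// resource; values are taken from the Examples in the comments above.
func newContainerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"), // variadic: tags only, not full refs
		semconv.ContainerRuntime("containerd"),
	)
}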
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+	// DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+	// "db.client.connections.pool.name" semantic conventions. It represents
+	// the name of the connection pool; unique within the instrumented
+	// application. In case the connection pool implementation doesn't provide
+	// a name, instrumentation should use a combination of `server.address` and
+	// `server.port` attributes formatted as `server.address:server.port`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+	// DBClientConnectionsStateKey is the attribute Key conforming to the
+	// "db.client.connections.state" semantic conventions. It represents the
+	// state of a connection in the pool.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle'
+	DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+	// DBCollectionNameKey is the attribute Key conforming to the
+	// "db.collection.name" semantic conventions. It represents the name of a
+	// collection (table, container) within the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: If the collection name is parsed from the query, it SHOULD match
+	// the value provided in the query and may be qualified with the schema and
+	// database name.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBCollectionNameKey = attribute.Key("db.collection.name")
+
+	// DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+	// semantic conventions. It represents the name of the database, fully
+	// qualified within the server address and port.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'test.users'
+	// Note: If a database system has multiple namespace components, they
+	// SHOULD be concatenated (potentially using database system specific
+	// conventions) from most general to most specific namespace component, and
+	// more specific namespaces SHOULD NOT be captured without the more general
+	// namespaces, to ensure that "startswith" queries for the more general
+	// namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+	return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+	return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+	return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+	return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+	return DBQueryTextKey.String(val)
+}
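Usage illustration (editorial): the general db.* attributes typically annotate a client span around a database call, with `db.system` taken from the enum list above. A minimal sketch; the `semconv` import path is an assumption and the query is illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed path/version
	"go.opentelemetry.io/otel/trace"
)

// queryUsers wraps a (hypothetical) database call in a span carrying the
// db.* attributes defined above.
func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT public.users",
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL, // enum member from the list above
			semconv.DBNamespace("customers"),
			semconv.DBCollectionName("public.users"),
			semconv.DBOperationName("SELECT"),
			semconv.DBQueryText("SELECT * FROM public.users WHERE username = ?"),
		))
	defer span.End()
	// ... execute the query here ...
}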
+
+// This group defines attributes for Cassandra.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// Cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// Cosmos DB operation type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// Cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// This group defines attributes for Elasticsearch.
+const (
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a request was
+	// routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	// Note: `deployment.environment` does not affect the uniqueness
+	// constraints defined through
+	// the `service.namespace`, `service.name` and `service.instance.id`
+	// resource attributes.
+	// This implies that resources carrying the following attribute
+	// combinations MUST be
+	// considered to be identifying the same service:
+	//
+	// * `service.name=frontend`, `deployment.environment=production`
+	// * `service.name=frontend`, `deployment.environment=staging`.
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// Attributes that represent an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. This attribute is deprecated: use the
+	// `device.app.lifecycle` event definition, including `android.state` as a
+	// payload field, instead.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the operation ended
+	// with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
+	// cardinality.
+	//
+	// When `error.type` is set to a type (e.g., an exception type), its
+	// canonical class name identifying the type within the artifact SHOULD be
+	// used.
+	//
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
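Usage illustration (editorial): per the note above, `error.type` is set only on failure, preferring a predictable, low-cardinality value and falling back to `_OTHER`. A minimal sketch, with the `semconv` import path assumed and `net.DNSError` chosen purely as an example error class:

package main

import (
	"errors"
	"net"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed path/version
	"go.opentelemetry.io/otel/trace"
)

// setErrorType tags the span with error.type on failure only.
func setErrorType(span trace.Span, err error) {
	if err == nil {
		return // successful operations SHOULD NOT set error.type
	}
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		// A known, low-cardinality error class.
		span.SetAttributes(semconv.ErrorTypeKey.String("net.DNSError"))
	} else {
		span.SetAttributes(semconv.ErrorTypeOther) // documented _OTHER fallback
	}
}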
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be set to true if
+ // the exception event is recorded at a point where it is known that the
+ // exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
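+ // Recording an exception that is escaping a span, per the
+ // `exception.escaped` note above, might look like the following sketch
+ // (illustrative only; `span` and `err` are assumed to be in scope):
+ //
+ //	if err != nil {
+ //		span.RecordError(err, trace.WithAttributes(
+ //			semconv.ExceptionEscaped(true),
+ //		))
+ //	}
+ //	span.End()
+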
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes the type
+ // of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It SHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
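+ //
+ // A short sketch of describing a file with the helpers in this section
+ // (illustrative only; `span` and all values are placeholders):
+ //
+ //	span.SetAttributes(
+ //		semconv.FileDirectory("/home/alice"),
+ //		semconv.FileName("example.png"),
+ //		semconv.FileExtension("png"),
+ //		semconv.FileSize(1024),
+ //	)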
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
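+//
+// Several of the gen_ai.* helpers in this section are typically combined on
+// a single LLM client span; a sketch (illustrative only; the values are
+// placeholders):
+//
+//	span.SetAttributes(
+//		semconv.GenAiRequestModel("gpt-4"),
+//		semconv.GenAiResponseID("chatcmpl-123"),
+//		semconv.GenAiUsagePromptTokens(100),
+//		semconv.GenAiUsageCompletionTokens(180),
+//	)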
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for the Heroku platform on which the application is running.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
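+ //
+ // Since this is a string-slice attribute, all addresses go into a single
+ // value; a sketch using the documented example addresses:
+ //
+ //	semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e")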
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
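+//
+// Host attributes like these usually describe a resource rather than a
+// single span; a sketch (illustrative only; `resource` is assumed to be the
+// OpenTelemetry SDK resource package and `semconv.SchemaURL` the schema URL
+// constant defined alongside this package):
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostArchAMD64,
+//	)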
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
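+ //
+ // A sketch of tagging a connection-pool measurement with this attribute
+ // (illustrative only; the `poolCounter` instrument and its setup are
+ // assumed, and `metric` is the OpenTelemetry metric API package):
+ //
+ //	poolCounter.Add(ctx, 1,
+ //		metric.WithAttributes(semconv.HTTPConnectionStateActive))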
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known method, it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+
+	// HTTPRequestResendCountKey is the attribute Key conforming to the
+	// "http.request.resend_count" semantic conventions. It represents the
+	// ordinal number of request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of what was the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Service Unavailable,
+	// network issues, or any other).
+	HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+	// HTTPRequestSizeKey is the attribute Key conforming to the
+	// "http.request.size" semantic conventions. It represents the total size
+	// of the request in bytes. This should be the total number of bytes sent
+	// over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+	// and HTTP/3), headers, and request body if any.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1437
+	HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size
+	// of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+	// HTTPResponseSizeKey is the attribute Key conforming to the
+	// "http.response.size" semantic conventions. It represents the total size
+	// of the response in bytes. This should be the total number of bytes sent
+	// over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+	// HTTP/3), headers, and response body and trailers if any.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1437
+	HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route, that is, the path
+	// template in the format used by the respective server framework.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+	return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Java Virtual Machine related attributes.
+const (
+	// JvmBufferPoolNameKey is the attribute Key conforming to the
+	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
+	// the buffer pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mapped', 'direct'
+	// Note: Pool names are generally obtained via
+	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+	// JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+	// semantic conventions. It represents the name of the garbage collector
+	// action.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'end of minor GC', 'end of major GC'
+	// Note: Garbage collector action is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+	JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+	// JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+	// semantic conventions. It represents the name of the garbage collector.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Young Generation', 'G1 Old Generation'
+	// Note: Garbage collector name is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+	JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+	// JvmMemoryPoolNameKey is the attribute Key conforming to the
+	// "jvm.memory.pool.name" semantic conventions. It represents the name of
+	// the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+	// JvmMemoryTypeKey is the attribute Key conforming to the
+	// "jvm.memory.type" semantic conventions. It represents the type of
+	// memory.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'heap', 'non_heap'
+	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+	// JvmThreadDaemonKey is the attribute Key conforming to the
+	// "jvm.thread.daemon" semantic conventions. It represents whether the
+	// thread is a daemon or not.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+	// JvmThreadStateKey is the attribute Key conforming to the
+	// "jvm.thread.state" semantic conventions. It represents the state of the
+	// thread.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'runnable', 'blocked'
+	JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+	// Heap memory
+	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+	// Non-heap memory
+	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+	// A thread that has not yet started is in this state
+	JvmThreadStateNew = JvmThreadStateKey.String("new")
+	// A thread executing in the Java virtual machine is in this state
+	JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+	// A thread that is blocked waiting for a monitor lock is in this state
+	JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+	// A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+	JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+	// A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+	JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+	// A thread that has exited is in this state
+	JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+	return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+	return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+	return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+	return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the
+// thread is a daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+	return JvmThreadDaemonKey.Bool(val)
+}
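+
+// Illustrative usage (editor's example, not part of the generated
+// conventions): attributes for a JVM memory metric data point, as seen from
+// an importing package. The pool name shown is hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.JvmMemoryTypeHeap,
+//		semconv.JvmMemoryPoolName("G1 Eden Space"),
+//	}
+//	_ = attrs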
+
+// Kubernetes resource attributes.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
+	// ever added, we will recommend collecting the `k8s.cluster.uid` through
+	// the official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8S cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8S ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T
+	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html),
+	// which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	// ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	// different from all other UUIDs generated before 3603 A.D., or is
+	// extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+	// K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+	// conforming to the "k8s.container.status.last_terminated_reason" semantic
+	// conventions. It represents the last terminated reason of the Container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Evicted', 'Error'
+	K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
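+
+// Illustrative usage (editor's example, not part of the generated
+// conventions): Kubernetes resource attributes for a workload, typically
+// discovered by a resource detector rather than hard-coded. All values below
+// are hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SContainerName("redis"),
+//		semconv.K8SContainerRestartCount(2),
+//	}
+//	_ = attrs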
+
+// Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'audit.log'
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the
+	// basename of the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'uuid.log'
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/log/mysql/audit.log'
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions. It represents the full
+	// path to the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/lib/docker/uuid.log'
+	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// The generic attributes that may be used in any Log Record.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client.id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker doesn't have such notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationPartitionIDKey is the attribute Key conforming to
+	// the "messaging.destination.partition.id" semantic conventions. It
+	// represents the identifier of the partition messages are sent to or
+	// received from, unique within the `messaging.destination.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1'
+	MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+	// to the "messaging.destination_publish.anonymous" semantic conventions.
+	// It represents a boolean that is true if the publish message destination
+	// is anonymous (could be unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+	// MessagingDestinationPublishNameKey is the attribute Key conforming to
+	// the "messaging.destination_publish.name" semantic conventions. It
+	// represents the name of the original destination the message was
+	// published to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: The name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker doesn't have such notion, the original destination name
+	// SHOULD uniquely identify the broker.
+	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the
+	// size of the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1439
+	// Note: This can refer to either the compressed or the uncompressed body
+	// size. If both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the conversation ID identifying the conversation to which the message
+	// belongs, represented as a string. Sometimes called "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents
+	// the size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2738
+	// Note: This can refer to either the compressed or the uncompressed size.
+	// If both sizes are known, the uncompressed
+	// size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+	// MessagingOperationNameKey is the attribute Key conforming to the
+	// "messaging.operation.name" semantic conventions. It represents the
+	// system-specific name of the messaging operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack', 'nack', 'send'
+	MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+	// MessagingOperationTypeKey is the attribute Key conforming to the
+	// "messaging.operation.type" semantic conventions. It represents a string
+	// identifying the type of the messaging operation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
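+
+	// Illustrative usage (editor's example, not part of the generated
+	// conventions): attributes for a hypothetical publish operation on a
+	// queue named "MyQueue", as seen from an importing package.
+	//
+	//	attrs := []attribute.KeyValue{
+	//		semconv.MessagingOperationTypePublish,
+	//		semconv.MessagingDestinationName("MyQueue"),
+	//		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+	//	}
+	//	_ = attrs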
+
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents the messaging
+	// system as identified by the client instrumentation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The actual messaging system may differ from the one known by the
+	// client. For example, when using Kafka client libraries to communicate
+	// with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
+	// the instrumentation's best knowledge.
+	MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+	// One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+	MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+	// A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+	MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+	// One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+	MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+	// One or more messages are delivered to or processed by a consumer
+	MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
+	// One or more messages are settled
+	MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+var (
+	// Apache ActiveMQ
+	MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+	// Amazon Simple Queue Service (SQS)
+	MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+	// Azure Event Grid
+	MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
+	// Azure Event Hubs
+	MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
+	// Azure Service Bus
+	MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
+	// Google Cloud Pub/Sub
+	MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+	// Java Message Service
+	MessagingSystemJms = MessagingSystemKey.String("jms")
+	// Apache Kafka
+	MessagingSystemKafka = MessagingSystemKey.String("kafka")
+	// RabbitMQ
+	MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+	// Apache RocketMQ
+	MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+	return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+	return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka, which are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
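+
+// Illustrative usage (editor's example, not part of the generated
+// conventions): Kafka-specific attributes on a consumer span, as seen from
+// an importing package; the span variable and values are hypothetical.
+//
+//	span.SetAttributes(
+//		semconv.MessagingSystemKafka,
+//		semconv.MessagingKafkaConsumerGroup("my-group"),
+//		semconv.MessagingKafkaMessageOffset(42),
+//	)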
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// This group describes attributes specific to RabbitMQ.
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+	// MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+	// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+	// It represents the RabbitMQ message delivery tag.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 123
+	MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+	return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
+// This group describes attributes specific to RocketMQ.
+const (
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark the message besides the
+	// message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
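+
+	// Illustrative usage (editor's example, not part of the generated
+	// conventions): attributes for a hypothetical FIFO message in RocketMQ,
+	// as seen from an importing package.
+	//
+	//	attrs := []attribute.KeyValue{
+	//		semconv.MessagingRocketmqMessageTypeFifo,
+	//		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
+	//		semconv.MessagingRocketmqMessageTag("tagA"),
+	//	}
+	//	_ = attrs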
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
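+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// A minimal RocketMQ usage sketch (illustrative only; assumes a trace.Span
+// named span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqNamespace("myNamespace"),
+//		MessagingRocketmqMessageTypeFifo,
+//	)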
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+	// MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+	// It represents the ack deadline in seconds set for the modify ack
+	// deadline request.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+	// MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+	// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+	// represents the ack ID for a given message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ack_id'
+	MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+	// MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+	// conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+	// semantic conventions. It represents the delivery attempt for a given
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+	// MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+	// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+	// It represents the ordering key for a given message. If the attribute is
+	// not present, the message does not have an ordering key.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ordering_key'
+	MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack ID for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+	return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
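+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+	return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// A minimal GCP Pub/Sub usage sketch (illustrative only; assumes a
+// trace.Span named span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		MessagingGCPPubsubMessageAckID("ack_id"),
+//		MessagingGCPPubsubMessageDeliveryAttempt(2),
+//	)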
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+	// conforming to the "messaging.servicebus.destination.subscription_name"
+	// semantic conventions. It represents the name of the subscription in the
+	// topic that messages are received from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mySubscription'
+	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
+	// to the "messaging.servicebus.disposition_status" semantic conventions.
+	// It describes the [settlement
+	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.delivery_count" semantic
+	// conventions. It represents the number of deliveries that have been
+	// attempted for this message.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+	// conventions. It represents the UTC epoch seconds at which the message
+	// has been accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+	// Message is completed
+	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+	// Message is abandoned
+	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+	// Message is sent to dead letter queue
+	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+	// Message is deferred
+	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic that messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
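+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+	return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
+
+// A minimal Service Bus usage sketch (illustrative only; assumes a
+// trace.Span named span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		MessagingServicebusDispositionStatusComplete,
+//		MessagingServicebusMessageDeliveryCount(2),
+//	)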
+
+// This group describes attributes specific to Azure Event Hubs.
+const (
+	// MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+	// the "messaging.eventhubs.consumer.group" semantic conventions. It
+	// represents the name of the consumer group the event consumer is
+	// associated with.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'indexer'
+	MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+	// MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+	// to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+	// It represents the UTC epoch seconds at which the message has been
+	// accepted and stored in the entity.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1701393730
+	MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+	return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+	return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// These attributes may be used for any network-related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a Wi-Fi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
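+
+// A minimal network usage sketch (illustrative only; assumes a trace.Span
+// named span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//	)
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images specifically, this is the
+	// digest by which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.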
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human-readable (not intended to
+	// be parsed) OS version information, as reported by, for example, the
+	// `ver` or `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human-readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions.
It represents the human-readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human-readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// Attributes reserved for OpenTelemetry.
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
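+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// A minimal status usage sketch (illustrative only; assumes a trace.Span
+// named span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		OTelStatusCodeError,
+//		OTelStatusDescription("resource not found"),
+//	)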
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string. On Windows, can
+	// be set to the result of `GetCommandLineW`. Do not set this if you have
+	// to assemble it just for monitoring; use `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies
+	// whether the context switches for this data point were voluntary or
+	// involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether
+	// the process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions.
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`.
Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
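+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+	// system
+	ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+	// user
+	ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+	// wait
+	ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
+
+// A minimal process usage sketch (illustrative only; assumes a trace.Span
+// named span from go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(
+//		ProcessPID(1234),
+//		ProcessCommand("cmd/otelcol"),
+//		ProcessCPUStateUser,
+//	)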
+
+// Attributes for remote procedure calls.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of the response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of the response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of the request or response. Since the protocol allows the id to
+	// be an int, a string, `null`, or missing (for notifications), the value
+	// is expected to be cast to a string for simplicity. Use an empty string
+	// in case of a `null` value. Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in the `jsonrpc` property of the request/response. Since
+	// JSON-RPC 1.0 doesn't specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+	// RPCMessageCompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`, one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents whether this
+ // is a received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
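+ //
+ // An illustrative sketch (not part of the generated conventions): a
+ // gRPC server span might combine these attributes, assuming a
+ // trace.Span named span:
+ //
+ //	span.SetAttributes(
+ //		semconv.RPCSystemGRPC,
+ //		semconv.RPCService("myservice.EchoService"),
+ //		semconv.RPCMethod("Echo"),
+ //		semconv.RPCGRPCStatusCodeOk,
+ //	)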
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the message ID, which
+// MUST be calculated as two different counters starting from `1`, one for
+// sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection).
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
+ // Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying data, such as pod name and namespace, should be
+ // treated as confidential, being the user's choice to expose it or not via
+ // another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier for all processes participating in
+ // the application. Instead, it's recommended that each division (e.g. a
+ // worker thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the service instance that is generating
+ // that telemetry. For instance, creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated. However, Collectors can set the
+ // `service.instance.id` if they can unambiguously determine the service
+ // instance for that telemetry. This is typically the case for scraping
+ // receivers, as they know the target address and port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fall back to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
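+//
+// An illustrative sketch (not part of the generated conventions), using a
+// random UUID as the note above recommends and assuming the
+// github.com/google/uuid package:
+//
+//	attr := semconv.ServiceInstanceID(uuid.NewString())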
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions.
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
+// It represents the filesystem mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents the network
+ // connection state. A stateless protocol MUST NOT set this attribute.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
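+//
+// An illustrative sketch (not part of the generated conventions), assuming
+// the go.opentelemetry.io/otel/attribute and go.opentelemetry.io/otel/sdk
+// packages, the latter providing the SDK's own version string:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.TelemetrySDKLanguageGo,
+//		semconv.TelemetrySDKName("opentelemetry"),
+//		semconv.TelemetrySDKVersion(sdk.Version()),
+//	}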
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client.
+ // This is usually mutually-exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the client. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions.
+ // It represents the date/time indicating when the client certificate is
+ // first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the server
+ // name indication (SNI), which tells the server the hostname to which the
+ // client is attempting to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from the original string of the
+ // negotiated [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions.
+ // It represents the numeric part of the version parsed from the original
+ // string of the negotiated [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the server. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
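+ //
+ // An illustrative sketch (not part of the generated conventions) of
+ // producing such an uppercase fingerprint, assuming der holds the
+ // DER-encoded certificate bytes and using crypto/sha256 and fmt:
+ //
+ //	sum := sha256.Sum256(der)
+ //	attr := semconv.TLSServerHashSha256(fmt.Sprintf("%X", sum[:]))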
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/time
+ // indicating when the server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/time indicating when the server certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions.
+// It represents the array of PEM-encoded certificates that make up the
+// certificate chain offered by the client. This is usually mutually-exclusive
+// of `client.certificate` since that value should be the first certificate in
+// the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name indication (SNI), which tells the server the hostname to which the
+// client is attempting to connect.
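+//
+// An illustrative sketch (not part of the generated conventions), assuming
+// cs is a crypto/tls ConnectionState whose ServerName field carries the SNI
+// value observed on the server side:
+//
+//	attr := semconv.TLSClientServerName(cs.ServerName)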
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host
+ // name under the registered_domain. In a partially qualified domain, or if
+ // the qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, which is the
+ // last part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part
+// of the domain name. For example, the top level domain for example.com is
+// `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string.
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 000000000..d031bbea7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 000000000..bfaee0d56 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 000000000..fcdb9f485 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. 
+ // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It is deprecated;
+ // use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It is deprecated;
+ // use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It is deprecated;
+ // use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It is deprecated;
+ // use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It is
+ // deprecated; use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It is deprecated;
+ // use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It is deprecated;
+ // use `db.client.connection.create_time` instead. Note: the unit also changed
+ // from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It is deprecated;
+ // use `db.client.connection.wait_time` instead. Note: the unit also changed
+ // from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It is deprecated;
+ // use `db.client.connection.use_time` instead. Note: the unit also changed
+ // from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
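+
+ // Usage sketch (editorial aside, not generated from the spec): the
+ // Name/Unit/Description constants above are meant to be passed together
+ // when declaring an instrument, so the emitted metric matches the
+ // convention. A minimal, hypothetical example using the
+ // go.opentelemetry.io/otel and go.opentelemetry.io/otel/metric APIs,
+ // where "example/dns" is an assumed instrumentation scope name:
+ //
+ //	meter := otel.Meter("example/dns") // assumed scope name
+ //	hist, err := meter.Float64Histogram(
+ //		semconv.DNSLookupDurationName, // "dns.lookup.duration"
+ //		metric.WithUnit(semconv.DNSLookupDurationUnit),
+ //		metric.WithDescription(semconv.DNSLookupDurationDescription),
+ //	)
+ //	if err != nil {
+ //		// handle instrument-creation error
+ //	}
+ //	start := time.Now()
+ //	// ... perform the DNS lookup ...
+ //	hist.Record(ctx, time.Since(start).Seconds())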
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the function's logic
+ // execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the function's
+ // initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
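+
+ // Usage sketch (editorial aside, not generated from the spec): recording
+ // one of the HTTP duration histograms above, tagged with URL attributes
+ // defined earlier in this package. The "example/http" scope name and the
+ // literal attribute values are assumptions for illustration only:
+ //
+ //	hist, err := otel.Meter("example/http").Float64Histogram(
+ //		semconv.HTTPServerRequestDurationName,
+ //		metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
+ //		metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
+ //	)
+ //	if err != nil {
+ //		// handle instrument-creation error
+ //	}
+ //	start := time.Now()
+ //	// ... serve the request ...
+ //	hist.Record(ctx, time.Since(start).Seconds(),
+ //		metric.WithAttributes(
+ //			semconv.URLScheme("https"),
+ //			semconv.URLPath("/search"),
+ //		))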
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // duration of the publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // duration of the receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It represents the
+ // duration of the process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It represents the
+ // number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions.
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It represents the duration of inbound RPCs.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the size of
+ // RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the size of
+ // RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It represents the duration of outbound RPCs.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the size of
+ // RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the size of
+ // RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It reports the number of
+ // actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It reports the number of
+ // logical (virtual) processor cores created by the operating system to manage
+ // multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 000000000..4c87c7adc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
new file mode 100644
index 000000000..6836c6547
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Tracer creates a named tracer that implements the Tracer interface.
+// If the name is an empty string, the provider uses a default name.
+//
+// This is short for GetTracerProvider().Tracer(name, opts...)
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ return GetTracerProvider().Tracer(name, opts...)
+}
+
+// GetTracerProvider returns the registered global trace provider.
+// If none is registered, an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+//
+// tracer := otel.GetTracerProvider().Tracer("example.com/foo")
+//
+// or
+//
+// tracer := otel.Tracer("example.com/foo")
+func GetTracerProvider() trace.TracerProvider {
+ return global.TracerProvider()
+}
+
+// SetTracerProvider registers `tp` as the global trace provider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ global.SetTracerProvider(tp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 000000000..58ccaba69 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 000000000..273d58e00 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. 
+ schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. 
+type SpanEndOption interface {
+ applySpanEnd(SpanConfig) SpanConfig
+}
+
+// EventConfig is a group of options for an Event.
+type EventConfig struct {
+ attributes []attribute.KeyValue
+ timestamp time.Time
+ stackTrace bool
+}
+
+// Attributes describe the associated qualities of an Event.
+func (cfg *EventConfig) Attributes() []attribute.KeyValue {
+ return cfg.attributes
+}
+
+// Timestamp is a time in an Event life-cycle.
+func (cfg *EventConfig) Timestamp() time.Time {
+ return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *EventConfig) StackTrace() bool {
+ return cfg.stackTrace
+}
+
+// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
+// timestamp option is passed, the returned EventConfig will have a Timestamp
+// set to the call time; otherwise, no validation is performed on the returned
+// EventConfig.
+func NewEventConfig(options ...EventOption) EventConfig {
+ var c EventConfig
+ for _, option := range options {
+ c = option.applyEvent(c)
+ }
+ if c.timestamp.IsZero() {
+ c.timestamp = time.Now()
+ }
+ return c
+}
+
+// EventOption applies span event options to an EventConfig.
+type EventOption interface {
+ applyEvent(EventConfig) EventConfig
+}
+
+// SpanOption is an option that can be used at both the beginning and the end
+// of a span.
+type SpanOption interface {
+ SpanStartOption
+ SpanEndOption
+}
+
+// SpanStartEventOption is an option that can be used at the start of a span or
+// with an event.
+type SpanStartEventOption interface {
+ SpanStartOption
+ EventOption
+}
+
+// SpanEndEventOption is an option that can be used at the end of a span or
+// with an event.
+type SpanEndEventOption interface {
+ SpanEndOption
+ EventOption
+}
+
+type attributeOption []attribute.KeyValue
+
+func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
+ c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+ return c
+}
+func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o attributeOption) applyEvent(c EventConfig) EventConfig {
+ c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+ return c
+}
+
+var _ SpanStartEventOption = attributeOption{}
+
+// WithAttributes adds the attributes related to a span life-cycle event.
+// These attributes are used to describe the work a Span represents when this
+// option is provided to a Span's start or end events. Otherwise, these
+// attributes provide additional information about the event being recorded
+// (e.g. error, state change, processing progress, system event).
+//
+// If multiple of these options are passed, the attributes of each successive
+// option will extend the attributes instead of overwriting. There is no
+// guarantee of uniqueness in the resulting attributes.
+func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
+ return attributeOption(attributes)
+}
+
+// SpanEventOption is an option that can be used with an event or a span.
+type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} + +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 000000000..8c45a7107 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. 
+func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpanInstance + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpanInstance +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 000000000..cdbf41d6d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. 
They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 000000000..7754a239e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 000000000..3e359a00b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). 
+// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 000000000..c00221e7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 000000000..ca20e9997 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns noop implementation of Tracer. 
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpanInstance + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{ embedded.Span } + +var noopSpanInstance Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. +func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md new file mode 100644 index 000000000..cd382c82a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md @@ -0,0 +1,3 @@ +# Trace Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go new file mode 100644 index 000000000..64a4f1b36 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry trace API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry trace API will effectively +// disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry trace API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/trace/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ trace.TracerProvider = TracerProvider{} + _ trace.Tracer = Tracer{} + _ trace.Span = Span{} +) + +// TracerProvider is an OpenTelemetry No-Op TracerProvider. 
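+//
+// Illustrative sketch (editor's note, not part of the vendored source):
+// registering this provider globally effectively disables tracing for all
+// instrumentation that relies on the global TracerProvider.
+//
+//	import (
+//		"go.opentelemetry.io/otel"
+//		"go.opentelemetry.io/otel/trace/noop"
+//	)
+//
+//	otel.SetTracerProvider(noop.NewTracerProvider())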
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+	return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+	return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+	span := trace.SpanFromContext(ctx)
+
+	// If the parent context contains a non-zero span context, that span
+	// context needs to be returned as a non-recording span
+	// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+	var zeroSC trace.SpanContext
+	if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+		if !span.IsRecording() {
+			// If the span is not recording, return it directly.
+			return ctx, span
+		}
+		// Otherwise, wrap the span context in a non-recording span.
+		span = Span{sc: sc}
+	} else {
+		// No parent, return a No-Op span with an empty span context.
+		span = noopSpanInstance
+	}
+	return trace.ContextWithSpan(ctx, span), span
+}
+
+var noopSpanInstance trace.Span = Span{}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+	embedded.Span
+
+	sc trace.SpanContext
+}
+
+// SpanContext returns the wrapped SpanContext, which is empty for a
+// zero-value Span.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (Span) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 000000000..ef85cb70c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline.
How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 000000000..d3aa476ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. 
See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Span
+
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// AddEvent adds an event with the provided name and options.
+	AddEvent(name string, options ...EventOption)
+
+	// AddLink adds a link.
+	// Adding links at span creation using WithLinks is preferred to calling AddLink
+	// later, for contexts that are available during span creation, because head
+	// sampling decisions can only consider information present during span creation.
+	AddLink(link Link)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, provided the status hasn't already been set to a higher
+	// value before (OK > Error > Unset). The description is only included in a
+	// status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+ SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 000000000..d49adf671 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "encoding/hex" + "encoding/json" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. 
A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. +func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. +// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. 
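+//
+// A minimal sketch (editor's note, not part of the vendored source) of how
+// these pieces combine when rebuilding a sampled W3C trace-context parent:
+//
+//	tid, _ := TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+//	sid, _ := SpanIDFromHex("00f067aa0ba902b7")
+//	sc := NewSpanContext(SpanContextConfig{
+//		TraceID:    tid,
+//		SpanID:     sid,
+//		TraceFlags: FlagsSampled,
+//	})
+//	sc.IsSampled() // true: the sampled bit is set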
+func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. +func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. 
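+//
+// Editor's sketch (not part of the vendored source): because SpanContext is
+// immutable, the With* methods return modified copies and never mutate the
+// receiver. Given a SpanContext sc:
+//
+//	sampled := sc.WithTraceFlags(sc.TraceFlags().WithSampled(true))
+//	sampled.IsSampled() // true
+//	sc.IsSampled()      // still reports the original flags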
+func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 000000000..77952d2a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. 
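+	//
+	// A typical call site (editor's sketch, mirroring the package
+	// documentation examples):
+	//
+	//	ctx, span := tracer.Start(ctx, "operation")
+	//	defer span.End()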
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go new file mode 100644 index 000000000..dc5e34cad --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -0,0 +1,330 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + maxListMembers = 32 + + listDelimiters = "," + memberDelimiter = "=" + + errInvalidKey errorConst = "invalid tracestate key" + errInvalidValue errorConst = "invalid tracestate value" + errInvalidMember errorConst = "invalid tracestate list-member" + errMemberNumber errorConst = "too many list-members in tracestate" + errDuplicate errorConst = "duplicate list-member in tracestate" +) + +type member struct { + Key string + Value string +} + +// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) +// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . +func checkValueChar(v byte) bool { + return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . +func checkValueLast(v byte) bool { + return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// based on the W3C Trace Context specification +// +// value = (0*255(chr)) nblk-chr +// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E +// chr = %x20 / nblk-chr +// +// see https://www.w3.org/TR/trace-context-1/#value +func checkValue(val string) bool { + n := len(val) + if n == 0 || n > 256 { + return false + } + for i := 0; i < n-1; i++ { + if !checkValueChar(val[i]) { + return false + } + } + return checkValueLast(val[n-1]) +} + +func checkKeyRemain(key string) bool { + // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) + for _, v := range key { + if isAlphaNum(byte(v)) { + continue + } + switch v { + case '_', '-', '*', '/': + continue + } + return false + } + return true +} + +// according to +// +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// +// param n is remain part length, should be 255 in simple-key or 13 in system-id. +func checkKeyPart(key string, n int) bool { + if len(key) == 0 { + return false + } + first := key[0] // key's first char + ret := len(key[1:]) <= n + ret = ret && first >= 'a' && first <= 'z' + return ret && checkKeyRemain(key[1:]) +} + +func isAlphaNum(c byte) bool { + if c >= 'a' && c <= 'z' { + return true + } + return c >= '0' && c <= '9' +} + +// according to +// +// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// +// param n is remain part length, should be 240 exactly. +func checkKeyTenant(key string, n int) bool { + if len(key) == 0 { + return false + } + return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) +} + +// based on the W3C Trace Context specification +// +// key = simple-key / multi-tenant-key +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// multi-tenant-key = tenant-id "@" system-id +// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// lcalpha = %x61-7A ; a-z +// +// see https://www.w3.org/TR/trace-context-1/#tracestate-header. 
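+//
+// For illustration (editor's note, not part of the vendored source):
+//
+//	checkKey("rojo")           // true: a valid simple-key
+//	checkKey("1tenant@vendor") // true: a valid multi-tenant-key
+//	checkKey("Rojo")           // false: keys must not contain uppercase letters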
+func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + +func newMember(key, value string) (member, error) { + if !checkKey(key) { + return member{}, errInvalidKey + } + if !checkValue(value) { + return member{}, errInvalidValue + } + return member{Key: key, Value: value}, nil +} + +func parseMember(m string) (member, error) { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil +} + +// String encodes member into a string compliant with the W3C Trace Context +// specification. +func (m member) String() string { + return m.Key + "=" + m.Value +} + +// TraceState provides additional vendor-specific trace identification +// information across different distributed tracing systems. It represents an +// immutable list consisting of key/value pairs, each pair is referred to as a +// list-member. +// +// TraceState conforms to the W3C Trace Context specification +// (https://www.w3.org/TR/trace-context-1). All operations that create or copy +// a TraceState do so by validating all input and will only produce TraceState +// that conform to the specification. Specifically, this means that all +// list-member's key/value pairs are valid, no duplicate list-members exist, +// and the maximum number of list-members (32) is not exceeded. +type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` + // list is the members in order. + list []member +} + +var _ json.Marshaler = TraceState{} + +// ParseTraceState attempts to decode a TraceState from the passed +// string. It returns an error if the input is invalid according to the W3C +// Trace Context specification. +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { + return TraceState{}, nil + } + + wrapErr := func(err error) error { + return fmt.Errorf("failed to parse tracestate: %w", err) + } + + var members []member + found := make(map[string]struct{}) + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) + if len(memberStr) == 0 { + continue + } + + m, err := parseMember(memberStr) + if err != nil { + return TraceState{}, wrapErr(err) + } + + if _, ok := found[m.Key]; ok { + return TraceState{}, wrapErr(errDuplicate) + } + found[m.Key] = struct{}{} + + members = append(members, m) + if n := len(members); n > maxListMembers { + return TraceState{}, wrapErr(errMemberNumber) + } + } + + return TraceState{list: members}, nil +} + +// MarshalJSON marshals the TraceState into JSON. +func (ts TraceState) MarshalJSON() ([]byte, error) { + return json.Marshal(ts.String()) +} + +// String encodes the TraceState into a string compliant with the W3C +// Trace Context specification. The returned string will be invalid if the +// TraceState contains any invalid members. 
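+//
+// Round-trip sketch (editor's note, not part of the vendored source), using
+// the example header value from the W3C specification:
+//
+//	ts, _ := ParseTraceState("rojo=00f067aa0ba902b7,congo=t61rcWkgMzE")
+//	ts.String()    // "rojo=00f067aa0ba902b7,congo=t61rcWkgMzE"
+//	ts.Get("rojo") // "00f067aa0ba902b7"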
+func (ts TraceState) String() string {
+	if len(ts.list) == 0 {
+		return ""
+	}
+	var n int
+	n += len(ts.list)     // member delimiters: '='
+	n += len(ts.list) - 1 // list delimiters: ','
+	for _, mem := range ts.list {
+		n += len(mem.Key)
+		n += len(mem.Value)
+	}
+
+	var sb strings.Builder
+	sb.Grow(n)
+	_, _ = sb.WriteString(ts.list[0].Key)
+	_ = sb.WriteByte('=')
+	_, _ = sb.WriteString(ts.list[0].Value)
+	for i := 1; i < len(ts.list); i++ {
+		_ = sb.WriteByte(listDelimiters[0])
+		_, _ = sb.WriteString(ts.list[i].Key)
+		_ = sb.WriteByte('=')
+		_, _ = sb.WriteString(ts.list[i].Value)
+	}
+	return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Walk walks all key value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+	n := len(ts.list)
+	found := n
+	for i := range ts.list {
+		if ts.list[i].Key == key {
+			found = i
+		}
+	}
+	cTS := TraceState{}
+	if found == n && n < maxListMembers {
+		cTS.list = make([]member, n+1)
+	} else {
+		cTS.list = make([]member, n)
+	}
+	cTS.list[0] = m
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], ts.list[0:found])
+	if found < n {
+		copy(cTS.list[1+found:], ts.list[found+1:])
+	}
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+	members := make([]member, ts.Len())
+	copy(members, ts.list)
+	for i, member := range ts.list {
+		if member.Key == key {
+			members = append(members[:i], members[i+1:]...)
+			// TraceState should contain no duplicate members.
+			break
+		}
+	}
+	return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+	return len(ts.list)
+}
diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
new file mode 100644
index 000000000..1e87855ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
+
+missingReadme=false
+for dir in $dirs; do
+    if [ ! -f "$dir/README.md" ]; then
+        echo "couldn't find README.md for $dir"
+        missingReadme=true
+    fi
+done
+
+if [ "$missingReadme" = true ] ; then
+    echo "Error: some READMEs couldn't be found."
+    exit 1
+fi
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 000000000..c9b7cdbbf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 000000000..6d3c7b1f4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+// Version is the current release version of OpenTelemetry in use.
+func Version() string { + return "1.31.0" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 000000000..cdebdb5eb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,49 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +module-sets: + stable-v1: + version: v1.31.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.53.0 + modules: + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/prometheus + experimental-logs: + version: v0.7.0 + modules: + - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog + experimental-schema: + version: v0.0.10 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.go new file mode 100644 index 000000000..978efdde9 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.go @@ -0,0 +1,367 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +package v1 + +import ( + v1 "go.opentelemetry.io/proto/otlp/logs/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExportLogsServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceLogs []*v1.ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` +} + +func (x *ExportLogsServiceRequest) Reset() { + *x = ExportLogsServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportLogsServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportLogsServiceRequest) ProtoMessage() {} + +func (x *ExportLogsServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportLogsServiceRequest.ProtoReflect.Descriptor instead. 
+func (*ExportLogsServiceRequest) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ExportLogsServiceRequest) GetResourceLogs() []*v1.ResourceLogs { + if x != nil { + return x.ResourceLogs + } + return nil +} + +type ExportLogsServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_<signal>` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_<signal>` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_<signal> = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + PartialSuccess *ExportLogsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"` +} + +func (x *ExportLogsServiceResponse) Reset() { + *x = ExportLogsServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportLogsServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportLogsServiceResponse) ProtoMessage() {} + +func (x *ExportLogsServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportLogsServiceResponse.ProtoReflect.Descriptor instead. +func (*ExportLogsServiceResponse) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ExportLogsServiceResponse) GetPartialSuccess() *ExportLogsPartialSuccess { + if x != nil { + return x.PartialSuccess + } + return nil +} + +type ExportLogsPartialSuccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of rejected log records. + // + // A `rejected_<signal>` field holding a `0` value indicates that the + // request was fully accepted. + RejectedLogRecords int64 `protobuf:"varint,1,opt,name=rejected_log_records,json=rejectedLogRecords,proto3" json:"rejected_log_records,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ExportLogsPartialSuccess) Reset() { + *x = ExportLogsPartialSuccess{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportLogsPartialSuccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportLogsPartialSuccess) ProtoMessage() {} + +func (x *ExportLogsPartialSuccess) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportLogsPartialSuccess.ProtoReflect.Descriptor instead. +func (*ExportLogsPartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportLogsPartialSuccess) GetRejectedLogRecords() int64 { + if x != nil { + return x.RejectedLogRecords + } + return 0 +} + +func (x *ExportLogsPartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_opentelemetry_proto_collector_logs_v1_logs_service_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, + 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, + 0x31, 0x1a, 0x26, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, + 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x18, 0x45, 0x78, 0x70, + 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, + 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 
0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x71, 0x0a, + 0x18, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6a, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x32, 0x9d, 0x01, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x8d, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x98, 0x01, 0x0a, 0x28, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x4c, + 0x6f, 0x67, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, + 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6c, 0x6f, 0x67, 0x73, + 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x25, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescOnce sync.Once + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData = file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc +) + +func file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescOnce.Do(func() { + 
file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData) + }) + return file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDescData +} + +var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_goTypes = []interface{}{ + (*ExportLogsServiceRequest)(nil), // 0: opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest + (*ExportLogsServiceResponse)(nil), // 1: opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse + (*ExportLogsPartialSuccess)(nil), // 2: opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess + (*v1.ResourceLogs)(nil), // 3: opentelemetry.proto.logs.v1.ResourceLogs +} +var file_opentelemetry_proto_collector_logs_v1_logs_service_proto_depIdxs = []int32{ + 3, // 0: opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest.resource_logs:type_name -> opentelemetry.proto.logs.v1.ResourceLogs + 2, // 1: opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess + 0, // 2: opentelemetry.proto.collector.logs.v1.LogsService.Export:input_type -> opentelemetry.proto.collector.logs.v1.ExportLogsServiceRequest + 1, // 3: opentelemetry.proto.collector.logs.v1.LogsService.Export:output_type -> opentelemetry.proto.collector.logs.v1.ExportLogsServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_collector_logs_v1_logs_service_proto_init() } +func file_opentelemetry_proto_collector_logs_v1_logs_service_proto_init() { + if File_opentelemetry_proto_collector_logs_v1_logs_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportLogsServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportLogsServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportLogsPartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_opentelemetry_proto_collector_logs_v1_logs_service_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_collector_logs_v1_logs_service_proto_depIdxs, + MessageInfos: 
file_opentelemetry_proto_collector_logs_v1_logs_service_proto_msgTypes, + }.Build() + File_opentelemetry_proto_collector_logs_v1_logs_service_proto = out.File + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_rawDesc = nil + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_goTypes = nil + file_opentelemetry_proto_collector_logs_v1_logs_service_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.gw.go new file mode 100644 index 000000000..d34f32d91 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client LogsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_LogsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server LogsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportLogsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterLogsServiceHandlerServer registers the http handlers for service LogsService to "mux". +// UnaryRPC :call LogsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLogsServiceHandlerFromEndpoint instead. 
+func RegisterLogsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server LogsServiceServer) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", runtime.WithHTTPPathPattern("/v1/logs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_LogsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterLogsServiceHandlerFromEndpoint is same as RegisterLogsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterLogsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLogsServiceHandler(ctx, mux, conn) +} + +// RegisterLogsServiceHandler registers the http handlers for service LogsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLogsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLogsServiceHandlerClient(ctx, mux, NewLogsServiceClient(conn)) +} + +// RegisterLogsServiceHandlerClient registers the http handlers for service LogsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LogsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LogsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LogsServiceClient" to call the correct interceptors. 
+func RegisterLogsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LogsServiceClient) error { + + mux.Handle("POST", pattern_LogsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", runtime.WithHTTPPathPattern("/v1/logs")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_LogsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_LogsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_LogsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "logs"}, "")) +) + +var ( + forward_LogsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go new file mode 100644 index 000000000..e1b7c457c --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/logs/v1/logs_service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.21.6 +// source: opentelemetry/proto/collector/logs/v1/logs_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LogsServiceClient is the client API for LogsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LogsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) +} + +type logsServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewLogsServiceClient(cc grpc.ClientConnInterface) LogsServiceClient { + return &logsServiceClient{cc} +} + +func (c *logsServiceClient) Export(ctx context.Context, in *ExportLogsServiceRequest, opts ...grpc.CallOption) (*ExportLogsServiceResponse, error) { + out := new(ExportLogsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.logs.v1.LogsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LogsServiceServer is the server API for LogsService service. 
+// All implementations must embed UnimplementedLogsServiceServer +// for forward compatibility +type LogsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) + mustEmbedUnimplementedLogsServiceServer() +} + +// UnimplementedLogsServiceServer must be embedded to have forward compatible implementations. +type UnimplementedLogsServiceServer struct { +} + +func (UnimplementedLogsServiceServer) Export(context.Context, *ExportLogsServiceRequest) (*ExportLogsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} +func (UnimplementedLogsServiceServer) mustEmbedUnimplementedLogsServiceServer() {} + +// UnsafeLogsServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LogsServiceServer will +// result in compilation errors. +type UnsafeLogsServiceServer interface { + mustEmbedUnimplementedLogsServiceServer() +} + +func RegisterLogsServiceServer(s grpc.ServiceRegistrar, srv LogsServiceServer) { + s.RegisterService(&LogsService_ServiceDesc, srv) +} + +func _LogsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportLogsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LogsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LogsServiceServer).Export(ctx, req.(*ExportLogsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// LogsService_ServiceDesc is the grpc.ServiceDesc for LogsService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var LogsService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.logs.v1.LogsService", + HandlerType: (*LogsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _LogsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/logs/v1/logs_service.proto", +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go new file mode 100644 index 000000000..8e13d157c --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go @@ -0,0 +1,371 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +package v1 + +import ( + v1 "go.opentelemetry.io/proto/otlp/metrics/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExportMetricsServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` +} + +func (x *ExportMetricsServiceRequest) Reset() { + *x = ExportMetricsServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportMetricsServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportMetricsServiceRequest) ProtoMessage() {} + +func (x *ExportMetricsServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportMetricsServiceRequest.ProtoReflect.Descriptor instead. +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics { + if x != nil { + return x.ResourceMetrics + } + return nil +} + +type ExportMetricsServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_<signal>` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_<signal>` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_<signal> = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case.
+ PartialSuccess *ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"` +} + +func (x *ExportMetricsServiceResponse) Reset() { + *x = ExportMetricsServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportMetricsServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportMetricsServiceResponse) ProtoMessage() {} + +func (x *ExportMetricsServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportMetricsServiceResponse.ProtoReflect.Descriptor instead. +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ExportMetricsServiceResponse) GetPartialSuccess() *ExportMetricsPartialSuccess { + if x != nil { + return x.PartialSuccess + } + return nil +} + +type ExportMetricsPartialSuccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of rejected data points. + // + // A `rejected_<signal>` field holding a `0` value indicates that the + // request was fully accepted. + RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ExportMetricsPartialSuccess) Reset() { + *x = ExportMetricsPartialSuccess{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportMetricsPartialSuccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportMetricsPartialSuccess) ProtoMessage() {} + +func (x *ExportMetricsPartialSuccess) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportMetricsPartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 { + if x != nil { + return x.RejectedDataPoints + } + return 0 +} + +func (x *ExportMetricsPartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = []byte{ + 0x0a, 0x3e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, + 0x03, 0x52, 0x12, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xac, 0x01, 0x0a, 0x0e, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x99, 0x01, + 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x46, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xa4, 0x01, 0x0a, 0x2b, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x33, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, + 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x28, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce sync.Once + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc +) + +func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData) + }) + return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData +} + +var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = []interface{}{ + (*ExportMetricsServiceRequest)(nil), // 0: 
opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest + (*ExportMetricsServiceResponse)(nil), // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse + (*ExportMetricsPartialSuccess)(nil), // 2: opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess + (*v1.ResourceMetrics)(nil), // 3: opentelemetry.proto.metrics.v1.ResourceMetrics +} +var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = []int32{ + 3, // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics + 2, // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess + 0, // 2: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:input_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest + 1, // 3: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:output_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() } +func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() { + if File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportMetricsServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportMetricsServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportMetricsPartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes, + }.Build() + File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto = out.File + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = nil + file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = nil + 
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go new file mode 100644 index 000000000..c42f1454e --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportMetricsServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux". +// UnaryRPC :call MetricsServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead. 
+func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_MetricsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMetricsServiceHandler(ctx, mux, conn) +} + +// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) +} + +// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MetricsServiceClient" to call the correct interceptors. 
+func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MetricsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "")) +) + +var ( + forward_MetricsService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go new file mode 100644 index 000000000..31d25fc15 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.21.6 +// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) +} + +type metricsServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) { + out := new(ExportMetricsServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MetricsServiceServer is the server API for MetricsService service. 
+// All implementations must embed UnimplementedMetricsServiceServer +// for forward compatibility +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) + mustEmbedUnimplementedMetricsServiceServer() +} + +// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations. +type UnimplementedMetricsServiceServer struct { +} + +func (UnimplementedMetricsServiceServer) Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} +func (UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {} + +// UnsafeMetricsServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MetricsServiceServer will +// result in compilation errors. +type UnsafeMetricsServiceServer interface { + mustEmbedUnimplementedMetricsServiceServer() +} + +func RegisterMetricsServiceServer(s grpc.ServiceRegistrar, srv MetricsServiceServer) { + s.RegisterService(&MetricsService_ServiceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportMetricsServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MetricsServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// MetricsService_ServiceDesc is the grpc.ServiceDesc for MetricsService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MetricsService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _MetricsService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto", +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go new file mode 100644 index 000000000..c1af04e84 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,367 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportTraceServiceRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+	*x = ExportTraceServiceRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
+
+type ExportTraceServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
+	PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+	*x = ExportTraceServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportTracePartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected spans.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportTracePartialSuccess) Reset() {
+	*x = ExportTracePartialSuccess{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTracePartialSuccess) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTracePartialSuccess) ProtoMessage() {}
+
+func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead.
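A rough client-side sketch of honoring the partial-success contract documented above. The package aliases and the caller-supplied conn and spans are assumptions:

import (
	"context"
	"log"

	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
	"google.golang.org/grpc"
)

func exportSpans(ctx context.Context, conn *grpc.ClientConn, spans []*tracepb.ResourceSpans) error {
	client := coltracepb.NewTraceServiceClient(conn)
	resp, err := client.Export(ctx, &coltracepb.ExportTraceServiceRequest{ResourceSpans: spans})
	if err != nil {
		return err
	}
	// A nil or empty partial_success is equivalent to full success.
	if ps := resp.GetPartialSuccess(); ps != nil && (ps.GetRejectedSpans() > 0 || ps.GetErrorMessage() != "") {
		log.Printf("export partially accepted: %d spans rejected: %s", ps.GetRejectedSpans(), ps.GetErrorMessage())
	}
	return nil
}

Note that a transport-level error and a partial success are distinct: the RPC can succeed while the server still rejects part of the payload.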
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 { + if x != nil { + return x.RejectedSpans + } + return 0 +} + +func (x *ExportTracePartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f, + 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, + 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, + 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc +) + +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData) + }) + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData +} + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ + (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans +} +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = 
[]int32{ + 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { + if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTracePartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes, + }.Build() + File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 000000000..bb1bd261e --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. 
+ +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead. 
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
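As a sketch of what these registration helpers provide, the following stands up the HTTP/JSON mapping (POST /v1/traces) in front of a gRPC endpoint. The addresses and the insecure credentials are illustrative assumptions only:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// Proxies POST /v1/traces to the gRPC TraceService at the given endpoint.
	if err := coltracepb.RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":4318", mux)) // conventional OTLP/HTTP port, assumed
}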
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go new file mode 100644 index 000000000..dd1b73f1e --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.21.6 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. 
+// All implementations must embed UnimplementedTraceServiceServer +// for forward compatibility +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) + mustEmbedUnimplementedTraceServiceServer() +} + +// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations. +type UnimplementedTraceServiceServer struct { +} + +func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} +func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {} + +// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TraceServiceServer will +// result in compilation errors. +type UnsafeTraceServiceServer interface { + mustEmbedUnimplementedTraceServiceServer() +} + +func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) { + s.RegisterService(&TraceService_ServiceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TraceService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go new file mode 100644 index 000000000..852209b09 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -0,0 +1,630 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/common/v1/common.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + // + // Types that are assignable to Value: + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + // *AnyValue_BytesValue + Value isAnyValue_Value `protobuf_oneof:"value"` +} + +func (x *AnyValue) Reset() { + *x = AnyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyValue) ProtoMessage() {} + +func (x *AnyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead. 
+func (*AnyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0} +} + +func (m *AnyValue) GetValue() isAnyValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *AnyValue) GetStringValue() string { + if x, ok := x.GetValue().(*AnyValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *AnyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*AnyValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *AnyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*AnyValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (x *AnyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *AnyValue) GetArrayValue() *ArrayValue { + if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (x *AnyValue) GetKvlistValue() *KeyValueList { + if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok { + return x.KvlistValue + } + return nil +} + +func (x *AnyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*AnyValue_BytesValue); ok { + return x.BytesValue + } + return nil +} + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"` +} + +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"` +} + +type AnyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} + +func (*AnyValue_BoolValue) isAnyValue_Value() {} + +func (*AnyValue_IntValue) isAnyValue_Value() {} + +func (*AnyValue_DoubleValue) isAnyValue_Value() {} + +func (*AnyValue_ArrayValue) isAnyValue_Value() {} + +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (*AnyValue_BytesValue) isAnyValue_Value() {} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Array of values. The array may be empty (contain 0 elements). 
+ Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ArrayValue) Reset() { + *x = ArrayValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArrayValue) ProtoMessage() {} + +func (x *ArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead. +func (*ArrayValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1} +} + +func (x *ArrayValue) GetValues() []*AnyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *KeyValueList) Reset() { + *x = KeyValueList{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueList) ProtoMessage() {} + +func (x *KeyValueList) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead. +func (*KeyValueList) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2} +} + +func (x *KeyValueList) GetValues() []*KeyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. 
+type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() *AnyValue { + if x != nil { + return x.Value + } + return nil +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +type InstrumentationScope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An empty instrumentation scope name means the name is unknown. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *InstrumentationScope) Reset() { + *x = InstrumentationScope{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstrumentationScope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstrumentationScope) ProtoMessage() {} + +func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead. 
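Because the `oneof` above surfaces as wrapper structs in Go, attribute construction is somewhat verbose. A small illustrative sketch, with arbitrary values, building an InstrumentationScope:

import commonpb "go.opentelemetry.io/proto/otlp/common/v1"

func exampleScope() *commonpb.InstrumentationScope {
	return &commonpb.InstrumentationScope{
		Name:    "example/instrumentation", // empty means the name is unknown
		Version: "0.1.0",
		Attributes: []*commonpb.KeyValue{
			{Key: "enabled", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_BoolValue{BoolValue: true}}},
			{Key: "batch.size", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 512}}},
		},
	}
}

Reading values back goes through the typed getters, for example exampleScope().GetAttributes()[0].GetValue().GetBoolValue().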
+func (*InstrumentationScope) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} +} + +func (x *InstrumentationScope) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InstrumentationScope) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *InstrumentationScope) GetAttributes() []*KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08, + 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, + 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, + 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, + 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, + 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b, + 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once + 
file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc +) + +func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData) + }) + return file_opentelemetry_proto_common_v1_common_proto_rawDescData +} + +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope +} +var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue + 2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList + 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue + 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_common_v1_common_proto_init() } +func file_opentelemetry_proto_common_v1_common_proto_init() { + if File_opentelemetry_proto_common_v1_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ArrayValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*InstrumentationScope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AnyValue_StringValue)(nil), + (*AnyValue_BoolValue)(nil), + (*AnyValue_IntValue)(nil), + (*AnyValue_DoubleValue)(nil), + (*AnyValue_ArrayValue)(nil), + (*AnyValue_KvlistValue)(nil), + (*AnyValue_BytesValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_common_v1_common_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_common_v1_common_proto_msgTypes, + }.Build() + File_opentelemetry_proto_common_v1_common_proto = out.File + file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil + file_opentelemetry_proto_common_v1_common_proto_goTypes = nil + file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go new file mode 100644 index 000000000..e0246bb07 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go @@ -0,0 +1,847 @@ +// Copyright 2020, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/logs/v1/logs.proto + +package v1 + +import ( + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Possible values for LogRecord.SeverityNumber. +type SeverityNumber int32 + +const ( + // UNSPECIFIED is the default SeverityNumber, it MUST NOT be used. 
+ SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED SeverityNumber = 0 + SeverityNumber_SEVERITY_NUMBER_TRACE SeverityNumber = 1 + SeverityNumber_SEVERITY_NUMBER_TRACE2 SeverityNumber = 2 + SeverityNumber_SEVERITY_NUMBER_TRACE3 SeverityNumber = 3 + SeverityNumber_SEVERITY_NUMBER_TRACE4 SeverityNumber = 4 + SeverityNumber_SEVERITY_NUMBER_DEBUG SeverityNumber = 5 + SeverityNumber_SEVERITY_NUMBER_DEBUG2 SeverityNumber = 6 + SeverityNumber_SEVERITY_NUMBER_DEBUG3 SeverityNumber = 7 + SeverityNumber_SEVERITY_NUMBER_DEBUG4 SeverityNumber = 8 + SeverityNumber_SEVERITY_NUMBER_INFO SeverityNumber = 9 + SeverityNumber_SEVERITY_NUMBER_INFO2 SeverityNumber = 10 + SeverityNumber_SEVERITY_NUMBER_INFO3 SeverityNumber = 11 + SeverityNumber_SEVERITY_NUMBER_INFO4 SeverityNumber = 12 + SeverityNumber_SEVERITY_NUMBER_WARN SeverityNumber = 13 + SeverityNumber_SEVERITY_NUMBER_WARN2 SeverityNumber = 14 + SeverityNumber_SEVERITY_NUMBER_WARN3 SeverityNumber = 15 + SeverityNumber_SEVERITY_NUMBER_WARN4 SeverityNumber = 16 + SeverityNumber_SEVERITY_NUMBER_ERROR SeverityNumber = 17 + SeverityNumber_SEVERITY_NUMBER_ERROR2 SeverityNumber = 18 + SeverityNumber_SEVERITY_NUMBER_ERROR3 SeverityNumber = 19 + SeverityNumber_SEVERITY_NUMBER_ERROR4 SeverityNumber = 20 + SeverityNumber_SEVERITY_NUMBER_FATAL SeverityNumber = 21 + SeverityNumber_SEVERITY_NUMBER_FATAL2 SeverityNumber = 22 + SeverityNumber_SEVERITY_NUMBER_FATAL3 SeverityNumber = 23 + SeverityNumber_SEVERITY_NUMBER_FATAL4 SeverityNumber = 24 +) + +// Enum value maps for SeverityNumber. +var ( + SeverityNumber_name = map[int32]string{ + 0: "SEVERITY_NUMBER_UNSPECIFIED", + 1: "SEVERITY_NUMBER_TRACE", + 2: "SEVERITY_NUMBER_TRACE2", + 3: "SEVERITY_NUMBER_TRACE3", + 4: "SEVERITY_NUMBER_TRACE4", + 5: "SEVERITY_NUMBER_DEBUG", + 6: "SEVERITY_NUMBER_DEBUG2", + 7: "SEVERITY_NUMBER_DEBUG3", + 8: "SEVERITY_NUMBER_DEBUG4", + 9: "SEVERITY_NUMBER_INFO", + 10: "SEVERITY_NUMBER_INFO2", + 11: "SEVERITY_NUMBER_INFO3", + 12: "SEVERITY_NUMBER_INFO4", + 13: "SEVERITY_NUMBER_WARN", + 14: "SEVERITY_NUMBER_WARN2", + 15: "SEVERITY_NUMBER_WARN3", + 16: "SEVERITY_NUMBER_WARN4", + 17: "SEVERITY_NUMBER_ERROR", + 18: "SEVERITY_NUMBER_ERROR2", + 19: "SEVERITY_NUMBER_ERROR3", + 20: "SEVERITY_NUMBER_ERROR4", + 21: "SEVERITY_NUMBER_FATAL", + 22: "SEVERITY_NUMBER_FATAL2", + 23: "SEVERITY_NUMBER_FATAL3", + 24: "SEVERITY_NUMBER_FATAL4", + } + SeverityNumber_value = map[string]int32{ + "SEVERITY_NUMBER_UNSPECIFIED": 0, + "SEVERITY_NUMBER_TRACE": 1, + "SEVERITY_NUMBER_TRACE2": 2, + "SEVERITY_NUMBER_TRACE3": 3, + "SEVERITY_NUMBER_TRACE4": 4, + "SEVERITY_NUMBER_DEBUG": 5, + "SEVERITY_NUMBER_DEBUG2": 6, + "SEVERITY_NUMBER_DEBUG3": 7, + "SEVERITY_NUMBER_DEBUG4": 8, + "SEVERITY_NUMBER_INFO": 9, + "SEVERITY_NUMBER_INFO2": 10, + "SEVERITY_NUMBER_INFO3": 11, + "SEVERITY_NUMBER_INFO4": 12, + "SEVERITY_NUMBER_WARN": 13, + "SEVERITY_NUMBER_WARN2": 14, + "SEVERITY_NUMBER_WARN3": 15, + "SEVERITY_NUMBER_WARN4": 16, + "SEVERITY_NUMBER_ERROR": 17, + "SEVERITY_NUMBER_ERROR2": 18, + "SEVERITY_NUMBER_ERROR3": 19, + "SEVERITY_NUMBER_ERROR4": 20, + "SEVERITY_NUMBER_FATAL": 21, + "SEVERITY_NUMBER_FATAL2": 22, + "SEVERITY_NUMBER_FATAL3": 23, + "SEVERITY_NUMBER_FATAL4": 24, + } +) + +func (x SeverityNumber) Enum() *SeverityNumber { + p := new(SeverityNumber) + *p = x + return p +} + +func (x SeverityNumber) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SeverityNumber) Descriptor() protoreflect.EnumDescriptor { + return 
file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[0].Descriptor() +} + +func (SeverityNumber) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[0] +} + +func (x SeverityNumber) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SeverityNumber.Descriptor instead. +func (SeverityNumber) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{0} +} + +// LogRecordFlags represents constants used to interpret the +// LogRecord.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) +// +type LogRecordFlags int32 + +const ( + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + LogRecordFlags_LOG_RECORD_FLAGS_DO_NOT_USE LogRecordFlags = 0 + // Bits 0-7 are used for trace flags. + LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK LogRecordFlags = 255 +) + +// Enum value maps for LogRecordFlags. +var ( + LogRecordFlags_name = map[int32]string{ + 0: "LOG_RECORD_FLAGS_DO_NOT_USE", + 255: "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK", + } + LogRecordFlags_value = map[string]int32{ + "LOG_RECORD_FLAGS_DO_NOT_USE": 0, + "LOG_RECORD_FLAGS_TRACE_FLAGS_MASK": 255, + } +) + +func (x LogRecordFlags) Enum() *LogRecordFlags { + p := new(LogRecordFlags) + *p = x + return p +} + +func (x LogRecordFlags) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LogRecordFlags) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[1].Descriptor() +} + +func (LogRecordFlags) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_logs_v1_logs_proto_enumTypes[1] +} + +func (x LogRecordFlags) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LogRecordFlags.Descriptor instead. +func (LogRecordFlags) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{1} +} + +// LogsData represents the logs data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP logs data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type LogsData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceLogs. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. 
+ ResourceLogs []*ResourceLogs `protobuf:"bytes,1,rep,name=resource_logs,json=resourceLogs,proto3" json:"resource_logs,omitempty"` +} + +func (x *LogsData) Reset() { + *x = LogsData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogsData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogsData) ProtoMessage() {} + +func (x *LogsData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogsData.ProtoReflect.Descriptor instead. +func (*LogsData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{0} +} + +func (x *LogsData) GetResourceLogs() []*ResourceLogs { + if x != nil { + return x.ResourceLogs + } + return nil +} + +// A collection of ScopeLogs from a Resource. +type ResourceLogs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource for the logs in this message. + // If this field is not set then resource info is unknown. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of ScopeLogs that originate from a resource. + ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_logs" field which have their own schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ResourceLogs) Reset() { + *x = ResourceLogs{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceLogs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceLogs) ProtoMessage() {} + +func (x *ResourceLogs) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceLogs.ProtoReflect.Descriptor instead. +func (*ResourceLogs) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceLogs) GetResource() *v1.Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ResourceLogs) GetScopeLogs() []*ScopeLogs { + if x != nil { + return x.ScopeLogs + } + return nil +} + +func (x *ResourceLogs) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A collection of Logs produced by a Scope. 
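+
+// Illustrative sketch (editorial example, not protoc-gen-go output): one way to
+// assemble the LogsData -> ResourceLogs -> ScopeLogs -> LogRecord hierarchy
+// defined in this file. ScopeLogs and LogRecord are declared further below;
+// declaration order is irrelevant in Go. The helper name, the "service.name"
+// attribute, and the body text are hypothetical example values.
+func exampleLogsData(nowNanos uint64) *LogsData {
+	return &LogsData{
+		ResourceLogs: []*ResourceLogs{{
+			Resource: &v1.Resource{
+				Attributes: []*v11.KeyValue{{
+					Key:   "service.name",
+					Value: &v11.AnyValue{Value: &v11.AnyValue_StringValue{StringValue: "demo"}},
+				}},
+			},
+			ScopeLogs: []*ScopeLogs{{
+				LogRecords: []*LogRecord{{
+					TimeUnixNano:   nowNanos,
+					SeverityNumber: SeverityNumber_SEVERITY_NUMBER_INFO,
+					SeverityText:   "INFO",
+					Body:           &v11.AnyValue{Value: &v11.AnyValue_StringValue{StringValue: "hello"}},
+				}},
+			}},
+		}},
+	}
+}
+
+// Extracting the eight W3C trace flags with the mask described above, and the
+// single-timestamp fallback recommended in the LogRecord comments below
+// (time_unix_nano if present, otherwise observed_time_unix_nano):
+func exampleTraceFlags(lr *LogRecord) uint8 {
+	return uint8(lr.GetFlags() & uint32(LogRecordFlags_LOG_RECORD_FLAGS_TRACE_FLAGS_MASK))
+}
+
+func exampleEffectiveTime(lr *LogRecord) uint64 {
+	if t := lr.GetTimeUnixNano(); t != 0 {
+		return t
+	}
+	return lr.GetObservedTimeUnixNano()
+}
+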
+type ScopeLogs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The instrumentation scope information for the logs in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of log records. + LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the log data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all logs in the "logs" field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ScopeLogs) Reset() { + *x = ScopeLogs{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopeLogs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeLogs) ProtoMessage() {} + +func (x *ScopeLogs) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopeLogs.ProtoReflect.Descriptor instead. +func (*ScopeLogs) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{2} +} + +func (x *ScopeLogs) GetScope() *v11.InstrumentationScope { + if x != nil { + return x.Scope + } + return nil +} + +func (x *ScopeLogs) GetLogRecords() []*LogRecord { + if x != nil { + return x.LogRecords + } + return nil +} + +func (x *ScopeLogs) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A log record according to OpenTelemetry Log Data Model: +// https://github.com/open-telemetry/oteps/blob/main/text/logs/0097-log-data-model.md +type LogRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // time_unix_nano is the time when the event occurred. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // Time when the event was observed by the collection system. + // For events that originate in OpenTelemetry (e.g. using OpenTelemetry Logging SDK) + // this timestamp is typically set at the generation time and is equal to Timestamp. + // For events originating externally and collected by OpenTelemetry (e.g. using + // Collector) this is the time when OpenTelemetry's code observed the event measured + // by the clock of the OpenTelemetry code. This field MUST be set once the event is + // observed by OpenTelemetry. 
+ // + // For converting OpenTelemetry log data to formats that support only one timestamp or + // when receiving OpenTelemetry log data by recipients that support only one timestamp + // internally the following logic is recommended: + // - Use time_unix_nano if it is present, otherwise use observed_time_unix_nano. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // Value of 0 indicates unknown or missing timestamp. + ObservedTimeUnixNano uint64 `protobuf:"fixed64,11,opt,name=observed_time_unix_nano,json=observedTimeUnixNano,proto3" json:"observed_time_unix_nano,omitempty"` + // Numerical value of the severity, normalized to values described in Log Data Model. + // [Optional]. + SeverityNumber SeverityNumber `protobuf:"varint,2,opt,name=severity_number,json=severityNumber,proto3,enum=opentelemetry.proto.logs.v1.SeverityNumber" json:"severity_number,omitempty"` + // The severity text (also known as log level). The original string representation as + // it is known at the source. [Optional]. + SeverityText string `protobuf:"bytes,3,opt,name=severity_text,json=severityText,proto3" json:"severity_text,omitempty"` + // A value containing the body of the log record. Can be for example a human-readable + // string message (including multi-line) describing the event in a free form or it can + // be a structured data composed of arrays and maps of other values. [Optional]. + Body *v11.AnyValue `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"` + // Additional attributes that describe the specific event occurrence. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,7,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. 8 least significant bits are the trace flags as + // defined in W3C Trace Context specification. 24 most significant bits are reserved + // and must be set to 0. Readers must not assume that 24 most significant bits + // will be zero and must correctly mask the bits when reading 8-bit trace flag (use + // flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK). [Optional]. + Flags uint32 `protobuf:"fixed32,8,opt,name=flags,proto3" json:"flags,omitempty"` + // A unique identifier for a trace. All logs from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. + // + // The receivers SHOULD assume that the log record is not associated with a + // trace if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + TraceId []byte `protobuf:"bytes,9,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is optional. If the sender specifies a valid span_id then it SHOULD also + // specify a valid trace_id. 
+ // + // The receivers SHOULD assume that the log record is not associated with a + // span if any of the following is true: + // - the field is not present, + // - the field contains an invalid value. + SpanId []byte `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` +} + +func (x *LogRecord) Reset() { + *x = LogRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogRecord) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogRecord) ProtoMessage() {} + +func (x *LogRecord) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogRecord.ProtoReflect.Descriptor instead. +func (*LogRecord) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP(), []int{3} +} + +func (x *LogRecord) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *LogRecord) GetObservedTimeUnixNano() uint64 { + if x != nil { + return x.ObservedTimeUnixNano + } + return 0 +} + +func (x *LogRecord) GetSeverityNumber() SeverityNumber { + if x != nil { + return x.SeverityNumber + } + return SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED +} + +func (x *LogRecord) GetSeverityText() string { + if x != nil { + return x.SeverityText + } + return "" +} + +func (x *LogRecord) GetBody() *v11.AnyValue { + if x != nil { + return x.Body + } + return nil +} + +func (x *LogRecord) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *LogRecord) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *LogRecord) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *LogRecord) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *LogRecord) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +var File_opentelemetry_proto_logs_v1_logs_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x6f, + 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, + 0x67, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, + 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5a, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4e, 
0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x22, 0xc3, 0x01, + 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x45, + 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x6c, + 0x6f, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4c, 0x6f, 0x67, + 0x73, 0x52, 0x09, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, + 0x10, 0xe9, 0x07, 0x22, 0xbe, 0x01, 0x0a, 0x09, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4c, 0x6f, 0x67, + 0x73, 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x47, 0x0a, 0x0b, + 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0xf3, 0x03, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, + 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, + 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x35, 0x0a, 0x17, 0x6f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, + 0x61, 0x6e, 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x06, 0x52, 0x14, 0x6f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, + 0x54, 0x0a, 0x0f, 
0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, + 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, + 0x79, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x54, 0x65, 0x78, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x62, 0x6f, + 0x64, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, + 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, + 0x61, 0x6e, 0x49, 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x2a, 0xc3, 0x05, 0x0a, 0x0e, 0x53, + 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1f, 0x0a, + 0x1b, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, + 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, + 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, + 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, + 0x43, 0x45, 0x32, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, + 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x33, 0x10, + 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, + 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x34, 0x10, 0x04, 0x12, 0x19, 0x0a, + 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 
0x05, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, + 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, + 0x47, 0x32, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x33, 0x10, 0x07, + 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x34, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, + 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, + 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x09, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x32, 0x10, + 0x0a, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, + 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x33, 0x10, 0x0b, 0x12, 0x19, 0x0a, 0x15, + 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, + 0x49, 0x4e, 0x46, 0x4f, 0x34, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, + 0x0d, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, + 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x32, 0x10, 0x0e, 0x12, 0x19, 0x0a, 0x15, + 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, + 0x57, 0x41, 0x52, 0x4e, 0x33, 0x10, 0x0f, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x34, + 0x10, 0x10, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, + 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x11, 0x12, 0x1a, 0x0a, + 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x32, 0x10, 0x12, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, + 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, + 0x4f, 0x52, 0x33, 0x10, 0x13, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, + 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x34, 0x10, + 0x14, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, + 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x15, 0x12, 0x1a, 0x0a, 0x16, + 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, + 0x46, 0x41, 0x54, 0x41, 0x4c, 0x32, 0x10, 0x16, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, + 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, + 0x4c, 0x33, 0x10, 0x17, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x34, 0x10, 0x18, + 0x2a, 0x59, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, + 0x45, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x21, 0x4c, 0x4f, 0x47, 0x5f, 
0x52, 0x45, 0x43, 0x4f, 0x52, + 0x44, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, + 0x41, 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x73, 0x0a, 0x1e, 0x69, + 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x4c, + 0x6f, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x26, 0x67, 0x6f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1b, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x2e, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_logs_v1_logs_proto_rawDescOnce sync.Once + file_opentelemetry_proto_logs_v1_logs_proto_rawDescData = file_opentelemetry_proto_logs_v1_logs_proto_rawDesc +) + +func file_opentelemetry_proto_logs_v1_logs_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_logs_v1_logs_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_logs_v1_logs_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_logs_v1_logs_proto_rawDescData) + }) + return file_opentelemetry_proto_logs_v1_logs_proto_rawDescData +} + +var file_opentelemetry_proto_logs_v1_logs_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_opentelemetry_proto_logs_v1_logs_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_opentelemetry_proto_logs_v1_logs_proto_goTypes = []interface{}{ + (SeverityNumber)(0), // 0: opentelemetry.proto.logs.v1.SeverityNumber + (LogRecordFlags)(0), // 1: opentelemetry.proto.logs.v1.LogRecordFlags + (*LogsData)(nil), // 2: opentelemetry.proto.logs.v1.LogsData + (*ResourceLogs)(nil), // 3: opentelemetry.proto.logs.v1.ResourceLogs + (*ScopeLogs)(nil), // 4: opentelemetry.proto.logs.v1.ScopeLogs + (*LogRecord)(nil), // 5: opentelemetry.proto.logs.v1.LogRecord + (*v1.Resource)(nil), // 6: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 7: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.AnyValue)(nil), // 8: opentelemetry.proto.common.v1.AnyValue + (*v11.KeyValue)(nil), // 9: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_logs_v1_logs_proto_depIdxs = []int32{ + 3, // 0: opentelemetry.proto.logs.v1.LogsData.resource_logs:type_name -> opentelemetry.proto.logs.v1.ResourceLogs + 6, // 1: opentelemetry.proto.logs.v1.ResourceLogs.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 4, // 2: opentelemetry.proto.logs.v1.ResourceLogs.scope_logs:type_name -> opentelemetry.proto.logs.v1.ScopeLogs + 7, // 3: opentelemetry.proto.logs.v1.ScopeLogs.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 5, // 4: opentelemetry.proto.logs.v1.ScopeLogs.log_records:type_name -> opentelemetry.proto.logs.v1.LogRecord + 0, // 5: opentelemetry.proto.logs.v1.LogRecord.severity_number:type_name -> opentelemetry.proto.logs.v1.SeverityNumber + 8, // 6: opentelemetry.proto.logs.v1.LogRecord.body:type_name -> opentelemetry.proto.common.v1.AnyValue + 9, // 7: opentelemetry.proto.logs.v1.LogRecord.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list 
for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_logs_v1_logs_proto_init() } +func file_opentelemetry_proto_logs_v1_logs_proto_init() { + if File_opentelemetry_proto_logs_v1_logs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogsData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceLogs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopeLogs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_logs_v1_logs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_logs_v1_logs_proto_rawDesc, + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_logs_v1_logs_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_logs_v1_logs_proto_depIdxs, + EnumInfos: file_opentelemetry_proto_logs_v1_logs_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_logs_v1_logs_proto_msgTypes, + }.Build() + File_opentelemetry_proto_logs_v1_logs_proto = out.File + file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = nil + file_opentelemetry_proto_logs_v1_logs_proto_goTypes = nil + file_opentelemetry_proto_logs_v1_logs_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go new file mode 100644 index 000000000..bca86dc44 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -0,0 +1,2515 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/metrics/v1/metrics.proto + +package v1 + +import ( + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. 
A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +// Enum value maps for AggregationTemporality. +var ( + AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", + } + AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, + } +) + +func (x AggregationTemporality) Enum() *AggregationTemporality { + p := new(AggregationTemporality) + *p = x + return p +} + +func (x AggregationTemporality) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AggregationTemporality) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[0].Descriptor() +} + +func (AggregationTemporality) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[0] +} + +func (x AggregationTemporality) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AggregationTemporality.Descriptor instead. +func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0} +} + +// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +// bit-field representing 32 distinct boolean flags. Each flag defined in this +// enum is a bit-mask. To test the presence of a single flag in the flags of +// a data point, for example, use an expression like: +// +// (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK +// +type DataPointFlags int32 + +const ( + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE DataPointFlags = 0 + // This DataPoint is valid but has no recorded value. This value + // SHOULD be used to reflect explicitly missing data in a series, as + // for an equivalent to the Prometheus "staleness marker". + DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK DataPointFlags = 1 +) + +// Enum value maps for DataPointFlags. 
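+
+// Illustrative sketch (editorial example, not protoc-gen-go output): testing
+// the staleness bit exactly as the DataPointFlags comment above prescribes.
+// NumberDataPoint is defined later in this file; the helper name is
+// hypothetical.
+func exampleHasNoRecordedValue(dp *NumberDataPoint) bool {
+	mask := uint32(DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK)
+	return dp.GetFlags()&mask == mask
+}
+
+// A request counter reported as in the CUMULATIVE walkthrough above would be
+// encoded as a monotonic Sum (Metric, Sum, and NumberDataPoint are defined
+// later in this file; the metric name is a hypothetical example):
+func exampleCumulativeCounter(total int64, startNanos, nowNanos uint64) *Metric {
+	return &Metric{
+		Name: "requests.received",
+		Unit: "1",
+		Data: &Metric_Sum{Sum: &Sum{
+			AggregationTemporality: AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
+			IsMonotonic:            true,
+			DataPoints: []*NumberDataPoint{{
+				StartTimeUnixNano: startNanos,
+				TimeUnixNano:      nowNanos,
+				Value:             &NumberDataPoint_AsInt{AsInt: total},
+			}},
+		}},
+	}
+}
+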
+var ( + DataPointFlags_name = map[int32]string{ + 0: "DATA_POINT_FLAGS_DO_NOT_USE", + 1: "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK", + } + DataPointFlags_value = map[string]int32{ + "DATA_POINT_FLAGS_DO_NOT_USE": 0, + "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK": 1, + } +) + +func (x DataPointFlags) Enum() *DataPointFlags { + p := new(DataPointFlags) + *p = x + return p +} + +func (x DataPointFlags) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DataPointFlags) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[1].Descriptor() +} + +func (DataPointFlags) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes[1] +} + +func (x DataPointFlags) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DataPointFlags.Descriptor instead. +func (DataPointFlags) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1} +} + +// MetricsData represents the metrics data that can be stored in a persistent +// storage, OR can be embedded by other protocols that transfer OTLP metrics +// data but do not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type MetricsData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` +} + +func (x *MetricsData) Reset() { + *x = MetricsData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricsData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricsData) ProtoMessage() {} + +func (x *MetricsData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricsData.ProtoReflect.Descriptor instead. +func (*MetricsData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *MetricsData) GetResourceMetrics() []*ResourceMetrics { + if x != nil { + return x.ResourceMetrics + } + return nil +} + +// A collection of ScopeMetrics from a Resource. +type ResourceMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. 
+ Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of metrics that originate from a resource. + ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_metrics" field which have their own schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ResourceMetrics) Reset() { + *x = ResourceMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceMetrics) ProtoMessage() {} + +func (x *ResourceMetrics) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceMetrics.ProtoReflect.Descriptor instead. +func (*ResourceMetrics) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceMetrics) GetResource() *v1.Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ResourceMetrics) GetScopeMetrics() []*ScopeMetrics { + if x != nil { + return x.ScopeMetrics + } + return nil +} + +func (x *ResourceMetrics) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A collection of Metrics produced by an Scope. +type ScopeMetrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The instrumentation scope information for the metrics in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of metrics that originate from an instrumentation library. + Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the metric data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all metrics in the "metrics" field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ScopeMetrics) Reset() { + *x = ScopeMetrics{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopeMetrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeMetrics) ProtoMessage() {} + +func (x *ScopeMetrics) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopeMetrics.ProtoReflect.Descriptor instead. +func (*ScopeMetrics) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{2} +} + +func (x *ScopeMetrics) GetScope() *v11.InstrumentationScope { + if x != nil { + return x.Scope + } + return nil +} + +func (x *ScopeMetrics) GetMetrics() []*Metric { + if x != nil { + return x.Metrics + } + return nil +} + +func (x *ScopeMetrics) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// Defines a Metric which has one or more timeseries. The following is a +// brief summary of the Metric data model. For more details, see: +// +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md +// +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. +// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +// - DataPoint contains timestamps, attributes, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function of to one or more measurements. 
+// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +type Metric struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name of the metric. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. + // + // Types that are assignable to Data: + // *Metric_Gauge + // *Metric_Sum + // *Metric_Histogram + // *Metric_ExponentialHistogram + // *Metric_Summary + Data isMetric_Data `protobuf_oneof:"data"` + // Additional metadata attributes that describe the metric. [Optional]. + // Attributes are non-identifying. + // Consumers SHOULD NOT need to be aware of these attributes. + // These attributes MAY be used to encode information allowing + // for lossless roundtrip translation to / from another data model. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Metadata []*v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. 
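+
+// Illustrative sketch (editorial example, not protoc-gen-go output): the
+// smallest useful Metric under the data model described above, a gauge with a
+// single double data point. Gauge and NumberDataPoint are defined below; the
+// metric name and unit are hypothetical example values.
+func exampleGauge(value float64, nowNanos uint64) *Metric {
+	return &Metric{
+		Name: "process.memory.usage",
+		Unit: "By", // UCUM symbol for bytes
+		Data: &Metric_Gauge{Gauge: &Gauge{
+			DataPoints: []*NumberDataPoint{{
+				TimeUnixNano: nowNanos,
+				Value:        &NumberDataPoint_AsDouble{AsDouble: value},
+			}},
+		}},
+	}
+}
+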
+func (*Metric) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{3} +} + +func (x *Metric) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Metric) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Metric) GetUnit() string { + if x != nil { + return x.Unit + } + return "" +} + +func (m *Metric) GetData() isMetric_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *Metric) GetGauge() *Gauge { + if x, ok := x.GetData().(*Metric_Gauge); ok { + return x.Gauge + } + return nil +} + +func (x *Metric) GetSum() *Sum { + if x, ok := x.GetData().(*Metric_Sum); ok { + return x.Sum + } + return nil +} + +func (x *Metric) GetHistogram() *Histogram { + if x, ok := x.GetData().(*Metric_Histogram); ok { + return x.Histogram + } + return nil +} + +func (x *Metric) GetExponentialHistogram() *ExponentialHistogram { + if x, ok := x.GetData().(*Metric_ExponentialHistogram); ok { + return x.ExponentialHistogram + } + return nil +} + +func (x *Metric) GetSummary() *Summary { + if x, ok := x.GetData().(*Metric_Summary); ok { + return x.Summary + } + return nil +} + +func (x *Metric) GetMetadata() []*v11.KeyValue { + if x != nil { + return x.Metadata + } + return nil +} + +type isMetric_Data interface { + isMetric_Data() +} + +type Metric_Gauge struct { + Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"` +} + +type Metric_Sum struct { + Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"` +} + +type Metric_Histogram struct { + Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"` +} + +type Metric_ExponentialHistogram struct { + ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"` +} + +type Metric_Summary struct { + Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"` +} + +func (*Metric_Gauge) isMetric_Data() {} + +func (*Metric_Sum) isMetric_Data() {} + +func (*Metric_Histogram) isMetric_Data() {} + +func (*Metric_ExponentialHistogram) isMetric_Data() {} + +func (*Metric_Summary) isMetric_Data() {} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. 
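+
+// Illustrative sketch (editorial example, not protoc-gen-go output): the oneof
+// getters above return nil unless their variant is set, so consumers usually
+// dispatch on GetData() with a type switch. Summary and the histogram data
+// point types are defined later in this file.
+func exampleDataPointCount(m *Metric) int {
+	switch d := m.GetData().(type) {
+	case *Metric_Gauge:
+		return len(d.Gauge.GetDataPoints())
+	case *Metric_Sum:
+		return len(d.Sum.GetDataPoints())
+	case *Metric_Histogram:
+		return len(d.Histogram.GetDataPoints())
+	case *Metric_ExponentialHistogram:
+		return len(d.ExponentialHistogram.GetDataPoints())
+	case *Metric_Summary:
+		return len(d.Summary.GetDataPoints())
+	default:
+		return 0 // no data variant set
+	}
+}
+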
+type Gauge struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+func (x *Gauge) Reset() {
+	*x = Gauge{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Gauge) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Gauge) GetDataPoints() []*NumberDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+// Sum represents the type of a scalar metric that is calculated as a sum of all
+// reported measurements over a time interval.
+type Sum struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+	// If "true", it means that the sum is monotonic.
+	IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
+}
+
+func (x *Sum) Reset() {
+	*x = Sum{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Sum) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Sum) ProtoMessage() {}
+
+func (x *Sum) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Sum.ProtoReflect.Descriptor instead.
+func (*Sum) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Sum) GetDataPoints() []*NumberDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+func (x *Sum) GetAggregationTemporality() AggregationTemporality {
+	if x != nil {
+		return x.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+func (x *Sum) GetIsMonotonic() bool {
+	if x != nil {
+		return x.IsMonotonic
+	}
+	return false
+}
+
+// Histogram represents the type of a metric that is calculated by aggregating
+// as a Histogram of all reported measurements over a time interval.
+type Histogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+	*x = Histogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Histogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Histogram) GetDataPoints() []*HistogramDataPoint {
+	if x != nil {
+		return x.DataPoints
+	}
+	return nil
+}
+
+func (x *Histogram) GetAggregationTemporality() AggregationTemporality {
+	if x != nil {
+		return x.AggregationTemporality
+	}
+	return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
+}
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// as an ExponentialHistogram of all reported double measurements over a time interval.
+type ExponentialHistogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +func (x *ExponentialHistogram) Reset() { + *x = ExponentialHistogram{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExponentialHistogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExponentialHistogram) ProtoMessage() {} + +func (x *ExponentialHistogram) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExponentialHistogram.ProtoReflect.Descriptor instead. +func (*ExponentialHistogram) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7} +} + +func (x *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint { + if x != nil { + return x.DataPoints + } + return nil +} + +func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality { + if x != nil { + return x.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +type Summary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. +func (*Summary) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{8} +} + +func (x *Summary) GetDataPoints() []*SummaryDataPoint { + if x != nil { + return x.DataPoints + } + return nil +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. 
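+//
+// Example: a minimal sketch (not upstream-generated) of a cumulative counter
+// point. StartTimeUnixNano marks the start of the unbroken series and
+// TimeUnixNano its end; startNanos and nowNanos are hypothetical uint64
+// timestamp variables:
+//
+//	p := &NumberDataPoint{
+//		StartTimeUnixNano: startNanos, // strongly encouraged for cumulative/delta points
+//		TimeUnixNano:      nowNanos,   // required
+//		Value:             &NumberDataPoint_AsInt{AsInt: 42},
+//	}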
+type NumberDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries from
+	// where this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// The value itself. A point is considered invalid when one of the recognized
+	// value fields is not present inside this oneof.
+	//
+	// Types that are assignable to Value:
+	//	*NumberDataPoint_AsDouble
+	//	*NumberDataPoint_AsInt
+	Value isNumberDataPoint_Value `protobuf_oneof:"value"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// Flags that apply to this specific data point. See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *NumberDataPoint) Reset() {
+	*x = NumberDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NumberDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NumberDataPoint) ProtoMessage() {}
+
+func (x *NumberDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NumberDataPoint.ProtoReflect.Descriptor instead.
+func (*NumberDataPoint) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *NumberDataPoint) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *NumberDataPoint) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *NumberDataPoint) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (x *NumberDataPoint) GetAsDouble() float64 {
+	if x, ok := x.GetValue().(*NumberDataPoint_AsDouble); ok {
+		return x.AsDouble
+	}
+	return 0
+}
+
+func (x *NumberDataPoint) GetAsInt() int64 {
+	if x, ok := x.GetValue().(*NumberDataPoint_AsInt); ok {
+		return x.AsInt
+	}
+	return 0
+}
+
+func (x *NumberDataPoint) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+func (x *NumberDataPoint) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+type isNumberDataPoint_Value interface {
+	isNumberDataPoint_Value()
+}
+
+type NumberDataPoint_AsDouble struct {
+	AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type NumberDataPoint_AsInt struct {
+	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
+
+func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
+
+// HistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Histogram. A Histogram contains summary statistics
+// for a population of values; it may optionally contain the distribution of
+// those values across a set of buckets.
+//
+// If the histogram contains the distribution of values, then both
+// "explicit_bounds" and "bucket_counts" fields must be defined.
+// If the histogram does not contain the distribution of values, then both
+// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
+// "sum" are known.
+type HistogramDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries from
+	// where this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative. This
+	// value must be equal to the sum of the "count" fields in buckets if a
+	// histogram is provided.
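+	//
+	// Example: a minimal sketch (not upstream-generated) of a consistent
+	// point, where the bucket counts sum to count and len(bucket_counts) is
+	// one greater than len(explicit_bounds); nowNanos is a hypothetical
+	// uint64 timestamp variable:
+	//
+	//	sum := 3.5
+	//	hp := &HistogramDataPoint{
+	//		TimeUnixNano:   nowNanos,
+	//		Count:          4,
+	//		Sum:            &sum,
+	//		ExplicitBounds: []float64{0.5, 1.0}, // buckets (-inf,0.5], (0.5,1], (1,+inf)
+	//		BucketCounts:   []uint64{1, 2, 1},   // 1+2+1 == Count
+	//	}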
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+	// bucket_counts is an optional field that contains the count values of the
+	// histogram for each bucket.
+	//
+	// The sum of the bucket_counts must equal the value in the count field.
+	//
+	// The number of elements in bucket_counts array must be one greater than
+	// the number of elements in explicit_bounds array.
+	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+	// explicit_bounds specifies buckets with explicitly defined bounds for values.
+	//
+	// The boundaries for bucket at index i are:
+	//
+	// (-infinity, explicit_bounds[i]] for i == 0
+	// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+	// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+	//
+	// The values in the explicit_bounds array must be strictly increasing.
+	//
+	// Histogram buckets are inclusive of their upper boundary, except the last
+	// bucket where the boundary is at infinity. This format is intentionally
+	// compatible with the OpenMetrics histogram definition.
+	ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// Flags that apply to this specific data point. See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+	// min is the minimum value over (start_time, end_time].
+	Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
+	// max is the maximum value over (start_time, end_time].
+	Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
+}
+
+func (x *HistogramDataPoint) Reset() {
+	*x = HistogramDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HistogramDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HistogramDataPoint) ProtoMessage() {}
+
+func (x *HistogramDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HistogramDataPoint.ProtoReflect.Descriptor instead.
+func (*HistogramDataPoint) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HistogramDataPoint) GetAttributes() []*v11.KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetStartTimeUnixNano() uint64 {
+	if x != nil {
+		return x.StartTimeUnixNano
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetTimeUnixNano() uint64 {
+	if x != nil {
+		return x.TimeUnixNano
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetCount() uint64 {
+	if x != nil {
+		return x.Count
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetSum() float64 {
+	if x != nil && x.Sum != nil {
+		return *x.Sum
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetBucketCounts() []uint64 {
+	if x != nil {
+		return x.BucketCounts
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetExplicitBounds() []float64 {
+	if x != nil {
+		return x.ExplicitBounds
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+func (x *HistogramDataPoint) GetFlags() uint32 {
+	if x != nil {
+		return x.Flags
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetMin() float64 {
+	if x != nil && x.Min != nil {
+		return *x.Min
+	}
+	return 0
+}
+
+func (x *HistogramDataPoint) GetMax() float64 {
+	if x != nil && x.Max != nil {
+		return *x.Max
+	}
+	return 0
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
+// summary statistics for a population of values; it may optionally contain the
+// distribution of those values across a set of buckets.
+type ExponentialHistogramDataPoint struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of key/value pairs that uniquely identify the timeseries from
+	// where this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be
+	// non-negative. This value must be equal to the sum of the "bucket_counts"
+	// values in the positive and negative Buckets plus the "zero_count" field.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+	// scale describes the resolution of the histogram. Boundaries are
+	// located at powers of the base, where:
+	//
+	//	base = (2^(2^-scale))
+	//
+	// The histogram bucket identified by `index`, a signed integer,
+	// contains values that are greater than (base^index) and
+	// less than or equal to (base^(index+1)).
+	//
+	// The positive and negative ranges of the histogram are expressed
+	// separately. Negative values are mapped by their absolute value
+	// into the negative range using the same scale as the positive range.
+	//
+	// scale is not restricted by the protocol, as the permissible
+	// values depend on the range of the data.
+	Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
+	// zero_count is the count of values that are either exactly zero or
+	// within the region considered zero by the instrumentation at the
+	// tolerated degree of precision. This bucket stores values that
+	// cannot be expressed using the standard exponential formula as
+	// well as values that have been rounded to zero.
+	//
+	// Implementations MAY consider the zero bucket to have probability
+	// mass equal to (zero_count / count).
+	ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
+	// positive carries the positive range of exponential bucket counts.
+	Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
+	// negative carries the negative range of exponential bucket counts.
+	Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
+	// Flags that apply to this specific data point. See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// min is the minimum value over (start_time, end_time].
+	Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
+	// max is the maximum value over (start_time, end_time].
+	Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
+	// ZeroThreshold may be optionally set to convey the width of the zero
+	// region, where the zero region is defined as the closed interval
+	// [-ZeroThreshold, ZeroThreshold].
+	// When ZeroThreshold is 0, the zero count bucket stores values that cannot be
+	// expressed using the standard exponential formula as well as values that
+	// have been rounded to zero.
+ ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` +} + +func (x *ExponentialHistogramDataPoint) Reset() { + *x = ExponentialHistogramDataPoint{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExponentialHistogramDataPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExponentialHistogramDataPoint) ProtoMessage() {} + +func (x *ExponentialHistogramDataPoint) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExponentialHistogramDataPoint.ProtoReflect.Descriptor instead. +func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{11} +} + +func (x *ExponentialHistogramDataPoint) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if x != nil { + return x.StartTimeUnixNano + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetSum() float64 { + if x != nil && x.Sum != nil { + return *x.Sum + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetScale() int32 { + if x != nil { + return x.Scale + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetZeroCount() uint64 { + if x != nil { + return x.ZeroCount + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetPositive() *ExponentialHistogramDataPoint_Buckets { + if x != nil { + return x.Positive + } + return nil +} + +func (x *ExponentialHistogramDataPoint) GetNegative() *ExponentialHistogramDataPoint_Buckets { + if x != nil { + return x.Negative + } + return nil +} + +func (x *ExponentialHistogramDataPoint) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +func (x *ExponentialHistogramDataPoint) GetMin() float64 { + if x != nil && x.Min != nil { + return *x.Min + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetMax() float64 { + if x != nil && x.Max != nil { + return *x.Max + } + return 0 +} + +func (x *ExponentialHistogramDataPoint) GetZeroThreshold() float64 { + if x != nil { + return x.ZeroThreshold + } + return 0 +} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type SummaryDataPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
+	Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+	Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
+	// (Optional) list of values at different quantiles of the distribution calculated
+	// from the current snapshot. The quantiles must be strictly increasing.
+	QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
+	// Flags that apply to this specific data point. See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *SummaryDataPoint) Reset() {
+	*x = SummaryDataPoint{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SummaryDataPoint) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryDataPoint) ProtoMessage() {}
+
+func (x *SummaryDataPoint) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryDataPoint.ProtoReflect.Descriptor instead.
+func (*SummaryDataPoint) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{12} +} + +func (x *SummaryDataPoint) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *SummaryDataPoint) GetStartTimeUnixNano() uint64 { + if x != nil { + return x.StartTimeUnixNano + } + return 0 +} + +func (x *SummaryDataPoint) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *SummaryDataPoint) GetCount() uint64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *SummaryDataPoint) GetSum() float64 { + if x != nil { + return x.Sum + } + return 0 +} + +func (x *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile { + if x != nil { + return x.QuantileValues + } + return nil +} + +func (x *SummaryDataPoint) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +// A representation of an exemplar, which is a sample input measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + FilteredAttributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // The value of the measurement that was recorded. An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + // + // Types that are assignable to Value: + // *Exemplar_AsDouble + // *Exemplar_AsInt + Value isExemplar_Value `protobuf_oneof:"value"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. 
+ TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` +} + +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. +func (*Exemplar) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{13} +} + +func (x *Exemplar) GetFilteredAttributes() []*v11.KeyValue { + if x != nil { + return x.FilteredAttributes + } + return nil +} + +func (x *Exemplar) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (m *Exemplar) GetValue() isExemplar_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *Exemplar) GetAsDouble() float64 { + if x, ok := x.GetValue().(*Exemplar_AsDouble); ok { + return x.AsDouble + } + return 0 +} + +func (x *Exemplar) GetAsInt() int64 { + if x, ok := x.GetValue().(*Exemplar_AsInt); ok { + return x.AsInt + } + return 0 +} + +func (x *Exemplar) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Exemplar) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +type isExemplar_Value interface { + isExemplar_Value() +} + +type Exemplar_AsDouble struct { + AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"` +} + +type Exemplar_AsInt struct { + AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"` +} + +func (*Exemplar_AsDouble) isExemplar_Value() {} + +func (*Exemplar_AsInt) isExemplar_Value() {} + +// Buckets are a set of bucket counts, encoded in a contiguous array +// of counts. +type ExponentialHistogramDataPoint_Buckets struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + // bucket_counts is an array of count values, where bucket_counts[i] carries + // the count of the bucket at index (offset+i). bucket_counts[i] is the count + // of values greater than base^(offset+i) and less than or equal to + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. 
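+	//
+	// Example: a worked instance (not upstream-generated) of the boundary
+	// formula above. With scale = 0 the base is 2^(2^0) = 2, so with
+	// offset = 0 the bucket at bucket_counts[2] covers (2^2, 2^3] = (4, 8].
+	// In Go, with hypothetical int variables scale, offset, and i:
+	//
+	//	base := math.Pow(2, math.Pow(2, -float64(scale))) // base = 2^(2^-scale)
+	//	lower := math.Pow(base, float64(offset+i))        // exclusive lower bound
+	//	upper := math.Pow(base, float64(offset+i+1))      // inclusive upper bound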
+	BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) Reset() {
+	*x = ExponentialHistogramDataPoint_Buckets{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {}
+
+func (x *ExponentialHistogramDataPoint_Buckets) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExponentialHistogramDataPoint_Buckets.ProtoReflect.Descriptor instead.
+func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{11, 0}
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 {
+	if x != nil {
+		return x.BucketCounts
+	}
+	return nil
+}
+
+// Represents the value at a given quantile of a distribution.
+//
+// To record Min and Max values, the following conventions are used:
+// - The 1.0 quantile is equivalent to the maximum value observed.
+// - The 0.0 quantile is equivalent to the minimum value observed.
+//
+// See the following issue for more context:
+// https://github.com/open-telemetry/opentelemetry-proto/issues/125
+type SummaryDataPoint_ValueAtQuantile struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The quantile of a distribution. Must be in the interval
+	// [0.0, 1.0].
+	Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
+	// The value at the given quantile of a distribution.
+	//
+	// Quantile values must NOT be negative.
+	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) Reset() {
+	*x = SummaryDataPoint_ValueAtQuantile{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SummaryDataPoint_ValueAtQuantile) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {}
+
+func (x *SummaryDataPoint_ValueAtQuantile) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SummaryDataPoint_ValueAtQuantile.ProtoReflect.Descriptor instead.
+func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 { + if x != nil { + return x.Quantile + } + return 0 +} + +func (x *SummaryDataPoint_ValueAtQuantile) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_opentelemetry_proto_metrics_v1_metrics_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2a, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x69, 0x0a, 0x0b, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0xd2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x51, 0x0a, 0x0d, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x0c, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, + 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 
0xe9, 0x07, 0x22, 0xba, 0x01, 0x0a, 0x0c, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x49, 0x0a, 0x05, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x52, + 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xa6, 0x04, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x3d, 0x0a, 0x05, + 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x61, 0x75, + 0x67, 0x65, 0x48, 0x00, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x03, 0x73, + 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x48, 0x00, 0x52, + 0x03, 0x73, 0x75, 0x6d, 0x12, 0x49, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x48, 0x00, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, + 0x6b, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x68, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, + 0x67, 0x72, 0x61, 0x6d, 0x48, 0x00, 0x52, 0x14, 0x65, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 
0x12, 0x43, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x22, 0x59, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xeb, 0x01, 0x0a, 0x03, + 0x53, 0x75, 0x6d, 0x12, 0x50, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x16, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, + 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x6d, 0x6f, 0x6e, + 0x6f, 0x74, 0x6f, 0x6e, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x4d, 0x6f, 0x6e, 0x6f, 0x74, 0x6f, 0x6e, 0x69, 0x63, 0x22, 0xd1, 0x01, 0x0a, 0x09, 0x48, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x53, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 
0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6f, 0x0a, 0x17, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x16, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xe7, 0x01, + 0x0a, 0x14, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x5e, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x6f, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, + 0x16, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, + 0x6f, 0x72, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0x5c, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x51, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0xd6, 0x02, 0x0a, 0x0f, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, + 0x52, 0x11, 0x73, 0x74, 
0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, + 0x61, 0x6e, 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, + 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, + 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, 0x09, 0x61, 0x73, 0x5f, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x08, + 0x61, 0x73, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x61, 0x73, 0x5f, 0x69, + 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, 0x61, 0x73, 0x49, 0x6e, + 0x74, 0x12, 0x46, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, + 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x42, + 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xd9, + 0x03, 0x0a, 0x12, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, + 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, + 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, + 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x15, 0x0a, 0x03, 0x73, + 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x88, + 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x06, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x6c, 0x69, + 0x63, 0x69, 0x74, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x01, + 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x73, + 0x12, 0x46, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x08, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 
0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, + 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, + 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x15, + 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x03, 0x6d, + 0x69, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, + 0x5f, 0x73, 0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, 0x06, 0x0a, 0x04, + 0x5f, 0x6d, 0x61, 0x78, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xfa, 0x05, 0x0a, 0x1d, 0x45, + 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, + 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, + 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, + 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x15, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x03, 0x73, 0x75, 0x6d, 0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x61, + 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x11, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x61, + 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x12, 0x61, 0x0a, 0x08, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, 
0x61, 0x50, 0x6f, 0x69, + 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x08, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x46, 0x0a, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x73, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x01, 0x52, 0x03, 0x6d, 0x69, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x6d, 0x61, 0x78, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x48, 0x02, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x88, 0x01, 0x01, + 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, + 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x46, 0x0a, 0x07, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x04, 0x52, 0x0c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x42, + 0x06, 0x0a, 0x04, 0x5f, 0x73, 0x75, 0x6d, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x69, 0x6e, 0x42, + 0x06, 0x0a, 0x04, 0x5f, 0x6d, 0x61, 0x78, 0x22, 0xa6, 0x03, 0x0a, 0x10, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, + 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, + 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x06, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x03, 0x73, 0x75, 0x6d, 0x12, 0x69, 0x0a, 0x0f, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 
0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, + 0x0e, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, + 0x66, 0x6c, 0x61, 0x67, 0x73, 0x1a, 0x43, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x74, + 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x22, 0x85, 0x02, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x58, 0x0a, + 0x13, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x12, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x06, 0x52, + 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x1d, 0x0a, + 0x09, 0x61, 0x73, 0x5f, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 0x48, 0x00, 0x52, 0x08, 0x61, 0x73, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x06, + 0x61, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, 0x05, + 0x61, 0x73, 0x49, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x19, + 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a, 0x8c, 0x01, 0x0a, 0x16, 0x41, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, + 0x69, 0x74, 0x79, 0x12, 0x27, 0x0a, 0x23, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, + 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x45, 0x4d, 0x50, + 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, + 0x26, 0x0a, 0x22, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, + 0x45, 0x4d, 0x50, 0x4f, 0x52, 0x41, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x43, 0x55, 0x4d, 0x55, 0x4c, + 0x41, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x2a, 0x5e, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x41, 0x54, + 0x41, 0x5f, 0x50, 0x4f, 
0x49, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x27, 0x44, 0x41, + 0x54, 0x41, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x4e, + 0x4f, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x45, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x01, 0x42, 0x7f, 0x0a, 0x21, 0x69, 0x6f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x67, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, + 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescOnce sync.Once + file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData = file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc +) + +func file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData) + }) + return file_opentelemetry_proto_metrics_v1_metrics_proto_rawDescData +} + +var file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes = []interface{}{ + (AggregationTemporality)(0), // 0: opentelemetry.proto.metrics.v1.AggregationTemporality + (DataPointFlags)(0), // 1: opentelemetry.proto.metrics.v1.DataPointFlags + (*MetricsData)(nil), // 2: opentelemetry.proto.metrics.v1.MetricsData + (*ResourceMetrics)(nil), // 3: opentelemetry.proto.metrics.v1.ResourceMetrics + (*ScopeMetrics)(nil), // 4: opentelemetry.proto.metrics.v1.ScopeMetrics + (*Metric)(nil), // 5: opentelemetry.proto.metrics.v1.Metric + (*Gauge)(nil), // 6: opentelemetry.proto.metrics.v1.Gauge + (*Sum)(nil), // 7: opentelemetry.proto.metrics.v1.Sum + (*Histogram)(nil), // 8: opentelemetry.proto.metrics.v1.Histogram + (*ExponentialHistogram)(nil), // 9: opentelemetry.proto.metrics.v1.ExponentialHistogram + (*Summary)(nil), // 10: opentelemetry.proto.metrics.v1.Summary + (*NumberDataPoint)(nil), // 11: opentelemetry.proto.metrics.v1.NumberDataPoint + (*HistogramDataPoint)(nil), // 12: opentelemetry.proto.metrics.v1.HistogramDataPoint + (*ExponentialHistogramDataPoint)(nil), // 13: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint + (*SummaryDataPoint)(nil), // 14: opentelemetry.proto.metrics.v1.SummaryDataPoint + (*Exemplar)(nil), // 15: opentelemetry.proto.metrics.v1.Exemplar + (*ExponentialHistogramDataPoint_Buckets)(nil), // 16: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets + (*SummaryDataPoint_ValueAtQuantile)(nil), // 17: 
opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile + (*v1.Resource)(nil), // 18: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 19: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 20: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs = []int32{ + 3, // 0: opentelemetry.proto.metrics.v1.MetricsData.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics + 18, // 1: opentelemetry.proto.metrics.v1.ResourceMetrics.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 4, // 2: opentelemetry.proto.metrics.v1.ResourceMetrics.scope_metrics:type_name -> opentelemetry.proto.metrics.v1.ScopeMetrics + 19, // 3: opentelemetry.proto.metrics.v1.ScopeMetrics.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 5, // 4: opentelemetry.proto.metrics.v1.ScopeMetrics.metrics:type_name -> opentelemetry.proto.metrics.v1.Metric + 6, // 5: opentelemetry.proto.metrics.v1.Metric.gauge:type_name -> opentelemetry.proto.metrics.v1.Gauge + 7, // 6: opentelemetry.proto.metrics.v1.Metric.sum:type_name -> opentelemetry.proto.metrics.v1.Sum + 8, // 7: opentelemetry.proto.metrics.v1.Metric.histogram:type_name -> opentelemetry.proto.metrics.v1.Histogram + 9, // 8: opentelemetry.proto.metrics.v1.Metric.exponential_histogram:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogram + 10, // 9: opentelemetry.proto.metrics.v1.Metric.summary:type_name -> opentelemetry.proto.metrics.v1.Summary + 20, // 10: opentelemetry.proto.metrics.v1.Metric.metadata:type_name -> opentelemetry.proto.common.v1.KeyValue + 11, // 11: opentelemetry.proto.metrics.v1.Gauge.data_points:type_name -> opentelemetry.proto.metrics.v1.NumberDataPoint + 11, // 12: opentelemetry.proto.metrics.v1.Sum.data_points:type_name -> opentelemetry.proto.metrics.v1.NumberDataPoint + 0, // 13: opentelemetry.proto.metrics.v1.Sum.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality + 12, // 14: opentelemetry.proto.metrics.v1.Histogram.data_points:type_name -> opentelemetry.proto.metrics.v1.HistogramDataPoint + 0, // 15: opentelemetry.proto.metrics.v1.Histogram.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality + 13, // 16: opentelemetry.proto.metrics.v1.ExponentialHistogram.data_points:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint + 0, // 17: opentelemetry.proto.metrics.v1.ExponentialHistogram.aggregation_temporality:type_name -> opentelemetry.proto.metrics.v1.AggregationTemporality + 14, // 18: opentelemetry.proto.metrics.v1.Summary.data_points:type_name -> opentelemetry.proto.metrics.v1.SummaryDataPoint + 20, // 19: opentelemetry.proto.metrics.v1.NumberDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 15, // 20: opentelemetry.proto.metrics.v1.NumberDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar + 20, // 21: opentelemetry.proto.metrics.v1.HistogramDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 15, // 22: opentelemetry.proto.metrics.v1.HistogramDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar + 20, // 23: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 16, // 24: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.positive:type_name -> 
opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets + 16, // 25: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.negative:type_name -> opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets + 15, // 26: opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.exemplars:type_name -> opentelemetry.proto.metrics.v1.Exemplar + 20, // 27: opentelemetry.proto.metrics.v1.SummaryDataPoint.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 17, // 28: opentelemetry.proto.metrics.v1.SummaryDataPoint.quantile_values:type_name -> opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile + 20, // 29: opentelemetry.proto.metrics.v1.Exemplar.filtered_attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_metrics_v1_metrics_proto_init() } +func file_opentelemetry_proto_metrics_v1_metrics_proto_init() { + if File_opentelemetry_proto_metrics_v1_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopeMetrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExponentialHistogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
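Aside: the Exporter closures in this init function are reflection plumbing that protoc-gen-go emits for every message type; application code never calls them and instead uses the generated structs directly. Below is a minimal, hedged sketch of how the metrics hierarchy defined in this file (MetricsData -> ResourceMetrics -> ScopeMetrics -> Metric -> NumberDataPoint) and the oneof wrapper types registered via OneofWrappers further down fit together. The metricspb/commonpb import aliases and all literal values are illustrative assumptions, not part of this patch.

    package main

    import (
        "fmt"

        commonpb "go.opentelemetry.io/proto/otlp/common/v1"
        metricspb "go.opentelemetry.io/proto/otlp/metrics/v1"
        "google.golang.org/protobuf/proto"
    )

    func main() {
        // One gauge metric with a single int64 data point, expressed in the
        // MetricsData -> ResourceMetrics -> ScopeMetrics -> Metric hierarchy
        // defined by this generated file.
        md := &metricspb.MetricsData{
            ResourceMetrics: []*metricspb.ResourceMetrics{{
                ScopeMetrics: []*metricspb.ScopeMetrics{{
                    Metrics: []*metricspb.Metric{{
                        Name: "queue_depth", // illustrative metric name
                        Data: &metricspb.Metric_Gauge{Gauge: &metricspb.Gauge{
                            DataPoints: []*metricspb.NumberDataPoint{{
                                TimeUnixNano: 1700000000000000000,
                                // The "value" oneof: AsInt or AsDouble wrapper structs,
                                // registered with the runtime via OneofWrappers below.
                                Value: &metricspb.NumberDataPoint_AsInt{AsInt: 42},
                                Attributes: []*commonpb.KeyValue{{
                                    Key: "queue", // attribute keys must be unique per data point
                                    Value: &commonpb.AnyValue{
                                        Value: &commonpb.AnyValue_StringValue{StringValue: "default"},
                                    },
                                }},
                            }},
                        }},
                    }},
                }},
            }},
        }
        b, err := proto.Marshal(md) // standard protobuf wire encoding
        if err != nil {
            panic(err)
        }
        fmt.Printf("encoded %d bytes\n", len(b))
    }

proto.Marshal produces the standard OTLP wire payload; nothing in this vendored file needs to be invoked explicitly, since the init function registers all types with the protobuf runtime at import time.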
file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NumberDataPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HistogramDataPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExponentialHistogramDataPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SummaryDataPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExponentialHistogramDataPoint_Buckets); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SummaryDataPoint_ValueAtQuantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*Metric_Gauge)(nil), + (*Metric_Sum)(nil), + (*Metric_Histogram)(nil), + (*Metric_ExponentialHistogram)(nil), + (*Metric_Summary)(nil), + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*NumberDataPoint_AsDouble)(nil), + (*NumberDataPoint_AsInt)(nil), + } + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[10].OneofWrappers = []interface{}{} + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[11].OneofWrappers = []interface{}{} + file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes[13].OneofWrappers = []interface{}{ + (*Exemplar_AsDouble)(nil), + (*Exemplar_AsInt)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc, + NumEnums: 2, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs, + EnumInfos: 
file_opentelemetry_proto_metrics_v1_metrics_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_metrics_v1_metrics_proto_msgTypes, + }.Build() + File_opentelemetry_proto_metrics_v1_metrics_proto = out.File + file_opentelemetry_proto_metrics_v1_metrics_proto_rawDesc = nil + file_opentelemetry_proto_metrics_v1_metrics_proto_goTypes = nil + file_opentelemetry_proto_metrics_v1_metrics_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go new file mode 100644 index 000000000..b7545b03b --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -0,0 +1,193 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + v1 "go.opentelemetry.io/proto/otlp/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource information. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *Resource) GetAttributes() []*v1.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Resource) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc +) + +func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData) + }) + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData +} + +var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } +func file_opentelemetry_proto_resource_v1_resource_proto_init() { + if File_opentelemetry_proto_resource_v1_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_resource_v1_resource_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_resource_v1_resource_proto_msgTypes, + }.Build() + File_opentelemetry_proto_resource_v1_resource_proto = out.File + file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil + file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil + file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go new file mode 100644 index 000000000..d7099c35b --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -0,0 +1,1281 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0 + // Bits 0-7 are used for trace flags. + SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256 + SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512 +) + +// Enum value maps for SpanFlags. +var ( + SpanFlags_name = map[int32]string{ + 0: "SPAN_FLAGS_DO_NOT_USE", + 255: "SPAN_FLAGS_TRACE_FLAGS_MASK", + 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", + } + SpanFlags_value = map[string]int32{ + "SPAN_FLAGS_DO_NOT_USE": 0, + "SPAN_FLAGS_TRACE_FLAGS_MASK": 255, + "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256, + "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512, + } +) + +func (x SpanFlags) Enum() *SpanFlags { + p := new(SpanFlags) + *p = x + return p +} + +func (x SpanFlags) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SpanFlags) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor() +} + +func (SpanFlags) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0] +} + +func (x SpanFlags) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SpanFlags.Descriptor instead. +func (SpanFlags) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. 
+ Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +// Enum value maps for Span_SpanKind. +var ( + Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", + } + Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, + } +) + +func (x Span_SpanKind) Enum() *Span_SpanKind { + p := new(Span_SpanKind) + *p = x + return p +} + +func (x Span_SpanKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor() +} + +func (Span_SpanKind) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1] +} + +func (x Span_SpanKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Span_SpanKind.Descriptor instead. +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +// Enum value maps for Status_StatusCode. 
+var ( + Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", + } + Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, + } +) + +func (x Status_StatusCode) Enum() *Status_StatusCode { + p := new(Status_StatusCode) + *p = x + return p +} + +func (x Status_StatusCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor() +} + +func (Status_StatusCode) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2] +} + +func (x Status_StatusCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status_StatusCode.Descriptor instead. +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} +} + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type TracesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *TracesData) Reset() { + *x = TracesData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracesData) ProtoMessage() {} + +func (x *TracesData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracesData.ProtoReflect.Descriptor instead. +func (*TracesData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +func (x *TracesData) GetResourceSpans() []*ResourceSpans { + if x != nil { + return x.ResourceSpans + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. 
+ Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ResourceSpans) Reset() { + *x = ResourceSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSpans) ProtoMessage() {} + +func (x *ResourceSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead. +func (*ResourceSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceSpans) GetResource() *v1.Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans { + if x != nil { + return x.ScopeSpans + } + return nil +} + +func (x *ResourceSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ScopeSpans) Reset() { + *x = ScopeSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopeSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeSpans) ProtoMessage() {} + +func (x *ScopeSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead. +func (*ScopeSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} +} + +func (x *ScopeSpans) GetScope() *v11.InstrumentationScope { + if x != nil { + return x.Scope + } + return nil +} + +func (x *ScopeSpans) GetSpans() []*Span { + if x != nil { + return x.Spans + } + return nil +} + +func (x *ScopeSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +type Span struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). 
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. 
Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. 
+func (*Span) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} +} + +func (x *Span) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span) GetParentSpanId() []byte { + if x != nil { + return x.ParentSpanId + } + return nil +} + +func (x *Span) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetKind() Span_SpanKind { + if x != nil { + return x.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (x *Span) GetStartTimeUnixNano() uint64 { + if x != nil { + return x.StartTimeUnixNano + } + return 0 +} + +func (x *Span) GetEndTimeUnixNano() uint64 { + if x != nil { + return x.EndTimeUnixNano + } + return 0 +} + +func (x *Span) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span) GetEvents() []*Span_Event { + if x != nil { + return x.Events + } + return nil +} + +func (x *Span) GetDroppedEventsCount() uint32 { + if x != nil { + return x.DroppedEventsCount + } + return 0 +} + +func (x *Span) GetLinks() []*Span_Link { + if x != nil { + return x.Links + } + return nil +} + +func (x *Span) GetDroppedLinksCount() uint32 { + if x != nil { + return x.DroppedLinksCount + } + return 0 +} + +func (x *Span) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. + Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
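Because an absent `Status` means `STATUS_CODE_UNSET` (neither success nor failure) and the generated getters tolerate nil receivers, the status code of a span that was never marked can be read without a nil check. A short illustrative sketch, under the same assumed import path as above:

```go
package main

import (
	"fmt"

	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

func main() {
	span := &tracev1.Span{Name: "db.query"}

	// No Status set: the nil-safe getter chain reports STATUS_CODE_UNSET.
	fmt.Println(span.GetStatus().GetCode())

	// Mark the span as failed with a developer-facing message.
	span.Status = &tracev1.Status{
		Code:    tracev1.Status_STATUS_CODE_ERROR,
		Message: "upstream timeout",
	}
	fmt.Println(span.GetStatus().GetCode())
}
```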
+func (*Status) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetCode() Status_StatusCode { + if x != nil { + return x.Code + } + return Status_STATUS_CODE_UNSET +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type Span_Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Span_Event) Reset() { + *x = Span_Event{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Event) ProtoMessage() {} + +func (x *Span_Event) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead. +func (*Span_Event) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *Span_Event) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *Span_Event) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span_Event) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Event) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. 
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *Span_Link) Reset() { + *x = Span_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Link) ProtoMessage() {} + +func (x *Span_Link) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. 
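The `flags` field on both `Span` and `Span_Link` is a plain bit field, so decoding it comes down to the two mask operations described above. A hedged sketch using the `SpanFlags` mask constants generated in this file; the `decodeFlags` helper is illustrative, not part of the generated API:

```go
package main

import (
	"fmt"

	tracev1 "go.opentelemetry.io/proto/otlp/trace/v1"
)

// decodeFlags splits a span or link flags value into the 8-bit W3C trace
// flags and the two remote-context bits documented above.
func decodeFlags(flags uint32) (w3c uint32, remoteKnown, isRemote bool) {
	w3c = flags & uint32(tracev1.SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
	remoteKnown = flags&uint32(tracev1.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
	isRemote = flags&uint32(tracev1.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0
	return w3c, remoteKnown, isRemote
}

func main() {
	// 0x301: sampled (bit 0) plus both remote-context bits (bits 8 and 9).
	w3c, known, remote := decodeFlags(0x301)
	fmt.Printf("w3c=%#x remoteKnown=%v isRemote=%v\n", w3c, known, remote)
}
```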
+func (*Span_Link) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Span_Link) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span_Link) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span_Link) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span_Link) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Link) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span_Link) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 
0x0a, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, + 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, + 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, + 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 
0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, + 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, + 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, + 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 
0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, + 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, + 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, + 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, + 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, + 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, + 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, + 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 
0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a, + 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, + 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50, + 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, + 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, + 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f, + 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77, + 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc +) + +func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData) + }) + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData +} + +var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ + (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags + (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans + (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans + (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 11: 
opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ + 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans + 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind + 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event + 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link + 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status + 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode + 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() } +func file_opentelemetry_proto_trace_v1_trace_proto_init() { + if File_opentelemetry_proto_trace_v1_trace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopeSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_trace_v1_trace_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs, + EnumInfos: file_opentelemetry_proto_trace_v1_trace_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_trace_v1_trace_proto_msgTypes, + }.Build() + File_opentelemetry_proto_trace_v1_trace_proto = out.File + file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil + file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil + file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil +} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..571116cc3 --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..2e337a0ed --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,15 @@ +/bin +.DS_Store +/vendor +cover.html +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof + +# Output of fossa analyzer +/fossa diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 000000000..38f564e2b --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,100 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.9.0] - 2021-07-15 +### Added +- Add `Float64.Swap` to match int atomic operations. +- Add `atomic.Time` type for atomic operations on `time.Time` values. + +[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 + +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. 
+- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together, + causing `CAS` to fail even though the old value matches. + +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. + +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 + +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 + +## [1.0.0] - 2016-07-18 + +- Initial release. + +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 000000000..8765c9fbc --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..46c945b32 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,79 @@ +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint + +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + +.PHONY: lint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + git status; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..96b47a1f1 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,63 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. 
If you are using Go modules, this package will fail to +compile with the legacy import path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path, but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. + +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` + +## Usage + +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). + +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 000000000..209df7bbc --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(val bool) *Bool { + x := &Bool{} + if val != _zeroBool { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped bool.
+func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(val bool) { + x.v.Store(boolToInt(val)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(old, new bool) (swapped bool) { + return x.v.CAS(boolToInt(old), boolToInt(new)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(val bool) (old bool) { + return truthy(x.v.Swap(boolToInt(val))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 000000000..a2e60e987 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() (old bool) { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 000000000..ae7390ee6 --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 000000000..207594f5e --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(val time.Duration) *Duration { + x := &Duration{} + if val != _zeroDuration { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(val time.Duration) { + x.v.Store(int64(val)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. 
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) { + return x.v.CAS(int64(old), int64(new)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(val time.Duration) (old time.Duration) { + return time.Duration(x.v.Swap(int64(val))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 000000000..4c18b0a9e --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(delta time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(delta))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(delta time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(delta))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..3be19c35e --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,51 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(val error) *Error { + x := &Error{} + if val != _zeroError { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(val error) { + x.v.Store(packError(val)) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 000000000..ffe0be21c --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. 
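To make the note above concrete: a bare `sync/atomic.Value` panics if you store nil or later store a different concrete type, while the `packedError` indirection below always stores one struct type. A small usage sketch of the resulting `atomic.Error` wrapper:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	var e atomic.Error

	// Safe even though the value is nil: the wrapper stores packedError{nil},
	// so the inner atomic.Value always sees the same concrete type.
	e.Store(nil)
	fmt.Println(e.Load()) // <nil>

	e.Store(errors.New("boom"))
	fmt.Println(e.Load()) // boom
}
```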
+ +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 000000000..8a1367184 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(val float64) *Float64 { + x := &Float64{} + if val != _zeroFloat64 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(val float64) { + x.v.Store(math.Float64bits(val)) +} + +// Swap atomically stores the given float64 and returns the old +// value. +func (x *Float64) Swap(val float64) (old float64) { + return math.Float64frombits(x.v.Swap(math.Float64bits(val))) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 000000000..df36b0107 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,69 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(delta float64) float64 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(delta float64) float64 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float64 values. +// +// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators, +// but CAS allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CAS loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CAS(old, new) { +// break +// } +// } +// +// If CAS did not treat a stored NaN as equal to a passed in NaN, then the above would loop forever. +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 000000000..1e9ef4f87 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,27 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 000000000..640ea36a1 --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(val int32) *Int32 { + return &Int32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(delta int32) int32 { + return atomic.AddInt32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(delta int32) int32 { + return atomic.AddInt32(&i.v, -delta) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(val int32) { + atomic.StoreInt32(&i.v, val) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. 
+func (i *Int32) Swap(val int32) (old int32) { + return atomic.SwapInt32(&i.v, val) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 000000000..9ab66b980 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(val int64) *Int64 { + return &Int64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(delta int64) int64 { + return atomic.AddInt64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(delta int64) int64 { + return atomic.AddInt64(&i.v, -delta) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(val int64) { + atomic.StoreInt64(&i.v, val) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(val int64) (old int64) { + return atomic.SwapInt64(&i.v, val) +} + +// MarshalJSON encodes the wrapped int64 into JSON. 
+func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 000000000..a8201cb4a --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..80df93d09 --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,54 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(val string) *String { + x := &String{} + if val != _zeroString { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) + } + return _zeroString +} + +// Store atomically stores the passed string. +func (x *String) Store(val string) { + x.v.Store(val) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 000000000..83d92edaf --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,45 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go +// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which +// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go new file mode 100644 index 000000000..33460fc37 --- /dev/null +++ b/vendor/go.uber.org/atomic/time.go @@ -0,0 +1,55 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "time" +) + +// Time is an atomic type-safe wrapper for time.Time values. +type Time struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroTime time.Time + +// NewTime creates a new Time. +func NewTime(val time.Time) *Time { + x := &Time{} + if val != _zeroTime { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Time. +func (x *Time) Load() time.Time { + return unpackTime(x.v.Load()) +} + +// Store atomically stores the passed time.Time. +func (x *Time) Store(val time.Time) { + x.v.Store(packTime(val)) +} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go new file mode 100644 index 000000000..1e3dc978a --- /dev/null +++ b/vendor/go.uber.org/atomic/time_ext.go @@ -0,0 +1,36 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go + +func packTime(t time.Time) interface{} { + return t +} + +func unpackTime(v interface{}) time.Time { + if t, ok := v.(time.Time); ok { + return t + } + return time.Time{} +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 000000000..7859a9cc3 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(val uint32) *Uint32 { + return &Uint32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(delta uint32) uint32 { + return atomic.AddUint32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(delta uint32) uint32 { + return atomic.AddUint32(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(val uint32) { + atomic.StoreUint32(&i.v, val) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(val uint32) (old uint32) { + return atomic.SwapUint32(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. 
+func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 000000000..2f2a7db63 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(val uint64) *Uint64 { + return &Uint64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(delta uint64) uint64 { + return atomic.AddUint64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(delta uint64) uint64 { + return atomic.AddUint64(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(val uint64) { + atomic.StoreUint64(&i.v, val) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(val uint64) (old uint64) { + return atomic.SwapUint64(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. 
+func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go new file mode 100644 index 000000000..ecf7a7727 --- /dev/null +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(val uintptr) *Uintptr { + return &Uintptr{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. +func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(val uintptr) { + atomic.StoreUintptr(&i.v, val) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(val uintptr) (old uintptr) { + return atomic.SwapUintptr(&i.v, val) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. 
+func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go new file mode 100644 index 000000000..169f793dc --- /dev/null +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: val} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(val unsafe.Pointer) { + atomic.StorePointer(&p.v, val) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { + return atomic.SwapPointer(&p.v, val) +} + +// CAS is an atomic compare-and-swap. +func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 000000000..671f3a382 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 000000000..2a7cf70da --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 000000000..29f0a2de4 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. 
The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increase. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increase. Remember to get a +// good random salt. +func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory)
+ for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 000000000..10f46948d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrarily long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 000000000..063e7784f --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 000000000..c3895478e --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,2791 @@ +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT.
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +// func blamkaSSE4(b *block) +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO 
X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, 
X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + 
PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, 
X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + 
PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR 
X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + 
PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, 
X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + 
PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ 
X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, 
X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 
+ PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET + +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 + +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 + +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $0x00000080, DI + +loop: + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI + JA loop + RET + +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ c+24(FP), CX + MOVQ $0x00000080, DI + +loop: + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 000000000..a481b2243 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
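Reviewer note on the Argon2 code above and below: the SSE4 routine above and the portable Go fallback that follows both evaluate the same primitive, Argon2's BlaMka multiply-add, which replaces the plain additions in BLAKE2b's G function to make each round more expensive to implement in hardware (per the Argon2 design). Each `PMULULQ`/`PADDQ`/`PADDQ`/`PADDQ` cluster in the assembly, like each `v += w + 2*uint64(uint32(v))*uint64(uint32(w))` line in `blamkaGeneric` below, computes the following. A minimal sketch; the helper name `fBlaMka` is illustrative and does not exist in the vendored package:

    // fBlaMka computes a + b + 2*lo(a)*lo(b), where lo(x) is the low 32 bits
    // of x. This is the multiply-add that Argon2 substitutes for the plain
    // "a + b" steps of BLAKE2b's G function.
    func fBlaMka(a, b uint64) uint64 {
        return a + b + 2*uint64(uint32(a))*uint64(uint32(b))
    }
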
+ +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 
2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 000000000..16d58c650 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 000000000..d2e98d429 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. 
+func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) 
+ b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 000000000..199c21d27 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
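For reviewers scanning this vendor bump: the exported surface of blake2b.go above (the one-shot `Sum*` helpers and the keyed `New*` constructors described in its package comment) is used roughly as follows. This is an illustrative caller only, not part of the diff; the key and message bytes are placeholders:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/blake2b"
    )

    func main() {
        // One-shot digest: Sum256/Sum384/Sum512 hash a byte slice directly.
        digest := blake2b.Sum256([]byte("payload"))
        fmt.Printf("%x\n", digest)

        // A non-nil key (up to 64 bytes) turns the hash into a MAC.
        mac, err := blake2b.New512([]byte("0123456789abcdef"))
        if err != nil {
            panic(err) // only fails if the key exceeds 64 bytes
        }
        mac.Write([]byte("payload"))
        fmt.Printf("%x\n", mac.Sum(nil))
    }
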
+ +//go:build amd64 && gc && !purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 000000000..f75162e03 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,4559 @@ +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. + +//go:build amd64 && gc && !purego + +#include "textflag.h" + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, 
Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE 
$0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + 
BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE 
$0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + 
VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, 
Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 
+ VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + RET + +DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32 + +DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32 + +DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32 + +DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +// Requires: AVX, SSE2 +TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + VMOVDQU ·AVX_c40<>+0(SB), X0 + VMOVDQU ·AVX_c48<>+0(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + VMOVDQU ·AVX_iv3<>+0(SB), X0 + VMOVDQA X0, (R10) + XORQ CX, (R10) + VMOVDQU (AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $0x80, R8 + CMPQ R8, $0x80 + JGE noinc + INCQ R9 + +noinc: + BYTE $0xc4 + BYTE $0x41 + BYTE $0xf9 + BYTE $0x6e + BYTE $0xf8 + BYTE $0xc4 + BYTE $0x43 + BYTE $0x81 + BYTE $0x22 + BYTE $0xf9 + BYTE $0x01 + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 + VPXOR X15, X6, X6 + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, 
X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD 
$-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, 
X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + 
VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 
+ BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + 
BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + 
BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 
+ BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE 
$0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + 
VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + 
VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + 
VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + VMOVDQU X10, (AX) + VMOVDQU X11, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VZEROUPPER + RET + +DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 000000000..9a0ce2124 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,1441 @@ +// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT. 
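+
+// A note on the rotation idiom used throughout the generated SIMD code below
+// (a sketch added for orientation, not part of the generated output): BLAKE2b's
+// G function rotates 64-bit words right by 32, 24, 16 and 63 bits, and the
+// assembly realizes each rotation without a rotate instruction:
+//
+//	rotate 32: (V)PSHUFD $0xb1 swaps the two 32-bit halves of each 64-bit lane
+//	rotate 24: (V)PSHUFB with the c40<> byte-shuffle constant
+//	rotate 16: (V)PSHUFB with the c48<> byte-shuffle constant
+//	rotate 63: (V)PADDQ x,x (= x<<1), (V)PSRLQ $0x3f (= x>>63), then (V)PXOR,
+//	           using the scalar identity bits.RotateLeft64(x, 1) == (x+x)^(x>>63)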
+ +//go:build amd64 && gc && !purego + +#include "textflag.h" + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +// Requires: SSE2, SSE4.1, SSSE3 +TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + MOVOU ·iv3<>+0(SB), X0 + MOVO X0, (R10) + XORQ CX, (R10) + MOVOU ·c40<>+0(SB), X13 + MOVOU ·c48<>+0(SB), X14 + MOVOU (AX), X12 + MOVOU 16(AX), X15 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $0x80, R8 + CMPQ R8, $0x80 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ $0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR 
X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 
56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, 
X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 
+ PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ 
X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR 
X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET + +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 + +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 + +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), 
RODATA|NOPTR, $16 + +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 000000000..3168a8aa3 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. +var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + 
v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 000000000..6e28668cd --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 000000000..52c414db0 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produces a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must be between +// zero and 32 bytes long.
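+//
+// A minimal usage sketch (the names are the exports defined in this file):
+//
+//	x, _ := NewXOF(128, nil) // request 128 bytes of output, unkeyed
+//	x.Write([]byte("data"))
+//	out := make([]byte, 128)
+//	x.Read(out) // a subsequent Read returns io.EOF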
+func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. + return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 000000000..54e446e1d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
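+
+// As a usage sketch (not part of the diff): once the init below has run, the
+// registered hashes are reachable through the standard library alone, so a
+// blank import suffices:
+//
+//	import (
+//		"crypto"
+//		_ "golang.org/x/crypto/blake2b"
+//	)
+//
+//	h := crypto.BLAKE2b_512.New() // equivalent to blake2b.New512(nil)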
+ +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 000000000..2492f796a --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. 
+func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances.
It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. 
+ return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. 
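+//
+// For example (a sketch; 0x17 is the UTCTime tag and 0x0d the length):
+//
+//	s := String("\x17\x0d" + "150102030405Z")
+//	var t time.Time
+//	s.ReadASN1UTCTime(&t) // reports true; t is 2015-01-02 03:04:05 +0000 UTC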
+func (s *String) ReadASN1UTCTime(out *time.Time) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.UTCTime) {
+		return false
+	}
+	t := string(bytes)
+
+	formatStr := defaultUTCTimeFormatStr
+	var err error
+	res, err := time.Parse(formatStr, t)
+	if err != nil {
+		// Fallback to minute precision if we can't parse second
+		// precision. If we are following X.509 or X.690 we shouldn't
+		// support this, but we do.
+		formatStr = "0601021504Z0700"
+		res, err = time.Parse(formatStr, t)
+	}
+	if err != nil {
+		return false
+	}
+
+	if serialized := res.Format(formatStr); serialized != t {
+		return false
+	}
+
+	if res.Year() >= 2050 {
+		// UTCTime interprets the low order digits 50-99 as 1950-99.
+		// This only applies to its use in the X.509 profile.
+		// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		res = res.AddDate(-100, 0, 0)
+	}
+	*out = res
+	return true
+}
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+		len(bytes)*8/8 != len(bytes) {
+		return false
+	}
+
+	paddingBits := bytes[0]
+	bytes = bytes[1:]
+	if paddingBits > 7 ||
+		len(bytes) == 0 && paddingBits != 0 ||
+		len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+		return false
+	}
+
+	out.BitLength = len(bytes)*8 - int(paddingBits)
+	out.Bytes = bytes
+
+	return true
+}
+
+// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
+// an error if the BIT STRING is not a whole number of bytes. It reports
+// whether the read was successful.
+func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
+		return false
+	}
+
+	paddingBits := bytes[0]
+	if paddingBits != 0 {
+		return false
+	}
+	*out = bytes[1:]
+	return true
+}
+
+// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
+	return s.ReadASN1((*String)(out), tag)
+}
+
+// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
+	var t asn1.Tag
+	if !s.ReadAnyASN1(out, &t) || t != tag {
+		return false
+	}
+	return true
+}
+
+// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
+	var t asn1.Tag
+	if !s.ReadAnyASN1Element(out, &t) || t != tag {
+		return false
+	}
+	return true
+}
+
+// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, sets outTag to its tag, and advances.
+// It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
+	return s.readASN1(out, outTag, true /* skip header */)
+}
+
+// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
+// (including tag and length bytes) into out, sets outTag to its tag, and
+// advances. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
+	return s.readASN1(out, outTag, false /* include header */)
+}
+
+// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
+// the given tag.
+func (s String) PeekASN1Tag(tag asn1.Tag) bool {
+	if len(s) == 0 {
+		return false
+	}
+	return asn1.Tag(s[0]) == tag
+}
+
+// SkipASN1 reads and discards an ASN.1 element with the given tag. It
+// reports whether the operation was successful.
+func (s *String) SkipASN1(tag asn1.Tag) bool {
+	var unused String
+	return s.ReadASN1(&unused, tag)
+}
+
+// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
+// element (not including tag and length bytes) tagged with the given tag into
+// out. It stores whether an element with the tag was found in outPresent,
+// unless outPresent is nil. It reports whether the read was successful.
+func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
+	present := s.PeekASN1Tag(tag)
+	if outPresent != nil {
+		*outPresent = present
+	}
+	if present && !s.ReadASN1(out, tag) {
+		return false
+	}
+	return true
+}
+
+// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
+// else leaves s unchanged. It reports whether the operation was successful.
+func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
+	if !s.PeekASN1Tag(tag) {
+		return true
+	}
+	var unused String
+	return s.ReadASN1(&unused, tag)
+}
+
+func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
+	if len(*s) < 2 {
+		return false
+	}
+	tag, lenByte := (*s)[0], (*s)[1]
+
+	if tag&0x1f == 0x1f {
+		// ITU-T X.690 section 8.1.2
+		//
+		// An identifier octet with a tag part of 0x1f indicates a high-tag-number
+		// form identifier with two or more octets. We only support tags less than
+		// 31 (i.e. low-tag-number form, single octet identifier).
+		return false
+	}
+
+	if outTag != nil {
+		*outTag = asn1.Tag(tag)
+	}
+
+	// ITU-T X.690 section 8.1.3
+	//
+	// Bit 8 of the first length byte indicates whether the length is short- or
+	// long-form.
+	var length, headerLen uint32 // length includes headerLen
+	if lenByte&0x80 == 0 {
+		// Short-form length (section 8.1.3.4), encoded in bits 1-7.
+		length = uint32(lenByte) + 2
+		headerLen = 2
+	} else {
+		// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
+		// used to encode the length.
+		lenLen := lenByte & 0x7f
+		var len32 uint32
+
+		if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+			return false
+		}
+
+		lenBytes := String((*s)[2 : 2+lenLen])
+		if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+			return false
+		}
+
+		// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
+		// with the minimum number of octets.
+		if len32 < 128 {
+			// Length should have used short-form encoding.
+			return false
+		}
+		if len32>>((lenLen-1)*8) == 0 {
+			// Leading octet is 0. Length should have been at least one byte shorter.
+			return false
+		}
+
+		headerLen = 2 + uint32(lenLen)
+		if headerLen+len32 < len32 {
+			// Overflow.
+			return false
+		}
+		length = headerLen + len32
+	}
+
+	if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
+		return false
+	}
+	if skipHeader && !out.Skip(int(headerLen)) {
+		panic("cryptobyte: internal error")
+	}
+
+	return true
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
new file mode 100644
index 000000000..90ef6a241
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 contains supporting types for parsing and building ASN.1
+// messages with the cryptobyte package.
+package asn1
+
+// Tag represents an ASN.1 identifier octet, consisting of a tag number
+// (indicating a type) and class (such as context-specific or constructed).
+//
+// Methods in the cryptobyte package only support the low-tag-number form, i.e.
+// a single identifier octet with bits 7-8 encoding the class and bits 1-6
+// encoding the tag number.
+type Tag uint8
+
+const (
+	classConstructed     = 0x20
+	classContextSpecific = 0x80
+)
+
+// Constructed returns t with the constructed class bit set.
+func (t Tag) Constructed() Tag { return t | classConstructed }
+
+// ContextSpecific returns t with the context-specific class bit set.
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
+
+// The following is a list of standard tag and class combinations.
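+// Note that SEQUENCE and SET below already include the constructed bit, so
+// they can be passed to the cryptobyte reading and writing functions as-is;
+// an explicitly tagged field, by contrast, would be expressed as, for
+// example, asn1.Tag(0).Constructed().ContextSpecific().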
+const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 000000000..cf254f5f1 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. 
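+// For example, AddUint32(0x01020304) appends the bytes 0x01 0x02 0x03 0x04.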
+func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint64 appends a big-endian, 64-bit value to the byte string. +func (b *Builder) AddUint64(v uint64) { + b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) 
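+	// The zero bytes appended above reserve space for the length prefix; once
+	// the continuation has run, flushChild computes the child's final length
+	// and writes it back into this reserved region.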
+ + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. +// An attempt by a child builder passed to a continuation to unwrite bytes +// from its parent will panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n < 0 { + panic("cryptobyte: attempted to unwrite negative number of bytes") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. 
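+//
+// A minimal illustrative implementation (the point type is hypothetical):
+//
+//	type point struct{ x, y uint16 }
+//
+//	func (p point) Marshal(b *cryptobyte.Builder) error {
+//		b.AddUint16(p.x)
+//		b.AddUint16(p.y)
+//		return nil
+//	}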
+type MarshalingValue interface {
+	// Marshal is called by Builder.AddValue. It receives a pointer to a builder
+	// to marshal itself into. It may return an error that occurred during
+	// marshaling, such as unset or invalid values.
+	Marshal(b *Builder) error
+}
+
+// AddValue calls Marshal on v, passing a pointer to the builder to append to.
+// If Marshal returns an error, it is set on the Builder so that subsequent
+// appends don't have an effect.
+func (b *Builder) AddValue(v MarshalingValue) {
+	err := v.Marshal(b)
+	if err != nil {
+		b.err = err
+	}
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go
new file mode 100644
index 000000000..4b0f8097f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -0,0 +1,183 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cryptobyte contains types that help with parsing and constructing
+// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
+// contains useful ASN.1 constants.)
+//
+// The String type is for parsing. It wraps a []byte slice and provides helper
+// functions for consuming structures, value by value.
+//
+// The Builder type is for constructing messages. It provides helper functions
+// for appending values and also for appending length-prefixed submessages –
+// without having to worry about calculating the length prefix ahead of time.
+//
+// See the documentation and examples for the Builder and String types to get
+// started.
+package cryptobyte
+
+// String represents a string of bytes. It provides methods for parsing
+// fixed-length and length-prefixed values from it.
+type String []byte
+
+// read advances a String by n bytes and returns them. If less than n bytes
+// remain, it returns nil.
+func (s *String) read(n int) []byte {
+	if len(*s) < n || n < 0 {
+		return nil
+	}
+	v := (*s)[:n]
+	*s = (*s)[n:]
+	return v
+}
+
+// Skip advances the String by n bytes and reports whether it was successful.
+func (s *String) Skip(n int) bool {
+	return s.read(n) != nil
+}
+
+// ReadUint8 decodes an 8-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint8(out *uint8) bool {
+	v := s.read(1)
+	if v == nil {
+		return false
+	}
+	*out = uint8(v[0])
+	return true
+}
+
+// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint16(out *uint16) bool {
+	v := s.read(2)
+	if v == nil {
+		return false
+	}
+	*out = uint16(v[0])<<8 | uint16(v[1])
+	return true
+}
+
+// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint24(out *uint32) bool {
+	v := s.read(3)
+	if v == nil {
+		return false
+	}
+	*out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
+	return true
+}
+
+// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint32(out *uint32) bool {
+	v := s.read(4)
+	if v == nil {
+		return false
+	}
+	*out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
+	return true
+}
+
+// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
+// It reports whether the read was successful.
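+//
+// For example, the bytes 0x00 0x00 0x00 0x00 0x01 0x02 decode to 0x102.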
+func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 000000000..21ca3b2ee --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,90 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of the X25519 function, which +// performs scalar multiplication on the elliptic curve known as Curve25519. +// See RFC 7748. +// +// This package is a wrapper for the X25519 implementation +// in the crypto/ecdh package. +package curve25519 + +import "crypto/ecdh" + +// ScalarMult sets dst to the product scalar * point. 
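+// dst, scalar and point are all 32-byte values.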
+// +// Deprecated: when provided a low-order point, ScalarMult will set dst to all +// zeroes, irrespective of the scalar. Instead, use the X25519 function, which +// will return an error. +func ScalarMult(dst, scalar, point *[32]byte) { + if _, err := x25519(dst, scalar[:], point[:]); err != nil { + // The only error condition for x25519 when the inputs are 32 bytes long + // is if the output would have been the all-zero value. + for i := range dst { + dst[i] = 0 + } + } +} + +// ScalarBaseMult sets dst to the product scalar * base where base is the +// standard generator. +// +// It is recommended to use the X25519 function with Basepoint instead, as +// copying into fixed size arrays can lead to unexpected bugs. +func ScalarBaseMult(dst, scalar *[32]byte) { + curve := ecdh.X25519() + priv, err := curve.NewPrivateKey(scalar[:]) + if err != nil { + panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + } + copy(dst[:], priv.PublicKey().Bytes()) +} + +const ( + // ScalarSize is the size of the scalar input to X25519. + ScalarSize = 32 + // PointSize is the size of the point input to X25519. + PointSize = 32 +) + +// Basepoint is the canonical Curve25519 generator. +var Basepoint []byte + +var basePoint = [32]byte{9} + +func init() { Basepoint = basePoint[:] } + +// X25519 returns the result of the scalar multiplication (scalar * point), +// according to RFC 7748, Section 5. scalar, point and the return value are +// slices of 32 bytes. +// +// scalar can be generated at random, for example with crypto/rand. point should +// be either Basepoint or the output of another X25519 call. +// +// If point is Basepoint (but not if it's a different slice with the same +// contents) a precomputed implementation might be used for performance. +func X25519(scalar, point []byte) ([]byte, error) { + // Outline the body of function, to let the allocation be inlined in the + // caller, and possibly avoid escaping to the heap. + var dst [32]byte + return x25519(&dst, scalar, point) +} + +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + curve := ecdh.X25519() + pub, err := curve.NewPublicKey(point) + if err != nil { + return nil, err + } + priv, err := curve.NewPrivateKey(scalar) + if err != nil { + return nil, err + } + out, err := priv.ECDH(pub) + if err != nil { + return nil, err + } + copy(dst[:], out) + return dst[:], nil +} diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000..2c033dff4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. 
+// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children new file mode 100644 index 000000000..08261bffd Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/children differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes new file mode 100644 index 000000000..1dae6ede8 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/nodes differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text new file mode 100644 index 000000000..7e516413f --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -0,0 +1 @@ +birkenesoddtangentinglogoweirbitbucketrzynishikatakayamatta-varjjatjomembersaltdalovepopartysfjordiskussionsbereichatinhlfanishikatsuragitappassenger-associationishikawazukamiokameokamakurazakitaurayasudabitternidisrechtrainingloomy-routerbjarkoybjerkreimdbalsan-suedtirololitapunkapsienamsskoganeibmdeveloperauniteroirmemorialombardiadempresashibetsukumiyamagasakinderoyonagunicloudevelopmentaxiijimarriottayninhaccanthobby-siteval-d-aosta-valleyoriikaracolognebinatsukigataiwanumatajimidsundgcahcesuolocustomer-ocimperiautoscanalytics-gatewayonagoyaveroykenflfanpachihayaakasakawaiishopitsitemasekd1kappenginedre-eikerimo-siemenscaledekaascolipicenoboribetsucks3-eu-west-3utilities-16-balestrandabergentappsseekloges3-eu-west-123paginawebcamauction-acornfshostrodawaraktyubinskaunicommbank123kotisivultrobjectselinogradimo-i-rana4u2-localhostrolekanieruchomoscientistordal-o-g-i-nikolaevents3-ap-northeast-2-ddnsking123homepagefrontappchizip61123saitamakawababia-goracleaningheannakadomarineat-urlimanowarudakuneustarostwodzislawdev-myqnapcloudcontrolledgesuite-stagingdyniamusementdllclstagehirnikonantomobelementorayokosukanoyakumoliserniaurland-4-salernord-aurdalipaywhirlimiteddnslivelanddnss3-ap-south-123siteweberlevagangaviikanonji234lima-cityeats3-ap-southeast-123webseiteambulancechireadmyblogspotaribeiraogakicks-assurfakefurniturealmpmninoheguribigawaurskog-holandinggfarsundds3-ap-southeast-20001wwwedeployokote123hjemmesidealerdalaheadjuegoshikibichuobiraustevollimombetsupplyokoze164-balena-devices3-ca-central-123websiteleaf-south-12hparliamentatsunobninsk8s3-eu-central-1337bjugnishimerablackfridaynightjxn--11b4c3ditchyouripatriabloombergretaijindustriesteinkjerbloxcmsaludivtasvuodnakaiwanairlinekobayashimodatecnologiablushakotanishinomiyashironomniwebview-assetsalvadorbmoattachmentsamegawabmsamnangerbmwellbeingzonebnrweatherchannelsdvrdnsamparalleluxenishinoomote
gotsukishiwadavvenjargamvikarpaczest-a-la-maisondre-landivttasvuotnakamai-stagingloppennebomlocalzonebonavstackartuzybondigitaloceanspacesamsclubartowest1-usamsunglugsmall-webspacebookonlineboomlaakesvuemielecceboschristmasakilatiron-riopretoeidsvollovesickaruizawabostik-serverrankoshigayachtsandvikcoromantovalle-d-aostakinouebostonakijinsekikogentlentapisa-geekarumaifmemsetkmaxxn--12c1fe0bradescotksatmpaviancapitalonebouncemerckmsdscloudiybounty-fullensakerrypropertiesangovtoyosatoyokawaboutiquebecologialaichaugiangmbhartiengiangminakamichiharaboutireservdrangedalpusercontentoyotapfizerboyfriendoftheinternetflixn--12cfi8ixb8lublindesnesanjosoyrovnoticiasannanishinoshimattelemarkasaokamikitayamatsurinfinitigopocznore-og-uvdalucaniabozen-sudtiroluccanva-appstmnishiokoppegardray-dnsupdaterbozen-suedtirolukowesteuropencraftoyotomiyazakinsurealtypeformesswithdnsannohekinanporovigonohejinternationaluroybplacedogawarabikomaezakirunordkappgfoggiabrandrayddns5ybrasiliadboxoslockerbresciaogashimadachicappadovaapstemp-dnswatchest-mon-blogueurodirumagazinebrindisiciliabroadwaybroke-itvedestrandraydnsanokashibatakashimashikiyosatokigawabrokerbrothermesserlifestylebtimnetzpisdnpharmaciensantamariakebrowsersafetymarketingmodumetacentrumeteorappharmacymruovatlassian-dev-builderschaefflerbrumunddalutskashiharabrusselsantoandreclaimsanukintlon-2bryanskiptveterinaireadthedocsaobernardovre-eikerbrynebwestus2bzhitomirbzzwhitesnowflakecommunity-prochowicecomodalenissandoycompanyaarphdfcbankasumigaurawa-mazowszexn--1ck2e1bambinagisobetsuldalpha-myqnapcloudaccess3-us-east-2ixboxeroxfinityolasiteastus2comparemarkerryhotelsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacomsecaasnesoddeno-stagingrondarcondoshifteditorxn--1ctwolominamatarnobrzegrongrossetouchijiwadedyn-berlincolnissayokoshibahikariyaltakazakinzais-a-bookkeepermarshallstatebankasuyalibabahccavuotnagaraholtaleniwaizumiotsurugashimaintenanceomutazasavonarviikaminoyamaxunispaceconferenceconstructionflashdrivefsncf-ipfsaxoconsuladobeio-static-accesscamdvrcampaniaconsultantranoyconsultingroundhandlingroznysaitohnoshookuwanakayamangyshlakdnepropetrovskanlandyndns-freeboxostrowwlkpmgrphilipsyno-dschokokekscholarshipschoolbusinessebycontactivetrailcontagematsubaravendbambleborkdalvdalcest-le-patron-rancherkasydneyukuhashimokawavoues3-sa-east-1contractorskenissedalcookingruecoolblogdnsfor-better-thanhhoarairforcentralus-1cooperativano-frankivskodjeephonefosschoolsztynsetransiphotographysiocoproductionschulplattforminamiechizenisshingucciprianiigatairaumalatvuopmicrolightinguidefinimaringatlancastercorsicafjschulservercosenzakopanecosidnshome-webservercellikescandypopensocialcouchpotatofrieschwarzgwangjuh-ohtawaramotoineppueblockbusternopilawacouncilcouponscrapper-sitecozoravennaharimalborkaszubytemarketscrappinguitarscrysecretrosnubananarepublic-inquiryurihonjoyenthickaragandaxarnetbankanzakiwielunnerepairbusanagochigasakishimabarakawaharaolbia-tempio-olbiatempioolbialowiezachpomorskiengiangjesdalolipopmcdirepbodyn53cqcxn--1lqs03niyodogawacrankyotobetsumidaknongujaratmallcrdyndns-homednscwhminamifuranocreditcardyndns-iphutholdingservehttpbincheonl-ams-1creditunionionjukujitawaravpagecremonashorokanaiecrewhoswholidaycricketnedalcrimeast-kazakhstanangercrotonecrowniphuyencrsvp4cruiseservehumourcuisinellair-traffic-controllagdenesnaaseinet-freakserveircasertainaircraftingvolloansnasaarlanduponthewifidelitypedreamhostersaotomeldaluxurycuneocupcakecuritibacgiangiangryggeecurvalled-aostargets-itranslatedyndns-mailcutegirlfriendyndns-office-on-the-web
hoptogurafedoraprojectransurlfeirafembetsukuis-a-bruinsfanfermodenakasatsunairportrapaniizaferraraferraris-a-bulls-fanferrerotikagoshimalopolskanittedalfetsundyndns-wikimobetsumitakagildeskaliszkolamericanfamilydservemp3fgunmaniwamannorth-kazakhstanfhvalerfilegear-augustowiiheyakagefilegear-deatnuniversitysvardofilegear-gbizfilegear-iefilegear-jpmorgangwonporterfilegear-sg-1filminamiizukamiminefinalchikugokasellfyis-a-candidatefinancefinnoyfirebaseappiemontefirenetlifylkesbiblackbaudcdn-edgestackhero-networkinggroupowiathletajimabaria-vungtaudiopsysharpigboatshawilliamhillfirenzefirestonefireweblikes-piedmontravelersinsurancefirmdalegalleryfishingoldpoint2thisamitsukefitjarfitnessettsurugiminamimakis-a-catererfjalerfkatsushikabeebyteappilottonsberguovdageaidnunjargausdalflekkefjordyndns-workservep2phxn--1lqs71dyndns-remotewdyndns-picserveminecraftransporteflesbergushikamifuranorthflankatsuyamashikokuchuoflickragerokunohealthcareershellflierneflirfloginlinefloppythonanywherealtorfloraflorencefloripalmasfjordenfloristanohatajiris-a-celticsfanfloromskogxn--2m4a15eflowershimokitayamafltravinhlonganflynnhosting-clusterfncashgabadaddjabbottoyourafndyndns1fnwkzfolldalfoolfor-ourfor-somegurownproviderfor-theaterfordebianforexrotheworkpccwinbar0emmafann-arborlandd-dnsiskinkyowariasahikawarszawashtenawsmppl-wawsglobalacceleratorahimeshimakanegasakievennodebalancern4t3l3p0rtatarantours3-ap-northeast-123minsidaarborteaches-yogano-ipifony-123miwebaccelastx4432-b-datacenterprisesakijobservableusercontentateshinanomachintaifun-dnsdojournalistoloseyouriparisor-fronavuotnarashinoharaetnabudejjunipereggio-emilia-romagnaroyboltateyamajureggiocalabriakrehamnayoro0o0forgotdnshimonitayanagithubpreviewsaikisarazure-mobileirfjordynnservepicservequakeforli-cesena-forlicesenaforlillehammerfeste-ipimientaketomisatoolshimonosekikawaforsalegoismailillesandefjordynservebbservesarcasmileforsandasuolodingenfortalfortefosneshimosuwalkis-a-chefashionstorebaseljordyndns-serverisignfotrdynulvikatowicefoxn--2scrj9casinordlandurbanamexnetgamersapporomurafozfr-1fr-par-1fr-par-2franamizuhoboleslawiecommerce-shoppingyeongnamdinhachijohanamakisofukushimaoris-a-conservativegarsheiheijis-a-cparachutingfredrikstadynv6freedesktopazimuthaibinhphuocelotenkawakayamagnetcieszynh-servebeero-stageiseiroumugifuchungbukharag-cloud-championshiphoplixn--30rr7yfreemyiphosteurovisionredumbrellangevagrigentobishimadridvagsoygardenebakkeshibechambagricoharugbydgoszczecin-berlindasdaburfreesitefreetlshimotsukefreisennankokubunjis-a-cubicle-slavellinodeobjectshimotsumafrenchkisshikindleikangerfreseniushinichinanfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganshinjotelulubin-vpncateringebunkyonanaoshimamateramockashiwarafrognfrolandynvpnpluservicesevastopolitiendafrom-akamaized-stagingfrom-alfrom-arfrom-azurewebsiteshikagamiishibuyabukihokuizumobaragusabaerobaticketshinjukuleuvenicefrom-campobassociatest-iserveblogsytenrissadistdlibestadultrentin-sudtirolfrom-coachaseljeducationcillahppiacenzaganfrom-ctrentin-sued-tirolfrom-dcatfooddagestangefrom-decagliarikuzentakataikillfrom-flapymntrentin-suedtirolfrom-gap-east-1from-higashiagatsumagoianiafrom-iafrom-idyroyrvikingulenfrom-ilfrom-in-the-bandairtelebitbridgestonemurorangecloudplatform0from-kshinkamigototalfrom-kyfrom-langsonyantakahamalselveruminamiminowafrom-malvikaufentigerfrom-mdfrom-mein-vigorlicefrom-mifunefrom-mnfrom-modshinshinotsurgeryfrom
-mshinshirofrom-mtnfrom-ncatholicurus-4from-ndfrom-nefrom-nhs-heilbronnoysundfrom-njshintokushimafrom-nminamioguni5from-nvalledaostargithubusercontentrentino-a-adigefrom-nycaxiaskvollpagesardegnarutolgaulardalvivanovoldafrom-ohdancefrom-okegawassamukawataris-a-democratrentino-aadigefrom-orfrom-panasonichernovtsykkylvenneslaskerrylogisticsardiniafrom-pratohmamurogawatsonrenderfrom-ris-a-designerimarugame-hostyhostingfrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsiracusagaeroclubmedecin-addrammenuorodoyerfrom-val-daostavalleyfrom-vtrentino-alto-adigefrom-wafrom-wiardwebthingsjcbnpparibashkiriafrom-wvallee-aosteroyfrom-wyfrosinonefrostabackplaneapplebesbyengerdalp1froyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairtrafficplexus-2fujinomiyadapliefujiokazakinkobearalvahkikonaibetsubame-south-1fujisatoshoeshintomikasaharafujisawafujishiroishidakabiratoridediboxn--3bst00minamisanrikubetsupportrentino-altoadigefujitsuruokakamigaharafujiyoshidappnodearthainguyenfukayabeardubaikawagoefukuchiyamadatsunanjoburgfukudomigawafukuis-a-doctorfukumitsubishigakirkeneshinyoshitomiokamisatokamachippubetsuikitchenfukuokakegawafukuroishikariwakunigamigrationfukusakirovogradoyfukuyamagatakaharunusualpersonfunabashiriuchinadattorelayfunagatakahashimamakiryuohkurafunahashikamiamakusatsumasendaisenergyeongginowaniihamatamakinoharafundfunkfeuerfuoiskujukuriyamandalfuosskoczowindowskrakowinefurubirafurudonordreisa-hockeynutwentertainmentrentino-s-tirolfurukawajimangolffanshiojirishirifujiedafusoctrangfussagamiharafutabayamaguchinomihachimanagementrentino-stirolfutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinais-a-financialadvisor-aurdalfuturecmshioyamelhushirahamatonbetsurnadalfuturehostingfuturemailingfvghakuis-a-gurunzenhakusandnessjoenhaldenhalfmoonscalebookinghostedpictetrentino-sud-tirolhalsakakinokiaham-radio-opinbar1hamburghammarfeastasiahamurakamigoris-a-hard-workershiraokamisunagawahanamigawahanawahandavvesiidanangodaddyn-o-saurealestatefarmerseinehandcrafteducatorprojectrentino-sudtirolhangglidinghangoutrentino-sued-tirolhannannestadhannosegawahanoipinkazohanyuzenhappouzshiratakahagianghasamap-northeast-3hasaminami-alpshishikuis-a-hunterhashbanghasudazaifudaigodogadobeioruntimedio-campidano-mediocampidanomediohasura-appinokokamikoaniikappudopaashisogndalhasvikazteleportrentino-suedtirolhatogayahoooshikamagayaitakamoriokakudamatsuehatoyamazakitahiroshimarcheapartmentshisuifuettertdasnetzhatsukaichikaiseiyoichipshitaramahattfjelldalhayashimamotobusells-for-lesshizukuishimoichilloutsystemscloudsitehazuminobushibukawahelplfinancialhelsinkitakamiizumisanofidonnakamurataitogliattinnhemneshizuokamitondabayashiogamagoriziahemsedalhepforgeblockshoujis-a-knightpointtokaizukamaishikshacknetrentinoa-adigehetemlbfanhigashichichibuzentsujiiehigashihiroshimanehigashiizumozakitakatakanabeautychyattorneyagawakkanaioirasebastopoleangaviikadenagahamaroyhigashikagawahigashikagurasoedahigashikawakitaaikitakyushunantankazunovecorebungoonow-dnshowahigashikurumeinforumzhigashimatsushimarnardalhigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshowtimeloyhigashinarusells-for-uhigashinehigashiomitamanoshiroomghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitamihamadahigashitsunospamproxyhigashiurausukitamotosunnydayhigashiyamatokoriyamanashiibaclieu-1higashiyodogawahigashiyoshinogaris-a-landscaperspectakasakitanakagusukumoldeliveryhippyhiraizumisatohokkaidontexistmein-iservschulecznakaniikawatanagurahirakatashinagawahiranais-a-lawyerhir
arahiratsukaeruhirayaizuwakamatsubushikusakadogawahitachiomiyaginozawaonsensiositehitachiotaketakaokalmykiahitraeumtgeradegreehjartdalhjelmelandholyhomegoodshwinnersiiitesilkddiamondsimple-urlhomeipioneerhomelinkyard-cloudjiffyresdalhomelinuxn--3ds443ghomeofficehomesecuritymacaparecidahomesecuritypchiryukyuragiizehomesenseeringhomeskleppippugliahomeunixn--3e0b707ehondahonjyoitakarazukaluganskfh-muensterhornindalhorsells-itrentinoaadigehortendofinternet-dnsimplesitehospitalhotelwithflightsirdalhotmailhoyangerhoylandetakasagooglecodespotrentinoalto-adigehungyenhurdalhurumajis-a-liberalhyllestadhyogoris-a-libertarianhyugawarahyundaiwafuneis-very-evillasalleitungsenis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandoomdnstraceisk01isk02jenv-arubacninhbinhdinhktistoryjeonnamegawajetztrentinostiroljevnakerjewelryjgorajlljls-sto1jls-sto2jls-sto3jmpixolinodeusercontentrentinosud-tiroljnjcloud-ver-jpchitosetogitsuliguriajoyokaichibahcavuotnagaivuotnagaokakyotambabymilk3jozis-a-musicianjpnjprsolarvikhersonlanxessolundbeckhmelnitskiyamasoykosaigawakosakaerodromegalloabatobamaceratachikawafaicloudineencoreapigeekoseis-a-painterhostsolutionslupskhakassiakosheroykoshimizumakis-a-patsfankoshughesomakosugekotohiradomainstitutekotourakouhokumakogenkounosupersalevangerkouyamasudakouzushimatrixn--3pxu8khplaystation-cloudyclusterkozagawakozakis-a-personaltrainerkozowiosomnarviklabudhabikinokawachinaganoharamcocottekpnkppspbarcelonagawakepnord-odalwaysdatabaseballangenkainanaejrietisalatinabenogiehtavuoatnaamesjevuemielnombrendlyngen-rootaruibxos3-us-gov-west-1krasnikahokutokonamegatakatoris-a-photographerokussldkrasnodarkredstonekrelliankristiansandcatsoowitdkmpspawnextdirectrentinosudtirolkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-playershiftcryptonomichinomiyakekryminamiyamashirokawanabelaudnedalnkumamotoyamatsumaebashimofusakatakatsukis-a-republicanonoichinosekigaharakumanowtvaokumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-rockstarachowicekunitachiaraisaijolsterkunitomigusukukis-a-socialistgstagekunneppubtlsopotrentinosued-tirolkuokgroupizzakurgankurobegetmyipirangalluplidlugolekagaminorddalkurogimimozaokinawashirosatochiokinoshimagentositempurlkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusuppliesor-odalkutchanelkutnokuzumakis-a-techietipslzkvafjordkvalsundkvamsterdamnserverbaniakvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsor-varangermishimatsusakahogirlymisugitokorozawamitakeharamitourismartlabelingmitoyoakemiuramiyazurecontainerdpoliticaobangmiyotamatsukuris-an-actormjondalenmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsuuramoriyoshiminamiashigaramormonstermoroyamatsuzakis-an-actressmushcdn77-sslingmortgagemoscowithgoogleapiszmoseushimogosenmosjoenmoskenesorreisahayakawakamiichikawamisatottoris-an-anarchistjordalshalsenmossortlandmosviknx-serversusakiyosupabaseminemotegit-reposoruminanomoviemovimientokyotangotembaixadattowebhareidsbergmozilla-iotrentinosuedtirolmtranbytomaridagawalmartrentinsud-tirolmuikaminokawanishiaizubangemukoelnmunakatanemuosattemupkomatsushimassa-carrara-massacarraramassabuzzmurmanskomforbar2murotorcraftranakatombetsumy-gatewaymusashinodesakegawamuseumincomcastoripressorfoldmusicapetownnews-stagingmutsuzawamy-vigormy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoundcastorjdevcloudfunctionsokndalmydattolocalcertificationmyddnsgeekgalaxymydissentrentinsudtirolmydobissmarterth
anyoumydrobofageometre-experts-comptablesowamydspectruminisitemyeffectrentinsued-tirolmyfastly-edgekey-stagingmyfirewalledreplittlestargardmyforuminterecifedextraspace-to-rentalstomakomaibaramyfritzmyftpaccesspeedpartnermyhome-servermyjinomykolaivencloud66mymailermymediapchoseikarugalsacemyokohamamatsudamypeplatformsharis-an-artistockholmestrandmypetsphinxn--41amyphotoshibajddarvodkafjordvaporcloudmypictureshinomypsxn--42c2d9amysecuritycamerakermyshopblockspjelkavikommunalforbundmyshopifymyspreadshopselectrentinsuedtirolmytabitordermythic-beastspydebergmytis-a-anarchistg-buildermytuleap-partnersquaresindevicenzamyvnchoshichikashukudoyamakeuppermywirecipescaracallypoivronpokerpokrovskommunepolkowicepoltavalle-aostavernpomorzeszowithyoutuberspacekitagawaponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-bykleclerchitachinakagawaltervistaipeigersundynamic-dnsarlpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprgmrprimetelprincipeprivatelinkomonowruzhgorodeoprivatizehealthinsuranceprofesionalprogressivegasrlpromonza-e-della-brianzaptokuyamatsushigepropertysnesrvarggatrevisogneprotectionprotonetroandindependent-inquest-a-la-masionprudentialpruszkowiwatsukiyonotaireserve-onlineprvcyonabarumbriaprzeworskogpunyufuelpupulawypussycatanzarowixsitepvhachirogatakahatakaishimojis-a-geekautokeinotteroypvtrogstadpwchowderpzqhadanorthwesternmutualqldqotoyohashimotoshimaqponiatowadaqslgbtroitskomorotsukagawaqualifioapplatter-applatterplcube-serverquangngais-certifiedugit-pagespeedmobilizeroticaltanissettailscaleforcequangninhthuanquangtritonoshonais-foundationquickconnectromsakuragawaquicksytestreamlitapplumbingouvaresearchitectesrhtrentoyonakagyokutoyakomakizunokunimimatakasugais-an-engineeringquipelementstrippertuscanytushungrytuvalle-daostamayukis-into-animeiwamizawatuxfamilytuyenquangbinhthuantwmailvestnesuzukis-gonevestre-slidreggio-calabriavestre-totennishiawakuravestvagoyvevelstadvibo-valentiaavibovalentiavideovinhphuchromedicinagatorogerssarufutsunomiyawakasaikaitakokonoevinnicarbonia-iglesias-carboniaiglesiascarboniavinnytsiavipsinaapplurinacionalvirginanmokurennebuvirtual-userveexchangevirtualservervirtualuserveftpodhalevisakurais-into-carsnoasakuholeckodairaviterboliviajessheimmobilienvivianvivoryvixn--45br5cylvlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavmintsorocabalashovhachiojiyahikobierzycevologdanskoninjambylvolvolkswagencyouvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiechungnamdalseidfjordynathomebuiltwithdarkhangelskypecorittogojomeetoystre-slidrettozawawmemergencyahabackdropalermochizukikirarahkkeravjuwmflabsvalbardunloppadualstackomvuxn--3hcrj9chonanbuskerudynamisches-dnsarpsborgripeeweeklylotterywoodsidellogliastradingworse-thanhphohochiminhadselbuyshouseshirakolobrzegersundongthapmircloudletshiranukamishihorowowloclawekonskowolawawpdevcloudwpenginepoweredwphostedmailwpmucdnipropetrovskygearappodlasiellaknoluoktagajobojis-an-entertainerwpmudevcdnaccessojamparaglidingwritesthisblogoipodzonewroclawmcloudwsseoullensvanguardianwtcp4wtfastlylbanzaicloudappspotagereporthruherecreationinomiyakonojorpelandigickarasjohkameyamatotakadawuozuerichardlillywzmiuwajimaxn--4it797konsulatrobeepsondriobranconagareyamaizuruhrxn--4pvxs4allxn--54b7fta0ccistrondheimpertrixcdn77-secureadymadealstahaugesunderxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49citadelhichisochimkentozsdell-ogliastraderxn--5rtq34kontuminamiuonumatsunoxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264citicarrdrobakamaiorigin-stagingmxn--12co0c3b
4evalleaostaobaomoriguchiharaffleentrycloudflare-ipfstcgroupaaskimitsubatamibulsan-suedtirolkuszczytnoopscbgrimstadrrxn--80aaa0cvacationsvchoyodobashichinohealth-carereforminamidaitomanaustdalxn--80adxhksveioxn--80ao21axn--80aqecdr1axn--80asehdbarclaycards3-us-west-1xn--80aswgxn--80aukraanghkeliwebpaaskoyabeagleboardxn--8dbq2axn--8ltr62konyvelohmusashimurayamassivegridxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencowayxn--90a3academiamicable-modemoneyxn--90aeroportsinfolionetworkangerxn--90aishobaraxn--90amckinseyxn--90azhytomyrxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--asky-iraxn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakurastoragexn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsvelvikongsbergxn--bck1b9a5dre4civilaviationfabricafederation-webredirectmediatechnologyeongbukashiwazakiyosembokutamamuraxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptarumizusawaxn--blt-elabcienciamallamaceiobbcn-north-1xn--bmlo-graingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagesquare7xn--brum-voagatrustkanazawaxn--btsfjord-9zaxn--bulsan-sdtirol-nsbarefootballooningjovikarasjoketokashikiyokawaraxn--c1avgxn--c2br7gxn--c3s14misakis-a-therapistoiaxn--cck2b3baremetalombardyn-vpndns3-website-ap-northeast-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-into-cartoonsokamitsuexn--ciqpnxn--clchc0ea0b2g2a9gcdxn--czr694bargainstantcloudfrontdoorestauranthuathienhuebinordre-landiherokuapparochernigovernmentjeldsundiscordsays3-website-ap-southeast-1xn--czrs0trvaroyxn--czru2dxn--czrw28barrel-of-knowledgeapplinziitatebayashijonawatebizenakanojoetsumomodellinglassnillfjordiscordsezgoraxn--d1acj3barrell-of-knowledgecomputermezproxyzgorzeleccoffeedbackanagawarmiastalowa-wolayangroupars3-website-ap-southeast-2xn--d1alfaststacksevenassigdalxn--d1atrysiljanxn--d5qv7z876clanbibaiduckdnsaseboknowsitallxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koobindalxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmail-boxaxn--eckvdtc9dxn--efvn9svn-repostuff-4-salexn--efvy88haebaruericssongdalenviknaklodzkochikushinonsenasakuchinotsuchiurakawaxn--ehqz56nxn--elqq16hagakhanhhoabinhduongxn--eveni-0qa01gaxn--f6qx53axn--fct429kooris-a-nascarfanxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcleverappsassarinuyamashinazawaxn--fiq64barsycenterprisecloudcontrolappgafanquangnamasteigenoamishirasatochigifts3-website-eu-west-1xn--fiqs8swidnicaravanylvenetogakushimotoganexn--fiqz9swidnikitagatakkomaganexn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbsswiebodzindependent-commissionxn--forlcesena-c8axn--fpcrj9c3dxn--frde-granexn--frna-woaxn--frya-hraxn--fzc2c9e2clickrisinglesjaguarxn--fzys8d69uvgmailxn--g2xx48clinicasacampinagrandebungotakadaemongolianishitosashimizunaminamiawajikintuitoyotsukaidownloadrudtvsaogoncapooguyxn--gckr3f0fastvps-serveronakanotoddenxn--gecrj9cliniquedaklakasamatsudoesntexisteingeekasserversicherungroks-theatrentin-sud-tirolxn--ggaviika-8ya47hagebostadxn--gildeskl-g0axn--givuotna-8yandexcloudxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-into-gamessinamsosnowieconomiasadojin-dslattuminamitanexn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45brj9churcharterxn--gnstigliefern-wobihirosakikamijimayfirstorfjordxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3eveneswinoujsciencexn--h2brj9c8clothingdustdatadetectrani-andria-barletta-trani-andriaxn--h3cuzk1dienbienxn--h
bmer-xqaxn--hcesuolo-7ya35barsyonlinehimejiiyamanouchikujoinvilleirvikarasuyamashikemrevistathellequipmentjmaxxxjavald-aostatics3-website-sa-east-1xn--hebda8basicserversejny-2xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-k3swisstufftoread-booksnestudioxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyaotsusonoxn--io0a7is-leetrentinoaltoadigexn--j1adpohlxn--j1aefauskedsmokorsetagayaseralingenovaraxn--j1ael8basilicataniaxn--j1amhaibarakisosakitahatakamatsukawaxn--j6w193gxn--jlq480n2rgxn--jlster-byasakaiminatoyookananiimiharuxn--jrpeland-54axn--jvr189misasaguris-an-accountantsmolaquilaocais-a-linux-useranishiaritabashikaoizumizakitashiobaraxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45q11circlerkstagentsasayamaxn--koluokta-7ya57haiduongxn--kprw13dxn--kpry57dxn--kput3is-lostre-toteneis-a-llamarumorimachidaxn--krager-gyasugitlabbvieeexn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfastly-terrariuminamiiseharaxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasuokanmakiwakuratexn--kvnangen-k0axn--l-1fairwindsynology-diskstationxn--l1accentureklamborghinikkofuefukihabororosynology-dsuzakadnsaliastudynaliastrynxn--laheadju-7yatominamibosoftwarendalenugxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52basketballfinanzjaworznoticeableksvikaratsuginamikatagamilanotogawaxn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctulaspeziaxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacn-northwest-1xn--lten-granvindafjordxn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesxn--mgb9awbfbsbxn--1qqw23axn--mgba3a3ejtunesuzukamogawaxn--mgba3a4f16axn--mgba3a4fra1-deloittexn--mgba7c0bbn0axn--mgbaakc7dvfsxn--mgbaam7a8haiphongonnakatsugawaxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscountry-snowplowiczeladzlgleezeu-2xn--mgbai9azgqp6jelasticbeanstalkharkovalleeaostexn--mgbayh7gparasitexn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskopervikhmelnytskyivalleedaostexn--mgbqly7c0a67fbcngroks-thisayamanobeatsaudaxn--mgbqly7cvafricargoboavistanbulsan-sudtirolxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhauspostman-echofunatoriginstances3-website-us-east-1xn--mgbx4cd0abkhaziaxn--mix082fbx-osewienxn--mix891fbxosexyxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cnpyatigorskjervoyagexn--mkru45is-not-certifiedxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakuratanxn--mosjen-eyatsukannamihokksundxn--mot-tlavangenxn--mre-og-romsdal-qqbuservecounterstrikexn--msy-ula0hair-surveillancexn--mtta-vrjjat-k7aflakstadaokayamazonaws-cloud9guacuiababybluebiteckidsmynasushiobaracingrok-freeddnsfreebox-osascoli-picenogatabuseating-organicbcgjerdrumcprequalifymelbourneasypanelblagrarq-authgear-stagingjerstadeltaishinomakilovecollegefantasyleaguenoharauthgearappspacehosted-by-previderehabmereitattoolforgerockyombolzano-altoadigeorgeorgiauthordalandroideporteatonamidorivnebetsukubankanumazuryomitanocparmautocodebergamoarekembuchikumagayagawafflecelloisirs3-external-180reggioemiliaromagnarusawaustrheimbalsan-sudtirolivingitpagexlivornobserveregruhostingivestbyglandroverhalladeskjakamaiedge-stagingivingjemnes3-eu-west-2038xn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbgdty6ciscofreakamaihd-stagingriwataraindroppdalxn--nit22
5koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpiwatexn--nmesjevuemie-tcbalatinord-frontierxn--nnx388axn--nodessakurawebsozais-savedxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilynxn--4dbrk0cexn--o3cw4hakatanortonkotsunndalxn--o3cyx2axn--od0algardxn--od0aq3beneventodayusuharaxn--ogbpf8fldrvelvetromsohuissier-justicexn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfedjeezxn--p1ais-slickharkivallee-d-aostexn--pgbs0dhlx3xn--porsgu-sta26fedorainfraclouderaxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cnsauheradyndns-at-homedepotenzamamicrosoftbankasukabedzin-brbalsfjordietgoryoshiokanravocats3-fips-us-gov-west-1xn--qcka1pmcpenzapposxn--qqqt11misconfusedxn--qxa6axn--qxamunexus-3xn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-uberleetrentinos-tirolxn--rennesy-v1axn--rhkkervju-01afedorapeoplefrakkestadyndns-webhostingujogaszxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturalxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bentleyusuitatamotorsitestinglitchernihivgubs3-website-us-west-1xn--rros-graphicsxn--rskog-uuaxn--rst-0naturbruksgymnxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9cntraniandriabarlettatraniandriaxn--sandnessjen-ogbentrendhostingliwiceu-3xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4gbriminiserverxn--skierv-utazurestaticappspaceusercontentunkongsvingerxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navigationxn--slt-elabogadobeaemcloud-fr1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbeppublishproxyuufcfanirasakindependent-panelomonza-brianzaporizhzhedmarkarelianceu-4xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeskidyn-ip24xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bloggerxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbestbuyshoparenagasakikuchikuseihicampinashikiminohostfoldnavyuzawaxn--stre-toten-zcbetainaboxfuselfipartindependent-reviewegroweibolognagasukeu-north-1xn--t60b56axn--tckweddingxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbhzc66xn--trentin-sdtirol-7vbialystokkeymachineu-south-1xn--trentino-sd-tirol-c3bielawakuyachimataharanzanishiazaindielddanuorrindigenamerikawauevje-og-hornnes3-website-us-west-2xn--trentino-sdtirol-szbiella-speziaxn--trentinosd-tirol-rzbieszczadygeyachiyodaeguamfamscompute-1xn--trentinosdtirol-7vbievat-band-campaignieznoorstaplesakyotanabellunordeste-idclkarlsoyxn--trentinsd-tirol-6vbifukagawalbrzycharitydalomzaporizhzhiaxn--trentinsdtirol-nsbigv-infolkebiblegnicalvinklein-butterhcloudiscoursesalangenishigotpantheonsitexn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atventuresinstagingxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturindalxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbturystykaneyamazoexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccertmgreaterxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbiharstadotsubetsugarulezajskiervaksdalondonetskarmoyxn--vestvgy-ixa6oxn--vg-yiabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniaxn--vgan-qoaxn--vgsy-qoa0jellybeanxn--vgu402coguchikuzenishiwakinvestmentsaveincloudyndns-at-workisboringsakershusrcfdyndns-blogsitexn--vhquvestfoldxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqa
dxn--vry-yla5gxn--vuq861bihoronobeokagakikugawalesundiscoverdalondrinaplesknsalon-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1communexn--wgbl6axn--xhq521bikedaejeonbuk0xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubackyardshiraois-a-greenxn--y9a3aquarelleasingxn--yer-znavois-very-badxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4it168dxn--ystre-slidre-ujbiofficialorenskoglobodoes-itcouldbeworldishangrilamdongnairkitapps-audibleasecuritytacticsxn--0trq7p7nnishiharaxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bipartsaloonishiizunazukindustriaxnbayernxz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 000000000..d56e9e762 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,203 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. +// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". +// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". +// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). +// +// All of these domains have the same eTLD+1: +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// +// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". +// +// There is no closed form algorithm to calculate the eTLD of a domain. +// Instead, the calculation is data driven. This package provides a +// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at +// https://publicsuffix.org/ +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. 
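+//
+// An illustrative use with net/http/cookiejar:
+//
+//	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})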
+var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is either a +// privately managed domain (and in practice, not a top level domain) or an +// unmanaged top level domain (and not explicitly mentioned in the +// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN +// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and +// "cromulent" is an unmanaged top level domain. +// +// Use cases for distinguishing ICANN domains like "foo.com" from private +// domains like "foo.appspot.com" can be found at +// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases +func PublicSuffix(domain string) (publicSuffix string, icann bool) { + lo, hi := uint32(0), uint32(numTLD) + s, suffix, icannNode, wildcard := domain, len(domain), false, false +loop: + for { + dot := strings.LastIndex(s, ".") + if wildcard { + icann = icannNode + suffix = 1 + dot + } + if lo == hi { + break + } + f := find(s[1+dot:], lo, hi) + if f == notFound { + break + } + + u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength)) + icannNode = u&(1<<nodesBitsICANN-1) != 0 + u >>= nodesBitsICANN + u = children.get(u & (1<<nodesBitsChildren - 1)) + lo = u & (1<<childrenBitsLo - 1) + u >>= childrenBitsLo + hi = u & (1<<childrenBitsHi - 1) + u >>= childrenBitsHi + switch u & (1<<childrenBitsNodeType - 1) { + case nodeTypeNormal: + suffix = 1 + dot + case nodeTypeException: + suffix = 1 + len(s) + break loop + } + u >>= childrenBitsNodeType + wildcard = u&(1<<childrenBitsWildcard-1) != 0 + if !wildcard { + icann = icannNode + } + + if dot == -1 { + break + } + s = s[:dot] + } + if suffix == len(domain) { + // If no rules match, the prevailing rule is "*". + return domain[1+strings.LastIndex(domain, "."):], icann + } + return domain[suffix:], icann +} + +// EffectiveTLDPlusOne returns the effective top level domain plus one more +// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org". +func EffectiveTLDPlusOne(domain string) (string, error) { + if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") || strings.Contains(domain, "..") { + return "", fmt.Errorf("publicsuffix: empty label in domain %q", domain) + } + + suffix, _ := PublicSuffix(domain) + if len(domain) <= len(suffix) { + return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain) + } + i := len(domain) - len(suffix) - 1 + if domain[i] != '.' { + return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain) + } + return domain[:i+1+len(suffix)], nil +} + +type uint32String string + +func (u uint32String) get(i uint32) uint32 { + off := i * 4 + return (uint32(u[off])<<24 | + uint32(u[off+1])<<16 | + uint32(u[off+2])<<8 | + uint32(u[off+3])) +} + +type uint40String string + +func (u uint40String) get(i uint32) uint64 { + off := uint64(i) * (nodesBits / 8) + return uint64(u[off])<<32 | + uint64(u[off+1])<<24 | + uint64(u[off+2])<<16 | + uint64(u[off+3])<<8 | + uint64(u[off+4]) +} + +// find returns the index of the node in the range [lo, hi) whose label equals +// label, or notFound if there is no such node. The range is assumed to be in +// strictly increasing node label order. +func find(label string, lo, hi uint32) uint32 { + for lo < hi { + mid := lo + (hi-lo)/2 + s := nodeLabel(mid) + if s < label { + lo = mid + 1 + } else if s == label { + return mid + } else { + hi = mid + } + } + return notFound +} + +// nodeLabel returns the label for the i'th node. +func nodeLabel(i uint32) string { + x := nodes.get(i) + length := x & (1<<nodesBitsTextLength - 1) + x >>= nodesBitsTextLength + offset := x & (1<<nodesBitsTextOffset - 1) + return text[offset : offset+length] +} >start) & ((1 << (end - start + 1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..22cc99844 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 000000000..6ac6e1efb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
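For reviewers unfamiliar with this newly vendored `publicsuffix` package, a minimal usage sketch follows. It is not part of the diff; the domain names are illustrative only, and the expected outputs are taken from the package documentation above:

```go
package main

import (
	"fmt"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// PublicSuffix consults the PSL snapshot compiled into the package.
	ps, icann := publicsuffix.PublicSuffix("foo.blogspot.co.uk")
	fmt.Println(ps, icann) // "blogspot.co.uk" false (privately managed suffix)

	// EffectiveTLDPlusOne returns the registrable domain (eTLD+1).
	etld1, _ := publicsuffix.EffectiveTLDPlusOne("www.books.amazon.co.uk")
	fmt.Println(etld1) // "amazon.co.uk"

	// List satisfies cookiejar.PublicSuffixList, so cookie jars can
	// partition cookies by eTLD+1.
	jar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	_ = jar
}
```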
+ +//go:build gc + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..c8ae6ddc1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList +func kmQuery() queryResult +func kmcQuery() queryResult +func kmctrQuery() queryResult +func kmaQuery() queryResult +func kimdQuery() queryResult +func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go new file mode 100644 index 000000000..910728fb1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gc + +package cpu + +// cpuid is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) + +// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// and in cpu_gccgo.c for gccgo. +func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 000000000..7f1946780 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 000000000..9526d2ce3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. 
+func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c new file mode 100644 index 000000000..3f73a05dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +#include <cpuid.h> +#include <stdint.h> +#include <x86intrin.h> + +// Need to wrap __get_cpuid_count because it's declared as static. +int +gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf, + uint32_t *eax, uint32_t *ebx, + uint32_t *ecx, uint32_t *edx) +{ + return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx); +} + +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC push_options +#pragma GCC target("xsave") +#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function) + +// xgetbv reads the contents of an XCR (Extended Control Register) +// specified in the ECX register into registers EDX:EAX. +// Currently, the only supported value for XCR is 0. +void +gccgoXgetbv(uint32_t *eax, uint32_t *edx) +{ + uint64_t v = _xgetbv(0); + *eax = v & 0xffffffff; + *edx = v >> 32; +} + +#pragma clang attribute pop +#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go new file mode 100644 index 000000000..99c60fe9f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (386 || amd64 || amd64p32) && gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} + +// gccgo doesn't build on Darwin, per: +// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 +func darwinSupportsAVX512() bool { + return false +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..743eb5435 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +//go:build !386 && !amd64 && !amd64p32 && !arm64 + +package cpu + +func archInit() { + if err := readHWCAP(); err != nil { + return + } + doinit() + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go new file mode 100644 index 000000000..2057006dc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +func doinit() { + ARM.HasSWP = isSet(hwCap, hwcap_SWP) + ARM.HasHALF = isSet(hwCap, hwcap_HALF) + ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) + ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) + ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) + ARM.HasFPA = isSet(hwCap, hwcap_FPA) + ARM.HasVFP = isSet(hwCap, hwcap_VFP) + ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) + ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) + ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) + ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) + ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) + ARM.HasNEON = isSet(hwCap, hwcap_NEON) + ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) + ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) + ARM.HasTLS = isSet(hwCap, hwcap_TLS) + ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) + ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) + ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) + ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) + ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM.HasAES = isSet(hwCap2, hwcap2_AES) + ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) + ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) + ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) + ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..08f35ea17 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "strings" + "syscall" +) + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 + + hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 +) + +// linuxKernelCanEmulateCPUID reports whether we're running +// on Linux 4.11+. Ideally we'd like to ask the question about +// whether the current kernel contains +// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 +// but the version number will have to do. 
+func linuxKernelCanEmulateCPUID() bool { + var un syscall.Utsname + syscall.Uname(&un) + var sb strings.Builder + for _, b := range un.Release[:] { + if b == 0 { + break + } + sb.WriteByte(byte(b)) + } + major, minor, _, ok := parseRelease(sb.String()) + return ok && (major > 4 || major == 4 && minor >= 11) +} + +func doinit() { + if err := readHWCAP(); err != nil { + // We failed to read /proc/self/auxv. This can happen if the binary has + // been given extra capabilities(7) with /bin/setcap. + // + // When this happens, we have two options. If the Linux kernel is new + // enough (4.11+), we can read the arm64 registers directly which'll + // trap into the kernel and then return back to userspace. + // + // But on older kernels, such as Linux 4.4.180 as used on many Synology + // devices, calling readARM64Registers (specifically getisar0) will + // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo + // instead. + // + // See golang/go#57336. + if linuxKernelCanEmulateCPUID() { + readARM64Registers() + } else { + readLinuxProcCPUInfo() + } + return + } + + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) + + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..4686c1d54 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (mips64 || mips64le) + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 000000000..7d902b684 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
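Not part of the diff, but for orientation: the `ARM.Has*`/`ARM64.Has*` booleans populated from HWCAP above are the package's public API, and callers gate optimized code paths on them. A hedged sketch (the particular features tested here are an arbitrary choice):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// cpu.Initialized reports whether feature detection actually ran
	// (for example, whether auxv was readable); the Has* flags are then
	// used to select an implementation.
	if cpu.Initialized && cpu.ARM64.HasAES && cpu.ARM64.HasPMULL {
		fmt.Println("hardware AES/PMULL path available")
	} else {
		fmt.Println("using generic fallback")
	}
}
```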
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..197188e67 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,30 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && (ppc64 || ppc64le) + +package cpu + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 000000000..cb4a0c572 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. 
HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. 
We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..1517ac61d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +func initS390Xbase() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go new file mode 100644 index 000000000..558635850 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..fedb00cc4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips64 || mips64le + +package cpu + +const cacheLineSize = 32 + +func initOptions() { + options = []option{ + {Name: "msa", Feature: &MIPS64X.HasMSA}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000..ffb4ec7eb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build mips || mipsle + +package cpu + +const cacheLineSize = 32 + +func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go new file mode 100644 index 000000000..ebfb3fc8e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,173 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + _CTL_QUERY = -2 + + _SYSCTL_VERS_1 = 0x1000000 +) + +var _zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +type sysctlNode struct { + Flags uint32 + Num int32 + Name [32]int8 + Ver uint32 + __rsvd uint32 + Un [16]byte + _sysctl_size [8]byte + _sysctl_func [8]byte + _sysctl_parent [8]byte + _sysctl_desc [8]byte +} + +func sysctlNodes(mib []int32) ([]sysctlNode, error) { + var olen uintptr + + // Get a list of all sysctl nodes below the given MIB by performing + // a sysctl for the given MIB with CTL_QUERY appended. + mib = append(mib, _CTL_QUERY) + qnode := sysctlNode{Flags: _SYSCTL_VERS_1} + qp := (*byte)(unsafe.Pointer(&qnode)) + sz := unsafe.Sizeof(qnode) + if err := sysctl(mib, nil, &olen, qp, sz); err != nil { + return nil, err + } + + // Now that we know the size, get the actual nodes. + nodes := make([]sysctlNode, olen/sz) + np := (*byte)(unsafe.Pointer(&nodes[0])) + if err := sysctl(mib, np, &olen, qp, sz); err != nil { + return nil, err + } + + return nodes, nil +} + +func nametomib(name string) ([]int32, error) { + // Split name into components. + var parts []string + last := 0 + for i := 0; i < len(name); i++ { + if name[i] == '.' { + parts = append(parts, name[last:i]) + last = i + 1 + } + } + parts = append(parts, name[last:]) + + mib := []int32{} + // Discover the nodes and construct the MIB OID. 
+ for partno, part := range parts { + nodes, err := sysctlNodes(mib) + if err != nil { + return nil, err + } + for _, node := range nodes { + n := make([]byte, 0) + for i := range node.Name { + if node.Name[i] != 0 { + n = append(n, byte(node.Name[i])) + } + } + if string(n) == part { + mib = append(mib, int32(node.Num)) + break + } + } + if len(mib) != partno+1 { + return nil, err + } + } + + return mib, nil +} + +// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's <aarch64/armreg.h> +type aarch64SysctlCPUID struct { + midr uint64 /* Main ID Register */ + revidr uint64 /* Revision ID Register */ + mpidr uint64 /* Multiprocessor Affinity Register */ + aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ + aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ + aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ + aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ + aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ + aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ + aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ + aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ + aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ + aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ + mvfr0 uint32 /* Media and VFP Feature Register 0 */ + mvfr1 uint32 /* Media and VFP Feature Register 1 */ + mvfr2 uint32 /* Media and VFP Feature Register 2 */ + pad uint32 + clidr uint64 /* Cache Level ID Register */ + ctr uint64 /* Cache Type Register */ +} + +func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { + mib, err := nametomib(name) + if err != nil { + return nil, err + } + + out := aarch64SysctlCPUID{} + n := unsafe.Sizeof(out) + _, _, errno := syscall.Syscall6( + syscall.SYS___SYSCTL, + uintptr(unsafe.Pointer(&mib[0])), + uintptr(len(mib)), + uintptr(unsafe.Pointer(&out)), + uintptr(unsafe.Pointer(&n)), + uintptr(0), + uintptr(0)) + if errno != 0 { + return nil, errno + } + return &out, nil +} + +func doinit() { + cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") + if err != nil { + setMinimalFeatures() + return + } + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go new file mode 100644 index 000000000..85b64d5cc --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,65 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// Minimal copy of functionality from x/sys/unix so the cpu package can call +// sysctl without depending on x/sys/unix. + +const ( + // From OpenBSD's sys/sysctl.h. + _CTL_MACHDEP = 7 + + // From OpenBSD's machine/cpu.h.
+ _CPU_ID_AA64ISAR0 = 2 + _CPU_ID_AA64ISAR1 = 3 +) + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 + +func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if errno != 0 { + return errno + } + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +func sysctlUint64(mib []uint32) (uint64, bool) { + var out uint64 + nout := unsafe.Sizeof(out) + if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { + return 0, false + } + return out, true +} + +func doinit() { + setMinimalFeatures() + + // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. + isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) + if !ok { + return + } + isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) + if !ok { + return + } + parseARM64SystemRegisters(isar0, isar1, 0) + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s new file mode 100644 index 000000000..054ba05d6 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) + +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go new file mode 100644 index 000000000..e9ecf2a45 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -0,0 +1,9 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && arm + +package cpu + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 000000000..5341e7f88 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && !netbsd && !openbsd && arm64 + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go new file mode 100644 index 000000000..5f8f2419a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !linux && (mips64 || mips64le) + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go new file mode 100644 index 000000000..89608fba2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -0,0 +1,12 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !aix && !linux && (ppc64 || ppc64le) + +package cpu + +func archInit() { + PPC64.IsPOWER8 = true + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go new file mode 100644 index 000000000..5ab87808f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -0,0 +1,11 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux && riscv64 + +package cpu + +func archInit() { + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go new file mode 100644 index 000000000..c14f12b14 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -0,0 +1,16 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 || ppc64le + +package cpu + +const cacheLineSize = 128 + +func initOptions() { + options = []option{ + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..aca3199c9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 + +package cpu + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go new file mode 100644 index 000000000..5881b8833 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.go @@ -0,0 +1,172 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const cacheLineSize = 256 + +func initOptions() { + options = []option{ + {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, + {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, + {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, + {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3EH}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "aes", Feature: &S390X.HasAES}, + {Name: "aescbc", Feature: &S390X.HasAESCBC}, + {Name: "aesctr", Feature: &S390X.HasAESCTR}, + {Name: "aesgcm", Feature: &S390X.HasAESGCM}, + {Name: "ghash", Feature: &S390X.HasGHASH}, + {Name: "sha1", Feature: &S390X.HasSHA1}, + {Name: "sha256", Feature: &S390X.HasSHA256}, + {Name: "sha3", Feature: &S390X.HasSHA3}, + {Name: "sha512", Feature: &S390X.HasSHA512}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "vxe", Feature: &S390X.HasVXE}, + } +} + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // mandatory facilities + zarch facility = 1 // z architecture mode is active + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate + + // miscellaneous facilities + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement + + // cryptography facilities + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + msa9 facility = 155 // message-security-assist extension 9 + + // vector facilities + vx facility = 129 // vector facility + vxe facility = 135 // vector-enhancements 1 + vxe2 facility = 148 // vector-enhancements 2 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. 
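A note for reviewers before the `Has` method continues below: the MSB-first bit numbering used by `bitIsSet` (and by the `facility`/`function` codes) is the usual stumbling block in this file. A self-contained sketch of the same helper with hand-checked expectations, not part of the diff:

```go
package main

import "fmt"

// Same logic as the vendored bitIsSet: bits are numbered in big endian
// order, so index 0 is the leftmost (most significant) bit of word 0.
func bitIsSet(bits []uint64, index uint) bool {
	return bits[index/64]&((1<<63)>>(index%64)) != 0
}

func main() {
	bits := []uint64{1 << 63, 0, 0, 0} // only bit index 0 is set
	fmt.Println(bitIsSet(bits, 0))  // true: facility 0 is the leftmost bit
	fmt.Println(bitIsSet(bits, 63)) // false: rightmost bit of word 0
	fmt.Println(bitIsSet(bits, 64)) // false: leftmost bit of word 1
}
```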
+func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + initS390Xbase() + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) + } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..1fb4b7013 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff 
--git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..384787ea3 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func initOptions() {} + +func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..c29f5e4c5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64 || amd64p32 + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func initOptions() { + options = []option{ + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "avx512", Feature: &X86.HasAVX512}, + {Name: "avx512f", Feature: &X86.HasAVX512F}, + {Name: "avx512cd", Feature: &X86.HasAVX512CD}, + {Name: "avx512er", Feature: &X86.HasAVX512ER}, + {Name: "avx512pf", Feature: &X86.HasAVX512PF}, + {Name: "avx512vl", Feature: &X86.HasAVX512VL}, + {Name: "avx512bw", Feature: &X86.HasAVX512BW}, + {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, + {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, + {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, + {Name: "avx5124vnniw", Feature: &X86.HasAVX5124VNNIW}, + {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, + {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, + {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, + {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, + {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, + {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, + {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, + {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, + {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "cx16", Feature: &X86.HasCX16}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "osxsave", Feature: &X86.HasOSXSAVE}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "rdrand", Feature: &X86.HasRDRAND}, + {Name: "rdseed", Feature: &X86.HasRDSEED}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64: + {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, + } +} + +func archInit() { + + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1)
+ X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasCX16 = isSet(13, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + var osSupportsAVX, osSupportsAVX512 bool + // For XGETBV, OSXSAVE bit is required and sufficient. + if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + + if runtime.GOOS == "darwin" { + // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. + // Since users can't rely on mask register contents, let's not advertise AVX-512 support. + // See issue 49233. + osSupportsAVX512 = false + } else { + // Check if OPMASK and ZMM registers have OS support. + osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) + } + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, ecx7, edx7 := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) + + X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension + if X86.HasAVX512 { + X86.HasAVX512F = true + X86.HasAVX512CD = isSet(28, ebx7) + X86.HasAVX512ER = isSet(27, ebx7) + X86.HasAVX512PF = isSet(26, ebx7) + X86.HasAVX512VL = isSet(31, ebx7) + X86.HasAVX512BW = isSet(30, ebx7) + X86.HasAVX512DQ = isSet(17, ebx7) + X86.HasAVX512IFMA = isSet(21, ebx7) + X86.HasAVX512VBMI = isSet(1, ecx7) + X86.HasAVX5124VNNIW = isSet(2, edx7) + X86.HasAVX5124FMAPS = isSet(3, edx7) + X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) + X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) + X86.HasAVX512VNNI = isSet(11, ecx7) + X86.HasAVX512GFNI = isSet(8, ecx7) + X86.HasAVX512VAES = isSet(9, ecx7) + X86.HasAVX512VBMI2 = isSet(6, ecx7) + X86.HasAVX512BITALG = isSet(12, ecx7) + + eax71, _, _, _ := cpuid(7, 1) + X86.HasAVX512BF16 = isSet(5, eax71) + } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<<bitpos) != 0 +} > 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + + buf, err := os.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again.
+ return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go new file mode 100644 index 000000000..762b63d68 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/parse.go @@ -0,0 +1,43 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "strconv" + +// parseRelease parses a dot-separated version number. It follows the semver +// syntax, but allows the minor and patch versions to be elided. +// +// This is a copy of the Go runtime's parseRelease from +// https://golang.org/cl/209597. +func parseRelease(rel string) (major, minor, patch int, ok bool) { + // Strip anything after a dash or plus. + for i := 0; i < len(rel); i++ { + if rel[i] == '-' || rel[i] == '+' { + rel = rel[:i] + break + } + } + + next := func() (int, bool) { + for i := 0; i < len(rel); i++ { + if rel[i] == '.' { + ver, err := strconv.Atoi(rel[:i]) + rel = rel[i+1:] + return ver, err == nil + } + } + ver, err := strconv.Atoi(rel) + rel = "" + return ver, err == nil + } + if major, ok = next(); !ok || rel == "" { + return + } + if minor, ok = next(); !ok || rel == "" { + return + } + patch, ok = next() + return +} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go new file mode 100644 index 000000000..4cd64c704 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go @@ -0,0 +1,53 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && arm64 + +package cpu + +import ( + "errors" + "io" + "os" + "strings" +) + +func readLinuxProcCPUInfo() error { + f, err := os.Open("/proc/cpuinfo") + if err != nil { + return err + } + defer f.Close() + + var buf [1 << 10]byte // enough for first CPU + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + in := string(buf[:n]) + const features = "\nFeatures : " + i := strings.Index(in, features) + if i == -1 { + return errors.New("no CPU features found") + } + in = in[i+len(features):] + if i := strings.Index(in, "\n"); i != -1 { + in = in[:i] + } + m := map[string]*bool{} + + initOptions() // need it early here; it's harmless to call twice + for _, o := range options { + m[o.Name] = o.Feature + } + // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". + m["evtstrm"] = &ARM64.HasEVTSTRM + + for _, f := range strings.Fields(in) { + if p, ok := m[f]; ok { + *p = true + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 000000000..5f92ac9a2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
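`parseRelease` above is what `linuxKernelCanEmulateCPUID` uses to compare kernel versions. Hand-checked expectations against the implementation, as if written alongside parse.go (the function is unexported, and these inputs are illustrative only):

```go
package cpu

// Example expectations for parseRelease:
//
//	parseRelease("5.15.0-91-generic") // → 5, 15, 0, true ("-91-generic" is stripped)
//	parseRelease("4.11")              // → 4, 11, 0, true (patch may be elided)
//	parseRelease("4")                 // → 4, 0, 0, true (minor and patch elided)
//	parseRelease("mainline")          // → ok == false
```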
+ +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 000000000..4c9788ea8 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 000000000..1b9ccb091 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,26 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not used a CGo method. + +//go:build aix && gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go new file mode 100644 index 000000000..e8b6cdbe9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on AIX without depending on x/sys/unix. +// (See golang.org/issue/32102) + +//go:build aix && ppc64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" + +//go:linkname libc_getsystemcfg libc_getsystemcfg + +type syscallFunc uintptr + +var libc_getsystemcfg syscallFunc + +type errno = syscall.Errno + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno) + +func callgetsystemcfg(label int) (r1 uintptr, e1 errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0) + return +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 000000000..fd8632444 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. 
+	// During this time, this goroutine cannot migrate away from
+	// its current thread. See https://golang.org/issue/49320 and
+	// https://golang.org/issue/49466.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	names := make([]string, 0)
+	// Registry key names are limited to 255 characters, as described at:
+	// https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+	buf := make([]uint16, 256) // plus extra room for the terminating zero
+loopItems:
+	for i := uint32(0); ; i++ {
+		if n > 0 {
+			if len(names) == n {
+				return names, nil
+			}
+		}
+		l := uint32(len(buf))
+		for {
+			err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+			if err == nil {
+				break
+			}
+			if err == syscall.ERROR_MORE_DATA {
+				// Double buffer size and try again.
+				l = uint32(2 * len(buf))
+				buf = make([]uint16, l)
+				continue
+			}
+			if err == _ERROR_NO_MORE_ITEMS {
+				break loopItems
+			}
+			return names, err
+		}
+		names = append(names, syscall.UTF16ToString(buf[:l]))
+	}
+	if n > len(names) {
+		return names, io.EOF
+	}
+	return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+	var h syscall.Handle
+	var d uint32
+	err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+		0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+	if err != nil {
+		return 0, false, err
+	}
+	return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+	return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+	SubKeyCount     uint32
+	MaxSubKeyLen    uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+	ValueCount      uint32
+	MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+	MaxValueLen     uint32 // longest data component among the key's values, in bytes
+	lastWriteTime   syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+	return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+	var ki KeyInfo
+	err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+		&ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+		&ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+	if err != nil {
+		return nil, err
+	}
+	return &ki, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 000000000..bbf86ccf0
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
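Putting the key.go API above together: a small hypothetical Windows-only program that creates a key with CreateKey and then lists subkey names with ReadSubKeyNames (the `SOFTWARE\DemoApp` path is made up):

// Hypothetical example program (Windows only).
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, existed, err := registry.CreateKey(registry.CURRENT_USER,
		`SOFTWARE\DemoApp`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()
	fmt.Println("key already existed:", existed)

	sw, err := registry.OpenKey(registry.CURRENT_USER, `SOFTWARE`,
		registry.ENUMERATE_SUB_KEYS)
	if err != nil {
		log.Fatal(err)
	}
	defer sw.Close()

	names, err := sw.ReadSubKeyNames(-1) // n <= 0 returns every name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(names), "subkeys under HKCU\\SOFTWARE")
}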
+ +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 000000000..f533091c1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 000000000..74db26b94 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. 
If buf is too small to fit the stored value it returns
+// ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and
+// the required buffer size in n.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low-level function. If the value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return 0, 0, err
+	}
+	var pbuf *byte
+	if len(buf) > 0 {
+		pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+	}
+	l := uint32(len(buf))
+	err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+	if err != nil {
+		return int(l), valtype, err
+	}
+	return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+	p, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return nil, 0, err
+	}
+	var t uint32
+	n := uint32(len(buf))
+	for {
+		err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+		if err == nil {
+			return buf[:n], t, nil
+		}
+		if err != syscall.ERROR_MORE_DATA {
+			return nil, 0, err
+		}
+		if n <= uint32(len(buf)) {
+			return nil, 0, err
+		}
+		buf = make([]byte, n)
+	}
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+	data, typ, err2 := k.getValue(name, make([]byte, 64))
+	if err2 != nil {
+		return "", typ, err2
+	}
+	switch typ {
+	case SZ, EXPAND_SZ:
+	default:
+		return "", typ, ErrUnexpectedType
+	}
+	if len(data) == 0 {
+		return "", typ, nil
+	}
+	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+	return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+	pname, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+
+	buf := make([]uint16, 1024)
+	var buflen uint32
+	var pdir *uint16
+
+	err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+	if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+		// Try to resolve the string value using the system directory as
+		// a DLL search path; this assumes the string value is of the form
+		// @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+		// This approach works with tzres.dll but may have to be revised
+		// in the future to allow callers to provide custom search paths.
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
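The string getters above pair naturally with ExpandString when a value may be stored as EXPAND_SZ. A short sketch (hypothetical helper; name is any value name on an open key):

// expandedString is a hypothetical helper built on GetStringValue/ExpandString.
func expandedString(k registry.Key, name string) (string, error) {
	val, valtype, err := k.GetStringValue(name)
	if err != nil {
		return "", err
	}
	if valtype == registry.EXPAND_SZ {
		// Resolve %VAR% references against the current environment.
		return registry.ExpandString(val)
	}
	return val, nil
}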
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 000000000..fc1835d8a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) 
(regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go new file mode 100644 index 000000000..e7d3805e3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -0,0 +1,235 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/httpbody.proto + +package httpbody + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Message that represents an arbitrary HTTP body. It should only be used for +// payload formats that can't be represented as JSON, such as raw binary or +// an HTML page. +// +// This message can be used both in streaming and non-streaming API methods in +// the request as well as the response. +// +// It can be used as a top-level request field, which is convenient if one +// wants to extract parameters from either the URL or HTTP template into the +// request fields and also want access to the raw HTTP body. +// +// Example: +// +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; +// +// // The raw HTTP body is bound to this field. +// google.api.HttpBody http_body = 2; +// +// } +// +// service ResourceService { +// rpc GetResource(GetResourceRequest) +// returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) +// returns (google.protobuf.Empty); +// +// } +// +// Example with streaming methods: +// +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// +// } +// +// Use of this type only changes how the request and response bodies are +// handled, all other features will continue to work unchanged. +type HttpBody struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP Content-Type header value specifying the content type of the body. + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The HTTP request/response body as raw binary. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Application specific response metadata. Must be set in the first response + // for streaming APIs. + Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *HttpBody) Reset() { + *x = HttpBody{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_httpbody_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpBody) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpBody) ProtoMessage() {} + +func (x *HttpBody) ProtoReflect() protoreflect.Message { + mi := &file_google_api_httpbody_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead. 
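On the Go side the message is built like any generated type. A hedged sketch of a server method declared, per the doc comment above, to return google.api.HttpBody; the server type and its raw field are made up:

// Hypothetical gRPC method returning raw bytes.
func (s *server) GetResource(ctx context.Context, req *GetResourceRequest) (*httpbody.HttpBody, error) {
	return &httpbody.HttpBody{
		ContentType: "application/octet-stream",
		Data:        s.raw, // made-up payload field
	}, nil
}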
+func (*HttpBody) Descriptor() ([]byte, []int) { + return file_google_api_httpbody_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpBody) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *HttpBody) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *HttpBody) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +var File_google_api_httpbody_proto protoreflect.FileDescriptor + +var file_google_api_httpbody_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, + 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_httpbody_proto_rawDescOnce sync.Once + file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc +) + +func file_google_api_httpbody_proto_rawDescGZIP() []byte { + file_google_api_httpbody_proto_rawDescOnce.Do(func() { + file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData) + }) + return file_google_api_httpbody_proto_rawDescData +} + +var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_httpbody_proto_goTypes = []interface{}{ + (*HttpBody)(nil), // 0: google.api.HttpBody + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_api_httpbody_proto_depIdxs = []int32{ + 1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_httpbody_proto_init() } +func 
file_google_api_httpbody_proto_init() { + if File_google_api_httpbody_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpBody); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_httpbody_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_httpbody_proto_goTypes, + DependencyIndexes: file_google_api_httpbody_proto_depIdxs, + MessageInfos: file_google_api_httpbody_proto_msgTypes, + }.Build() + File_google_api_httpbody_proto = out.File + file_google_api_httpbody_proto_rawDesc = nil + file_google_api_httpbody_proto_goTypes = nil + file_google_api_httpbody_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 000000000..3e5621827 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,1314 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/rpc/error_details.proto + +package errdetails + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. 
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +type RetryInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clients should wait at least this long between retrying the same request. + RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` +} + +func (x *RetryInfo) Reset() { + *x = RetryInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryInfo) ProtoMessage() {} + +func (x *RetryInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. +func (*RetryInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} +} + +func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { + if x != nil { + return x.RetryDelay + } + return nil +} + +// Describes additional debugging info. 
+type DebugInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` +} + +func (x *DebugInfo) Reset() { + *x = DebugInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DebugInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DebugInfo) ProtoMessage() {} + +func (x *DebugInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. +func (*DebugInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} +} + +func (x *DebugInfo) GetStackEntries() []string { + if x != nil { + return x.StackEntries + } + return nil +} + +func (x *DebugInfo) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryInfo and Help types for other details about handling a +// quota failure. +type QuotaFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *QuotaFailure) Reset() { + *x = QuotaFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure) ProtoMessage() {} + +func (x *QuotaFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} +} + +func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes what preconditions have failed. 
+// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *PreconditionFailure) Reset() { + *x = PreconditionFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure) ProtoMessage() {} + +func (x *PreconditionFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead. +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4} +} + +func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all violations in a client request. + FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` +} + +func (x *BadRequest) Reset() { + *x = BadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest) ProtoMessage() {} + +func (x *BadRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead. +func (*BadRequest) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5} +} + +func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if x != nil { + return x.FieldViolations + } + return nil +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. 
+ RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` +} + +func (x *RequestInfo) Reset() { + *x = RequestInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestInfo) ProtoMessage() {} + +func (x *RequestInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead. +func (*RequestInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6} +} + +func (x *RequestInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *RequestInfo) GetServingData() string { + if x != nil { + return x.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". + ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. 
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *ResourceInfo) Reset() { + *x = ResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInfo) ProtoMessage() {} + +func (x *ResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceInfo) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceInfo) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *ResourceInfo) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *ResourceInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *Help) Reset() { + *x = Help{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help) ProtoMessage() {} + +func (x *Help) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help.ProtoReflect.Descriptor instead. +func (*Help) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} +} + +func (x *Help) GetLinks() []*Help_Link { + if x != nil { + return x.Links + } + return nil +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *LocalizedMessage) Reset() { + *x = LocalizedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalizedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalizedMessage) ProtoMessage() {} + +func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} +} + +func (x *LocalizedMessage) GetLocale() string { + if x != nil { + return x.Locale + } + return "" +} + +func (x *LocalizedMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *QuotaFailure_Violation) Reset() { + *x = QuotaFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure_Violation) ProtoMessage() {} + +func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *QuotaFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *QuotaFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single precondition failure. 
+type PreconditionFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation subjects. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *PreconditionFailure_Violation) Reset() { + *x = PreconditionFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure_Violation) ProtoMessage() {} + +func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. +func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PreconditionFailure_Violation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PreconditionFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *PreconditionFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A path that leads to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // - `full_name` for a violation in the `full_name` value + // - `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // - `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. 
+ // + // In JSON, the same values are represented as: + // + // - `fullName` for a violation in the `fullName` value + // - `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // - `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BadRequest_FieldViolation) Reset() { + *x = BadRequest_FieldViolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest_FieldViolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest_FieldViolation) ProtoMessage() {} + +func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *BadRequest_FieldViolation) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *BadRequest_FieldViolation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Describes a URL link. +type Help_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *Help_Link) Reset() { + *x = Help_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help_Link) ProtoMessage() {} + +func (x *Help_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. 
+func (*Help_Link) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Help_Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Help_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +var File_google_rpc_error_details_proto protoreflect.FileDescriptor + +var file_google_rpc_error_details_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, + 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, + 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, + 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_error_details_proto_rawDescOnce sync.Once + file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc +) + +func file_google_rpc_error_details_proto_rawDescGZIP() []byte { + file_google_rpc_error_details_proto_rawDescOnce.Do(func() { + file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) + }) + return file_google_rpc_error_details_proto_rawDescData +} + +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_goTypes = []interface{}{ + (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo + (*RetryInfo)(nil), // 1: google.rpc.RetryInfo + (*DebugInfo)(nil), // 2: google.rpc.DebugInfo + (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure + (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure + (*BadRequest)(nil), // 5: google.rpc.BadRequest + (*RequestInfo)(nil), // 6: google.rpc.RequestInfo + (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo + (*Help)(nil), // 8: google.rpc.Help + (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage + nil, // 10: google.rpc.ErrorInfo.MetadataEntry + (*QuotaFailure_Violation)(nil), // 11: 
google.rpc.QuotaFailure.Violation + (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 14: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_google_rpc_error_details_proto_depIdxs = []int32{ + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation + 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_rpc_error_details_proto_init() } +func file_google_rpc_error_details_proto_init() { + if File_google_rpc_error_details_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalizedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest_FieldViolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_error_details_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_error_details_proto_goTypes, + DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, + MessageInfos: file_google_rpc_error_details_proto_msgTypes, + }.Build() + File_google_rpc_error_details_proto = out.File + file_google_rpc_error_details_proto_rawDesc = nil + file_google_rpc_error_details_proto_goTypes = nil + file_google_rpc_error_details_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..6ad1b1c1d --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,203 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_status_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_status_proto_goTypes,
+		DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+		MessageInfos:      file_google_rpc_status_proto_msgTypes,
+	}.Build()
+	File_google_rpc_status_proto = out.File
+	file_google_rpc_status_proto_rawDesc = nil
+	file_google_rpc_status_proto_goTypes = nil
+	file_google_rpc_status_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
new file mode 100644
index 000000000..e491a9e7f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
new file mode 100644
index 000000000..9d4213ebc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## Community Code of Conduct
+
+gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 000000000..0854d298e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,73 @@
+# How to contribute
+
+We definitely welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
+and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+
+If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/).
+
+## Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single
+  concern**. We often receive PRs that try to fix several things at a time;
+  when only one fix is considered acceptable, nothing gets merged, and both the
+  author's and the reviewers' time is wasted. Create more PRs to address
+  different concerns and everyone will be happy.
+
+- If you are searching for features to work on, issues labeled [Status: Help
+  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
+  are a great place to start. These issues are well-documented and usually can be
+  resolved with a single pull request.
+
+- If you are adding a new file, make sure it has the copyright message template
+  at the top as a comment. You can copy over the message from an existing file
+  and update the year.
+
+- The grpc package should only depend on standard Go packages and a small number
+  of exceptions. If your contribution introduces new dependencies which are NOT
+  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
+  discussion with gRPC-Go authors and consultants.
+ +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a github issue if it exists. + +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `./scripts/vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 000000000..d6ff26747 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 000000000..5d4096d46 --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,36 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. 
+ +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC +- [atollena](https://github.com/atollena), Datadog, Inc. +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 000000000..be38384ff --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,49 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./scripts/vet.sh + +vetdeps: + ./scripts/vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + deps \ + proto \ + test \ + testsubmodule \ + testrace \ + testdeps \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 000000000..b572707c6 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,107 @@ +# gRPC-Go + +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. + +## Installation + +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + + +```go +import "google.golang.org/grpc" +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. + +### How to turn on logging + +The default logger is controlled by environment variables. Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). 
+ If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 000000000..abab27937 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..52d530d7a --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +import ( + "fmt" + "strings" +) + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. +type Attributes struct { + m map[any]any +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. 
+func (a *Attributes) WithValue(key, value any) *Attributes {
+ if a == nil {
+ return New(key, value)
+ }
+ n := &Attributes{m: make(map[any]any, len(a.m)+1)}
+ for k, v := range a.m {
+ n.m[k] = v
+ }
+ n.m[key] = value
+ return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key. The returned value should not be modified.
+func (a *Attributes) Value(key any) any {
+ if a == nil {
+ return nil
+ }
+ return a.m[key]
+}
+
+// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
+// implemented for a value in the attributes, it is called to determine if the
+// value matches the one stored in the other attributes. If Equal is not
+// implemented, standard equality is used to determine if the two values are
+// equal. Note that some types (e.g. maps) aren't comparable by default, so
+// they must be wrapped in a struct, or in an alias type, with Equal defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+ if a == nil && o == nil {
+ return true
+ }
+ if a == nil || o == nil {
+ return false
+ }
+ if len(a.m) != len(o.m) {
+ return false
+ }
+ for k, v := range a.m {
+ ov, ok := o.m[k]
+ if !ok {
+ // o missing element of a
+ return false
+ }
+ if eq, ok := v.(interface{ Equal(o any) bool }); ok {
+ if !eq.Equal(ov) {
+ return false
+ }
+ } else if v != ov {
+ // Fall back to a standard equality check if Equal is unimplemented.
+ return false
+ }
+ }
+ return true
+}
+
+// String prints the attribute map. If any keys or values throughout the map
+// implement fmt.Stringer, String calls that method and appends the result.
+func (a *Attributes) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ first := true
+ for k, v := range a.m {
+ if !first {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+ first = false
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+func str(x any) (s string) {
+ if v, ok := x.(fmt.Stringer); ok {
+ return fmt.Sprint(v)
+ } else if v, ok := x.(string); ok {
+ return v
+ }
+ return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation, and this
+// method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+ return []byte(a.String()), nil
+}
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 000000000..29475e31c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
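Stepping outside the diff for a moment: the `attributes` API vendored above is easiest to grasp from a short usage sketch. This is illustrative only; the `userKey` type and the string values are hypothetical, but the copy-on-write semantics follow the doc comments above.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// userKey is a private key type, as the package comment recommends; defining
// your own type prevents collisions with keys from other packages.
type userKey struct{}

func main() {
	// New and WithValue never mutate: each call returns a fresh *Attributes
	// that shares the previous pairs, so stored values stay immutable.
	a := attributes.New(userKey{}, "alice")
	b := a.WithValue(userKey{}, "bob") // overwrites the value only in b

	fmt.Println(a.Value(userKey{})) // alice
	fmt.Println(b.Value(userKey{})) // bob
	fmt.Println(a.Equal(b))         // false: values differ for userKey{}
}
```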
+ +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..d7b40b7cb --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specified +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. 
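The `ConnectParams`/`backoff.Config` pair above is typically wired into a client along the lines of the following sketch. The target address is hypothetical, and it assumes a grpc-go version that provides `grpc.NewClient`; older versions accept the same options through `grpc.Dial`.

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from DefaultConfig and override a single field, which is the
	// use case the DefaultConfig comment calls out.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.NewClient("localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```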
+var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 000000000..b181f386a --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,464 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + + logger = grpclog.Component("balancer") +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + name := strings.ToLower(b.Name()) + if name != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } + m[name] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. 
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
+
+func init() {
+ internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
+}
+
+// Get returns the balancer builder registered with the given name.
+// Note that the compare is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+ if strings.ToLower(name) != name {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+ }
+ if b, ok := m[strings.ToLower(name)]; ok {
+ return b
+ }
+ return nil
+}
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// Each SubConn contains a list of addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger
+// connecting, Balancers must call Connect. If a connection re-enters IDLE,
+// Balancers must call Connect again to trigger a new connection attempt.
+//
+// gRPC will try to connect to the addresses in sequence, and stop trying the
+// remainder once the first connection is successful. If an attempt to connect
+// to all addresses encounters an error, the SubConn will enter
+// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
+//
+// Once established, if a connection is lost, the SubConn will transition
+// directly to IDLE.
+//
+// This interface is to be implemented by gRPC. Users should not need their own
+// implementation of this interface. For situations like testing, any
+// implementations should embed this interface. This allows gRPC to add new
+// methods to this interface.
+type SubConn interface {
+ // UpdateAddresses updates the addresses used in this SubConn.
+ // gRPC checks if the currently-connected address is still in the new list.
+ // If it's in the list, the connection will be kept.
+ // If it's not in the list, the connection will be gracefully closed, and
+ // a new connection will be created.
+ //
+ // This will trigger a state transition for the SubConn.
+ //
+ // Deprecated: this method will be removed. Create new SubConns for new
+ // addresses instead.
+ UpdateAddresses([]resolver.Address)
+ // Connect starts the connecting for this SubConn.
+ Connect()
+ // GetOrBuildProducer returns a reference to the existing Producer for this
+ // ProducerBuilder in this SubConn, or, if one does not currently exist,
+ // creates a new one and returns it. Returns a close function which must
+ // be called when the Producer is no longer needed.
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+ // Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+ // allowed to complete. No future calls should be made on the SubConn.
+ // One final state update will be delivered to the StateListener (or
+ // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+ // indicate the shutdown operation. This may be delivered before
+ // in-progress RPCs are complete and the actual connection is closed.
+ Shutdown()
+}
+
+// NewSubConnOptions contains options to create new SubConn.
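To illustrate `Register` and `Get` above, here is a minimal, hypothetical builder registration. `mybalancer`, `myBuilder`, and the `my_lb` name are all made up for the sketch, and a real `Build` would return a working `Balancer` rather than a stub.

```go
package mybalancer // hypothetical package

import "google.golang.org/grpc/balancer"

type myBuilder struct{}

// Build would construct the real Balancer; a stub is enough to show
// how registration works.
func (myBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return nil
}

// Name is what service configs refer to; keeping it lowercase sidesteps the
// case-sensitivity migration warned about in Register and Get.
func (myBuilder) Name() string { return "my_lb" }

func init() {
	// Register is only safe at init time: the registry map is not
	// thread-safe.
	balancer.Register(myBuilder{})
}
```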
+type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. 
Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + CredsBundle credentials.Bundle + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. + Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. + ServerLoad any +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. 
+ // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. 
If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. + Close() +} + +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. 
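As a quick illustration of the `Picker` contract documented above, here is a hypothetical picker that knows about at most one SubConn; note that `Pick` never blocks and signals "not ready yet" with `ErrNoSubConnAvailable`.

```go
package mybalancer // hypothetical, continuing the sketch above

import "google.golang.org/grpc/balancer"

// onlyPicker always picks the single SubConn it was built with.
type onlyPicker struct {
	sc balancer.SubConn
}

func (p *onlyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// Tell gRPC to queue the RPC until the balancer publishes a new
		// Picker via ClientConn.UpdateState.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.sc}, nil
}
```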
+type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 000000000..2b87bd79c --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,264 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + state: connectivity.Connecting, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns *resolver.AddressMap + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := resolver.NewAddressMap() + for _, a := range s.ResolverState.Addresses { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). 
+ var sc balancer.SubConn
+ opts := balancer.NewSubConnOptions{
+ HealthCheckEnabled: b.config.HealthCheck,
+ StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
+ }
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
+ if err != nil {
+ logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+ continue
+ }
+ b.subConns.Set(a, sc)
+ b.scStates[sc] = connectivity.Idle
+ b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
+ sc.Connect()
+ }
+ }
+ for _, a := range b.subConns.Keys() {
+ sci, _ := b.subConns.Get(a)
+ sc := sci.(balancer.SubConn)
+ // a was removed by resolver.
+ if _, ok := addrsSet.Get(a); !ok {
+ sc.Shutdown()
+ b.subConns.Delete(a)
+ // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+ // The entry will be deleted in updateSubConnState.
+ }
+ }
+ // If resolver state contains no addresses, return an error so ClientConn
+ // will trigger re-resolution. This is also recorded as a resolver error, so
+ // when the overall state turns transient failure, the error message will
+ // include the zero-address information.
+ if len(s.ResolverState.Addresses) == 0 {
+ b.ResolverError(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+
+ b.regeneratePicker()
+ b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+ return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error. Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+ // connErr must always be non-nil unless there are no SubConns, in which
+ // case resolverErr must be non-nil.
+ if b.connErr == nil {
+ return fmt.Errorf("last resolver error: %v", b.resolverErr)
+ }
+ if b.resolverErr == nil {
+ return fmt.Errorf("last connection error: %v", b.connErr)
+ }
+ return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker is
+// - errPicker if the balancer is in TransientFailure,
+// - built by the pickerBuilder with all READY SubConns otherwise.
+func (b *baseBalancer) regeneratePicker() {
+ if b.state == connectivity.TransientFailure {
+ b.picker = NewErrPicker(b.mergeErrors())
+ return
+ }
+ readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+ // Collect all ready SCs from the full subConn map.
+ for _, addr := range b.subConns.Keys() {
+ sci, _ := b.subConns.Get(addr)
+ sc := sci.(balancer.SubConn)
+ if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs[sc] = SubConnInfo{Address: addr}
+ }
+ }
+ b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
+}
+
+// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
+func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call Shutdown for the SubConns. +func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 000000000..e31d76e33 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package base defines a balancer base that can be used to build balancers with
+// different picking algorithms.
+//
+// The base balancer creates a new SubConn for each resolved address. The
+// provided picker will only be notified about READY SubConns.
+//
+// This package is the base of the round_robin balancer; its purpose is to be
+// used to build round_robin-like balancers with complex picking algorithms.
+// Balancers with more complicated logic should try to implement a balancer
+// builder from scratch.
+//
+// All APIs in this package are experimental.
+package base
+
+import (
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+// PickerBuilder creates balancer.Picker.
+type PickerBuilder interface {
+ // Build returns a picker that will be used by gRPC to pick a SubConn.
+ Build(info PickerBuildInfo) balancer.Picker
+}
+
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+ // ReadySCs is a map from all ready SubConns to the Addresses used to
+ // create them.
+ ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+ Address resolver.Address // the address used to create this SubConn
+}
+
+// Config contains the config info about the base balancer builder.
+type Config struct {
+ // HealthCheck indicates whether health checking should be enabled for this specific balancer.
+ HealthCheck bool
+}
+
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
+ return &baseBuilder{
+ name: name,
+ pickerBuilder: pb,
+ config: config,
+ }
+}
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
new file mode 100644
index 000000000..c33413581
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread-safe.
+type ConnectivityStateEvaluator struct {
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transient failure state.
+ numIdle uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records a state change happening in a SubConn and, based on
+// that, evaluates what the aggregated state should be.
+//
+// - If at least one SubConn is Ready, the aggregated state is Ready;
+// - Else if at least one SubConn is Connecting, the aggregated state is Connecting;
+// - Else if at least one SubConn is Idle, the aggregated state is Idle;
+// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is TransientFailure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+ // Update counters.
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ cse.numReady += updateVal
+ case connectivity.Connecting:
+ cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ case connectivity.Idle:
+ cse.numIdle += updateVal
+ }
+ }
+ return cse.CurrentState()
+}
+
+// CurrentState returns the current aggregated connectivity state by evaluating
+// the counters.
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
+ // Evaluate.
+ if cse.numReady > 0 {
+ return connectivity.Ready
+ }
+ if cse.numConnecting > 0 {
+ return connectivity.Connecting
+ }
+ if cse.numIdle > 0 {
+ return connectivity.Idle
+ }
+ return connectivity.TransientFailure
+}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 000000000..4ecfa1c21
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+ "google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+ // BalancerAddresses contains the remote load balancer address(es). If
+ // set, overrides any resolver-provided addresses with Type of GRPCLB.
+ BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s. s's
+// data should not be mutated after calling Set.
+func Set(state resolver.State, s *State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(key, s)
+ return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
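+//
+// A hedged usage sketch from a resolver's point of view (backendAddrs and
+// lbAddrs are hypothetical variables, and cc is the resolver's
+// resolver.ClientConn; this is not part of the upstream file):
+//
+//	rs := resolver.State{Addresses: backendAddrs}
+//	rs = state.Set(rs, &state.State{BalancerAddresses: lbAddrs})
+//	cc.UpdateState(rs) // grpclb reads the addresses back via state.Get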
+func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go new file mode 100644 index 000000000..4d69b4052 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -0,0 +1,283 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + +const ( + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " +) + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (pickfirstBuilder) Name() string { + return Name +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. 
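+ // There is therefore nothing new to push to the ClientConn.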
+ return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var addrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling will + // change the order of endpoints but not touch the order of the addresses + // within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for each + // of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8304 section 4." - A61 + // TODO: support the above language. + addrs = append(addrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + addrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) 
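+ // Shuffle the copy in place; the resolver's original slice stays untouched.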
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) + if err != nil { + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil + return + } + + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) + case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, + }) + } + b.state = state.ConnectivityState +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. 
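+// Returning ErrNoSubConnAvailable causes gRPC to hold the RPC until a new
+// picker is pushed, by which time the kicked connection may have become ready.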
+type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 000000000..260255d31 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "math/rand" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: uint32(rand.Intn(len(scs))), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + next uint32 +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go new file mode 100644 index 000000000..8ad6ce2f0 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -0,0 +1,365 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the updateClientConnState() method +// is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. 
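+//
+// The serializer pattern used throughout this file, as a brief illustrative
+// sketch (not part of the upstream file):
+//
+//	s := grpcsync.NewCallbackSerializer(ctx)
+//	s.TrySchedule(func(ctx context.Context) {
+//		// Callbacks run one at a time, in scheduling order, with no locks held.
+//	})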
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+ errCh := make(chan error)
+ uccs := func(ctx context.Context) {
+ defer close(errCh)
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ name := gracefulswitch.ChildName(ccs.BalancerConfig)
+ if ccb.curBalancerName != name {
+ ccb.curBalancerName = name
+ channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
+ }
+ err := ccb.balancer.UpdateClientConnState(*ccs)
+ if logger.V(2) && err != nil {
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ }
+ errCh <- err
+ }
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the TrySchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
+ return <-errCh
+}
+
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer. The call to the balancer is executed from the serializer.
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ResolverError(err)
+ })
+}
+
+// close initiates async shutdown of the wrapper. cc.mu must be held when
+// calling this function. To determine whether the wrapper has finished shutting
+// down, the caller should block on ccb.serializer.Done() without cc.mu held.
+func (ccb *ccBalancerWrapper) close() {
+ ccb.mu.Lock()
+ ccb.closed = true
+ ccb.mu.Unlock()
+ channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
+ ccb.serializer.TrySchedule(func(context.Context) {
+ if ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.Close()
+ ccb.balancer = nil
+ })
+ ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ExitIdle()
+ })
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+ }
+ ccb.mu.Unlock()
+
+ if len(addrs) == 0 {
+ return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+ }
+ ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+ if err != nil {
+ channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+ return nil, err
+ }
+ acbw := &acBalancerWrapper{
+ ccb: ccb,
+ ac: ac,
+ producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
+ stateListener: opts.StateListener,
+ }
+ ac.acbw = acbw
+ return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ // The graceful switch balancer will never call this.
+ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ acbw, ok := sc.(*acBalancerWrapper)
+ if !ok {
+ return
+ }
+ acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+ if ccb.cc.conns == nil {
+ // The CC has been closed; ignore this update.
+ return
+ }
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+ // Update the picker before updating the state. Even though the ordering
+ // here does not matter functionally, updating the state first can lead to
+ // multiple calls of Pick in the common start-up case where we wait for
+ // ready and then perform an RPC. If the picker were updated later, we
+ // could call the "connecting" picker when the state is updated, and then
+ // call the "ready" picker after the picker gets updated.
+
+ // Note that there is no need to check if the balancer wrapper was closed,
+ // as we know the graceful switch LB policy will not call cc if it has been
+ // closed.
+ ccb.cc.pickerWrapper.updatePicker(s.Picker)
+ ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+ ccb.cc.mu.RLock()
+ defer ccb.cc.mu.RUnlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+ ccb.cc.resolveNowLocked(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+ return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements the balancer.SubConn interface.
+type acBalancerWrapper struct {
+ ac *addrConn // read-only
+ ccb *ccBalancerWrapper // read-only
+ stateListener func(balancer.SubConnState)
+
+ mu sync.Mutex
+ producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ // Even though it is optional for balancers, gracefulswitch ensures
+ // opts.StateListener is set, so this cannot ever be nil.
+ // TODO: delete this comment when UpdateSubConnState is removed.
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ acbw.stateListener(scs)
+ acbw.ac.mu.Lock()
+ defer acbw.ac.mu.Unlock()
+ if s == connectivity.Ready {
+ // When changing states to READY, reset stateReadyChan. Wait until
+ // after we notify the LB policy's listener(s) in order to prevent
+ // ac.getTransport() from unblocking before the LB policy starts
+ // tracking the subchannel as READY.
+ close(acbw.ac.stateReadyChan)
+ acbw.ac.stateReadyChan = make(chan struct{})
+ }
+ })
+}
+
+func (acbw *acBalancerWrapper) String() string {
+ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+ acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+ go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+ acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
+// ready, blocks until it is or ctx expires.
Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 000000000..55bffaa77 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1183 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
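+ // (EnforceVersion is a compile-time assertion: the constant conversion
+ // fails to build when the generated code and the protoimpl runtime are
+ // not mutually compatible.)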
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. 
+func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. 
+ // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. + SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+ if x != nil {
+ return x.CallId
+ }
+ return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+ if x != nil {
+ return x.SequenceIdWithinCall
+ }
+ return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+ if x != nil {
+ return x.Type
+ }
+ return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+ if x != nil {
+ return x.Logger
+ }
+ return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+ return x.ClientHeader
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+ return x.ServerHeader
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+ return x.Message
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+ if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+ return x.Trailer
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+ if x != nil {
+ return x.PayloadTruncated
+ }
+ return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+ isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+ ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+ ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+ // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+ Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+ Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The name of the RPC method, which looks something like:
+ // /<service>/<method>
+ // Note the leading "/" character.
+ MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+ // A single process may be used to run multiple virtual
+ // servers with different identities.
+ // The authority is the name of such a server identity.
+ // It is typically a portion of the URI in the form of
+ // <host> or <host>:<port> .
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GrpcLogEntry); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 000000000..788c89c16 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+)
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. This is typically called by generated code.
+//
+// All errors returned by Invoke are compatible with the status package.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
+	// Allow the interceptor to see all applicable call options, which means
+	// those configured as defaults from dial options as well as per-call options.
+	opts = combine(cc.dopts.callOptions, opts)
+
+	if cc.dopts.unaryInt != nil {
+		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
+	}
+	return invoke(ctx, method, args, reply, cc, opts...)
+}
+
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+	// We don't use append because o1 could have extra capacity whose
+	// elements would be overwritten, which could cause inadvertent
+	// sharing (and race conditions) between concurrent calls.
+	if len(o1) == 0 {
+		return o2
+	} else if len(o2) == 0 {
+		return o1
+	}
+	ret := make([]CallOption, len(o1)+len(o2))
+	copy(ret, o1)
+	copy(ret[len(o1):], o2)
+	return ret
+}
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. This is typically called by generated code.
+//
+// Deprecated: Use ClientConn.Invoke instead.
+func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
+	return cc.Invoke(ctx, method, args, reply, opts...)
+}
+
+var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
+
+func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
+	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(req); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 000000000..32b7fa579
--- /dev/null
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec, as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
+// the `internal/channelz` package.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 000000000..9c8850e3f --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1827 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net/url" + "slices" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. 
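+	// For illustration, a caller would typically satisfy this requirement
+	// with an explicit credentials dial option, e.g. (hypothetical target):
+	//
+	//	cc, err := grpc.NewClient("dns:///example.com:443",
+	//		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	//
+	// where insecure.NewCredentials comes from
+	// google.golang.org/grpc/credentials/insecure, as the error text below
+	// suggests.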
+	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
+	// errTransportCredsAndBundle indicates that creds bundle is used together
+	// with other individual Transport Credentials.
+	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
+	// errNoTransportCredsInBundle indicates that the configured creds bundle
+	// returned nil transport credentials.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that users want to transmit
+	// security information (e.g., OAuth2 token) which requires secure
+	// connection on an insecure connection.
+	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+)
+
+const (
+	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultClientMaxSendMessageSize    = math.MaxInt32
+	// http2IOBufSize specifies the buffer size for sending frames.
+	defaultWriteBufSize = 32 * 1024
+	defaultReadBufSize  = 32 * 1024
+)
+
+type defaultConfigSelector struct {
+	sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+	return &iresolver.RPCConfig{
+		Context:      rpcInfo.Context,
+		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+	}, nil
+}
+
+// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
+// is performed. Use of the ClientConn for RPCs will automatically cause it to
+// connect. Connect may be used to manually create a connection, but for most
+// users this is unnecessary.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use the dns
+// resolver, a "dns:///" prefix should be applied to the target.
+//
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
+	cc := &ClientConn{
+		target: target,
+		conns:  make(map[*addrConn]struct{}),
+		dopts:  defaultDialOptions(),
+	}
+
+	cc.retryThrottler.Store((*retryThrottler)(nil))
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.ctx, cc.cancel = context.WithCancel(context.Background())
+
+	// Apply dial options.
+	disableGlobalOpts := false
+	for _, opt := range opts {
+		if _, ok := opt.(*disableGlobalDialOptions); ok {
+			disableGlobalOpts = true
+			break
+		}
+	}
+
+	if !disableGlobalOpts {
+		for _, opt := range globalDialOptions {
+			opt.apply(&cc.dopts)
+		}
+	}
+
+	for _, opt := range opts {
+		opt.apply(&cc.dopts)
+	}
+
+	// Determine the resolver to use.
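+	// The scheme of the parsed target selects the resolver: with the blank
+	// imports above, illustrative targets such as "dns:///example.com:443",
+	// "unix:///tmp/app.sock", or "passthrough:///127.0.0.1:50051" map to the
+	// dns, unix, and passthrough resolvers respectively (the hosts and paths
+	// here are hypothetical).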
+	if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+		return nil, err
+	}
+
+	for _, opt := range globalPerTargetDialOptions {
+		opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+	}
+
+	chainUnaryClientInterceptors(cc)
+	chainStreamClientInterceptors(cc)
+
+	if err := cc.validateTransportCredentials(); err != nil {
+		return nil, err
+	}
+
+	if cc.dopts.defaultServiceConfigRawJSON != nil {
+		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
+		if scpr.Err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+		}
+		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+	}
+	cc.mkp = cc.dopts.copts.KeepaliveParams
+
+	if err = cc.initAuthority(); err != nil {
+		return nil, err
+	}
+
+	// Register ClientConn with channelz. Note that this is done only at a
+	// point where channel creation can no longer fail.
+	cc.channelzRegistration(target)
+	channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+	channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
+	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
+	cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+
+	cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
+	cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+	cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
+	return cc, nil
+}
+
+// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	return DialContext(context.Background(), target, opts...)
+}
+
+// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is
+// used, it calls Connect and WaitForStateChange until either the context
+// expires or the state of the ClientConn is Ready.
+//
+// One subtle difference between NewClient and Dial/DialContext is that the
+// former uses "dns" as the default name resolver, while the latter two use
+// "passthrough" for backward compatibility. This distinction should not matter
+// to most users, but could matter to legacy users that specify a custom dialer
+// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+	// At the end of this method, we kick the channel out of idle, rather than
+	// waiting for the first rpc.
+	opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
+	cc, err := NewClient(target, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// We start the channel off in idle mode, but kick it out of idle now,
+	// instead of waiting for the first RPC. This is the legacy behavior of
+	// Dial.
+	defer func() {
+		if err != nil {
+			cc.Close()
+		}
+	}()
+
+	// This creates the name resolver, load balancer, etc.
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		return nil, err
+	}
+
+	// Return now for non-blocking dials.
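+	// Everything past this point runs only for the deprecated blocking path
+	// (WithBlock), which polls the connectivity state via WaitForStateChange
+	// until the channel is Ready or the context expires.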
+	if !cc.dopts.block {
+		return cc, nil
+	}
+
+	if cc.dopts.timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
+		defer cancel()
+	}
+	defer func() {
+		select {
+		case <-ctx.Done():
+			switch {
+			case ctx.Err() == err:
+				conn = nil
+			case err == nil || !cc.dopts.returnLastError:
+				conn, err = nil, ctx.Err()
+			default:
+				conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+			}
+		default:
+		}
+	}()
+
+	// A blocking dial blocks until the clientConn is ready.
+	for {
+		s := cc.GetState()
+		if s == connectivity.Idle {
+			cc.Connect()
+		}
+		if s == connectivity.Ready {
+			return cc, nil
+		} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+			if err = cc.connectionError(); err != nil {
+				terr, ok := err.(interface {
+					Temporary() bool
+				})
+				if ok && !terr.Temporary() {
+					return nil, err
+				}
+			}
+		}
+		if !cc.WaitForStateChange(ctx, s) {
+			// ctx timed out or was canceled.
+			if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+				return nil, err
+			}
+			return nil, ctx.Err()
+		}
+	}
+}
+
+// addTraceEvent is a helper method to add a trace event on the channel. If the
+// channel is a nested one, the same event is also added on the parent channel.
+func (cc *ClientConn) addTraceEvent(msg string) {
+	ted := &channelz.TraceEvent{
+		Desc:     fmt.Sprintf("Channel %s", msg),
+		Severity: channelz.CtInfo,
+	}
+	if cc.dopts.channelzParent != nil {
+		ted.Parent = &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
+			Severity: channelz.CtInfo,
+		}
+	}
+	channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+}
+
+type idler ClientConn
+
+func (i *idler) EnterIdleMode() {
+	(*ClientConn)(i).enterIdleMode()
+}
+
+func (i *idler) ExitIdleMode() error {
+	return (*ClientConn)(i).exitIdleMode()
+}
+
+// exitIdleMode moves the channel out of idle mode by recreating the name
+// resolver and load balancer. This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return errConnClosing
+	}
+	cc.mu.Unlock()
+
+	// This needs to be called without cc.mu because this builds a new resolver
+	// which might update state or report an error inline, which would then
+	// need to acquire cc.mu.
+	if err := cc.resolverWrapper.start(); err != nil {
+		return err
+	}
+
+	cc.addTraceEvent("exiting idle mode")
+	return nil
+}
+
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+	cc.resolverWrapper = newCCResolverWrapper(cc)
+	cc.balancerWrapper = newCCBalancerWrapper(cc)
+	cc.firstResolveEvent = grpcsync.NewEvent()
+	// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+	// of setting it to nil here, we recreate the map. This also means that we
+	// don't have to do this when exiting idle mode.
+	cc.conns = make(map[*addrConn]struct{})
+}
+
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer, and any subchannels. This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
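+// The body below closes the resolver wrapper first, then the picker and
+// balancer wrapper, waits for both serializers to drain, and only then tears
+// down the subchannels with errConnIdling.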
+func (cc *ClientConn) enterIdleMode() {
+	cc.mu.Lock()
+
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return
+	}
+
+	conns := cc.conns
+
+	rWrapper := cc.resolverWrapper
+	rWrapper.close()
+	cc.pickerWrapper.reset()
+	bWrapper := cc.balancerWrapper
+	bWrapper.close()
+	cc.csMgr.updateState(connectivity.Idle)
+	cc.addTraceEvent("entering idle mode")
+
+	cc.initIdleStateLocked()
+
+	cc.mu.Unlock()
+
+	// Block until the name resolver and LB policy are closed.
+	<-rWrapper.serializer.Done()
+	<-bWrapper.serializer.Done()
+
+	// Close all subchannels after the LB policy is closed.
+	for ac := range conns {
+		ac.tearDown(errConnIdling)
+	}
+}
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+//   - no transport creds and no creds bundle is configured
+//   - both transport creds and creds bundle are configured
+//   - creds bundle is configured, but it lacks transport credentials
+//   - insecure transport creds configured alongside call creds that require
+//     transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
+func (cc *ClientConn) validateTransportCredentials() error {
+	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+		return errNoTransportSecurity
+	}
+	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+		return errTransportCredsAndBundle
+	}
+	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+		return errNoTransportCredsInBundle
+	}
+	transportCreds := cc.dopts.copts.TransportCredentials
+	if transportCreds == nil {
+		transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+	}
+	if transportCreds.Info().SecurityProtocol == "insecure" {
+		for _, cd := range cc.dopts.copts.PerRPCCredentials {
+			if cd.RequireTransportSecurity() {
+				return errTransportCredentialsMissing
+			}
+		}
+	}
+	return nil
+}
+
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelz`. A channelz trace event is
+// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e. a valid parent ClientConn ID is specified via a dial option, the
+// trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+	parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
+	cc.channelz = channelz.RegisterChannel(parentChannel, target)
+	cc.addTraceEvent("created")
+}
+
+// chainUnaryClientInterceptors chains all unary client interceptors into one.
+func chainUnaryClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainUnaryInts
+	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.unaryInt != nil {
+		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
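+		// For illustration: given WithUnaryInterceptor(A) and
+		// WithChainUnaryInterceptor(B, C) (hypothetical interceptors), the
+		// slice becomes [A, B, C] here, and the chain built below executes
+		// A -> B -> C -> invoker, so the dial-option interceptor runs first.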
+	}
+	var chainedInt UnaryClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
+		}
+	}
+	cc.dopts.unaryInt = chainedInt
+}
+
+// getChainUnaryInvoker recursively generates the chained unary invoker.
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
+	if curr == len(interceptors)-1 {
+		return finalInvoker
+	}
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
+		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
+	}
+}
+
+// chainStreamClientInterceptors chains all stream client interceptors into one.
+func chainStreamClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainStreamInts
+	// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.streamInt != nil {
+		interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
+	}
+	var chainedInt StreamClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
+			return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
+		}
+	}
+	cc.dopts.streamInt = chainedInt
+}
+
+// getChainStreamer recursively generates the chained client stream constructor.
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
+	if curr == len(interceptors)-1 {
+		return finalStreamer
+	}
+	return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+		return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
+	}
+}
+
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified channel.
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelz: channel,
+		pubSub:   grpcsync.NewPubSub(ctx),
+	}
+}
+
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
+type connectivityStateManager struct {
+	mu         sync.Mutex
+	state      connectivity.State
+	notifyChan chan struct{}
+	channelz   *channelz.Channel
+	pubSub     *grpcsync.PubSub
+}
+
+// updateState updates the connectivity.State of ClientConn.
+// If there's a change it notifies goroutines waiting on state change to
+// happen.
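+// Waiters obtain the channel via getNotifyChan below and block on it, e.g.
+//
+//	ch := csm.getNotifyChan()
+//	<-ch // unblocks on the next state transition
+//
+// which is how ClientConn.WaitForStateChange is implemented further down in
+// this file.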
+func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + csm.channelz.ChannelMetrics.State.Store(&state) + csm.pubSub.Publish(state) + + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + pickerWrapper *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + retryThrottler atomic.Value // Updated from service config. + + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. + sc *ServiceConfig // Latest service config received from the resolver. 
+	conns map[*addrConn]struct{}      // Set to nil on close.
+	mkp   keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+	// firstResolveEvent is used to track whether the name resolver sent us at
+	// least one update. RPCs block on this event. May be accessed without mu
+	// if we know we cannot be asked to enter idle mode while accessing it (e.g.
+	// when the idle manager has already been closed, or if we are already
+	// entering idle mode).
+	firstResolveEvent *grpcsync.Event
+
+	lceMu               sync.Mutex // protects lastConnectionError
+	lastConnectionError error
+}
+
+// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
+// ctx expires. A true value is returned in the former case and false in the latter.
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+	ch := cc.csMgr.getNotifyChan()
+	if cc.csMgr.getState() != sourceState {
+		return true
+	}
+	select {
+	case <-ctx.Done():
+		return false
+	case <-ch:
+		return true
+	}
+}
+
+// GetState returns the connectivity.State of ClientConn.
+func (cc *ClientConn) GetState() connectivity.State {
+	return cc.csMgr.getState()
+}
+
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle. Does not wait for the connection attempts to begin
+// before returning.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		cc.addTraceEvent(err.Error())
+		return
+	}
+	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
+	// LB policy so that connections can be created.
+	cc.mu.Lock()
+	cc.balancerWrapper.exitIdle()
+	cc.mu.Unlock()
+}
+
+// waitForResolvedAddrs blocks until the resolver has provided addresses or the
+// context expires. Returns nil unless the context expires first; otherwise
+// returns a status error based on the context.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+	// This is on the RPC path, so we use a fast path to avoid the
+	// more-expensive "select" below after the resolver has returned once.
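+	// Once the first resolver update has arrived, HasFired is a cheap check,
+	// so every subsequent RPC skips the select below entirely.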
+ if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) + } +} + +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig() + + cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig() + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector) + } else { + ret = balancer.ErrBadResolverState + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. 
+			cc.applyFailingLBLocked(s.ServiceConfig)
+			cc.mu.Unlock()
+			return ret
+			}
+		}
+	}
+
+	var balCfg serviceconfig.LoadBalancingConfig
+	if cc.sc != nil && cc.sc.lbConfig != nil {
+		balCfg = cc.sc.lbConfig
+	}
+	bw := cc.balancerWrapper
+	cc.mu.Unlock()
+
+	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+	if ret == nil {
+		ret = uccsErr // prefer ErrBadResolverState since any other error is
+		// currently meaningless to the caller.
+	}
+	return ret
+}
+
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+	var err error
+	if sc.Err != nil {
+		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
+	} else {
+		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
+	}
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+	cc.csMgr.updateState(connectivity.TransientFailure)
+}
+
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
+	out := make([]resolver.Address, len(in))
+	copy(out, in)
+	return out
+}
+
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+	if cc.conns == nil {
+		return nil, ErrClientConnClosing
+	}
+
+	ac := &addrConn{
+		state:          connectivity.Idle,
+		cc:             cc,
+		addrs:          copyAddresses(addrs),
+		scopts:         opts,
+		dopts:          cc.dopts,
+		channelz:       channelz.RegisterSubChannel(cc.channelz, ""),
+		resetBackoff:   make(chan struct{}),
+		stateReadyChan: make(chan struct{}),
+	}
+	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+	// Start with our address set to the first address; this may be updated if
+	// we connect to different addresses.
+	ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
+
+	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+		Desc:     "Subchannel created",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
+			Severity: channelz.CtInfo,
+		},
+	})
+
+	// Track ac in cc. This needs to be done before any getTransport(...) is called.
+	cc.conns[ac] = struct{}{}
+	return ac, nil
+}
+
+// removeAddrConn removes the addrConn in the subConn from clientConn.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return
+	}
+	delete(cc.conns, ac)
+	cc.mu.Unlock()
+	ac.tearDown(err)
+}
+
+// Target returns the target string of the ClientConn.
+func (cc *ClientConn) Target() string {
+	return cc.target
+}
+
+// CanonicalTarget returns the canonical target string of the ClientConn.
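+// For example (illustrative; the exact output depends on the resolver and the
+// default scheme): for a channel created with the hypothetical target
+// "dns:example.com:443", Target returns the string as given, while
+// CanonicalTarget returns the normalized form of the parsed URL, such as
+// "dns:///example.com:443".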
+func (cc *ClientConn) CanonicalTarget() string {
+	return cc.parsedTarget.String()
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+	cc.channelz.ChannelMetrics.CallsStarted.Add(1)
+	cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+	cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+	cc.channelz.ChannelMetrics.CallsFailed.Add(1)
+}
+
+// connect starts creating a transport.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+func (ac *addrConn) connect() error {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		if logger.V(2) {
+			logger.Infof("connect called on shutdown addrConn; ignoring.")
+		}
+		ac.mu.Unlock()
+		return errConnClosing
+	}
+	if ac.state != connectivity.Idle {
+		if logger.V(2) {
+			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+		}
+		ac.mu.Unlock()
+		return nil
+	}
+
+	ac.resetTransportAndUnlock()
+	return nil
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+	return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
+}
+
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
+	addrs = copyAddresses(addrs)
+	limit := len(addrs)
+	if limit > 5 {
+		limit = 5
+	}
+	channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
+
+	ac.mu.Lock()
+	if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
+		ac.mu.Unlock()
+		return
+	}
+
+	ac.addrs = addrs
+
+	if ac.state == connectivity.Shutdown ||
+		ac.state == connectivity.TransientFailure ||
+		ac.state == connectivity.Idle {
+		// We were not connecting, so do nothing but update the addresses.
+		ac.mu.Unlock()
+		return
+	}
+
+	if ac.state == connectivity.Ready {
+		// Try to find the connected address.
+		for _, a := range addrs {
+			a.ServerName = ac.cc.getServerName(a)
+			if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
+				// We are connected to a valid address, so do nothing but
+				// update the addresses.
+				ac.mu.Unlock()
+				return
+			}
+		}
+	}
+
+	// We are either connected to the wrong address or currently connecting.
+	// Stop the current iteration and restart.
+
+	ac.cancel()
+	ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+	// We have to defer here because GracefulClose => onClose, which requires
+	// locking ac.mu.
+	if ac.transport != nil {
+		defer ac.transport.GracefulClose()
+		ac.transport = nil
+	}
+
+	if len(addrs) == 0 {
+		ac.updateConnectivityState(connectivity.Idle, nil)
+	}
+
+	// Since we were connecting/connected, we should start a new connection
+	// attempt.
+	go ac.resetTransportAndUnlock()
+}
+
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field, which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+	if cc.dopts.authority != "" {
+		return cc.dopts.authority
+	}
+	if addr.ServerName != "" {
+		return addr.ServerName
+	}
+	return cc.authority
+}
+
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+	if sc == nil {
+		return MethodConfig{}
+	}
+	if m, ok := sc.Methods[method]; ok {
+		return m
+	}
+	i := strings.LastIndex(method, "/")
+	if m, ok := sc.Methods[method[:i+1]]; ok {
+		return m
+	}
+	return sc.Methods[""]
+}
+
+// GetMethodConfig gets the method config of the input method.
+// If there's an exact match for the input method (i.e. /service/method), we return
+// the corresponding MethodConfig.
+// If there isn't an exact match for the input method, we look for the service's default
+// config under the service (i.e. /service/) and then for the default for all services (empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
+// Otherwise, we return an empty MethodConfig.
+func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
+	// TODO: Avoid the locking here.
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	return getMethodConfig(cc.sc, method)
+}
+
+func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	if cc.sc == nil {
+		return nil
+	}
+	return cc.sc.healthCheckConfig
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
+	return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
+		Ctx:            ctx,
+		FullMethodName: method,
+	})
+}
+
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
+	if sc == nil {
+		// should never reach here.
+		return
+	}
+	cc.sc = sc
+	if configSelector != nil {
+		cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+	}
+
+	if cc.sc.retryThrottling != nil {
+		newThrottler := &retryThrottler{
+			tokens: cc.sc.retryThrottling.MaxTokens,
+			max:    cc.sc.retryThrottling.MaxTokens,
+			thresh: cc.sc.retryThrottling.MaxTokens / 2,
+			ratio:  cc.sc.retryThrottling.TokenRatio,
+		}
+		cc.retryThrottler.Store(newThrottler)
+	} else {
+		cc.retryThrottler.Store((*retryThrottler)(nil))
+	}
+}
+
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
+	cc.mu.RLock()
+	cc.resolverWrapper.resolveNow(o)
+	cc.mu.RUnlock()
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+	cc.resolverWrapper.resolveNow(o)
+}
+
+// ResetConnectBackoff wakes up all subchannels in transient failure and causes
+// them to attempt another connection immediately. It also resets the backoff
+// times used for subsequent attempts regardless of the current state.
+//
+// In general, this function should not be used. Typical service or network
+// outages result in a reasonable client reconnection strategy by default.
+// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. + cc.mu.Unlock() + + cc.resolverWrapper.close() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelz.ID) + + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw *acBalancerWrapper + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelz *channelz.SubChannel +} + +// Note: this requires a lock on ac.mu. 
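+//
+// For orientation, a subchannel normally walks the state machine described in
+// https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md:
+//
+//	IDLE -> CONNECTING -> READY -> IDLE (on connection loss)
+//	             \-> TRANSIENT_FAILURE -> IDLE (after the backoff below)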
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
+	if ac.state == s {
+		return
+	}
+	ac.state = s
+	ac.channelz.ChannelMetrics.State.Store(&s)
+	if lastErr == nil {
+		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
+	} else {
+		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
+	}
+	ac.acbw.updateState(s, ac.curAddr, lastErr)
+}
+
+// adjustParams updates parameters used to create transports upon
+// receiving a GoAway.
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
+	switch r {
+	case transport.GoAwayTooManyPings:
+		v := 2 * ac.dopts.copts.KeepaliveParams.Time
+		ac.cc.mu.Lock()
+		if v > ac.cc.mkp.Time {
+			ac.cc.mkp.Time = v
+		}
+		ac.cc.mu.Unlock()
+	}
+}
+
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
+	acCtx := ac.ctx
+	if acCtx.Err() != nil {
+		ac.mu.Unlock()
+		return
+	}
+
+	addrs := ac.addrs
+	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+	// This will be the duration that dial gets to finish.
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		ac.mu.Lock()
+		if acCtx.Err() != nil {
+			// addrConn was torn down.
+			ac.mu.Unlock()
+			return
+		}
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-acCtx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if acCtx.Err() == nil {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns an error if no address was
+// successfully connected, or updates ac appropriately with the new transport.
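+//
+// The backoff schedule and minimum connect timeout consulted above are
+// configurable at dial time via google.golang.org/grpc/backoff; a short
+// sketch (the timeout value is illustrative, not a default):
+//
+//	conn, err := grpc.NewClient(target,
+//		grpc.WithConnectParams(grpc.ConnectParams{
+//			// backoff.DefaultConfig: 1s base, 1.6x multiplier, 0.2 jitter, 120s cap.
+//			Backoff:           backoff.DefaultConfig,
+//			MinConnectTimeout: 20 * time.Second,
+//		}),
+//	)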
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { + var firstConnErr error + for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) + if ctx.Err() != nil { + return errConnClosing + } + ac.mu.Lock() + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) + + err := ac.createTransport(ctx, addr, copts, connectDeadline) + if err == nil { + return nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) + } + + // Couldn't connect to any address. + return firstConnErr +} + +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) + + onClose := func(r transport.GoAwayReason) { + ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + } + + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + defer cancel() + copts.ChannelzParent = ac.channelz + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } + // newTr is either nil, or closed. + hcancel() + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err + } + + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. 
+ go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (any, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport + } + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. 
If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateReadyChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelz.ID) + ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancellation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. 
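+//
+// The throttler state comes from the "retryThrottling" block of the service
+// config, applied in applyServiceConfigAndBalancer above; for example:
+//
+//	{
+//	  "retryThrottling": {
+//	    "maxTokens": 10,
+//	    "tokenRatio": 0.1
+//	  }
+//	}
+//
+// With those values the bucket starts at 10 tokens, each call to throttle
+// consumes one token, each successful RPC refunds 0.1, and retries are
+// disallowed while the bucket is at or below 5 (maxTokens / 2).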
+func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) +} + +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err == nil { + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + return err + } + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. 
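+//
+// For example, assuming the default "dns" scheme (grpc.Dial overrides the
+// default scheme to "passthrough"):
+//
+//	"dns:///example.com:443" -> scheme "dns", endpoint "example.com:443"
+//	"example.com:443"        -> no usable scheme; canonicalized to
+//	                            "dns:///example.com:443" and parsed again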
+func parseTarget(target string) (resolver.Target, error) {
+	u, err := url.Parse(target)
+	if err != nil {
+		return resolver.Target{}, err
+	}
+
+	return resolver.Target{URL: *u}, nil
+}
+
+// encodeAuthority escapes the authority string based on valid chars defined in
+// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Return true for characters that must be percent-escaped. The set of
+	// characters that are valid unescaped is defined in
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanumeric characters are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the Go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
+}
+
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials does not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initAuthority() error {
+	dopts := cc.dopts
+	// Historically, we had two options for users to specify the serverName or
+	// authority for a channel. One was through the transport credentials
+	// (either in its constructor, or through the OverrideServerName() method).
+	// The other option (for cases where WithInsecure() dial option was used)
+	// was to use the WithAuthority() dial option.
+	//
+	// A few things have changed since:
+	// - `insecure` package with an implementation of the `TransportCredentials`
+	//   interface for the insecure case
+	// - WithAuthority() dial option support for secure credentials
+	authorityFromCreds := ""
+	if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+		authorityFromCreds = creds.Info().ServerName
+	}
+	authorityFromDialOption := dopts.authority
+	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+		return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+	}
+
+	endpoint := cc.parsedTarget.Endpoint()
+	if authorityFromDialOption != "" {
+		cc.authority = authorityFromDialOption
+	} else if authorityFromCreds != "" {
+		cc.authority = authorityFromCreds
+	} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+		cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+	} else if strings.HasPrefix(endpoint, ":") {
+		cc.authority = "localhost" + endpoint
+	} else {
+		cc.authority = encodeAuthority(endpoint)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 000000000..e840858b7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,105 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/encoding"
+	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+	"google.golang.org/grpc/mem"
+)
+
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for registration
+// in the encoding package and is not part of the core functionality.
+type baseCodec interface {
+	Marshal(v any) (mem.BufferSlice, error)
+	Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise it returns the result of checking the V2
+// registry with encoding.GetCodecV2, which is nil if nothing is registered
+// under that name.
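+//
+// Both registries are typically populated from init functions. A minimal
+// sketch of registering a V1 codec that would be bridged here (the "json"
+// name and jsonCodec type are hypothetical):
+//
+//	type jsonCodec struct{}
+//
+//	func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
+//	func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
+//	func (jsonCodec) Name() string                       { return "json" }
+//
+//	func init() { encoding.RegisterCodec(jsonCodec{}) }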
+func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) +} + +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..934fac2b0 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package codes + +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..0b42c302b --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,250 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). 
+ // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. 
+ // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
+ Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %d", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 000000000..4a8992642 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. 
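+//
+// For example, an xDS-enabled server reports STARTING until it receives its
+// listener configuration, SERVING while that configuration is in place, and
+// NOT_SERVING if the management server later revokes it.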
+type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 000000000..665e790bb --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/proto" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. 
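+//
+// The levels below are ordered by increasing protection, and
+// CheckSecurityLevel (later in this file) relies on that ordering when
+// comparing them. An illustrative server-side check, where authInfo comes
+// from peer.FromContext:
+//
+//	if err := credentials.CheckSecurityLevel(authInfo, credentials.PrivacyAndIntegrity); err != nil {
+//		return nil, status.Error(codes.PermissionDenied, err.Error())
+//	}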
+type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. 
+ // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials + + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. 
This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) + return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo + } + if ai == nil { + return errors.New("AuthInfo is nil") + } + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 000000000..4c805c644 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns credentials that disable transport security.
+//
+// Note that using these credentials with per-RPC credentials that require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC
+// credentials.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation because the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 000000000..411435854
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,283 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/url" + "os" + + "google.golang.org/grpc/grpclog" + credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" +) + +var logger = grpclog.Component("credentials") + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup(t.State.CipherSuite), + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. 
+ np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } + tlsInfo := TLSInfo{ + State: cs, + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. 
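The ALPN checks above are why gRPC clients must advertise "h2"; NewTLS takes care of that automatically, but it can also be stated explicitly. A minimal client-side sketch, assuming a reachable TLS server (the host name is hypothetical):

package tlsexample

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func dialTLS() (*grpc.ClientConn, error) {
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12, // NewTLS enforces this default as well
		NextProtos: []string{"h2"},   // ALPN, required for HTTP/2 over TLS
	}
	return grpc.NewClient("example.com:443", grpc.WithTransportCredentials(credentials.NewTLS(cfg)))
}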
+func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
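A minimal server-side sketch using the helpers above; the certificate and key paths are hypothetical:

package tlsexample

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func serveTLS() error {
	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		return err
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		return err
	}
	// Register services on srv before serving.
	srv := grpc.NewServer(grpc.Creds(creds))
	return srv.Serve(lis)
}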
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 000000000..2b285beee --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,767 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "net/url" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithBufferPool = withBufferPool +} + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParent channelz.Identifier + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration + defaultScheme string + maxCallAttempts int +} + +// DialOption configures how we set up the connection. 
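As a concrete DialOption in use, a sketch of the insecure-credentials path defined above, suitable for local development; the target address is hypothetical:

package dialexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialPlaintext() (*grpc.ClientConn, error) {
	// Plaintext connection; incompatible with per-RPC credentials that
	// require transport security.
	return grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
}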
+type DialOption interface { + apply(*dialOptions) +} + +var globalDialOptions []DialOption + +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per-target +// configuration to be set through a returned DialOption. The DialOption will not take +// effect if it specifies a resolver builder, as that DialOption is factored in +// while parsing the target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + +// WithSharedWriteBuffer allows reusing the per-connection transport write buffer. +// If this option is set to true, every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The default value for this buffer is 32KB. +// +// Zero or negative values will disable the write buffer such that each write +// will be made on the underlying connection. Note: A Send call may not directly +// translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of the read buffer; this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero or negative values will +// disable the read buffer for a connection so the data framer can access the +// underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for the initial +// window size on a stream. The lower bound for window size is 64K, and any value +// smaller than that will be ignored. 
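A sketch of the embedding pattern EmptyDialOption is documented for above: a library defines a no-op marker option and recovers it later by type assertion. The type and field names are hypothetical.

package dialexample

import "google.golang.org/grpc"

// withTraceTag satisfies grpc.DialOption through the embedded
// EmptyDialOption, whose apply method is a no-op.
type withTraceTag struct {
	grpc.EmptyDialOption
	tag string
}

// WithTraceTag returns a marker DialOption carrying an out-of-band tag.
func WithTraceTag(tag string) grpc.DialOption {
	return withTraceTag{tag: tag}
}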
+func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. 
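Since the deprecated backoff options below forward to WithConnectParams, a sketch of the replacement, starting from backoff.DefaultConfig as the comment above recommends; the values are hypothetical:

package dialexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func backoffOption() grpc.DialOption {
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second // override only the field we care about
	return grpc.WithConnectParams(grpc.ConnectParams{
		Backoff:           bc,
		MinConnectTimeout: 10 * time.Second,
	})
}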
+func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting to the server happens in the background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock(). +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures connection-level +// security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. 
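A sketch combining WithTransportCredentials with the file-based helper from credentials/tls.go; the CA bundle path and target are hypothetical:

package dialexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func dialWithCA() (*grpc.ClientConn, error) {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		return nil, err
	}
	return grpc.NewClient("example.com:443", grpc.WithTransportCredentials(creds))
}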
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn. This should not be used together with +// WithTransportCredentials. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. 
+ return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + " " + grpcUA + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +// +// Keepalive is disabled by default. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. 
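A sketch of the keepalive knob defined above; the values are hypothetical, and WithKeepaliveParams floors Time at the internal minimum as shown:

package dialexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func keepaliveOption() grpc.DialOption {
	return grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:                30 * time.Second, // ping after 30s without activity
		Timeout:             10 * time.Second, // wait 10s for the ping ack
		PermitWithoutStream: true,             // allow pings with no active RPCs
	})
}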
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(c channelz.Identifier) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParent = c + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
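A sketch of WithDefaultServiceConfig with a small JSON document enabling round-robin load balancing, following the service config schema linked above:

package dialexample

import "google.golang.org/grpc"

func serviceConfigOption() grpc.DialOption {
	return grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`)
}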
+func WithDisableHealthCheck() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableHealthCheck = true + }) +} + +// withHealthCheckFunc replaces the default health check function with the +// provided one. It makes tests easier to change the health check function. +// +// For testing purpose only. +func withHealthCheckFunc(f internal.HealthChecker) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.healthCheckFunc = f + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + copts: transport.ConnectOptions{ + ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, + UseProxy: true, + UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), + }, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, + } +} + +// withMinConnectDeadline specifies the function that clientconn uses to +// get minConnectDeadline. This can be used to make connection attempts happen +// faster/slower. +// +// For testing purpose only. +func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// withDefaultScheme is used to allow Dial to use "passthrough" as the default +// name resolver, while NewClient uses "dns" otherwise. +func withDefaultScheme(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultScheme = s + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. +// +// A value of 5 will be used if this dial option is not set or n < 2. 
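A sketch combining the idleness and retry-limit options described above; the values are hypothetical:

package dialexample

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dialTuned(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute), // zero disables idleness
		grpc.WithMaxCallAttempts(3),         // values < 2 fall back to the default of 5
	)
}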
+func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + +func withBufferPool(bufferPool mem.BufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.BufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 000000000..e7b532b6f --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./scripts/regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 000000000..11d0ae142 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package encoding + +import ( + "io" + "strings" + + "google.golang.org/grpc/internal/grpcutil" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. 
+ Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]any) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
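A sketch of a custom Codec registered at init time, as the comments above require; the "myjson" name is hypothetical. A client could then select it per call with grpc.CallContentSubtype("myjson").

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "myjson" }

func init() {
	// Registers the codec under the lowercase content-subtype "myjson".
	encoding.RegisterCodec(jsonCodec{})
}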
+func GetCodec(contentSubtype string) Codec { + c, _ := registeredCodecs[contentSubtype].(Codec) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 000000000..074c5e234 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
+func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 000000000..6306e8bb0 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package gzip + +import ( + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "sync" + + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. +const Name = "gzip" + +func init() { + c := &compressor{} + c.poolCompressor.New = func() any { + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +} + +type writer struct { + *gzip.Writer + pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. +func SetLevel(level int) error { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) + c.poolCompressor.New = func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return &writer{Writer: w, pool: &c.poolCompressor} + } + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + z := c.poolCompressor.Get().(*writer) + z.Writer.Reset(w) + return z, nil +} + +func (z *writer) Close() error { + defer z.pool.Put(z) + return z.Writer.Close() +} + +type reader struct { + *gzip.Reader + pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + z, inPool := c.poolDecompressor.Get().(*reader) + if !inPool { + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil + } + if err := z.Reset(r); err != nil { + c.poolDecompressor.Put(z) + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { + n, err = z.Reader.Read(p) + if err == io.EOF { + z.pool.Put(z) + } + return n, err +} + +// RFC1952 specifies that the last four bytes "contains the size of +// the original (uncompressed) input data modulo 2^32." +// gRPC has a max message size of 2GB so we don't need to worry about wraparound. 
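A sketch of requesting gzip compression per RPC: importing this package registers the compressor, and grpc.UseCompressor selects it by name.

package gzipexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // the import registers the compressor
)

// Gzipped can be passed to any generated client method, for example
// client.SayHello(ctx, req, Gzipped).
var Gzipped grpc.CallOption = grpc.UseCompressor(gzip.Name)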
+func (c *compressor) DecompressedSize(buf []byte) int { + last := len(buf) + if last < 4 { + return -1 + } + return int(binary.LittleEndian.Uint32(buf[last-4 : last])) +} + +func (c *compressor) Name() string { + return Name +} + +type compressor struct { + poolCompressor sync.Pool + poolDecompressor sync.Pool +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 000000000..ceec319dd --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "fmt" + + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" +) + +// Name is the name registered for the proto codec. +const Name = "proto" + +func init() { + encoding.RegisterCodecV2(&codecV2{}) +} + +// codecV2 is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} + +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { + vv := messageV2Of(v) + if vv == nil { + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) + } + + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil +} + +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { + vv := messageV2Of(v) + if vv == nil { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) +} + +func messageV2Of(v any) proto.Message { + switch v := v.(type) { + case protoadapt.MessageV1: + return protoadapt.MessageV2Of(v) + case protoadapt.MessageV2: + return v + } + + return nil +} + +func (c *codecV2) Name() string { + return Name +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 000000000..1d827dd5d --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through the global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to be + // attached to metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to be attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for an int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. 
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if the MetricDescriptor is not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+	return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+	if registeredMetrics[name] {
+		logger.Fatalf("metric %v already registered", name)
+	}
+	registeredMetrics[name] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(name)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
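+//
+// A minimal registration sketch (illustrative; the metric name, unit, and
+// label shown here are placeholders, not metrics defined by gRPC):
+//
+//	var attempts = RegisterInt64Count(MetricDescriptor{
+//		Name:        "example.com/client/attempts",
+//		Description: "Number of client call attempts.",
+//		Unit:        "attempt",
+//		Labels:      []string{"grpc.method"},
+//		Default:     false,
+//	})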
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
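+//
+// Intended use in a test (illustrative):
+//
+//	cleanup := snapshotMetricsRegistryForTesting()
+//	defer cleanup()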
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	// Point the globals at fresh maps seeded with the current contents, so
+	// that tests can mutate the registry without affecting the originals.
+	registeredMetrics = make(map[Metric]bool)
+	metricsRegistry = make(map[Metric]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 000000000..3221f7a63
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from the metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+	return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
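+//
+// For example (illustrative; the metric names are placeholders):
+//
+//	m := NewMetrics("example.metric.a").Add("example.metric.b")
+//	// m contains both metrics; the original set is unchanged.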
+func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 000000000..f1ae080dc --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,115 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// componentData records the settings for a component. +type componentData struct { + name string +} + +var cache = map[string]*componentData{} + +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + InfoDepth(depth+1, args...) +} + +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + WarningDepth(depth+1, args...) +} + +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + ErrorDepth(depth+1, args...) +} + +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) + FatalDepth(depth+1, args...) +} + +func (c *componentData) Info(args ...any) { + c.InfoDepth(1, args...) +} + +func (c *componentData) Warning(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Error(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatal(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) Infof(format string, args ...any) { + c.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Warningf(format string, args ...any) { + c.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Errorf(format string, args ...any) { + c.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Fatalf(format string, args ...any) { + c.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (c *componentData) Infoln(args ...any) { + c.InfoDepth(1, args...) 
+} + +func (c *componentData) Warningln(args ...any) { + c.WarningDepth(1, args...) +} + +func (c *componentData) Errorln(args ...any) { + c.ErrorDepth(1, args...) +} + +func (c *componentData) Fatalln(args ...any) { + c.FatalDepth(1, args...) +} + +func (c *componentData) V(l int) bool { + return V(l) +} + +// Component creates a new component and returns it for logging. If a component +// with the name already exists, nothing will be created and it will be +// returned. SetLoggerV2 will panic if it is called with a logger created by +// Component. +func Component(componentName string) DepthLoggerV2 { + if cData, ok := cache[componentName]; ok { + return cData + } + c := &componentData{componentName} + cache[componentName] = c + return c +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 000000000..db320105e --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog + +import ( + "os" + + "google.golang.org/grpc/grpclog/internal" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return internal.LoggerV2Impl.V(l) +} + +// Info logs to the INFO log. +func Info(args ...any) { + internal.LoggerV2Impl.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...any) { + internal.LoggerV2Impl.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...any) { + internal.LoggerV2Impl.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...any) { + internal.LoggerV2Impl.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...any) { + internal.LoggerV2Impl.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...any) { + internal.LoggerV2Impl.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...any) { + internal.LoggerV2Impl.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...any) { + internal.LoggerV2Impl.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...any) { + internal.LoggerV2Impl.Errorln(args...) +} + +// Fatal logs to the FATAL log. 
Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...any) { + internal.LoggerV2Impl.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calls os.Exit() with exit code 1. +func Fatalf(format string, args ...any) { + internal.LoggerV2Impl.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calls os.Exit() with exit code 1. +func Fatalln(args ...any) { + internal.LoggerV2Impl.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...any) { + internal.LoggerV2Impl.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...any) { + internal.LoggerV2Impl.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...any) { + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) +} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 000000000..59c03bc14 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 000000000..e524fdd40 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) 
+} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go new file mode 100644 index 000000000..07df71e98 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. 
Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. 
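+//
+// For example, constructing a JSON-formatted logger at verbosity 2
+// (illustrative sketch; the writers chosen here are placeholders):
+//
+//	l := NewLoggerV2(os.Stdout, os.Stderr, os.Stderr, LoggerV2Config{
+//		Verbosity:  2,
+//		FormatJSON: true,
+//	})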
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 000000000..4b2035857 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/grpclog/internal" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger internal.Logger + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 000000000..892dc13d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "os" + "strconv" + "strings" + + "google.golang.org/grpc/grpclog/internal" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 internal.LoggerV2 + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + if _, ok := l.(*componentData); ok { + panic("cannot use component logger as grpclog logger") + } + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. 
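+//
+// A caller would typically construct a logger and install it with SetLoggerV2
+// (illustrative sketch, written from the caller's perspective):
+//
+//	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr))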
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, + }) +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 000000000..d92335445 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,308 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method. +) + +// Enum value maps for HealthCheckResponse_ServingStatus. +var ( + HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", + } + HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, + } +) + +func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus { + p := new(HealthCheckResponse_ServingStatus) + *p = x + return p +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor() +} + +func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType { + return &file_grpc_health_v1_health_proto_enumTypes[0] +} + +func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead. +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0} +} + +type HealthCheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckRequest) ProtoMessage() {} + +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheckRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +type HealthCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckResponse) ProtoMessage() {} + +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if x != nil { + return x.Status + } + return HealthCheckResponse_UNKNOWN +} + +var File_grpc_health_v1_health_proto protoreflect.FileDescriptor + +var file_grpc_health_v1_health_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, + 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, + 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, + 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_health_v1_health_proto_rawDescOnce sync.Once + file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc +) + +func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { + file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + }) + return file_grpc_health_v1_health_proto_rawDescData +} + +var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_goTypes = []any{ + (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus + (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest + (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse +} +var file_grpc_health_v1_health_proto_depIdxs = []int32{ + 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus + 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_grpc_health_v1_health_proto_init() } +func file_grpc_health_v1_health_proto_init() { + if File_grpc_health_v1_health_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*HealthCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*HealthCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_health_v1_health_proto_goTypes, + DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs, + EnumInfos: file_grpc_health_v1_health_proto_enumTypes, + MessageInfos: file_grpc_health_v1_health_proto_msgTypes, + }.Build() + File_grpc_health_v1_health_proto = out.File + file_grpc_health_v1_health_proto_rawDesc = nil + file_grpc_health_v1_health_proto_goTypes = nil + file_grpc_health_v1_health_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 000000000..f96b8ab49 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,234 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" +) + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. +type HealthClient interface { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. 
If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. +type HealthServer interface { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. 
+ // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error +} + +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} + +func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (UnimplementedHealthServer) testEmbeddedByValue() {} + +// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HealthServer will +// result in compilation errors. +type UnsafeHealthServer interface { + mustEmbedUnimplementedHealthServer() +} + +func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Health_ServiceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_Check_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] + +// Health_ServiceDesc is the grpc.ServiceDesc for Health service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Health_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 000000000..877d78fc3 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. 
cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server any + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req any) (any, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. 
+ IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go new file mode 100644 index 000000000..b15cf482d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff implement the backoff strategy for gRPC. +// +// This is kept in internal until the gRPC project decides whether or not to +// allow alternative backoff strategies. +package backoff + +import ( + "context" + "errors" + "math/rand" + "time" + + grpcbackoff "google.golang.org/grpc/backoff" +) + +// Strategy defines the methodology for backing off after a grpc connection +// failure. +type Strategy interface { + // Backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + Backoff(retries int) time.Duration +} + +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} + +// Exponential implements exponential backoff algorithm as defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +type Exponential struct { + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config +} + +// Backoff returns the amount of time to wait before the next retry given the +// number of retries. +func (bc Exponential) Backoff(retries int) time.Duration { + if retries == 0 { + return bc.Config.BaseDelay + } + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.Config.Multiplier + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. 
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 000000000..13821a926 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. + continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. 
+ return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000..73bb4c4ee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,419 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. 
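+// The outgoing current balancer is closed on its own goroutine while holding
+// currentMu, so that its Close cannot race with an in-flight
+// UpdateSubConnState delivery (see the currentMu comment above).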
+func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return nil, errBalancerClosed + } + bw := &balancerWrapper{ + builder: builder, + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return nil, balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return bw, nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. 
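+	// That is the pending balancer if one exists, or the current one otherwise.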
+ balToUpdate := gsb.latestBalancer() + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. + state.BalancerConfig = gsbCfg.childConfig + } + + if balToUpdate == nil { + return errBalancerClosed + } + + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + gsb.mu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) +} + +// Close closes any active child balancers. 
+func (gsb *Balancer) Close() {
+	gsb.mu.Lock()
+	gsb.closed = true
+	currentBalancerToClose := gsb.balancerCurrent
+	gsb.balancerCurrent = nil
+	pendingBalancerToClose := gsb.balancerPending
+	gsb.balancerPending = nil
+	gsb.mu.Unlock()
+
+	currentBalancerToClose.Close()
+	pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/shutdown
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+	balancer.Balancer
+	gsb     *Balancer
+	builder balancer.Builder
+
+	lastState balancer.State
+	subconns  map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held. Does not panic with a nil
+// receiver.
+func (bw *balancerWrapper) Close() {
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() is
+	// impossible to be called concurrently with the write in SwitchTo(). The
+	// callsites of Close() for this balancer in Graceful Switch Balancer will
+	// never be called until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		sc.Shutdown()
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// In the case that the current balancer exits READY, and there is a pending
+		// balancer, you can forward the pending balancer's cached State up to
+		// ClientConn and swap the pending into the current. This is because there
+		// is no reason to gracefully switch from and keep using the old policy as
+		// the ClientConn is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the ClientConn. Ignoring
+		// state + picker from the current would cause undefined behavior and cause
+		// the system to behave incorrectly from the current LB policy's perspective.
+		// Also, the current LB is still being used by grpc to choose SubConns per
+		// RPC, and thus should use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		sc.Shutdown()
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
+	if bw != bw.gsb.latestBalancer() {
+		return
+	}
+	bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
+
+func (bw *balancerWrapper) Target() string {
+	return bw.gsb.cc.Target()
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 000000000..94a08d687
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) any
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
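+//
+// For illustration only, a sketch of how a parser might be registered at
+// startup and then consumed here (loadParser and the x-server-load metadata
+// key are hypothetical, not part of this package):
+//
+//	type loadParser struct{}
+//
+//	func (loadParser) Parse(md metadata.MD) any {
+//		return md.Get("x-server-load")
+//	}
+//
+//	func init() { SetParser(loadParser{}) }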
+func Parse(md metadata.MD) any { + if parser == nil { + return nil + } + return parser.Parse(md) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go new file mode 100644 index 000000000..755fdebc1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -0,0 +1,192 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package binarylog implementation binary logging as defined in +// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md. +package binarylog + +import ( + "fmt" + "os" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" +) + +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type Logger interface { + GetMethodLogger(methodName string) MethodLogger +} + +// binLogger is the global binary logger for the binary. One of this should be +// built at init time from the configuration (environment variable or flags). +// +// It is used to get a MethodLogger for each individual method. +var binLogger Logger + +// SetLogger sets the binary logger. +// +// Only call this at init time. +func SetLogger(l Logger) { + binLogger = l +} + +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.GetMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { + // Max length of header and message. + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} +} + +type logger struct { + config LoggerConfig +} + +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. 
+func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { + return fmt.Errorf("conflicting global rules found") + } + l.config.All = ml + return nil +} + +// Set method logger for "service/*". +// +// New MethodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) + } + l.config.Services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) + } + l.config.Methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.config.Blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.config.Methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) + } + l.config.Blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the MethodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each MethodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func (l *logger) GetMethodLogger(methodName string) MethodLogger { + s, m, err := grpcutil.ParseMethod(methodName) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) + } + if _, ok := l.config.Blacklist[s+"/"+m]; ok { + return nil + } + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) + } + if l.config.All == nil { + return nil + } + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go new file mode 100644 index 000000000..1ee00a39a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains exported variables/functions that are exported for testing +// only. +// +// An ideal way for this would be to put those in a *_test.go but in binarylog +// package. But this doesn't work with staticcheck with go module. Error was: +// "MdToMetadataProto not declared by package binarylog". This could be caused +// by the way staticcheck looks for files for a certain package, which doesn't +// support *_test.go files. +// +// Move those to binary_test.go when staticcheck is fixed. + +package binarylog + +var ( + // AllLogger is a logger that logs all headers/messages for all RPCs. It's + // for testing only. + AllLogger = NewLoggerFromConfigString("*") + // MdToMetadataProto converts metadata to a binary logging proto message. + // It's for testing only. + MdToMetadataProto = mdToMetadataProto + // AddrToProto converts an address to a binary logging proto message. It's + // for testing only. + AddrToProto = addrToProto +) diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go new file mode 100644 index 000000000..f9e80e27a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// NewLoggerFromConfigString reads the string and build a logger. It can be used +// to build a new logger and assign it to binarylog.Logger. +// +// Example filter config strings: +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. +// +// If two configs exist for one certain method or service, the one specified +// later overrides the previous config. 
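+//
+// For example, the following builds a logger that fully logs every method in
+// service Foo except method /Foo/Bar:
+//
+//	logger := NewLoggerFromConfigString("Foo/*,-Foo/Bar")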
+func NewLoggerFromConfigString(s string) Logger { + if s == "" { + return nil + } + l := newEmptyLogger() + methods := strings.Split(s, ",") + for _, method := range methods { + if err := l.fillMethodLoggerWithConfigString(method); err != nil { + grpclogLogger.Warningf("failed to parse binary log config: %v", err) + return nil + } + } + return l +} + +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds +// it to the right map in the logger. +func (l *logger) fillMethodLoggerWithConfigString(config string) error { + // "" is invalid. + if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. + if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". + headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". 
+func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 000000000..966932891 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,446 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "context" + "net" + "strings" + "sync/atomic" + "time" + + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. 
+func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this pluggable. +} + +// NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ + headerMaxLen: h, + messageMaxLen: m, + + callID: idGen.next(), + idWithinCallGen: &callIDGenerator{}, + + sink: DefaultSink, // TODO(blog): make it pluggable. + } +} + +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in TruncatingMethodLogger as possible. +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { + m := c.toProto() + timestamp := timestamppb.Now() + m.Timestamp = timestamp + m.CallId = ml.callID + m.SequenceIdWithinCall = ml.idWithinCallGen.next() + + switch pay := m.Payload.(type) { + case *binlogpb.GrpcLogEntry_ClientHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) + case *binlogpb.GrpcLogEntry_ServerHeader: + m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) + case *binlogpb.GrpcLogEntry_Message: + m.PayloadTruncated = ml.truncateMessage(pay.Message) + } + return m +} + +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) +} + +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { + if ml.headerMaxLen == maxUInt { + return false + } + var ( + bytesLimit = ml.headerMaxLen + index int + ) + // At the end of the loop, index will be the first entry where the total + // size is greater than the limit: + // + // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr. + for ; index < len(mdPb.Entry); index++ { + entry := mdPb.Entry[index] + if entry.Key == "grpc-trace-bin" { + // "grpc-trace-bin" is a special key. It's kept in the log entry, + // but not counted towards the size limit. + continue + } + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) + if currentEntryLen > bytesLimit { + break + } + bytesLimit -= currentEntryLen + } + truncated = index < len(mdPb.Entry) + mdPb.Entry = mdPb.Entry[:index] + return truncated +} + +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { + if ml.messageMaxLen == maxUInt { + return false + } + if ml.messageMaxLen >= uint64(len(msgPb.Data)) { + return false + } + msgPb.Data = msgPb.Data[:ml.messageMaxLen] + return true +} + +// LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
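+//
+// The concrete configs implementing it are ClientHeader, ServerHeader,
+// ClientMessage, ServerMessage, ClientHalfClose, ServerTrailer, and Cancel,
+// all defined below.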
+type LogEntryConfig interface { + toProto() *binlogpb.GrpcLogEntry +} + +// ClientHeader configs the binary log entry to be a ClientHeader entry. +type ClientHeader struct { + OnClientSide bool + Header metadata.MD + MethodName string + Authority string + Timeout time.Duration + // PeerAddr is required only when it's on server side. + PeerAddr net.Addr +} + +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { + // This function doesn't need to set all the fields (e.g. seq ID). The Log + // function will set the fields when necessary. + clientHeader := &binlogpb.ClientHeader{ + Metadata: mdToMetadataProto(c.Header), + MethodName: c.MethodName, + Authority: c.Authority, + } + if c.Timeout > 0 { + clientHeader.Timeout = durationpb.New(c.Timeout) + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: clientHeader, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ServerHeader configs the binary log entry to be a ServerHeader entry. +type ServerHeader struct { + OnClientSide bool + Header metadata.MD + // PeerAddr is required only when it's on client side. + PeerAddr net.Addr +} + +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ + Metadata: mdToMetadataProto(c.Header), + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// ClientMessage configs the binary log entry to be a ClientMessage entry. +type ClientMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. + Message any +} + +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerMessage configs the binary log entry to be a ServerMessage entry. +type ServerMessage struct { + OnClientSide bool + // Message can be a proto.Message or []byte. Other messages formats are not + // supported. 
+ Message any +} + +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { + var ( + data []byte + err error + ) + if m, ok := c.Message.(proto.Message); ok { + data, err = proto.Marshal(m) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err) + } + } else if b, ok := c.Message.([]byte); ok { + data = b + } else { + grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ + Length: uint32(len(data)), + Data: data, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry. +type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. + PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. 
+ return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &binlogpb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = binlogpb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = binlogpb.Address_TYPE_IPV6 + } else { + ret.Type = binlogpb.Address_TYPE_UNKNOWN + // Do not set address and port fields. + break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = binlogpb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = binlogpb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 000000000..9ea598b14 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "io" + "sync" + "time" + + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/protobuf/proto" +) + +var ( + // DefaultSink is the sink where the logs will be written to. It's exported + // for the binarylog package to update. + DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// Sink writes log entry into the binary log sink. +// +// sink is a copy of the exported binarylog.Sink, to avoid circular dependency. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*binlogpb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. + Close() error +} + +type noopSink struct{} + +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } + +// newWriterSink creates a binary log sink with the given writer. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// No buffer is done, Close() doesn't try to close the writer. 
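+//
+// A matching reader for this framing could look like the following
+// (illustrative sketch only, assuming an io.Reader r positioned at an entry
+// boundary):
+//
+//	var hdr [4]byte
+//	if _, err := io.ReadFull(r, hdr[:]); err != nil {
+//		return err
+//	}
+//	buf := make([]byte, binary.BigEndian.Uint32(hdr[:]))
+//	if _, err := io.ReadFull(r, buf); err != nil {
+//		return err
+//	}
+//	// buf now holds one marshaled GrpcLogEntry.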
+func newWriterSink(w io.Writer) Sink { + return &writerSink{out: w} +} + +type writerSink struct { + out io.Writer +} + +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { + b, err := proto.Marshal(e) + if err != nil { + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err + } + hdr := make([]byte, 4) + binary.BigEndian.PutUint32(hdr, uint32(len(b))) + if _, err := ws.out.Write(hdr); err != nil { + return err + } + if _, err := ws.out.Write(b); err != nil { + return err + } + return nil +} + +func (ws *writerSink) Close() error { return nil } + +type bufferedSink struct { + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} +} + +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { + fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. + fs.startFlushGoroutine() + fs.flusherStarted = true + } + if err := fs.out.Write(e); err != nil { + return err + } + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + done: make(chan struct{}), + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..11f91668a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,116 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. 
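
The writerSink framing above (a 4-byte big-endian length prefix followed by the marshaled GrpcLogEntry) is straightforward to consume from other tooling. A hedged, stdlib-only sketch of a reader for that format; payloads are returned undecoded so the example carries no proto dependency:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readEntries splits a stream written by writerSink into its raw,
// still-marshaled entries: read 4 length bytes, then that many payload bytes.
func readEntries(r io.Reader) ([][]byte, error) {
	var entries [][]byte
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(r, hdr); err == io.EOF {
			return entries, nil // clean end of stream
		} else if err != nil {
			return entries, err
		}
		msg := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(r, msg); err != nil {
			return entries, err
		}
		entries = append(entries, msg)
	}
}

func main() {
	// Two fake payloads written with the same framing used by writerSink.
	var buf bytes.Buffer
	for _, payload := range [][]byte{[]byte("first"), []byte("second")} {
		hdr := make([]byte, 4)
		binary.BigEndian.PutUint32(hdr, uint32(len(payload)))
		buf.Write(hdr)
		buf.Write(payload)
	}
	entries, err := readEntries(&buf)
	fmt.Println(len(entries), err) // 2 <nil>
}
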
+package buffer + +import ( + "errors" + "sync" +) + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan any + closed bool + closing bool + mu sync.Mutex + backlog []any +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan any, 1)} +} + +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t any) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return errBufferClosed + } + if len(b.backlog) == 0 { + select { + case b.c <- t: + return nil + default: + } + } + b.backlog = append(b.backlog, t) + return nil +} + +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } else if b.closing && !b.closed { + close(b.c) + } +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { + return b.c +} + +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return + } + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 000000000..d7e9e1d54 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
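
A typical consumer loop for Unbounded follows. The package is internal to the grpc module, so the import below is illustrative only; what matters is the contract: Put never blocks, and the reader calls Load after every successful receive to move the next backlog item onto the channel:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func main() {
	b := buffer.NewUnbounded()
	for i := 0; i < 3; i++ {
		b.Put(i) // never blocks; overflow is queued in the backlog slice
	}
	b.Close()
	// The channel returned by Get() is closed once Close() has been called
	// and the backlog is fully drained, so this range terminates.
	for v := range b.Get() {
		fmt.Println(v.(int)) // values come back in FIFO order: 0, 1, 2
		b.Load()
	}
}
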
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"google.golang.org/grpc/connectivity"
+)
+
+// Channel represents a channel within channelz, which includes metrics and
+// internal channelz data, such as channelz id, child list, etc.
+type Channel struct {
+	Entity
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human-readable reference string of this channel.
+	RefName string
+
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	Parent      *Channel
+	trace       *ChannelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+
+	ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+func (c *Channel) String() string {
+	if c.Parent == nil {
+		return fmt.Sprintf("Channel #%d", c.ID)
+	}
+	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+	return c.ID
+}
+
+func (c *Channel) SubChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.subChans)
+}
+
+func (c *Channel) NestedChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.nestedChans)
+}
+
+func (c *Channel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return c.trace.copy()
+}
+
+type ChannelMetrics struct {
+	// The current connectivity state of the channel.
+	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to. May be absent.
+	Target atomic.Pointer[string]
+	// The number of calls started on the channel.
+	CallsStarted atomic.Int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c. For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+	c.State.Store(o.State.Load())
+	c.Target.Store(o.Target.Load())
+	c.CallsStarted.Store(o.CallsStarted.Load())
+	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	c.CallsFailed.Store(o.CallsFailed.Load())
+	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool { + oc, ok := o.(*ChannelMetrics) + if !ok { + return false + } + if (c.State.Load() == nil) != (oc.State.Load() == nil) { + return false + } + if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() { + return false + } + if (c.Target.Load() == nil) != (oc.Target.Load() == nil) { + return false + } + if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() { + return false + } + return c.CallsStarted.Load() == oc.CallsStarted.Load() && + c.CallsFailed.Load() == oc.CallsFailed.Load() && + c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() && + c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load() +} + +func strFromPointer(s *string) string { + if s == nil { + return "" + } + return *s +} + +func (c *ChannelMetrics) String() string { + return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", + c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), + ) +} + +func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { + c := &ChannelMetrics{} + c.State.Store(&state) + c.Target.Store(&target) + c.CallsStarted.Store(started) + c.CallsSucceeded.Store(succeeded) + c.CallsFailed.Store(failed) + c.LastCallStartedTimestamp.Store(timestamp) + return c +} + +func (c *Channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *SubChannel: + c.subChans[id] = v.RefName + case *Channel: + c.nestedChans[id] = v.RefName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *Channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *Channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *Channel) getParentID() int64 { + if c.Parent == nil { + return -1 + } + return c.Parent.ID +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *Channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.Parent != nil { + c.Parent.deleteChild(c.ID) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. 
+//
+// It returns a bool to indicate whether the channel can be safely deleted from map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+	return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+// parent's child list.
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+// will return an entry-not-found error.
+func (c *Channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(c.ID)
+	c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+	return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *Channel) getRefName() string {
+	return c.RefName
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 000000000..64c791953
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,395 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if
+	// the child list is not empty, then deletion from the database is on hold until
+	// the last child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether the child list is now empty. If both conditions are met, then
+	// it deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A zero-valued parent ID means no parent.
+	getParentID() int64
+	Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is already held.
+//
+// A method of the second type must always be called from within a method of the first type.
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + channels map[int64]*Channel + subChannels map[int64]*SubChannel + sockets map[int64]*Socket + servers map[int64]*Server +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*Channel), + subChannels: make(map[int64]*SubChannel), + sockets: make(map[int64]*Socket), + servers: make(map[int64]*Server), + } +} + +func (c *channelMap) addServer(id int64, s *Server) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.servers[id] = s +} + +func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else if p := c.channels[pid]; p != nil { + p.addChild(id, cn) + } else { + logger.Infof("channel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + sc.trace.cm = c + c.subChannels[id] = sc + if p := c.channels[pid]; p != nil { + p.addChild(id, sc) + } else { + logger.Infof("subchannel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSocket(s *Socket) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.sockets[s.ID] = s + if s.Parent == nil { + logger.Infof("normal socket %d has no parent", s.ID) + } + s.Parent.(entry).addChild(s.ID, s) +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children and until no other +// entity's channel trace references it. It may lead to a chain of entry +// deletion. For example, deleting the last socket of a gracefully shutting down +// server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.findEntry(id).triggerDelete() +} + +// tracedChannel represents tracing operations which are present on both +// channels and subChannels. +type tracedChannel interface { + getChannelTrace() *ChannelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + if v, ok := c.channels[id]; ok { + return v + } + if v, ok := c.subChannels[id]; ok { + return v + } + if v, ok := c.servers[id]; ok { + return v + } + if v, ok := c.sockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// +// deleteEntry deletes an entry from the channelMap. Before calling this method, +// caller must check this entry is ready to be deleted, i.e removeEntry() has +// been called on it, and no children still exist. 
+func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. 
+ return nil, true + } + svrskts := svr.sockets + ids := make([]int64, 0, len(svrskts)) + sks := make([]*Socket, 0, min(len(svrskts), maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + end := true + for _, v := range ids[idx:] { + if len(sks) == maxResults { + end = false + break + } + if ns, ok := c.sockets[v]; ok { + sks = append(sks, ns) + } + } + return sks, end +} + +func (c *channelMap) getChannel(id int64) *Channel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.channels[id] +} + +func (c *channelMap) getSubChannel(id int64) *SubChannel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.subChannels[id] +} + +func (c *channelMap) getSocket(id int64) *Socket { + c.mu.RLock() + defer c.mu.RUnlock() + return c.sockets[id] +} + +func (c *channelMap) getServer(id int64) *Server { + c.mu.RLock() + defer c.mu.RUnlock() + return c.servers[id] +} + +type dummyEntry struct { + // dummyEntry is a fake entry to handle entry not found case. + idNotFound int64 + Entity +} + +func (d *dummyEntry) String() string { + return fmt.Sprintf("non-existent entity #%d", d.idNotFound) +} + +func (d *dummyEntry) ID() int64 { return d.idNotFound } + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race + // condition. For example, there could be a race between ClientConn.Close() + // info being propagated to addrConn and http2Client. ClientConn.Close() + // cancel the context and result in http2Client to error. The error info is + // then caught by transport monitor and before addrConn.tearDown() is called + // in side ClientConn.Close(). Therefore, the addrConn will create a new + // transport. And when registering the new transport in channelz, its parent + // addrConn could have already been torn down and deleted from channelz + // tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// Entity is implemented by all channelz types. +type Entity interface { + isEntity() + fmt.Stringer + id() int64 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 000000000..078bb8123 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,230 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines internal APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +package channelz + +import ( + "sync/atomic" + "time" + + "google.golang.org/grpc/internal" +) + +var ( + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + atomic.StoreInt32(&curState, 1) +} + +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.LoadInt32(&curState) == 1 +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetrics, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) +} + +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) +} + +// GetSubChannel returns the SubChannel for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) +} + +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). 
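
The id-cursor-plus-end-flag return style used by these getters supports simple pagination. A sketch that pages through every top-level channel; it is illustrative only, since the channelz package is internal to the grpc module:

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

// dumpTopChannels pages through all registered top-level channels: each call
// returns channels with id at or above the cursor, plus a flag saying whether
// the listing is complete.
func dumpTopChannels() {
	id := int64(0)
	for {
		channels, end := channelz.GetTopChannels(id, 0) // 0 means EntriesPerPage
		for _, ch := range channels {
			fmt.Println(ch.String())
		}
		if end || len(channels) == 0 {
			return
		}
		id = channels[len(channels)-1].ID + 1 // resume just past the last id seen
	}
}

func main() {
	channelz.TurnOn()
	for i := 0; i < 3; i++ {
		channelz.RegisterChannel(nil, fmt.Sprintf("target-%d", i))
	}
	dumpTopChannels() // Channel #1, Channel #2, Channel #3
}
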
+func GetServer(id int64) *Server { + return db.getServer(id) +} + +// RegisterChannel registers the given channel c in the channelz database with +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(parent *Channel, target string) *Channel { + id := IDGen.genID() + + if !IsOn() { + return &Channel{ID: id} + } + + isTopChannel := parent == nil + + cn := &Channel{ + ID: id, + RefName: target, + nestedChans: make(map[int64]string), + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, + } + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn +} + +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { + id := IDGen.genID() + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, + } + + if !IsOn() { + return sc + } + + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) + return sc +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(ref string) *Server { + id := IDGen.genID() + if !IsOn() { + return &Server{ID: id} + } + + svr := &Server{ + RefName: ref, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + ID: id, + } + db.addServer(id, svr) + return svr +} + +// RegisterSocket registers the given normal socket s in channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) + } + return skt +} + +// RemoveEntry removes an entry with unique channelz tracking id to be id from +// channelz database. +// +// If channelz is not turned ON, this function is a no-op. +func RemoveEntry(id int64) { + if !IsOn() { + return + } + db.removeEntry(id) +} + +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { + id int64 +} + +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *IDGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. 
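
Putting the registration functions together, a sketch of the parent/child lifecycle (the target and address strings are made up; the package is internal to grpc):

package main

import "google.golang.org/grpc/internal/channelz"

func main() {
	channelz.TurnOn()

	// A top-level channel (parent == nil) with one subchannel under it.
	top := channelz.RegisterChannel(nil, "example.com:443")
	sub := channelz.RegisterSubChannel(top, "10.0.0.1:443")

	// Removing the parent first only marks it for deletion: an entity is
	// fully deleted only after triggerDelete has been called on it and all
	// of its children are gone, so the order below still cleans up both.
	channelz.RemoveEntry(top.ID)
	channelz.RemoveEntry(sub.ID)
}
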
+type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..ee4d72125 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go new file mode 100644 index 000000000..cdfc49d6e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// Server is the channelz representation of a server. 
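
These helpers are the usual way trace events are recorded: the caller passes a DepthLoggerV2 and the entity, and the message is both logged and, when channelz is on, appended to the entity's trace. An illustrative sketch (internal package, hypothetical messages):

package main

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()
	logger := grpclog.Component("example") // any DepthLoggerV2 works

	ch := channelz.RegisterChannel(nil, "example.com:443")

	// Each call logs through the supplied logger and adds a trace event of
	// the matching severity (CtInfo, CtWarning) to ch's channel trace.
	channelz.Infof(logger, ch, "resolver returned %d addresses", 3)
	channelz.Warning(logger, ch, "address list is stale")
}
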
+type Server struct { + Entity + ID int64 + RefName string + + ServerMetrics ServerMetrics + + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + cm *channelMap +} + +// ServerMetrics defines a struct containing metrics for servers. +type ServerMetrics struct { + // The number of incoming calls started on the server. + CallsStarted atomic.Int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed atomic.Int64 + // The last time a call was started on the server. + LastCallStartedTimestamp atomic.Int64 +} + +// NewServerMetricsForTesting returns an initialized ServerMetrics. +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics { + sm := &ServerMetrics{} + sm.CallsStarted.Store(started) + sm.CallsSucceeded.Store(succeeded) + sm.CallsFailed.Store(failed) + sm.LastCallStartedTimestamp.Store(timestamp) + return sm +} + +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { + sm.CallsStarted.Store(o.CallsStarted.Load()) + sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) + sm.CallsFailed.Store(o.CallsFailed.Load()) + sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// ListenSockets returns the listening sockets for s. +func (s *Server) ListenSockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(s.listenSockets) +} + +// String returns a printable description of s. +func (s *Server) String() string { + return fmt.Sprintf("Server #%d", s.ID) +} + +func (s *Server) id() int64 { + return s.ID +} + +func (s *Server) addChild(id int64, e entry) { + switch v := e.(type) { + case *Socket: + switch v.SocketType { + case SocketTypeNormal: + s.sockets[id] = v.RefName + case SocketTypeListen: + s.listenSockets[id] = v.RefName + } + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *Server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *Server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *Server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.ID) +} + +func (s *Server) getParentID() int64 { + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go new file mode 100644 index 000000000..fa64834b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). 
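
Because the ServerMetrics fields are atomics, call-path updates need no extra locking. A sketch of how a transport might bump them as RPCs start and finish (illustrative only; the package is internal to grpc):

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()
	srv := channelz.RegisterServer("my-server")

	// One RPC starts and completes successfully; each field is updated
	// independently with an atomic operation.
	srv.ServerMetrics.CallsStarted.Add(1)
	srv.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
	srv.ServerMetrics.CallsSucceeded.Add(1)

	fmt.Println(srv.ServerMetrics.CallsStarted.Load()) // 1
}
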
+type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. 
+ Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 000000000..3b88e4cba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go new file mode 100644 index 000000000..5ac73ff83 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). 
+func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go new file mode 100644 index 000000000..0e6e18e18 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -0,0 +1,47 @@ +//go:build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(uintptr) { + once.Do(func() { + logger.Warning("Channelz: socket options are not supported on non-linux environments") + }) +} + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go new file mode 100644 index 000000000..36b867403 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
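
GetSocketOption accepts any connection that implements syscall.Conn, which includes *net.TCPConn. A Linux-only sketch (the dial target is a placeholder):

//go:build linux

package main

import (
	"fmt"
	"net"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	c, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// channelz reads the options through the raw file descriptor; on
	// non-Linux builds GetSocketOption simply returns nil.
	if data := channelz.GetSocketOption(c); data != nil && data.TCPInfo != nil {
		fmt.Printf("rtt=%dus state=%d\n", data.TCPInfo.Rtt, data.TCPInfo.State)
	}
}
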
+ * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. 
+ CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 000000000..9deee7f65 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. 
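
A TraceEvent carrying a Parent is recorded twice: once on the child's own trace and once on the parent's, where the parent-side copy keeps a reference (via incrTraceRefCount) that pins the child's trace until the event rotates out. A sketch of that double recording (illustrative only; internal package, made-up names):

package main

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()
	logger := grpclog.Component("example")

	parent := channelz.RegisterChannel(nil, "example.com:443")
	sub := channelz.RegisterSubChannel(parent, "10.0.0.1:443")

	// The Desc goes on sub's trace; the Parent.Desc goes on parent's trace
	// together with a RefID/RefName pointing back at sub.
	channelz.AddTraceEvent(logger, sub, 0, &channelz.TraceEvent{
		Desc:     "Subchannel created",
		Severity: channelz.CtInfo,
		Parent: &channelz.TraceEvent{
			Desc:     "Child subchannel created",
			Severity: channelz.CtInfo,
		},
	})
}
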
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 000000000..25ade6230 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 000000000..2919632d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 000000000..f792fd22c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. 
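+//
+// Illustrative use (a sketch; `base` is any possibly-shared *tls.Config):
+//
+//	cfg := CloneTLSConfig(base) // never nil, even when base is nil
+//	cfg.ServerName = "override.example.com"
+//	cfg.NextProtos = AppendH2ToNextProtos(cfg.NextProtos)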
+func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 000000000..452985f8d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strconv" + "strings" +) + +var ( + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) +) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". 
+ return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 000000000..dd314cfb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 000000000..29f234acb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. 
+ // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") +) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000..7617be215 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // BufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go new file mode 100644 index 000000000..092ad187a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. 
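+//
+// As an illustrative sketch (not upstream documentation), a component-scoped
+// prefix logger can be built from the external package and used like so:
+//
+//	logger := NewPrefixLogger(grpclog.Component("channelz"), "[channelz] ")
+//	logger.Infof("entity %d created", 7) // logs: [channelz] entity 7 created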
+package grpclog
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods on a nil PrefixLogger log without any prefix.
+type PrefixLogger struct {
+	logger grpclog.DepthLoggerV2
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...any) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...any) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...any) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (pl *PrefixLogger) V(l int) bool {
+	if pl != nil {
+		return pl.logger.V(l)
+	}
+	return true
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
+	return &PrefixLogger{logger: logger, prefix: prefix}
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
new file mode 100644
index 000000000..19b9d6392
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+
+	"google.golang.org/grpc/internal/buffer"
+)
+
+// CallbackSerializer provides a mechanism to schedule callbacks in a
+// synchronized manner. It provides a FIFO guarantee on the order of execution
+// of scheduled callbacks. New callbacks can be scheduled by invoking the
+// TrySchedule() or ScheduleOr() methods.
+//
+// This type is safe for concurrent access.
+type CallbackSerializer struct {
+	// done is closed once the serializer is shut down completely, i.e. all
+	// scheduled callbacks are executed and the serializer has deallocated all
+	// its resources.
+	done chan struct{}
+
+	callbacks *buffer.Unbounded
+}
+
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
+// context will be passed to the scheduled callbacks. Users should cancel the
+// provided context to shut down the CallbackSerializer. It is guaranteed that
+// no callbacks will be added once this context is canceled, and any pending
+// un-run callbacks will be executed before the serializer is shut down.
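+//
+// A minimal usage sketch (illustrative only):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	cs := NewCallbackSerializer(ctx)
+//	cs.TrySchedule(func(ctx context.Context) { /* runs first */ })
+//	cs.TrySchedule(func(ctx context.Context) { /* runs second, after the first */ })
+//	cancel()    // stop accepting new callbacks...
+//	<-cs.Done() // ...and wait for pending ones to finish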
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
+	cs := &CallbackSerializer{
+		done:      make(chan struct{}),
+		callbacks: buffer.NewUnbounded(),
+	}
+	go cs.run(ctx)
+	return cs
+}
+
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+	cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+	if cs.callbacks.Put(f) != nil {
+		onFailure()
+	}
+}
+
+func (cs *CallbackSerializer) run(ctx context.Context) {
+	defer close(cs.done)
+
+	// TODO: when Go 1.21 is the oldest supported version, this loop and Close
+	// can be replaced with:
+	//
+	//   context.AfterFunc(ctx, cs.callbacks.Close)
+	for ctx.Err() == nil {
+		select {
+		case <-ctx.Done():
+			// Do nothing here. Next iteration of the for loop will not happen,
+			// since ctx.Err() would be non-nil.
+		case cb := <-cs.callbacks.Get():
+			cs.callbacks.Load()
+			cb.(func(context.Context))(ctx)
+		}
+	}
+
+	// Close the buffer to prevent new callbacks from being added.
+	cs.callbacks.Close()
+
+	// Run all pending callbacks.
+	for cb := range cs.callbacks.Get() {
+		cs.callbacks.Load()
+		cb.(func(context.Context))(ctx)
+	}
+}
+
+// Done returns a channel that is closed after the context passed to
+// NewCallbackSerializer is canceled and all callbacks have been executed.
+func (cs *CallbackSerializer) Done() <-chan struct{} {
+	return cs.done
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 000000000..fbe697c37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcsync implements additional synchronization primitives built upon
+// the sync package.
+package grpcsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// Event represents a one-time event that may occur in the future.
+type Event struct {
+	fired int32
+	c     chan struct{}
+	o     sync.Once
+}
+
+// Fire causes e to complete. It is safe to call multiple times, and
+// concurrently. It returns true iff this call to Fire caused the signaling
+// channel returned by Done to close.
+func (e *Event) Fire() bool {
+	ret := false
+	e.o.Do(func() {
+		atomic.StoreInt32(&e.fired, 1)
+		close(e.c)
+		ret = true
+	})
+	return ret
+}
+
+// Done returns a channel that will be closed when Fire is called.
+func (e *Event) Done() <-chan struct{} {
+	return e.c
+}
+
+// HasFired returns true if Fire has been called.
+func (e *Event) HasFired() bool {
+	return atomic.LoadInt32(&e.fired) == 1
+}
+
+// NewEvent returns a new, ready-to-use Event.
+func NewEvent() *Event {
+	return &Event{c: make(chan struct{})}
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
new file mode 100644
index 000000000..6635f7bca
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
@@ -0,0 +1,32 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"sync"
+)
+
+// OnceFunc returns a function wrapping f which ensures f is only executed
+// once even if the returned function is executed multiple times.
+func OnceFunc(f func()) func() {
+	var once sync.Once
+	return func() {
+		once.Do(f)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
new file mode 100644
index 000000000..6d8c2f518
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+	"sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+	// OnMessage is invoked when a new message is published. Implementations
+	// must not block in this method.
+	OnMessage(msg any)
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.TrySchedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.TrySchedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 000000000..e8d866984 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. 
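+//
+// For example (illustrative): if "gzip" and "snappy" were registered, in that
+// order, IsCompressorNameRegistered("gzip") reports true and
+// RegisteredCompressors() returns "gzip,snappy", the form used for the
+// grpc-accept-encoding header.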
+func RegisteredCompressors() string { + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 000000000..b25b0baec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000..e2f948e8f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. 
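+//
+// An illustrative sketch (not upstream documentation) of two helpers defined
+// in this package:
+//
+//	service, method, err := ParseMethod("/helloworld.Greeter/SayHello")
+//	// service == "helloworld.Greeter", method == "SayHello", err == nil
+//
+//	timeout := EncodeDuration(time.Second) // "1000000u" (grpc-timeout format)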
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 000000000..6f22bd891
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+	return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists. The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+	md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 000000000..ec62b4775
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"errors"
+	"strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+func ParseMethod(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
+
+// baseContentType is the base content-type for gRPC. This is a valid
+// content-type on its own, but can also include a content-subtype such as
+// "proto" as a suffix after "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+// for more details.
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// Guaranteed since contentType != baseContentType and has the
+	// baseContentType prefix.
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// This will return true for "application/grpc+" or "application/grpc;",
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case.
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
+	}
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase.
+func ContentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
+	}
+	return baseContentType + "+" + contentSubtype
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
new file mode 100644
index 000000000..7a092b2b8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import "regexp"
+
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
+	re.Longest()
+	rem := re.FindString(text)
+	return len(rem) == len(text)
+}
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
new file mode 100644
index 000000000..fe49cb74c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -0,0 +1,278 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. 
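+	// (Note: lastCallEndTime-now is negative here, so the reset below schedules
+	// the timer to fire m.timeout after the most recent RPC completed.)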
+ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. 
+		atomic.AddInt32(&m.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+	// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if m.isClosed() || !m.actuallyIdle {
+		// This can happen in three scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and OnCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in OnCallBegin and get
+		//   here. The first one to get the lock would get the channel to exit idle.
+		// - Channel is not in idle mode, and the user calls Connect which calls
+		//   m.ExitIdleMode.
+		//
+		// In any case, there is nothing to do here.
+		return nil
+	}
+
+	if err := m.enforcer.ExitIdleMode(); err != nil {
+		return fmt.Errorf("failed to exit idle mode: %w", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	m.resetIdleTimerLocked(m.timeout)
+	return nil
+}
+
+// OnCallEnd is invoked at the end of every RPC.
+func (m *Manager) OnCallEnd() {
+	if m.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count. This count can temporarily go negative
+	// when the timer callback is in the process of moving the channel to idle
+	// mode, but one or more RPCs come in and complete before the timer callback
+	// can get done with the process of moving to idle mode.
+	atomic.AddInt32(&m.activeCallsCount, -1)
+}
+
+func (m *Manager) isClosed() bool {
+	return atomic.LoadInt32(&m.closed) == 1
+}
+
+func (m *Manager) Close() {
+	atomic.StoreInt32(&m.closed, 1)
+
+	m.idleMu.Lock()
+	if m.timer != nil {
+		m.timer.Stop()
+		m.timer = nil
+	}
+	m.idleMu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 000000000..7aae9240f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
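+//
+// Most of the hooks below are typed `any` and are assigned by other packages
+// at init time; callers type-assert to the signature noted in the trailing
+// comment. An illustrative sketch (assuming the hook has been set by the grpc
+// package):
+//
+//	if f, ok := CanonicalString.(func(codes.Code) string); ok {
+//		_ = f(codes.OK) // "OK"
+//	}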
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc any // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// KeepaliveMinServerPingTime is the minimum ping interval for servers.
+	// This must be 1s by default, but tests may wish to set it lower for
+	// convenience.
+	KeepaliveMinServerPingTime = time.Second
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig any // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfig.
+	// This function compares the config without rawJSON stripped, in case
+	// there's a difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder any // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+	// CanonicalString returns the canonical string of the code defined here:
+	// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	CanonicalString any // func (codes.Code) string
+	// IsRegisteredMethod returns whether the passed in method is registered as
+	// a method on the server.
+	IsRegisteredMethod any // func(*grpc.Server, string) bool
+	// ServerFromContext returns the server from the context.
+	ServerFromContext any // func(context.Context) *grpc.Server
+	// AddGlobalServerOptions adds an array of ServerOption that will be
+	// effective globally for newly created servers. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	AddGlobalServerOptions any // func(opt ...ServerOption)
+	// ClearGlobalServerOptions clears the array of extra ServerOption. This
+	// method is useful in testing and benchmarking.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+ ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). 
+	//
+	// TODO: Remove this function once the RLS env var is removed.
+	UnregisterRLSClusterSpecifierPluginForTesting func()
+
+	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
+	// purposes, regardless of the RBAC environment variable.
+	//
+	// TODO: Remove this function once the RBAC env var is removed.
+	RegisterRBACHTTPFilterForTesting func()
+
+	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
+	// testing purposes. This is needed because there is no way to unregister the
+	// HTTP Filter after registering it solely for testing purposes using
+	// RegisterRBACHTTPFilterForTesting().
+	//
+	// TODO: Remove this function once the RBAC env var is removed.
+	UnregisterRBACHTTPFilterForTesting func()
+
+	// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
+	ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
+
+	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
+	// metadata to RPCs.
+	GRPCResolverSchemeExtraMetadata = "xds"
+
+	// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+	EnterIdleModeForTesting any // func(*grpc.ClientConn)
+
+	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+
+	// ChannelzTurnOffForTesting disables the channelz service for testing
+	// purposes.
+	ChannelzTurnOffForTesting func()
+
+	// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+	// invoke resource-not-found error for the given resource type and name.
+	TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
+
+	// FromOutgoingContextRaw returns the un-merged, intermediary contents of
+	// metadata.rawMD.
+	FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
+
+	// UserSetDefaultScheme is set to true if the user has overridden the
+	// default resolver scheme.
+	UserSetDefaultScheme = false
+
+	// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
+	// is the number of elements. swap swaps the elements with indexes i and j.
+	ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
+
+	// ConnectedAddress returns the connected address for a SubConnState. The
+	// address is only valid if the state is READY.
+	ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+	// SetConnectedAddress sets the connected address for a SubConnState.
+	SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+	// SnapshotMetricRegistryForTesting snapshots the global data of the metric
+	// registry. Returns a cleanup function that sets the metric registry to its
+	// original state. Only called in testing functions.
+	SnapshotMetricRegistryForTesting func() func()
+
+	// SetDefaultBufferPoolForTesting updates the default buffer pool, for
+	// testing purposes.
+	SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+	// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+	// testing purposes.
+	SetBufferPoolingThresholdForTesting any // func(int)
+)
+
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 000000000..900bfb716 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o any) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate validates every pair in md with ValidatePair. 
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		if err := ValidatePair(k, vals...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E
+func hasNotPrintable(msg string) bool {
+	// iterate over bytes with an index loop to avoid a string conversion
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
+//
+// - the key must contain one or more characters.
+// - the characters in the key must be contained in [0-9 a-z _ - .].
+// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
+// - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+	// key should not be empty
+	if key == "" {
+		return fmt.Errorf("there is an empty key in the header")
+	}
+	// pseudo-headers will be ignored
+	if key[0] == ':' {
+		return nil
+	}
+	// check the key, using an index loop to avoid a string conversion
+	for i := 0; i < len(key); i++ {
+		r := key[i]
+		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+		}
+	}
+	if strings.HasSuffix(key, "-bin") {
+		return nil
+	}
+	// check the values
+	for _, val := range vals {
+		if hasNotPrintable(val) {
+			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+		}
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 000000000..dbee7a60d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/protoadapt"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshaling fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e any) string {
+	if ee, ok := e.(protoadapt.MessageV1); ok {
+		e = protoadapt.MessageV2Of(ee)
+	}
+
+	if ee, ok := e.(protoadapt.MessageV2); ok {
+		mm := protojson.MarshalOptions{
+			Indent:    jsonIndent,
+			Multiline: true,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2 (LDS): the v2
+			// messages are not imported, so marshaling fails because the
+			// message type is not found.
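For context on how the validation rules above behave, here is a minimal sketch (illustrative only: the metadata package shown is internal to grpc-go, so it only compiles from within the grpc module; the `imetadata` alias and the `x-backend-pool` key are ours):

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	// ValidatePair: lowercase keys in [0-9a-z-_.], printable ASCII values,
	// pseudo-headers skipped, "-bin" values exempt from the value check.
	fmt.Println(imetadata.ValidatePair("user-agent", "grpc-go"))  // <nil>
	fmt.Println(imetadata.ValidatePair(":authority", "\x01"))     // <nil> (pseudo-header skipped)
	fmt.Println(imetadata.ValidatePair("trace-bin", "\x00\x01"))  // <nil> ("-bin" values not checked)
	fmt.Println(imetadata.ValidatePair("Upper-Case", "v") != nil) // true (illegal characters)

	// Set/Get attach validated metadata to a resolver address, so RPCs sent
	// on a SubConn created from this address carry the metadata.
	md := metadata.Pairs("x-backend-pool", "primary") // hypothetical key
	if err := imetadata.Validate(md); err == nil {
		addr := imetadata.Set(resolver.Address{Addr: "10.0.0.1:443"}, md)
		fmt.Println(imetadata.Get(addr)) // map[x-backend-pool:[primary]]
	}
}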
+ return fmt.Sprintf("%+v", ee) + } + return string(ret) + } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 000000000..f0603871c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. 
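As a quick illustration of the fallback path described above (a sketch; pretty is likewise a grpc-internal package): plain Go values take the encoding/json path, while proto messages go through protojson.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

func main() {
	cfg := struct {
		Balancer string
		Retries  int
	}{Balancer: "round_robin", Retries: 3}

	// Not a proto message, so ToJSON falls through to json.MarshalIndent
	// with the two-space jsonIndent.
	fmt.Println(pretty.ToJSON(cfg))
	// {
	//   "Balancer": "round_robin",
	//   "Retries": 3
	// }
}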
It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. 
+func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..4552db16b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/resolver/dns/internal" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false + + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. 
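To make the read-lock/write-lock hand-off above concrete, a small sketch (staticSelector is a hypothetical ConfigSelector; the resolver package shown is internal to grpc-go):

package main

import (
	"fmt"

	iresolver "google.golang.org/grpc/internal/resolver"
)

// staticSelector returns an empty RPCConfig for every RPC.
type staticSelector struct{}

func (staticSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{Context: ri.Context}, nil
}

func main() {
	var scs iresolver.SafeConfigSelector
	// UpdateConfigSelector takes the write lock, so it blocks until all
	// in-flight SelectConfig calls (holding the read lock) have returned.
	scs.UpdateConfigSelector(staticSelector{})

	cfg, err := scs.SelectConfig(iresolver.RPCInfo{Method: "/svc/Method"})
	fmt.Println(cfg != nil, err) // true <nil>
}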
+	ResolvingTimeout = 30 * time.Second
+
+	logger = grpclog.Component("dns")
+)
+
+func init() {
+	resolver.Register(NewBuilder())
+	internal.TimeAfterFunc = time.After
+	internal.TimeNowFunc = time.Now
+	internal.TimeUntilFunc = time.Until
+	internal.NewNetResolver = newNetResolver
+	internal.AddressDialer = addressDialer
+}
+
+const (
+	defaultPort       = "443"
+	defaultDNSSvrPort = "53"
+	golang            = "GO"
+	// txtPrefix is the prefix string to be prepended to the host name for txt
+	// record lookup.
+	txtPrefix = "_grpc_config."
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
+
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, address)
+	}
+}
+
+var newNetResolver = func(authority string) (internal.NetResolver, error) {
+	if authority == "" {
+		return net.DefaultResolver, nil
+	}
+
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     internal.AddressDialer(authorityWithPort),
+	}, nil
+}
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{}
+}
+
+type dnsBuilder struct{}
+
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint(), defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	d.resolver, err = internal.NewNetResolver(target.URL.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
+
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (deadResolver) Close() {}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	host     string
+	port     string
+	resolver internal.NetResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the
+	// target.
+	rn chan struct{}
+	// wg is used to enforce Close() to return after the watcher() goroutine has
+	// finished. Otherwise, a data race will be possible. [Race Example] in
+	// dns_resolver_test we replace the real lookup functions with mocked ones to
+	// facilitate testing.
If Close() doesn't wait until the watcher()
+	// goroutine finishes, the race detector will sometimes warn that the lookup
+	// inside the watcher() goroutine (a READ of the lookup function pointers)
+	// has a data race with replaceNetFunc (a WRITE of the lookup function
+	// pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	backoffIndex := 1
+	for {
+		state, err := d.lookup()
+		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
+			d.cc.ReportError(err)
+		} else {
+			err = d.cc.UpdateState(*state)
+		}
+
+		var nextResolutionTime time.Time
+		if err == nil {
+			// Resolution succeeded; wait for the next ResolveNow. However, also wait 30
+			// seconds at the very least to prevent constantly re-resolving.
+			backoffIndex = 1
+			nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
+			select {
+			case <-d.ctx.Done():
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll on an error found in DNS Resolver or an error received from
+			// ClientConn.
+			nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
+			backoffIndex++
+		}
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
+		}
+	}
+}
+
+func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
+	if !EnableSRVLookups {
+		return nil, nil
+	}
+	var newAddrs []resolver.Address
+	_, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
+	}
+	for _, s := range srvs {
+		lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
+		if err != nil {
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
+				continue
+			}
+			return nil, err
+		}
+		for _, a := range lbAddrs {
+			ip, ok := formatIP(a)
+			if !ok {
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+			}
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
+		}
+	}
+	return newAddrs, nil
+}
+
+func handleDNSError(err error, lookupType string) error {
+	dnsErr, ok := err.(*net.DNSError)
+	if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff). Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
+	if err != nil {
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		logger.Info(err)
+	}
+	return err
+}
+
+func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
+	ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
+	if err != nil {
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// TXT record must have "grpc_config=" attribute in order to be used as
+	// service config.
+ if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service + // config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT(ctx) + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", internal.ErrMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. 
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return rand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000..c0eae4f5f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overridden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. 
"::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 000000000..b901c7bac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import ( + "errors" + + "google.golang.org/grpc/resolver" +) + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) +} + +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 000000000..27cd81af9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
+ addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 000000000..11d82afcc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..51e733e49 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. 
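A round-trip sketch of the Duration codec defined above (internal package; the values are chosen to show the 0/3/6/9-digit trimming on output and nanosecond parsing on input):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	iserviceconfig "google.golang.org/grpc/internal/serviceconfig"
)

func main() {
	// 1.5s has only millisecond precision, so two trailing "000" groups
	// are trimmed from the 9-digit fraction.
	d := iserviceconfig.Duration(1500 * time.Millisecond)
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "1.500s"

	// A single negative nanosecond parses back exactly.
	var parsed iserviceconfig.Duration
	_ = json.Unmarshal([]byte(`"-0.000000001s"`), &parsed)
	fmt.Println(time.Duration(parsed)) // -1ns
}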
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. 
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+	// RetryPolicy configures retry options for the method.
+	RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+	// MaxAttempts is the maximum number of attempts, including the original RPC.
+	//
+	// This field is required and must be two or greater.
+	MaxAttempts int
+
+	// Exponential backoff parameters. The initial retry attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
+	// random(0,
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+	//
+	// These fields are required and must be greater than zero.
+	InitialBackoff    time.Duration
+	MaxBackoff        time.Duration
+	BackoffMultiplier float64
+
+	// The set of status codes which may be retried.
+	//
+	// Status codes are specified as strings, e.g., "UNAVAILABLE".
+	//
+	// This field is required and must be non-empty.
+	// Note: a set is used to store this for easy lookup.
+	RetryableStatusCodes map[codes.Code]bool
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 000000000..fd33af51a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there are none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// could also append
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 000000000..be110d41f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
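A minimal sketch of the context plumbing that labels.go above provides (internal package; the label key shown is illustrative):

package main

import (
	"context"
	"fmt"

	istats "google.golang.org/grpc/internal/stats"
)

func main() {
	ctx := context.Background()
	ctx = istats.SetLabels(ctx, &istats.Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-east1-b"},
	})

	// Later, e.g. in a stats handler, the labels are read back from the
	// same context; GetLabels returns nil if none were set.
	if l := istats.GetLabels(ctx); l != nil {
		fmt.Println(l.TelemetryLabels["grpc.lb.locality"]) // us-east1-b
	}
}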
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. +// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..757925381 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,205 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. 
+func (s *Status) Code() codes.Code {
+	if s == nil || s.s == nil {
+		return codes.OK
+	}
+	return codes.Code(s.s.Code)
+}
+
+// Message returns the message contained in s.
+func (s *Status) Message() string {
+	if s == nil || s.s == nil {
+		return ""
+	}
+	return s.s.Message
+}
+
+// Proto returns s's status as an spb.Status proto message.
+func (s *Status) Proto() *spb.Status {
+	if s == nil {
+		return nil
+	}
+	return proto.Clone(s.s).(*spb.Status)
+}
+
+// Err returns an immutable error representing s; returns nil if s.Code() is OK.
+func (s *Status) Err() error {
+	if s.Code() == codes.OK {
+		return nil
+	}
+	return &Error{s: s}
+}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
+	if s.Code() == codes.OK {
+		return nil, errors.New("no error details for status with code OK")
+	}
+	// s.Code() != OK implies that s.Proto() != nil.
+	p := s.Proto()
+	for _, detail := range details {
+		m, err := anypb.New(protoadapt.MessageV2Of(detail))
+		if err != nil {
+			return nil, err
+		}
+		p.Details = append(p.Details, m)
+	}
+	return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+func (s *Status) Details() []any {
+	if s == nil || s.s == nil {
+		return nil
+	}
+	details := make([]any, 0, len(s.s.Details))
+	for _, d := range s.s.Details {
+		detail, err := d.UnmarshalNew()
+		if err != nil {
+			details = append(details, err)
+			continue
+		}
+		details = append(details, detail)
+	}
+	return details
+}
+
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
+// Error wraps a pointer to a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+	s *Status
+}
+
+func (e *Error) Error() string {
+	return e.s.String()
+}
+
+// GRPCStatus returns the Status represented by e.
+func (e *Error) GRPCStatus() *Status {
+	return e.s
+}
+
+// Is implements future error.Is functionality.
+// An Error is equivalent if the wrapped status protos are equal.
+func (e *Error) Is(target error) bool {
+	tse, ok := target.(*Error)
+	if !ok {
+		return false
+	}
+	return proto.Equal(e.s.s, tse.s.s)
+}
+
+// IsRestrictedControlPlaneCode returns whether the status includes a code
+// restricted for control plane usage as defined by gRFC A54.
+func IsRestrictedControlPlaneCode(s *Status) bool {
+	switch s.Code() {
+	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
+		return true
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 000000000..b3a72276d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		logger.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux environment.
+type Rusage = syscall.Rusage
+
+// GetRusage returns the resource usage of the current process.
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
+}
+
+// CPUTimeDiff returns the differences in user CPU time and system CPU time used
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	var (
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// Not a TCP connection; exit early.
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 000000000..54c24c2ff
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,77 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+var logger = grpclog.Component("core")
+
+func log() {
+	once.Do(func() {
+		logger.Info("CPU time info is unavailable on non-linux environments.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux environments.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences in user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function for non-linux environments.
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
new file mode 100644
index 000000000..4f347edd4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -0,0 +1,29 @@
+//go:build !unix && !windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+)
+
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 000000000..7e7aaa546
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 000000000..d5c1085ee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. 
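+//
+// Editor's note (hedged usage sketch; the address is a placeholder): the
+// returned value is an ordinary *net.Dialer, so callers can dial with it
+// directly:
+//
+//	d := NetDialerWithTCPKeepalive()
+//	conn, err := d.DialContext(ctx, "tcp", "example.com:443")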
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 000000000..070680edb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// beta times our estimated bdp, and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp smaller than or equal to twice the real BDP, we
+	// should multiply our current sample by 4/3; however, to round things
+	// out, we use 2 as the multiplication factor.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// isSent tracks whether a bdp ping is in flight, marking the beginning
+	// of a new measurement cycle.
+	isSent bool
+	// Callback to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
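+	// Editor's note (hedged): sampleCount also drives the rtt bootstrap in
+	// calculate below; the first 10 samples are plainly averaged, after
+	// which rtt becomes an exponentially weighted moving average that
+	// weights the newest sample by alpha.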
+	sampleCount uint64
+	// round trip time (seconds)
+	rtt float64
+}
+
+// timesnap registers the time the bdp ping was sent out so that
+// network rtt can be calculated when its ack is received.
+// It is called (by the controller) when the bdpPing is
+// being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+	if bdpPing.data != d {
+		return
+	}
+	b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+// by the caller (handleData) to make a decision about batching
+// a window update with it.
+func (b *bdpEstimator) add(n uint32) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.bdp == bdpLimit {
+		return false
+	}
+	if !b.isSent {
+		b.isSent = true
+		b.sample = n
+		b.sentAt = time.Time{}
+		b.sampleCount++
+		return true
+	}
+	b.sample += n
+	return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+	// Check if the ping acked for was the bdp ping.
+	if bdpPing.data != d {
+		return
+	}
+	b.mu.Lock()
+	rttSample := time.Since(b.sentAt).Seconds()
+	if b.sampleCount < 10 {
+		// Bootstrap rtt with an average of the first 10 rtt samples.
+		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+	} else {
+		// Weight the recent past more heavily.
+		b.rtt += (rttSample - b.rtt) * float64(alpha)
+	}
+	b.isSent = false
+	// The number of bytes accumulated so far in the sample is smaller
+	// than or equal to 1.5 times the real BDP on a saturated connection.
+	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
+	if bwCurrent > b.bwMax {
+		b.bwMax = bwCurrent
+	}
+	// If the current sample (which is smaller than or equal to 1.5 times the
+	// real BDP) is greater than or equal to two-thirds of our perceived bdp
+	// AND this is the maximum bandwidth seen so far, we should update our
+	// perception of the network BDP.
+	if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
+		sampleFloat := float64(b.sample)
+		b.bdp = uint32(gamma * sampleFloat)
+		if b.bdp > bdpLimit {
+			b.bdp = bdpLimit
+		}
+		bdp := b.bdp
+		b.mu.Unlock()
+		b.updateFlowControl(bdp)
+		return
+	}
+	b.mu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 000000000..ef72fbb3a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,1035 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net"
+	"runtime"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/status"
+)
+
+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+	e.SetMaxDynamicTableSizeLimit(v)
+}
+
+type itemNode struct {
+	it   any
+	next *itemNode
+}
+
+type itemList struct {
+	head *itemNode
+	tail *itemNode
+}
+
+func (il *itemList) enqueue(i any) {
+	n := &itemNode{it: i}
+	if il.tail == nil {
+		il.head, il.tail = n, n
+		return
+	}
+	il.tail.next = n
+	il.tail = n
+}
+
+// peek returns the first item in the list without removing it from the
+// list.
+func (il *itemList) peek() any {
+	return il.head.it
+}
+
+func (il *itemList) dequeue() any {
+	if il.head == nil {
+		return nil
+	}
+	i := il.head.it
+	il.head = il.head.next
+	if il.head == nil {
+		il.tail = nil
+	}
+	return i
+}
+
+func (il *itemList) dequeueAll() *itemNode {
+	h := il.head
+	il.head, il.tail = nil, nil
+	return h
+}
+
+func (il *itemList) isEmpty() bool {
+	return il.head == nil
+}
+
+// The following defines various control items which could flow through
+// the control buffer of the transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+
+// maxQueuedTransportResponseFrames is the maximum number of queued "transport
+// response" frames we will buffer before preventing new reads from occurring
+// on the transport. These are control frames sent in response to client
+// requests, such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
+// registerStream is used to register an incoming stream with loopy writer.
+type registerStream struct {
+	streamID uint32
+	wq       *writeQuota
+}
+
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
+// headerFrame is also used to register a stream on the client side.
+type headerFrame struct {
+	streamID   uint32
+	hf         []hpack.HeaderField
+	endStream  bool               // Valid on server side.
+	initStream func(uint32) error // Used only on the client side.
+	onWrite    func()
+	wq         *writeQuota    // write quota for the stream created.
+	cleanup    *cleanupStream // Valid on the server side.
+	onOrphaned func(error)    // Valid on client-side
+}
+
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
+type cleanupStream struct {
+	streamID uint32
+	rst      bool
+	rstCode  http2.ErrCode
+	onWrite  func()
+}
+
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
+type earlyAbortStream struct {
+	httpStatus     uint32
+	streamID       uint32
+	contentSubtype string
+	status         *status.Status
+	rst            bool
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
+type dataFrame struct {
+	streamID  uint32
+	endStream bool
+	h         []byte
+	reader    mem.Reader
+	// onEachWrite is called every time
+	// a part of data is written out.
+ onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn error // if set, loopyWriter will exit with this error +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. 
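+//
+// Editor's sketch (hedged, illustrative only; uses only the methods defined
+// below):
+//
+//	cb := newControlBuffer(done)
+//	_ = cb.put(&ping{})   // a producer queues a control frame
+//	it, _ := cb.get(true) // loopy blocks until a frame is available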
+type controlBuffer struct { + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. + mu sync.Mutex + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Pointer[chan struct{}] +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. +func (c *controlBuffer) throttle() { + if ch := c.trfChan.Load(); ch != nil { + select { + case <-(*ch): + case <-c.done: + } + } +} + +// put adds an item to the controlbuf. +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing + } + if f != nil { + if !f() { // f wasn't successful + return false, nil + } + } + if it == nil { + return true, nil + } + + var wakeUp bool + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } + if wakeUp { + select { + case c.wakeupCh <- struct{}{}: + default: + } + } + return true, nil +} + +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. +func (c *controlBuffer) get(block bool) (any, error) { + for { + c.mu.Lock() + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. 
+			c.mu.Unlock()
+			return frame, err
+		}
+		c.consumerWaiting = true
+		c.mu.Unlock()
+
+		// Release the lock above and wait to be woken up.
+		select {
+		case <-c.wakeupCh:
+		case <-c.done:
+			return nil, errors.New("transport closed by client")
+		}
+	}
+}
+
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+	if c.closed {
+		return nil, ErrConnClosing
+	}
+	if c.list.isEmpty() {
+		return nil, nil
+	}
+	h := c.list.dequeue().(cbItem)
+	if h.isTransportResponseFrame() {
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are removing the frame that put us over the
+			// threshold; close and clear the throttling channel.
+			ch := c.trfChan.Swap(nil)
+			close(*ch)
+		}
+		c.transportResponseFrames--
+	}
+	return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
+func (c *controlBuffer) finish() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return
+	}
+	c.closed = true
+	// There may be headers for streams in the control buffer.
+	// These streams need to be cleaned out since the transport
+	// is still not aware of these yet.
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		switch v := head.it.(type) {
+		case *headerFrame:
+			if v.onOrphaned != nil { // It will be nil on the server-side.
+				v.onOrphaned(ErrConnClosing)
+			}
+		case *dataFrame:
+			_ = v.reader.Close()
+		}
+	}
+
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch := c.trfChan.Swap(nil)
+	if ch != nil {
+		close(*ch)
+	}
+}
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames,
+// they are added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned up
+	// yet. On the client side, this is all streams whose headers were sent out.
+	// On the server side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps trailers
+	// on the server side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
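+	// Editor's note (hedged): hBuf and hEnc are a pair; writeHeader resets
+	// hBuf and lets hEnc encode header fields into it before the bytes are
+	// cut into HEADERS/CONTINUATION frames.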
+	bdpEst     *bdpEstimator
+	draining   bool
+	conn       net.Conn
+	logger     *grpclog.PrefixLogger
+	bufferPool mem.BufferPool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:            s,
+		cbuf:            cbuf,
+		sendQuota:       defaultWindowSize,
+		oiws:            defaultWindowSize,
+		estdStreams:     make(map[uint32]*outStream),
+		activeStreams:   newOutStreamList(),
+		framer:          fr,
+		hBuf:            &buf,
+		hEnc:            hpack.NewEncoder(&buf),
+		bdpEst:          bdpEst,
+		conn:            conn,
+		logger:          logger,
+		ssGoAwayHandler: goAwayHandler,
+		bufferPool:      bufferPool,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of the run loop, other than processing the incoming
+// control frame, loopy calls processData, which processes one node from the
+// activeStreams linked-list. This results in writing of HTTP2 frames into an
+// underlying write buffer. When there are no more control frames to read from
+// controlBuf, loopy flushes the write buffer. As an optimization, to increase
+// the batch size for each flush, loopy yields the processor once, if the
+// batch size is too low, to give stream goroutines a chance to fill it up.
+//
+// Upon exiting, if the error causing the exit is not an I/O error, run()
+// flushes the underlying connection. The connection is always left open to
+// allow different closing behavior on the client and server.
+func (l *loopyWriter) run() (err error) {
+	defer func() {
+		if l.logger.V(logLevel) {
+			l.logger.Infof("loopyWriter exiting with error: %v", err)
+		}
+		if !isIOError(err) {
+			l.framer.writer.Flush()
+		}
+		l.cbuf.finish()
+	}()
+	for {
+		it, err := l.cbuf.get(true)
+		if err != nil {
+			return err
+		}
+		if err = l.handle(it); err != nil {
+			return err
+		}
+		if _, err = l.processData(); err != nil {
+			return err
+		}
+		gosched := true
+	hasdata:
+		for {
+			it, err := l.cbuf.get(false)
+			if err != nil {
+				return err
+			}
+			if it != nil {
+				if err = l.handle(it); err != nil {
+					return err
+				}
+				if _, err = l.processData(); err != nil {
+					return err
+				}
+				continue hasdata
+			}
+			isEmpty, err := l.processData()
+			if err != nil {
+				return err
+			}
+			if !isEmpty {
+				continue hasdata
+			}
+			if gosched {
+				gosched = false
+				if l.framer.writer.offset < minBatchSize {
+					runtime.Gosched()
+					continue hasdata
+				}
+			}
+			l.framer.writer.Flush()
+			break hasdata
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
+	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
+}
+
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
+	// A streamID of 0 means the update is for the connection-level quota.
+	if w.streamID == 0 {
+		l.sendQuota += w.increment
+		return
+	}
+	// Find the stream and update it.
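+	// Editor's worked example (hedged): with oiws = 65535 and
+	// bytesOutStanding = 65535 the stream has no quota; an increment of 1000
+	// leaves bytesOutStanding = 64535 and strQuota = 1000 > 0, so a stream
+	// parked in waitingOnStreamQuota becomes active again.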
+	if str, ok := l.estdStreams[w.streamID]; ok {
+		str.bytesOutStanding -= int(w.increment)
+		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
+			str.state = active
+			l.activeStreams.enqueue(str)
+			return
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
+	return l.framer.fr.WriteSettings(s.ss...)
+}
+
+func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
+	l.applySettings(s.ss)
+	return l.framer.fr.WriteSettingsAck()
+}
+
+func (l *loopyWriter) registerStreamHandler(h *registerStream) {
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	l.estdStreams[h.streamID] = str
+}
+
+func (l *loopyWriter) headerHandler(h *headerFrame) error {
+	if l.side == serverSide {
+		str, ok := l.estdStreams[h.streamID]
+		if !ok {
+			if l.logger.V(logLevel) {
+				l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
+			}
+			return nil
+		}
+		// Case 1.A: Server is responding with headers.
+		if !h.endStream {
+			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+		}
+		// else: Case 1.B: Server wants to close the stream.
+
+		if str.state != empty { // either active or waiting on stream quota.
+			// Add it to str's list of items.
+			str.itl.enqueue(h)
+			return nil
+		}
+		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+			return err
+		}
+		return l.cleanupStreamHandler(h.cleanup)
+	}
+	// Case 2: Client wants to originate a stream.
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	return l.originateStream(str, h)
+}
+
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
+	// l.draining is set when handling GoAway, in which case we want to avoid
+	// creating new streams.
+	if l.draining {
+		// TODO: provide a better error with the reason we are in draining.
+		hdr.onOrphaned(errStreamDrain)
+		return nil
+	}
+	if err := hdr.initStream(str.id); err != nil {
+		return err
+	}
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+		return err
+	}
+	l.estdStreams[str.id] = str
+	return nil
+}
+
+func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
+	if onWrite != nil {
+		onWrite()
+	}
+	l.hBuf.Reset()
+	for _, f := range hf {
+		if err := l.hEnc.WriteField(f); err != nil {
+			if l.logger.V(logLevel) {
+				l.logger.Warningf("Encountered error while encoding headers: %v", err)
+			}
+		}
+	}
+	var (
+		err               error
+		endHeaders, first bool
+	)
+	first = true
+	for !endHeaders {
+		size := l.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			first = false
+			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: l.hBuf.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = l.framer.fr.WriteContinuation(
+				streamID,
+				endHeaders,
+				l.hBuf.Next(size),
+			)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) preprocessData(df *dataFrame) {
+	str, ok := l.estdStreams[df.streamID]
+	if !ok {
+		return
+	}
+	// If we got data for a stream, it means the stream was originated and
+	// the headers were sent out.
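+	// Editor's note: enqueue the frame and, if the stream was idle, move it
+	// onto the round-robin list so that processData will pick it up.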
+ str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { + o.resp <- l.sendQuota +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) handle(i any) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *registerStream:
+		l.registerStreamHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *earlyAbortStream:
+		return l.earlyAbortStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		l.outFlowControlSizeRequestHandler(i)
+	case closeConnection:
+		// Just return a non-I/O error and run() will flush and close the
+		// connection.
+		return ErrConnClosing
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+	return nil
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater, make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which holds the grpc-message header, and
+	// the reader, which holds the actual message data. As an optimization to keep
+	// wire traffic low, data from the reader is copied into h to make the frame as
+	// big as the maximum possible HTTP2 frame size.
+
+	if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
+		// The client sends out an empty data frame with endStream = true.
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from the stream
+		_ = dataItem.reader.Close()
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+ if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. + str.state = waitingOnStreamQuota + return false, nil + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h + } else { + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) + + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } + + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && remainingBytes == 0 { + endStream = true + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + return false, err + } + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + dataItem.h = dataItem.h[hSize:] + + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 000000000..bc8ee0747 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"math"
+	"time"
+)
+
+const (
+	// The default value of flow control window size in the HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = 20 * time.Second
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
+	// Max window limit set by the HTTP2 spec.
+	maxWindowSize = math.MaxInt32
+	// defaultWriteQuota is the default value for number of data
+	// bytes that each stream can schedule before some of it being
+	// flushed out.
+	defaultWriteQuota              = 64 * 1024
+	defaultClientMaxHeaderListSize = uint32(16 << 20)
+	defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
+
+// MaxStreamID is the upper bound for the stream ID before the current
+// transport gracefully closes and a new transport is created for subsequent
+// RPCs. This is set to 75% of 2^31-1. Streams are identified with an unsigned
+// 31-bit integer. It's exported so that tests can override it.
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 000000000..97198c515
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get waits on a read from ch when quota drops to zero or below;
+	// replenish writes to it when quota goes positive again.
+	ch chan struct{}
+	// done is triggered in error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back to the stream.
+	// It is implemented as a field so that it can be updated
+	// by tests.
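+	// Editor's note (hedged): a sender calls get(n) before queuing n bytes;
+	// once loopy writes them out, replenish(n) restores the quota and wakes
+	// any writer blocked in get via ch.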
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control.
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which has been received but not yet
+	// consumed by the application.
+	pendingData uint32
+	// pendingUpdate is the amount of data the application has consumed for
+	// which grpc has not yet sent a window update. Used to reduce window
+	// update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by the receiver when an
+	// application is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) {
+	f.mu.Lock()
+	f.limit = n
+	f.mu.Unlock()
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore, we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// The sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; we will fall back on the currently available window (at least a 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+	f.mu.Lock()
+	f.pendingData += n
+	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
+		limit := f.limit
+		rcvd := f.pendingData + f.pendingUpdate
+		f.mu.Unlock()
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
+	}
+	f.mu.Unlock()
+	return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+	f.mu.Lock()
+	if f.pendingData == 0 {
+		f.mu.Unlock()
+		return 0
+	}
+	f.pendingData -= n
+	if n > f.delta {
+		n -= f.delta
+		f.delta = 0
+	} else {
+		f.delta -= n
+		n = 0
+	}
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		wu := f.pendingUpdate
+		f.pendingUpdate = 0
+		f.mu.Unlock()
+		return wu
+	}
+	f.mu.Unlock()
+	return 0
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
new file mode 100644
index 000000000..ce878693b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -0,0 +1,502 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file is the implementation of a gRPC server using HTTP/2 which
+// uses the standard Go http2 Server implementation (via the
+// http.Handler interface), rather than speaking low-level HTTP/2
+// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
+
+package transport
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
+// It requires that the http Server supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+	if r.Method != http.MethodPost {
+		w.Header().Set("Allow", http.MethodPost)
+		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
+		http.Error(w, msg, http.StatusMethodNotAllowed)
+		return nil, errors.New(msg)
+	}
+	contentType := r.Header.Get("Content-Type")
+	// TODO: do we assume contentType is lowercase?
we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) + } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) + } + if _, ok := w.(http.Flusher); !ok { + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) + } + + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + peer: p, + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + bufferPool: bufferPool, + } + st.logger = prefixLoggerForServerHandlerTransport(st) + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + peer peer.Peer + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. 
grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats []stats.Handler + logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool +} + +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) +} + +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) + } + + if len(s.trailer) > 0 { + for k, vv := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
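As a side note on the trailer mechanics used above: the net/http contract is that declared trailer names are announced in a "Trailer" header before the body, and their values are set on the header map after the body has been written (undeclared names need the http2.TrailerPrefix escape hatch instead). A minimal, self-contained sketch of that contract, using "Grpc-Status" purely as an example trailer name:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func handler(w http.ResponseWriter, _ *http.Request) {
	// Declare the trailer before any write, per the ResponseWriter contract.
	w.Header().Set("Trailer", "Grpc-Status")
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "response body")
	// Values set after the body has been written are sent as trailers.
	w.Header().Set("Grpc-Status", "0")
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	io.Copy(io.Discard, resp.Body) // trailers are populated once the body is drained
	resp.Body.Close()
	fmt.Println("Grpc-Status trailer:", resp.Trailer.Get("Grpc-Status"))
}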
+ for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close(errors.New("finished writing status")) + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + defer data.Free() + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } + ht.rw.(http.Flusher).Flush() + }) + if err != nil { + data.Free() + return err + } + return nil +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. 
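Write, WriteHeader, and WriteStatus above never touch the ResponseWriter from the RPC goroutine directly: they queue closures through do(), and runStream executes them one at a time on the ServeHTTP goroutine. A reduced, self-contained sketch of that serialization pattern, with illustrative names:

package main

import (
	"errors"
	"fmt"
)

// serializer runs submitted closures one at a time on a single owner
// goroutine, mirroring the writes channel plus the runStream loop above.
type serializer struct {
	work chan func()
	done chan struct{}
}

func (s *serializer) do(fn func()) error {
	select {
	case <-s.done:
		return errors.New("serializer closed")
	case s.work <- fn:
		return nil
	}
}

func (s *serializer) run() {
	for {
		select {
		case fn := <-s.work:
			fn()
		case <-s.done:
			return
		}
	}
}

func main() {
	s := &serializer{work: make(chan func()), done: make(chan struct{})}
	go func() {
		_ = s.do(func() { fmt.Println("write headers") })
		_ = s.do(func() { fmt.Println("write status") })
		close(s.done)
	}()
	s.run() // plays the role of the ServeHTTP goroutine
}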
+ requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close(errors.New("request is done processing")) + }() + + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + req := ht.req + s := &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) + if n > 0 { + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain(string) { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 000000000..c769deab5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1823 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "net/http" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" + isyscall "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +var goAwayLoopyWriterTimeout = 5 * time.Second + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address + md metadata.MD + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandlers []stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + registeredCompressors string + + // Do not access controlBuf with mu held. 
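+ // Rationale (see the comment in handleGoAway below): closeStream can take
+ // controlBuf's own mutex, so acquiring controlBuf while holding mu risks a
+ // lock-order inversion and deadlock.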
+ mu sync.Mutex // guard the following variables + nextID uint32 + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool + + channelz *channelz.Socket + + onClose func(GoAwayReason) + + bufferPool mem.BufferPool + + connectionID uint64 + logger *grpclog.PrefixLogger +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) + if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. 
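The isTemporary helper above classifies errors by probing for behavior (Temporary/Timeout methods) rather than matching concrete types. A small self-contained sketch of the same technique; dialError is a hypothetical error type for illustration, not part of this package:

package main

import (
	"errors"
	"fmt"
)

// dialError is a stand-in error type for the sketch.
type dialError struct{ timeout bool }

func (e *dialError) Error() string { return "dial failed" }
func (e *dialError) Timeout() bool { return e.timeout }

// temporary mirrors isTemporary: anything reporting Temporary() or
// Timeout() is classified by that method; unknown errors default to true.
func temporary(err error) bool {
	switch err := err.(type) {
	case interface{ Temporary() bool }:
		return err.Temporary()
	case interface{ Timeout() bool }:
		return err.Timeout()
	}
	return true
}

func main() {
	fmt.Println(temporary(&dialError{timeout: true}))  // true: a retry may help
	fmt.Println(temporary(&dialError{timeout: false})) // false: permanent failure
	fmt.Println(temporary(errors.New("unknown")))      // true: the default
}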
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) + } + + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + + // The following defer and goroutine monitor the connectCtx for cancellation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
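The connect-deadline watchdog above (a goroutine that hard-closes the connection if connectCtx expires, and is awakened either by expiry or by the function returning) can be sketched independently of the transport; the conn type and the 50ms/100ms timings below are illustrative only:

package main

import (
	"context"
	"fmt"
	"time"
)

type conn struct{}

func (c *conn) Close() { fmt.Println("hard-closing connection") }

// connectWithWatchdog hard-closes c if ctx expires before connect returns,
// and never leaves the watchdog goroutine running after it returns.
func connectWithWatchdog(ctx context.Context, c *conn, connect func() error) error {
	watchCtx, stopWatch := context.WithCancel(ctx)
	watchdogDone := make(chan struct{})
	defer func() {
		stopWatch()    // awaken the watchdog if ctx has not expired
		<-watchdogDone // wait for it to exit before returning
	}()
	go func() {
		defer close(watchdogDone)
		<-watchCtx.Done()
		if ctx.Err() != nil { // the deadline (not our cancel) fired: abort
			c.Close()
		}
	}()
	return connect()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := connectWithWatchdog(ctx, &conn{}, func() error {
		time.Sleep(100 * time.Millisecond) // a handshake slower than the deadline
		return ctx.Err()
	})
	fmt.Println("connect result:", err)
}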
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandlers: opts.StatsHandlers, + initialWindowSize: initialWindowSize, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + keepaliveEnabled: keepaliveEnabled, + bufferPool: opts.BufferPool, + onClose: onClose, + } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + sh.HandleConn(t.ctx, connBegin) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) + t.Close(err) + } + }() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. 
+ t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, + } +} + +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + return false, g.closeConn +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+ hfLen += len(authData) + len(callAuthData)
+ headerFields := make([]hpack.HeaderField, 0, hfLen)
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+ if callHdr.PreviousAttempts > 0 {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+ }
+
+ registeredCompressors := t.registeredCompressors
+ if callHdr.SendCompress != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ // Include the outgoing compressor name when the compressor is not registered
+ // via encoding.RegisterCompressor. This is possible when a client uses the
+ // WithCompressor dial option.
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
+ if registeredCompressors != "" {
+ registeredCompressors += ","
+ }
+ registeredCompressors += callHdr.SendCompress
+ }
+ }
+
+ if registeredCompressors != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ // Send out the timeout regardless of its value. The server can detect a timeout context by itself.
+ // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
+ timeout := time.Until(dl)
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
+ }
+ for k, v := range authData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ for k, v := range callAuthData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ if b := stats.OutgoingTags(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+ }
+ if b := stats.OutgoingTrace(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+ }
+
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
+ var k string
+ for k, vv := range md {
+ // HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ for _, vv := range added {
+ for i, v := range vv {
+ if i%2 == 0 {
+ k = strings.ToLower(v)
+ continue
+ }
+ // HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set. 
+ if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. 
It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + AllowTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { + ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(uint32) error { + t.mu.Lock() + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { + t.mu.Unlock() + cleanup(ErrConnClosing) + return ErrConnClosing + } + if channelz.IsOn() { + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + transportDrainRequired := false + checkForStreamQuota := func() bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
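checkForStreamQuota above is, in effect, a counting semaphore whose capacity the server can change at runtime (so the count may go negative), with a 1-buffered channel as the wakeup signal. Stripped of the transport details and locking, the shape is roughly:

package main

import "fmt"

// quota mirrors the stream-quota bookkeeping: n may go negative when the
// server lowers MAX_CONCURRENT_STREAMS, and avail is a 1-buffered wakeup
// signal for a waiter that found no quota.
type quota struct {
	n     int64
	avail chan struct{}
}

func (q *quota) tryAcquire() bool {
	if q.n <= 0 {
		return false // a real caller would block on q.avail
	}
	q.n--
	return true
}

func (q *quota) release() {
	q.n++
	if q.n > 0 {
		select {
		case q.avail <- struct{}{}: // wake one waiter without ever blocking
		default:
		}
	}
}

func main() {
	q := &quota{n: 1, avail: make(chan struct{}, 1)}
	fmt.Println(q.tryAcquire()) // true: the one slot is consumed
	fmt.Println(q.tryAcquire()) // false: a real caller would wait on q.avail
	q.release()
	fmt.Println(q.tryAcquire()) // true: quota returned by a finished stream
}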
+ transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.activeStreams[s.id] = s + t.mu.Unlock() + + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func() bool { + if t.maxSendHeaderListSize == nil { + return true + } + var sz int64 + for _, f := range hdr.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() + }, hdr) + if err != nil { + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, &NewStreamError{Err: hdrListSizeErr} + } + firstTry = false + select { + case <-ch: + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} + case <-t.ctx.Done(): + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} + } + } + if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } + t.GracefulClose() + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
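+ // The CompareAndSwap guarantees headerChan is closed at most once, even
+ // when several closeStream paths race with header processing.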
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) + } else { + t.channelz.SocketMetrics.StreamsFailed.Add(1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func() bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed anymore. +func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + t.mu.Lock() + // Make sure we only close once. + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } + t.cancel() + t.conn.Close() + channelz.RemoveEntry(t.channelz.ID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + for _, sh := range t.statsHandlers { + connEnd := &stats.ConnEnd{ + Client: true, + } + sh.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. 
+func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() + return errStreamDone + } + } else if s.getState() != streamActive { + _ = reader.Close() + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + reader: reader, + } + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() + return err + } + } + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + updateIWS := func() bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. 
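Both the connection-level accounting just described and the per-stream onRead path share one batching rule: accumulate unacknowledged bytes and emit a WINDOW_UPDATE only once a quarter of the window limit is pending, which keeps update frames infrequent. The rule in isolation (illustrative sizes):

package main

import "fmt"

// ackBatcher mirrors the trInFlow.onData threshold: it returns a non-zero
// window update only once unacked bytes reach a quarter of the limit.
type ackBatcher struct {
	limit   uint32
	unacked uint32
}

func (b *ackBatcher) onData(n uint32) uint32 {
	b.unacked += n
	if b.unacked >= b.limit/4 {
		w := b.unacked
		b.unacked = 0
		return w
	}
	return 0
}

func main() {
	b := &ackBatcher{limit: 65536} // 64KiB window, threshold 16KiB
	for _, n := range []uint32{8192, 8192, 8192} {
		if w := b.onData(n); w > 0 {
			fmt.Printf("send WINDOW_UPDATE(%d)\n", w)
		} else {
			fmt.Println("buffered, no update yet")
		}
	}
}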
+ // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancellation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. 
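+ // Closing the channel broadcasts to every goroutine currently blocked on
+ // it; a fresh 1-buffered channel is installed below for future waiters.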
+ t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func() bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") + } + id := f.LastStreamID + if id > 0 && id%2 == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. + // Notify the clientconn about the GOAWAY before we set the state to + // draining, to allow the client to stop attempting to create streams + // before disallowing new streams on this connection. + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. 
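+ // Marking the stream unprocessed lets higher layers transparently
+ // re-issue the RPC on another connection, since the server never began
+ // work on it.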
+ atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } + } + t.mu.Unlock() + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutex to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } +} + +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason, t.goAwayDebugMessage +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
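+ // For any header block after the initial one, the content-type
+ // check has already passed, so isGRPC starts out true; for the
+ // initial block it must be established from the content-type
+ // field parsed below.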
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). 
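+ // The compare-and-swap below is the usual close-exactly-once
+ // guard: only the goroutine that flips headerChanClosed from 0 to
+ // 1 may close(s.headerChan), so a concurrent closeStream cannot
+ // cause a double-close panic.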
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + close(s.headerChan) + } + } + + for _, sh := range t.statsHandlers { + if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + + if !endStream { + return + } + + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) +} + +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { + frame, err := t.framer.fr.ReadFrame() + if err != nil { + return connectionErrorf(true, err, "error reading server preface: %v", err) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) + } + t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } + // Transport error. 
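+ // Stream-level errors were handled above by resetting only the
+ // offending stream; anything else is fatal for the connection, so
+ // tear down the whole transport and let higher layers reconnect.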
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } + } +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { + if channelz.IsOn() { + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). 
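+ // Worked example with illustrative numbers: kp.Time = 10s,
+ // kp.Timeout = 3s. After a ping is sent, timeoutLeft is 3s and we
+ // sleep min(10s, 3s) = 3s; if no read activity happens in that
+ // window, timeoutLeft reaches 0 and the next tick closes the
+ // transport with a keepalive failure.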
+ sleepDuration := min(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 000000000..584b50fe5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1475 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" + "google.golang.org/protobuf/proto" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. 
+ ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats []stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelz *channelz.Socket + bufferPool mem.BufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger +} + +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. 
io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + if config.MaxStreams != math.MaxUint32 { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: config.MaxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
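+ // getJitter (defined at the bottom of this file) returns a random
+ // offset of roughly +/-10% of the value, so that connections
+ // accepted at the same moment do not all reach MaxConnectionAge,
+ // and force their clients to reconnect, at exactly the same time.
+ // For example, a 10m MaxConnectionAge becomes a value between
+ // roughly 9m and 11m.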
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + + done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } + t := &http2Server{ + done: done, + conn: conn, + peer: peer, + framer: framer, + readerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), + maxStreams: config.MaxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandlers, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + bufferPool: config.BufferPool, + } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) + t.logger = prefixLoggerForServerTransport(t) + + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close(err) + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case <-t.readerDone: + case <-timer.C: + } + t.conn.Close() + } + }() + go t.keepalive() + return t, nil +} + +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. 
headers frame: %+v", streamID, frame) + } + t.maxStreamID = streamID + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil + } + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(ctx) + } + + // Attach the received metadata to the context. 
+ if len(mdata) > 0 {
+ s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+ if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+ s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
+ }
+ if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+ s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
+ }
+ }
+ t.mu.Lock()
+ if t.state != reachable {
+ t.mu.Unlock()
+ s.cancel()
+ return nil
+ }
+ if uint32(len(t.activeStreams)) >= t.maxStreams {
+ t.mu.Unlock()
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeRefusedStream,
+ onWrite: func() {},
+ })
+ s.cancel()
+ return nil
+ }
+ if httpMethod != http.MethodPost {
+ t.mu.Unlock()
+ errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 405,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.Internal, errMsg),
+ rst: !frame.StreamEnded(),
+ })
+ s.cancel()
+ return nil
+ }
+ if t.inTapHandle != nil {
+ var err error
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
+ t.mu.Unlock()
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
+ }
+ stat, ok := status.FromError(err)
+ if !ok {
+ stat = status.New(codes.PermissionDenied, err.Error())
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: 200,
+ streamID: s.id,
+ contentSubtype: s.contentSubtype,
+ status: stat,
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+ }
+ t.activeStreams[streamID] = s
+ if len(t.activeStreams) == 1 {
+ t.idle = time.Time{}
+ }
+ t.mu.Unlock()
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.StreamsStarted.Add(1)
+ t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
+ }
+ s.requestRead = func(n int) {
+ t.adjustWindow(s, uint32(n))
+ }
+ s.ctxDone = s.ctx.Done()
+ s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
+ s.trReader = &transportReader{
+ reader: &recvBufferReader{
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
+ },
+ windowHandler: func(n int) {
+ t.updateWindow(s, uint32(n))
+ },
+ }
+ // Register the stream with loopy.
+ t.controlBuf.put(&registerStream{
+ streamID: s.id,
+ wq: s.wq,
+ })
+ handle(s)
+ return nil
+}
+
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
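+//
+// A typical caller loop is sketched below; serveRPC is a hypothetical
+// handler, and the real gRPC server wiring differs in its details:
+//
+//	st, err := NewServerTransport(conn, cfg)
+//	if err != nil {
+//		return // reject the connection
+//	}
+//	st.HandleStreams(ctx, func(s *Stream) {
+//		go serveRPC(st, s) // one goroutine per incoming stream
+//	})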
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + t.Close(err) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if err := t.operateHeaders(ctx, frame, handle); err != nil { + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. + t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + } + } + if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func() bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() + return + } + // Maybe it's a BDP ping. 
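+ // (calculate is expected to ignore ACKs whose payload does not
+ // match the fixed BDP ping payload, so ACKs of ordinary keepalive
+ // pings pass through here harmlessly.)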
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+
+ now := time.Now()
+ defer func() {
+ t.lastPingAt = now
+ }()
+ // A reset of ping strikes means that we don't need to check for a
+ // policy violation for this ping, and the pingStrikes counter should
+ // be reset to 0.
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+ t.pingStrikes = 0
+ return
+ }
+ t.mu.Lock()
+ ns := len(t.activeStreams)
+ t.mu.Unlock()
+ if ns < 1 && !t.kep.PermitWithoutStream {
+ // Keepalive shouldn't be active; thus, this new ping should
+ // have come after at least defaultPingTimeout.
+ if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+ t.pingStrikes++
+ }
+ } else {
+ // Check if the keepalive policy is respected.
+ if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+ t.pingStrikes++
+ }
+ }
+
+ if t.pingStrikes > maxPingStrikes {
+ // Send a goaway and close the connection.
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
+ }
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ t.controlBuf.put(&incomingWindowUpdate{
+ streamID: f.Header().StreamID,
+ increment: f.Increment,
+ })
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+ for k, vv := range md {
+ if isReservedHeader(k) {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it any) bool {
+ if t.maxSendHeaderListSize == nil {
+ return true
+ }
+ hdrFrame := it.(*headerFrame)
+ var sz int64
+ for _, f := range hdrFrame.hf {
+ if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ }
+ return false
+ }
+ }
+ return true
+}
+
+func (t *http2Server) streamContextErr(s *Stream) error {
+ select {
+ case <-t.done:
+ return ErrConnClosing
+ default:
+ }
+ return ContextErr(s.ctx.Err())
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
+ if s.getState() == streamDone {
+ return t.streamContextErr(s)
+ }
+
+ if s.updateHeaderSent() {
+ return ErrIllegalHeaderWrite
+ }
+
+ if md.Len() > 0 {
+ if s.header.Len() > 0 {
+ s.header = metadata.Join(s.header, md)
+ } else {
+ s.header = md
+ }
+ }
+ if err := t.writeHeaderLocked(s); err != nil {
+ switch e := err.(type) {
+ case ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ return status.Convert(err).Err()
+ }
+ }
+ return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // at least :status and content-type will be there if nothing else.
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+ if s.sendCompress != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+ hf := &headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: false,
+ onWrite: t.setResetPingStrikes,
+ }
+ success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
+ if !success {
+ if err != nil {
+ return err
+ }
+ t.closeStream(s, true, http2.ErrCodeInternal, false)
+ return ErrHeaderListSizeLimitViolation
+ }
+ for _, sh := range t.stats {
+ // Note: Headers are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ outHeader := &stats.OutHeader{
+ Header: s.header.Copy(),
+ Compression: s.sendCompress,
+ }
+ sh.HandleRPC(s.Context(), outHeader)
+ }
+ return nil
+}
+
+// WriteStatus sends the stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+ s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
+
+ if s.getState() == streamDone {
+ return nil
+ }
+
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if nothing else.
+ if !s.updateHeaderSent() { // No headers have been sent.
+ if len(s.header) > 0 { // Send a separate header frame.
+ if err := t.writeHeaderLocked(s); err != nil {
+ return err
+ }
+ } else { // Send a trailer-only response.
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+ }
+ }
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+ if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ // Do not use the user's grpc-status-details-bin (if present) if we are
+ // even attempting to set our own.
+ delete(s.trailer, grpcStatusDetailsBinHeader)
+ stBytes, err := proto.Marshal(p)
+ if err != nil {
+ // TODO: return error instead, when callers are able to handle it.
+ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
+ } else {
+ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
+ }
+ }
+
+ // Attach the trailer metadata.
+ headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+ trailingHeader := &headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: true,
+ onWrite: t.setResetPingStrikes,
+ }
+
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
+ if !success {
+ if err != nil {
+ return err
+ }
+ t.closeStream(s, true, http2.ErrCodeInternal, false)
+ return ErrHeaderListSizeLimitViolation
+ }
+ // Send a RST_STREAM after the trailers if the client has not already half-closed.
+ rst := s.getState() == streamActive
+ t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+ for _, sh := range t.stats {
+ // Note: The trailer fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ sh.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
+ }
+ return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ reader := data.Reader()
+
+ if !s.isHeaderSent() { // Headers haven't been written yet.
+ if err := t.WriteHeader(s, nil); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ } else {
+ // Writing headers checks for this condition.
+ if s.getState() == streamDone {
+ _ = reader.Close()
+ return t.streamContextErr(s)
+ }
+ }
+
+ df := &dataFrame{
+ streamID: s.id,
+ h: hdr,
+ reader: reader,
+ onEachWrite: t.setResetPingStrikes,
+ }
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
+ return t.streamContextErr(s)
+ }
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+ p := &ping{}
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ kpTimeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
+ // Initialize the different timers to their default values.
+ idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+ ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+ kpTimer := time.NewTimer(t.kp.Time)
+ defer func() {
+ // We need to drain the underlying channel in these timers after a call
+ // to Stop(), only if we are interested in resetting them. Clearly we
+ // are not interested in resetting them here.
+ idleTimer.Stop()
+ ageTimer.Stop()
+ kpTimer.Stop()
+ }()
+
+ for {
+ select {
+ case <-idleTimer.C:
+ t.mu.Lock()
+ idle := t.idle
+ if idle.IsZero() { // The connection is non-idle.
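+ // t.idle uses the zero time.Time as a sentinel: operateHeaders
+ // clears it while at least one stream is active, and deleteStream
+ // sets it to time.Now() when the active-stream count drops back
+ // to zero.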
+ t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.Drain("max_idle") + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain("max_age") + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") + } + t.controlBuf.put(closeConnection{}) + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) + return + } + if !outstandingPing { + if channelz.IsOn() { + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := min(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close(err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + } + channelz.RemoveEntry(t.channelz.ID) + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) + } else { + t.channelz.SocketMetrics.StreamsFailed.Add(1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. 
+ return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) Drain(debugData string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainEvent != nil { + return + } + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + sid := t.maxStreamID + retErr := g.closeConn + if len(t.activeStreams) == 0 { + retErr = errors.New("second GOAWAY written and no active streams left to process") + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + t.framer.writer.Flush() + if retErr != nil { + return false, retErr + } + return true, nil + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
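+ //
+ // The graceful drain therefore looks roughly like this:
+ //   1. GOAWAY(lastStreamID = MaxUint32): a warning; nothing is killed.
+ //   2. PING(goAwayPing): a round-trip marker.
+ //   3. PING ack, a 5s timer, or transport close: the warning has
+ //      been seen (or waited out).
+ //   4. GOAWAY(lastStreamID = maxStreamID): streams above this ID
+ //      are not processed.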
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case <-t.drainEvent.Done(): + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } +} + +func (t *http2Server) IncrMsgSent() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +} + +func (t *http2Server) IncrMsgRecv() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { + return &peer.Peer{ + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := rand.Int63n(2*r) - r + return time.Duration(j) +} + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func SetConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 000000000..3613d7b64 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,468 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "encoding/base64" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // https://httpwg.org/specs/rfc7540.html#SettingValues + http2InitHeaderTableSize = 4096 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. 
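+		// For example, "dGVzdA==" (padded) and "YWJj" (unpadded, but
+		// already a multiple of four bytes) both take this branch.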
+ return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + fmt.Fprintf(&sb, "%%%02X", b) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + sb.WriteByte(b) + } else { + fmt.Fprintf(&sb, "%%%02X", b) + } + } + msg = msg[size:] + } + return sb.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
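+// For example, "100%25" decodes to "100%"; a '%' that is not followed by two
+// hex digits is copied through unchanged.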
+func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + sb.WriteByte(c) + } else { + sb.WriteByte(byte(parsed)) + i += 2 + } + } else { + sb.WriteByte(c) + } + } + return sb.String() +} + +type bufWriter struct { + pool *sync.Pool + buf []byte + offset int + batchSize int + conn net.Conn + err error +} + +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ + batchSize: batchSize, + conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w +} + +func (w *bufWriter) Write(b []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + n, err := w.conn.Write(b) + return n, toIOError(err) + } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } + written := 0 + for len(b) > 0 { + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err + } + } + return written, nil +} + +func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) + w.offset = 0 + return w.err +} + +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +var writeBufferPoolMap = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. 
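+	// Callers that need to retain frame contents must therefore copy them
+	// before the next ReadFrame call.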
+ f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +func getWriteBufferPool(size int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 000000000..42ed2b07a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 000000000..c11b52782 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go new file mode 100644 index 000000000..54b224436 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -0,0 +1,150 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "google.golang.org/grpc/internal" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. 
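+// For example, if the proxy's CONNECT response and the first bytes from the
+// backend arrive in a single read, the surplus bytes stay in the bufio.Reader
+// and are surfaced through bufConn.Read.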
+type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) + if err != nil { + return nil, err + } + if proxyURL == nil { + // proxy is disabled if proxyURL is nil. + return conn, err + } + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 000000000..924ba4f36 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,934 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer mem.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. + err error +} + +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. 
If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.last != nil {
+		buf = r.last
+		if r.last.Len() > n {
+			buf, r.last = mem.SplitUnsafe(buf, n)
+		} else {
+			r.last = nil
+		}
+		return buf, nil
+	}
+	if r.closeStream != nil {
+		buf, r.err = r.readClient(n)
+	} else {
+		buf, r.err = r.read(n)
+	}
+	return buf, r.err
+}
+
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+	select {
+	case <-r.ctxDone:
+		return nil, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+	// If the context is canceled, the stream is closed with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fixing the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readHeaderAdditional(m, header)
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+	// If the context is canceled, the stream is closed with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fixing the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, n)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		if m.buffer != nil {
+			m.buffer.Free()
+		}
+		return 0, m.err
+	}
+
+	n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+	return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+	r.recv.load()
+	if m.err != nil {
+		if m.buffer != nil {
+			m.buffer.Free()
+		}
+		return nil, m.err
+	}
+
+	if m.buffer.Len() > n {
+		m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
+	}
+
+	return m.buffer, nil
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone // EndStream sent
+	streamReadDone  // EndStream received
+	streamDone      // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ct           ClientTransport    // nil for server side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     *transportReader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Holds compressor names passed in grpc-accept-encoding metadata from the
+	// client. This is empty for the client side stream.
+	clientAdvertisedCompressors string
+	// Callback to state application's intentions to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value). Not valid on server side.
+	headerValid      bool
+	headerWireLength int // Only set on server side.
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// This must be lowercase or the behavior is undefined.
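+	// For example, a content-subtype of "proto" corresponds to the
+	// content-type "application/grpc+proto".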
+ contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. Client-side only. 
+func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Trailer returns the cached trailer metadata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *Stream) Status() *status.Status { + return s.status +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. +func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
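+// On success, the returned BufferSlice holds exactly n bytes; a short read
+// that ends in io.EOF is reported as io.ErrUnexpectedEOF.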
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.er; er != nil {
+		return nil, er
+	}
+	s.requestRead(n)
+	for n != 0 {
+		buf, err := s.trReader.Read(n)
+		var bufLen int
+		if buf != nil {
+			bufLen = buf.Len()
+		}
+		n -= bufLen
+		if n == 0 {
+			err = nil
+		}
+		if err != nil {
+			if bufLen > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			data.Free()
+			return nil, err
+		}
+		data = append(data, buf)
+	}
+	return data, nil
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader *recvBufferReader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+	n, err := t.reader.ReadHeader(header)
+	if err != nil {
+		t.er = err
+		return 0, err
+	}
+	t.windowHandler(n)
+	return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+	buf, err := t.reader.Read(n)
+	if err != nil {
+		t.er = err
+		return buf, err
+	}
+	t.windowHandler(buf.Len())
+	return buf, nil
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandlers         []stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	SharedWriteBuffer     bool
+	ChannelzParent        *channelz.Server
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+	BufferPool            mem.BufferPool
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to set up a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. 
Only one of + // TransportCredentials and CredsBundle is non-nil. + CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool + // ChannelzParent sets the addrConn id which initiated the creation of this client transport. + ChannelzParent *channelz.SubChannel + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close(err error) + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. 
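+	// hdr is written before data and typically carries the gRPC message
+	// framing (compressed-flag and length prefix) for the payload in data.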
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. + GetGoAwayReason() (GoAwayReason, string) + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(context.Context, func(*Stream)) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close(err error) + + // Peer returns the peer of the server transport. + Peer() *peer.Peer + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain(debugData string) + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. 
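+// Callers can use Temporary to decide whether the connection may be retried,
+// and Unwrap (or Origin) to inspect the underlying cause.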
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate an
+	// error at the application layer.
+	errStreamDone = errors.New("the stream is done")
+	// StatusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when a GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 000000000..eb42b19fb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package keepalive defines configurable parameters for point-to-point
+// healthcheck.
+package keepalive
+
+import (
+	"time"
+)
+
+// ClientParameters is used to set keepalive parameters on the client-side. 
+// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. 
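+	// Pings arriving more often than MinTime are treated as a policy
+	// violation: the server sends a GOAWAY with debug data "too_many_pings"
+	// and closes the connection.
+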
+ // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 000000000..c37c58c02 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. 
For example, HTTP/2 frames within gRPC have a default max size
+// of 16KB and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16KB. Note, however, that it does not support returning larger
+// buffers; in fact, it panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+	pool        sync.Pool
+	defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+	buf := p.pool.Get().(*[]byte)
+	b := *buf
+	clear(b[:cap(b)])
+	*buf = b[:size]
+	return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+	if cap(*buf) < p.defaultSize {
+		// Ignore buffers that are too small to fit in the pool. Otherwise, when
+		// Get is called it will panic as it tries to index outside the bounds
+		// of the buffer.
+		return
+	}
+	p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+	return &sizedBufferPool{
+		pool: sync.Pool{
+			New: func() any {
+				buf := make([]byte, size)
+				return &buf
+			},
+		},
+		defaultSize: size,
+	}
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+	pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+	bs, ok := p.pool.Get().(*[]byte)
+	if ok && cap(*bs) >= size {
+		*bs = (*bs)[:size]
+		return bs
+	}
+
+	// A buffer was pulled from the pool, but it is too small. Put it back in
+	// the pool and create one large enough.
+	if ok {
+		p.pool.Put(bs)
+	}
+
+	b := make([]byte, size)
+	return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+	p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+	b := make([]byte, length)
+	return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 000000000..228e9c2f2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. 
This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers, such as Ref,
+// Len, or CopyTo, will panic if any underlying buffers have already been
+// freed. It is recommended not to interact with any of the underlying
+// buffers directly; rather, such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+	var length int
+	for _, b := range s {
+		length += b.Len()
+	}
+	return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+	for _, b := range s {
+		b.Ref()
+	}
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+	for _, b := range s {
+		b.Free()
+	}
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. It has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+	off := 0
+	for _, b := range s {
+		off += copy(dst[off:], b.ReadOnlyData())
+	}
+	return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+	l := s.Len()
+	if l == 0 {
+		return nil
+	}
+	out := make([]byte, l)
+	s.CopyTo(out)
+	return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice has only one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+	if len(s) == 1 {
+		s[0].Ref()
+		return s[0]
+	}
+	sLen := s.Len()
+	if sLen == 0 {
+		return emptyBuffer{}
+	}
+	buf := pool.Get(sLen)
+	s.CopyTo(*buf)
+	return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 000000000..4d66b2ccc --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. 
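+//
+// A minimal usage sketch (illustrative only, not upstream documentation;
+// "hello" is an arbitrary payload):
+//
+//	pool := mem.DefaultBufferPool()
+//	buf := mem.Copy([]byte("hello"), pool) // refcount starts at 1
+//	defer buf.Free()                       // returns the bytes to the pool
+//	_ = buf.ReadOnlyData()                 // read, but never modify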
+package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. + Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. 
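+//
+// For illustration, a hedged sketch of sharing one copied Buffer between two
+// goroutines (data, pool, and process are placeholders):
+//
+//	buf := Copy(data, pool)
+//	buf.Ref() // take a second reference for the other goroutine
+//	go func() {
+//		defer buf.Free()
+//		process(buf.ReadOnlyData())
+//	}()
+//	defer buf.Free() // the pool gets the bytes back after both Frees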
+func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. +func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 000000000..d2e15253b --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,295 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := make(MD, len(m))
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := make(MD, len(kv)/2)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	out := make(MD, len(md))
+	for k, v := range md {
+		out[k] = copyOf(v)
+	}
+	return out
+}
+
+// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
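+//
+// A small illustrative example (hypothetical keys and values):
+//
+//	md := metadata.Pairs("k1", "v1")
+//	md.Append("k1", "v2", "v3") // md["k1"] is now [v1 v2 v3]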
+func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // Case insensitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. 
+		if strings.EqualFold(k, key) {
+			return copyOf(v)
+		}
+	}
+	return nil
+}
+
+func copyOf(v []string) []string {
+	vals := make([]string, len(v))
+	copy(vals, v)
+	return vals
+}
+
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+//
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, nil, false
+	}
+
+	return raw.md, raw.added, true
+}
+
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromOutgoingContext(ctx context.Context) (MD, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, false
+	}
+
+	mdSize := len(raw.md)
+	for i := range raw.added {
+		mdSize += len(raw.added[i]) / 2
+	}
+
+	out := make(MD, mdSize)
+	for k, v := range raw.md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
+	}
+	return out, ok
+}
+
+type rawMD struct {
+	md    MD
+	added [][]string
+}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 000000000..499a49c8c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utils.
+package peer
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// LocalAddr is the local address.
+	LocalAddr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
+
+// String ensures the Peer type implements the Stringer interface, so that a
+// context carrying a peerKey value can be printed effectively.
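+//
+// For example (a sketch; assumes ctx carries a Peer set by the gRPC transport):
+//
+//	if p, ok := peer.FromContext(ctx); ok {
+//		log.Printf("request from %s", p) // formatted via String
+//	}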
+func (p *Peer) String() string {
+	if p == nil {
+		return "Peer<nil>"
+	}
+	sb := &strings.Builder{}
+	sb.WriteString("Peer{")
+	if p.Addr != nil {
+		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+	} else {
+		fmt.Fprintf(sb, "Addr: <nil>, ")
+	}
+	if p.LocalAddr != nil {
+		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+	} else {
+		fmt.Fprintf(sb, "LocalAddr: <nil>, ")
+	}
+	if p.AuthInfo != nil {
+		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+	} else {
+		fmt.Fprintf(sb, "AuthInfo: <nil>")
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 000000000..bdaa2130e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,219 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+	// picker is the picker produced by the LB policy. May be nil if a picker
+	// has never been produced.
+	picker balancer.Picker
+	// blockingCh is closed when the picker has been invalidated because there
+	// is a new one available.
+	blockingCh chan struct{}
+}
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
+	pickerGen     atomic.Pointer[pickerGeneration]
+	statsHandlers []stats.Handler // to record blocking picker calls
+}
+
+func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
+	pw := &pickerWrapper{
+		statsHandlers: statsHandlers,
+	}
+	pw.pickerGen.Store(&pickerGeneration{
+		blockingCh: make(chan struct{}),
+	})
+	return pw
+}
+
+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) +} + +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac + ac.incrCallsStarted() + done := result.Done + result.Done = func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { + var ch chan struct{} + + var lastPickErr error + + for { + pg := pw.pickerGen.Load() + if pg == nil { + return nil, balancer.PickResult{}, ErrClientConnClosing + } + if pg.picker == nil { + ch = pg.blockingCh + } + if ch == pg.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - we have already called pick on the current picker. + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + + ch = pg.blockingCh + p := pg.picker + + pickResult, err := p.Pick(info) + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if st, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. 
+			if !failfast {
+				lastPickErr = err
+				continue
+			}
+			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
+		}
+
+		acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
+		if !ok {
+			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
+			continue
+		}
+		if t := acbw.ac.getReadyTransport(); t != nil {
+			if channelz.IsOn() {
+				doneChannelzWrapper(acbw, &pickResult)
+				return t, pickResult, nil
+			}
+			return t, pickResult, nil
+		}
+		if pickResult.Done != nil {
+			// Calling done with nil error, no bytes sent and no bytes received.
+			// DoneInfo with default value works.
+			pickResult.Done(balancer.DoneInfo{})
+		}
+		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		// If ok == false, ac.state is not READY.
+		// A valid picker always returns READY subConn. This means the state of ac
+		// just changed, and picker will be updated shortly.
+		// continue back to the beginning of the for loop to repick.
+	}
+}
+
+func (pw *pickerWrapper) close() {
+	old := pw.pickerGen.Swap(nil)
+	close(old.blockingCh)
+}
+
+// reset clears the pickerWrapper and prepares it for being used again when idle
+// mode is exited.
+func (pw *pickerWrapper) reset() {
+	old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+	close(old.blockingCh)
+}
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+	error
+}
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
new file mode 100644
index 000000000..e87a17f36
--- /dev/null
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a marshalled and compressed object.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PreparedMsg struct {
+	// Fields for preparing the message before sending it.
+	encodedData mem.BufferSlice
+	hdr         []byte
+	payload     mem.BufferSlice
+	pf          payloadFormat
+}
+
+// Encode marshals and compresses the message using the codec and compressor for the stream.
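+//
+// A hedged usage sketch (stream and msg are placeholders; on server streams,
+// SendMsg recognizes a *PreparedMsg and reuses the pre-encoded payload):
+//
+//	var pm grpc.PreparedMsg
+//	if err := pm.Encode(stream, msg); err != nil {
+//		return err
+//	}
+//	return stream.SendMsg(&pm)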
+func (p *PreparedMsg) Encode(s Stream, msg any) error { + ctx := s.Context() + rpcInfo, ok := rpcInfoFromContext(ctx) + if !ok { + return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo") + } + + // check if the context has the relevant information to prepareMsg + if rpcInfo.preloaderInfo == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") + } + if rpcInfo.preloaderInfo.codec == nil { + return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") + } + + // prepare the msg + data, err := encode(rpcInfo.preloaderInfo.codec, msg) + if err != nil { + return err + } + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) + if err != nil { + return err + } + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + + return nil +} diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 000000000..9ace83ccb --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. +reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 000000000..6997e4740 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + "google.golang.org/grpc/reflection/internal" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. 
+func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return internal.V1AlphaToV1Request(resp), nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 000000000..e1f58104d --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+	//
+	// Types that are assignable to MessageRequest:
+	//
+	//	*ServerReflectionRequest_FileByFilename
+	//	*ServerReflectionRequest_FileContainingSymbol
+	//	*ServerReflectionRequest_FileContainingExtension
+	//	*ServerReflectionRequest_AllExtensionNumbersOfType
+	//	*ServerReflectionRequest_ListServices
+	MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
+}
+
+func (x *ServerReflectionRequest) Reset() {
+	*x = ServerReflectionRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ServerReflectionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionRequest) ProtoMessage() {}
+
+func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead.
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerReflectionRequest) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
+}
+
+func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
+	if m != nil {
+		return m.MessageRequest
+	}
+	return nil
+}
+
+func (x *ServerReflectionRequest) GetFileByFilename() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok {
+		return x.FileByFilename
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingSymbol() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok {
+		return x.FileContainingSymbol
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok {
+		return x.FileContainingExtension
+	}
+	return nil
+}
+
+func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
+		return x.AllExtensionNumbersOfType
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetListServices() string {
+	if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok {
+		return x.ListServices
+	}
+	return ""
+}
+
+type isServerReflectionRequest_MessageRequest interface {
+	isServerReflectionRequest_MessageRequest()
+}
+
+type ServerReflectionRequest_FileByFilename struct {
+	// Find a proto file by the file name.
+	FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingSymbol struct {
+	// Find the proto file that declares the given fully-qualified symbol name.
+	// This field should be a fully-qualified symbol name
+	// (e.g. <package>.<service>[.<method>] or <package>.<type>).
+	FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingExtension struct {
+	// Find the proto file which defines an extension extending the given
+	// message type with the given field number. 
+	FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
+}
+
+type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+	// Finds the tag numbers used by all known extensions of the given message
+	// type, and appends them to ExtensionNumberResponse in an undefined order.
+	// Its corresponding method is best-effort: it's not guaranteed that the
+	// reflection service will implement this method, and it's not guaranteed
+	// that this method will provide all extensions. Returns
+	// StatusCode::UNIMPLEMENTED if it's not implemented.
+	// This field should be a fully-qualified type name. The format is
+	// <package>.<type>
+	AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
+}
+
+type ServerReflectionRequest_ListServices struct {
+	// List the full names of registered services. The content will not be
+	// checked.
+	ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
+}
+
+func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
+
+func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+type ExtensionRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Fully-qualified type name. The format should be <package>.<type>
+	ContainingType  string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+	ExtensionNumber int32  `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionRequest) Reset() {
+	*x = ExtensionRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExtensionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionRequest) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExtensionRequest) GetContainingType() string {
+	if x != nil {
+		return x.ContainingType
+	}
+	return ""
+}
+
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+	if x != nil {
+		return x.ExtensionNumber
+	}
+	return 0
+}
+
+// The message sent by the server to answer ServerReflectionInfo method. 
+type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive 
dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. 
+type ExtensionNumberResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Full name of the base type, including the package name. The format
+ // is <package>.<type>
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+ ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionNumberResponse) Reset() {
+ *x = ExtensionNumberResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionNumberResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+ if x != nil {
+ return x.BaseTypeName
+ }
+ return ""
+}
+
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+ if x != nil {
+ return x.ExtensionNumber
+ }
+ return nil
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+type ListServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The information of each service may be expanded in the future, so we use
+ // ServiceResponse message to encapsulate it.
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *ListServiceResponse) Reset() {
+ *x = ListServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+type ServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Full name of a registered service, including its package name. The format
+ // is <package>.<service>
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *ServiceResponse) Reset() {
+ *x = ServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceResponse) ProtoMessage() {}
+
+func (x *ServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead.
+func (*ServiceResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ServiceResponse) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The error code and error message sent by the server when an error occurs.
+type ErrorResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This field uses the error codes defined in grpc::StatusCode.
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ErrorResponse) Reset() {
+ *x = ErrorResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorResponse) ProtoMessage() {}
+
+func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []any{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []any{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git 
a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 000000000..031082807 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,138 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse] + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility. +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error +} + +// UnimplementedServerReflectionServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedServerReflectionServer struct{} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} +func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + // If the following call panics, it indicates UnimplementedServerReflectionServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse] + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 000000000..0582e16af --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,1028 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionRequest) GetListServices() string {
+ if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok {
+ return x.ListServices
+ }
+ return ""
+}
+
+type isServerReflectionRequest_MessageRequest interface {
+ isServerReflectionRequest_MessageRequest()
+}
+
+type ServerReflectionRequest_FileByFilename struct {
+ // Find a proto file by the file name.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingSymbol struct {
+ // Find the proto file that declares the given fully-qualified symbol name.
+ // This field should be a fully-qualified symbol name
+ // (e.g. <package>.<service>[.<method>] or <package>.<type>).
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingExtension struct {
+ // Find the proto file which defines an extension extending the given
+ // message type with the given field number.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
+}
+
+type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+ // Finds the tag numbers used by all known extensions of extendee_type, and
+ // appends them to ExtensionNumberResponse in an undefined order.
+ // Its corresponding method is best-effort: it's not guaranteed that the
+ // reflection service will implement this method, and it's not guaranteed
+ // that this method will provide all extensions. Returns
+ // StatusCode::UNIMPLEMENTED if it's not implemented.
+ // This field should be a fully-qualified type name. The format is
+ // <package>.<type>
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
+}
+
+type ServerReflectionRequest_ListServices struct {
+ // List the full names of registered services. The content will not be
+ // checked.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
+}
+
+func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
+
+func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+type ExtensionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Fully-qualified type name. The format should be <package>.<type>
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionRequest) Reset() {
+ *x = ExtensionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionRequest) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1}
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionRequest) GetContainingType() string {
+ if x != nil {
+ return x.ContainingType
+ }
+ return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+ if x != nil {
+ return x.ExtensionNumber
+ }
+ return 0
+}
+
+// The message sent by the server to answer ServerReflectionInfo method.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+type ServerReflectionResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
+ // The server set one of the following fields according to the message_request
+ // in the request.
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
+}
+
+func (x *FileDescriptorResponse) Reset() {
+ *x = FileDescriptorResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileDescriptorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorResponse) ProtoMessage() {}
+
+func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead.
+func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3}
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
+ if x != nil {
+ return x.FileDescriptorProto
+ }
+ return nil
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+type ExtensionNumberResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Full name of the base type, including the package name. The format
+ // is <package>.<type>
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+}
+
+func (x *ExtensionNumberResponse) Reset() {
+ *x = ExtensionNumberResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionNumberResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4}
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+ if x != nil {
+ return x.BaseTypeName
+ }
+ return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+ if x != nil {
+ return x.ExtensionNumber
+ }
+ return nil
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+type ListServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The information of each service may be expanded in the future, so we use
+ // ServiceResponse message to encapsulate it.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *ListServiceResponse) Reset() {
+ *x = ListServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5}
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+type ServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Full name of a registered service, including its package name. The format
+ // is <package>.<service>
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. +func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 
0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []any{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse +} +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest + 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest + 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse + 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse + 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse + 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse + 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest + 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name 
+ 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []any{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: 
file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 000000000..80755d74d --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,135 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream} + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse] + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility. +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error +} + +// UnimplementedServerReflectionServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedServerReflectionServer struct{} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} +func (UnimplementedServerReflectionServer) testEmbeddedByValue() {} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + // If the following call panics, it indicates UnimplementedServerReflectionServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse] + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ServerReflection_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.reflection.v1alpha.ServerReflection",
+	HandlerType: (*ServerReflectionServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "ServerReflectionInfo",
+			Handler:       _ServerReflection_ServerReflectionInfo_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "grpc/reflection/v1alpha/reflection.proto",
+}
diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go
new file mode 100644
index 000000000..902fc6d35
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/internal/internal.go
@@ -0,0 +1,436 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code that is shared by both the reflection package
+// and the test package. The packages are split in this way in order to avoid a
+// dependency on the deprecated package github.com/golang/protobuf.
+package internal
+
+import (
+	"io"
+	"sort"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+// ServiceInfoProvider is an interface used to retrieve metadata about the
+// services to expose.
+type ServiceInfoProvider interface {
+	GetServiceInfo() map[string]grpc.ServiceInfo
+}
+
+// ExtensionResolver is the interface used to query details about extensions.
+// This interface is satisfied by protoregistry.GlobalTypes.
+type ExtensionResolver interface {
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// ServerReflectionServer is the server API for ServerReflection service.
+type ServerReflectionServer struct {
+	v1alphareflectiongrpc.UnimplementedServerReflectionServer
+	S            ServiceInfoProvider
+	DescResolver protodesc.Resolver
+	ExtResolver  ExtensionResolver
+}
+
+// FileDescWithDependencies returns a slice of serialized fileDescriptors in
+// wire format ([]byte). The fileDescriptors will include fd and all the
+// transitive dependencies of fd with names not in sentFileDescriptors.
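+//
+// A hedged usage sketch (the file path, the `sent` map, and the server value
+// `s` below are illustrative, not part of this API):
+//
+//	sent := make(map[string]bool)
+//	fd, err := protoregistry.GlobalFiles.FindFileByPath("grpc/reflection/v1alpha/reflection.proto")
+//	if err == nil {
+//		encoded, _ := s.FileDescWithDependencies(fd, sent)
+//		// encoded holds fd plus its transitive imports in wire format;
+//		// a second call reusing `sent` skips files already serialized.
+//	}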
+func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) {
+	if fd.IsPlaceholder() {
+		// If the given root file is a placeholder, treat it
+		// as missing instead of serializing it.
+		return nil, protoregistry.NotFound
+	}
+	var r [][]byte
+	queue := []protoreflect.FileDescriptor{fd}
+	for len(queue) > 0 {
+		currentfd := queue[0]
+		queue = queue[1:]
+		if currentfd.IsPlaceholder() {
+			// Skip any missing files in the dependency graph.
+			continue
+		}
+		if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent {
+			sentFileDescriptors[currentfd.Path()] = true
+			fdProto := protodesc.ToFileDescriptorProto(currentfd)
+			currentfdEncoded, err := proto.Marshal(fdProto)
+			if err != nil {
+				return nil, err
+			}
+			r = append(r, currentfdEncoded)
+		}
+		for i := 0; i < currentfd.Imports().Len(); i++ {
+			queue = append(queue, currentfd.Imports().Get(i))
+		}
+	}
+	return r, nil
+}
+
+// FileDescEncodingContainingSymbol finds the file descriptor containing the
+// given symbol, finds all of its previously unsent transitive dependencies,
+// marshals them, and returns the marshalled result. The given symbol
+// can be a type, a service, or a method.
+func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) {
+	d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name))
+	if err != nil {
+		return nil, err
+	}
+	return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors)
+}
+
+// FileDescEncodingContainingExtension finds the file descriptor containing
+// the given extension, finds all of its previously unsent transitive
+// dependencies, marshals them, and returns the marshalled result.
+func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) {
+	xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum))
+	if err != nil {
+		return nil, err
+	}
+	return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors)
+}
+
+// AllExtensionNumbersForTypeName returns all extension numbers for the given type.
+func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) {
+	var numbers []int32
+	s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool {
+		numbers = append(numbers, int32(xt.TypeDescriptor().Number()))
+		return true
+	})
+	sort.Slice(numbers, func(i, j int) bool {
+		return numbers[i] < numbers[j]
+	})
+	if len(numbers) == 0 {
+		// Maybe return an error if the given type name is not known.
+		if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil {
+			return nil, err
+		}
+	}
+	return numbers, nil
+}
+
+// ListServices returns the names of services this server exposes.
+func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse {
+	serviceInfo := s.S.GetServiceInfo()
+	resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo))
+	for svc := range serviceInfo {
+		resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc})
+	}
+	sort.Slice(resp, func(i, j int) bool {
+		return resp[i].Name < resp[j].Name
+	})
+	return resp
+}
+
+// ServerReflectionInfo is the reflection service handler.
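+//
+// A client-side sketch of exercising this handler over an established
+// *grpc.ClientConn named cc (illustrative only; cc and error handling are
+// assumed):
+//
+//	client := v1reflectiongrpc.NewServerReflectionClient(cc)
+//	stream, _ := client.ServerReflectionInfo(context.Background())
+//	_ = stream.Send(&v1reflectionpb.ServerReflectionRequest{
+//		MessageRequest: &v1reflectionpb.ServerReflectionRequest_ListServices{},
+//	})
+//	resp, _ := stream.Recv()
+//	// resp.GetListServicesResponse() carries the sorted service names.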
+func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + sentFileDescriptors := make(map[string]bool) + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &v1reflectionpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.DescResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.FileDescWithDependencies(fd, sentFileDescriptors) + } + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: s.ListServices(), + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} + +// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha. 
+func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1. 
+func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha. +func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} + +// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1. 
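+//
+// Together, these converters let one handler serve both API versions; for
+// example, a v1alpha-only peer can be bridged onto the v1 handler roughly
+// like this (a sketch; alphaReq and v1resp are assumed values):
+//
+//	v1req := V1AlphaToV1Request(alphaReq)    // up-convert the request
+//	// ... produce v1resp with the v1 handler ...
+//	alphaResp := V1ToV1AlphaResponse(v1resp) // down-convert the response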
+func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { + var v1 v1reflectionpb.ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1reflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 000000000..13a94e2dd --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,160 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. 
+ reflection.Register(s) + + s.Serve(lis) +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "google.golang.org/grpc" + "google.golang.org/grpc/reflection/internal" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. +type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider +} + +var _ GRPCServer = (*grpc.Server)(nil) + +// Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. +func Register(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. +func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo +} + +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) +} + +// ServerOptions represents the options used to construct a reflection server. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. 
+	ExtensionResolver ExtensionResolver
+}
+
+// NewServer returns a reflection server implementation using the given options.
+// This can be used to customize behavior of the reflection service. Most usages
+// should prefer to use Register instead. For backwards compatibility reasons,
+// this returns the v1alpha version of the reflection server. For a v1 version
+// of the reflection server, see NewServerV1.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer {
+	return asV1Alpha(NewServerV1(opts))
+}
+
+// NewServerV1 returns a reflection server implementation using the given options.
+// This can be used to customize behavior of the reflection service. Most usages
+// should prefer to use Register instead.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer {
+	if opts.DescriptorResolver == nil {
+		opts.DescriptorResolver = protoregistry.GlobalFiles
+	}
+	if opts.ExtensionResolver == nil {
+		opts.ExtensionResolver = protoregistry.GlobalTypes
+	}
+	return &internal.ServerReflectionServer{
+		S:            opts.Services,
+		DescResolver: opts.DescriptorResolver,
+		ExtResolver:  opts.ExtensionResolver,
+	}
+}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 000000000..ef3d6ed6c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"time"
+
+	"google.golang.org/grpc/internal/resolver/dns"
+	"google.golang.org/grpc/resolver"
+)
+
+// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
+//
+// This function affects the global timeout used by all channels using the DNS
+// name resolver scheme.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+//
+// The default value is 30 seconds. Setting the timeout too low may result in
+// premature timeouts during resolution, while setting it too high may lead to
+// unnecessary delays in service discovery. Choose a value appropriate for your
+// specific needs and network environment.
+func SetResolvingTimeout(timeout time.Duration) {
+	dns.ResolvingTimeout = timeout
+}
+
+// NewBuilder creates a dnsBuilder which is used to build DNS resolvers.
+//
+// Deprecated: import grpc and use resolver.Get("dns") instead.
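+// For example (a sketch; importing the grpc package registers the dns
+// builder as a side effect, so Get returns a non-nil Builder):
+//
+//	b := resolver.Get("dns")
+//	// b is the same builder this deprecated function would return.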
+func NewBuilder() resolver.Builder {
+	return dns.NewBuilder()
+}
+
+// SetMinResolutionInterval sets the default minimum interval at which DNS
+// re-resolutions are allowed. This helps to prevent excessive re-resolution.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+func SetMinResolutionInterval(d time.Duration) {
+	dns.MinResolutionInterval = d
+}
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 000000000..ada5b9bb7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,251 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+type addressMapEntry struct {
+	addr  Address
+	value any
+}
+
+// AddressMap is a map of addresses to arbitrary values taking into account
+// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently. Must be created via
+// NewAddressMap; do not construct directly.
+type AddressMap struct {
+	// The underlying map is keyed by an Address with fields that we don't care
+	// about being set to their zero values. The only fields that we care about
+	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+	// distinguish between addresses with same `Addr` and `ServerName`, but
+	// different `Attributes`, we cannot store the `Attributes` in the map key.
+	//
+	// The comparison operation for structs works as follows:
+	// Struct values are comparable if all their fields are comparable. Two
+	// struct values are equal if their corresponding non-blank fields are equal.
+	//
+	// The value type of the map contains a slice of addresses which match the key
+	// in their `Addr` and `ServerName` fields and contain the corresponding value
+	// associated with them.
+	m map[Address]addressMapEntryList
+}
+
+func toMapKey(addr *Address) Address {
+	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
+}
+
+type addressMapEntryList []*addressMapEntry
+
+// NewAddressMap creates a new AddressMap.
+func NewAddressMap() *AddressMap {
+	return &AddressMap{m: make(map[Address]addressMapEntryList)}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList) find(addr Address) int {
+	for i, entry := range l {
+		// Attributes are the only thing to match on here, since `Addr` and
+		// `ServerName` are already equal.
+		if entry.addr.Attributes.Equal(addr.Attributes) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Get returns the value for the address in the map, if present.
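+//
+// For example (a sketch; values are illustrative):
+//
+//	m := NewAddressMap()
+//	a := Address{Addr: "10.0.0.1:443", ServerName: "svc"}
+//	m.Set(a, "conn-1")
+//	v, ok := m.Get(a) // v == "conn-1", ok == true
+//	// An address equal in Addr and ServerName but carrying different
+//	// Attributes is a distinct key.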
+func (a *AddressMap) Get(addr Address) (value any, ok bool) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		return entryList[entry].value, true
+	}
+	return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMap) Set(addr Address, value any) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		entryList[entry].value = value
+		return
+	}
+	a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMap) Delete(addr Address) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	entry := entryList.find(addr)
+	if entry == -1 {
+		return
+	}
+	if len(entryList) == 1 {
+		entryList = nil
+	} else {
+		copy(entryList[entry:], entryList[entry+1:])
+		entryList = entryList[:len(entryList)-1]
+	}
+	a.m[addrKey] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMap) Len() int {
+	ret := 0
+	for _, entryList := range a.m {
+		ret += len(entryList)
+	}
+	return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMap) Keys() []Address {
+	ret := make([]Address, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.addr)
+		}
+	}
+	return ret
+}
+
+// Values returns a slice of all current map values.
+func (a *AddressMap) Values() []any {
+	ret := make([]any, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.value)
+		}
+	}
+	return ret
+}
+
+type endpointNode struct {
+	addrs map[string]struct{}
+}
+
+// Equal returns whether the unordered sets of addrs are the same between the
+// endpoint nodes.
+func (en *endpointNode) Equal(en2 *endpointNode) bool {
+	if len(en.addrs) != len(en2.addrs) {
+		return false
+	}
+	for addr := range en.addrs {
+		if _, ok := en2.addrs[addr]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func toEndpointNode(endpoint Endpoint) endpointNode {
+	en := make(map[string]struct{})
+	for _, addr := range endpoint.Addresses {
+		en[addr.Addr] = struct{}{}
+	}
+	return endpointNode{
+		addrs: en,
+	}
+}
+
+// EndpointMap is a map of endpoints to arbitrary values keyed on only the
+// unordered set of address strings within an endpoint. This map is not
+// thread-safe, thus it is unsafe to access concurrently. Must be created via
+// NewEndpointMap; do not construct directly.
+type EndpointMap struct {
+	endpoints map[*endpointNode]any
+}
+
+// NewEndpointMap creates a new EndpointMap.
+func NewEndpointMap() *EndpointMap {
+	return &EndpointMap{
+		endpoints: make(map[*endpointNode]any),
+	}
+}
+
+// Get returns the value for the address in the map, if present.
+func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
+	en := toEndpointNode(e)
+	if endpoint := em.find(en); endpoint != nil {
+		return em.endpoints[endpoint], true
+	}
+	return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (em *EndpointMap) Set(e Endpoint, value any) {
+	en := toEndpointNode(e)
+	if endpoint := em.find(en); endpoint != nil {
+		em.endpoints[endpoint] = value
+		return
+	}
+	em.endpoints[&en] = value
+}
+
+// Len returns the number of entries in the map.
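+//
+// Because keys are unordered address sets, two endpoints listing the same
+// addresses in a different order collide; for example (a sketch):
+//
+//	em := NewEndpointMap()
+//	em.Set(Endpoint{Addresses: []Address{{Addr: "a"}, {Addr: "b"}}}, 1)
+//	em.Set(Endpoint{Addresses: []Address{{Addr: "b"}, {Addr: "a"}}}, 2)
+//	// em.Len() == 1; the second Set overwrote the first value.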
+func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 000000000..202854511 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,332 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. 
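+//
+// The typical pattern is registration from an init function (a sketch;
+// exampleBuilder is an assumed implementation of Builder):
+//
+//	func init() {
+//		resolver.Register(&exampleBuilder{})
+//	}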
+func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is registered with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// scheme is initially set to "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme + internal.UserSetDefaultScheme = true +} + +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead. +func GetDefaultScheme() string { + return defaultScheme +} + +// Address represents a server the client connects to. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the SubConn. + Attributes *attributes.Attributes + + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. + // + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decisions. + // + // Deprecated: use Attributes instead. + Metadata any +} + +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Metadata == o.Metadata +}
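+
+// What follows is a minimal, illustrative sketch of a Builder/Resolver pair
+// wired into the registry above; the "static" scheme and the staticBuilder
+// and staticResolver names are assumptions for the example, not part of this
+// package.
+type staticBuilder struct{ addrs []string }
+
+func init() { Register(staticBuilder{addrs: []string{"10.0.0.1:443", "10.0.0.2:443"}}) }
+
+func (staticBuilder) Scheme() string { return "static" }
+
+func (b staticBuilder) Build(_ Target, cc ClientConn, _ BuildOptions) (Resolver, error) {
+	r := &staticResolver{cc: cc, addrs: b.addrs}
+	r.ResolveNow(ResolveNowOptions{})
+	return r, nil
+}
+
+type staticResolver struct {
+	cc    ClientConn
+	addrs []string
+}
+
+func (r *staticResolver) ResolveNow(ResolveNowOptions) {
+	state := State{}
+	for _, a := range r.addrs {
+		state.Addresses = append(state.Addresses, Address{Addr: a})
+	}
+	// A static list cannot change on re-resolution, so the error (which would
+	// otherwise signal that gRPC wants a backed-off retry) is ignored here.
+	_ = r.cc.UpdateState(state)
+}
+
+func (r *staticResolver) Close() {}
+
+// String returns a JSON-formatted string representation of the address.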
+func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string +} + +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address + + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. 
+ ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For situations like testing, +// the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by the resolver to notify the ClientConn of a new + // list of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user, and gRPC passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. +type Target struct { + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint", the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket".
Since we + // own the "unix" resolver, we can work around this in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + +// String returns the canonical string representation of Target. +func (t Target) String() string { + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint; the resolver can ignore it if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..23bb3fb25 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine that the wrapper +// has finished shutting down, the caller should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.TrySchedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +}
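+
+// An illustrative restatement of the Addresses-to-Endpoints compatibility
+// shim performed by UpdateState above (the function name is an assumption;
+// the logic mirrors the loop in UpdateState): each Address becomes a
+// single-address Endpoint, with its BalancerAttributes hoisted onto the
+// Endpoint and cleared on the Address.
+func exampleSynthesizeEndpoints(addrs []resolver.Address) []resolver.Endpoint {
+	endpoints := make([]resolver.Endpoint, 0, len(addrs))
+	for _, a := range addrs {
+		ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+		ep.Addresses[0].BalancerAttributes = nil
+		endpoints = append(endpoints, ep)
+	}
+	return endpoints
+}
+
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.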
+func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 000000000..2d96f1405 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,1069 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "math" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompresses it. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return io.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +}
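+
+// An illustrative round trip through the deprecated Compressor/Decompressor
+// pair defined above (new code should use the encoding and encoding/gzip
+// packages instead, per the deprecation notices). A strings.Builder stands in
+// as the io.Writer so the sketch needs no additional imports.
+func exampleGzipRoundTrip(msg []byte) ([]byte, error) {
+	var compressed strings.Builder
+	if err := NewGZIPCompressor().Do(&compressed, msg); err != nil {
+		return nil, err
+	}
+	return NewGZIPDecompressor().Do(strings.NewReader(compressed.String()))
+}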
+// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int + onFinish []func(err error) +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo, *csAttempt) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// StaticMethod returns a CallOption which specifies that a call is being made +// to a method that is static, which means the method is known at compile time +// and doesn't change at runtime. This can be used as a signal to stats plugins +// that this method is safe to include as a key to a measurement. +func StaticMethod() CallOption { + return StaticMethodCallOption{} +} + +// StaticMethodCallOption is a CallOption that specifies that a call comes +// from a static method. +type StaticMethodCallOption struct { + EmptyCallOption +} + +// Header returns a CallOption that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() +} + +// Trailer returns a CallOption that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes.
+// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x + } +} + +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. +// +// By default, RPCs do not "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} + +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. It is mainly intended for streaming interceptors, which can use it +// to be notified when the RPC completes along with information about its +// status. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is a CallOption that indicates a callback to be called +// when the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
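+
+// An illustrative combination of the CallOptions defined in this file on a
+// unary RPC; the method name and any-typed messages are placeholders for the
+// example. Header is only populated after the RPC completes.
+func exampleCallOptions(ctx context.Context, cc *ClientConn, req, reply any) (metadata.MD, error) {
+	var hdr metadata.MD
+	err := cc.Invoke(ctx, "/example.Service/Method", req, reply,
+		WaitForReady(true),         // wait out TRANSIENT_FAILURE instead of failing fast
+		MaxCallRecvMsgSize(16<<20), // raise the 4MB receive default to 16MB
+		Header(&hdr),               // response header metadata, filled in after completion
+	)
+	return hdr, err
+}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.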
+func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = newCodecV1Bridge(o.Codec) + return nil +} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = newCodecV0Bridge(o.Codec) + return nil +} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r streamReader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte + + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// +// No other error values or types must be returned, which also means +// that the underlying streamReader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { + return 0, nil, err + } + + pf := payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + + data, err := p.r.Read(int(length)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, data, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. 
If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(b.Len()) > math.MaxUint32 { + b.Free() + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", b.Len()) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil + } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) + wrapErr := func(err error) error { + out.Free() + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + if compressor != nil { + z, err := compressor.Compress(w) + if err != nil { + return nil, 0, wrapErr(err) + } + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } + } + if err := z.Close(); err != nil { + return nil, 0, wrapErr(err) + } + } else { + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } + } + return out, compressionMade, nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +)
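+
+// An illustrative decoder for the 5-byte message prefix produced by msgHeader
+// below and consumed by parser.recvMsg above: one payload-format byte (the
+// compressed flag) followed by a 4-byte big-endian payload length.
+func exampleParseFrameHeader(hdr [5]byte) (compressed bool, length uint32) {
+	return payloadFormat(hdr[0]).isCompressed(), binary.BigEndian.Uint32(hdr[1:])
+}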
+// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { + hdr = make([]byte, headerLen) + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData + } else { + length = uint32(data.Len()) + payload = data + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload +} + +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + compressedLength int // The compressed length got from wire. + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } +} + +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// The caller should free the returned buffer as soon as it is no longer needed. +// TODO: Refactor this function to reduce the number of arguments. +// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() + } + + var size int + if pf.isCompressed() { + defer compressed.Free() + + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } + size = len(uncompressedBuf) + } else { + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + out.Free() + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } + } else { + out = compressed + } + + if payInfo != nil { + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out + } + + return out, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, 0, err + } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err + } + return out, out.Len(), nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) + if err != nil { + return err + } + + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. 
+ defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + + return nil +} + +// rpcInfo contains information about an RPC. +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// compressorInfo stores the codec and the compressors for a stream. If a +// stream's context carries an rpcInfo with non-nil pointers to the codec and +// compressors, a PreparedMsg can marshal messages asynchronously ahead of time +// and reuse the marshalled bytes. +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...any) error { + return status.Errorf(c, format, a...) +} + +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: + return errContextDeadline + case context.Canceled: + return errContextCanceled + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } + + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err + } + + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.CodecV2); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default.
+ c.codec = getCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = getCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 9. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true + SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 000000000..d1e1415a4 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,2208 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. 
+ listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption + internal.BufferPool = bufferPool +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType any + Methods []MethodDesc + Streams []StreamDesc + Metadata any +} + +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata any +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. 
+ conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events traceEventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines + + channelz *channelz.Server + + serverWorkerChannel chan func() + serverWorkerChannelClose func() +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger + inTapHandle tap.ServerInHandle + statsHandlers []stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + sharedWriteBuffer bool + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 + bufferPool mem.BufferPool + waitForHandlers bool +} + +var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, + bufferPool: mem.DefaultBufferPool(), +} +var globalServerOptions []ServerOption + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
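+//
+// A minimal sketch of enabling it (illustrative, not upstream-prescribed):
+//
+//	s := grpc.NewServer(grpc.SharedWriteBuffer(true))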
+func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = internal.KeepaliveMinServerPingTime + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = newCodecV0Bridge(codec) + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. 
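+//
+// For example (an illustrative sketch; myCodec stands in for any
+// encoding.Codec implementation of your own):
+//
+//	s := grpc.NewServer(grpc.ForceServerCodec(myCodec))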
+// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. 
Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. 
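+//
+// A sketch of a catch-all handler (illustrative only):
+//
+//	s := grpc.NewServer(grpc.UnknownServiceHandler(func(srv any, stream grpc.ServerStream) error {
+//		method, _ := grpc.MethodFromServerStream(stream)
+//		return status.Errorf(codes.Unimplemented, "no handler for %q", method)
+//	}))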
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +func bufferPool(bufferPool mem.BufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.bufferPool = bufferPool + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. 
Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorker blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel + if !ok { + return + } + f() + } + go s.serverWorker() +} + +// initServerWorkers creates worker goroutines and a channel to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + go s.serverWorker() + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + channelz.Info(logger, s.channelz, "Server created") + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...any) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...any) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl any) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss any) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata any +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +type listenSocket struct { + net.Listener + channelz *channelz.Socket +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. 
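+//
+// A typical startup sequence (illustrative; pb.RegisterGreeterServer stands in
+// for whatever registration helper your IDL generates):
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatalf("failed to listen: %v", err)
+//	}
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeterServer{})
+//	if err := s.Serve(lis); err != nil {
+//		log.Fatalf("failed to serve: %v", err)
+//	}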
+// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), + } + s.lis[ls] = true + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + s.mu.Unlock() + channelz.Info(logger, ls.channelz, "ListenSocket created") + + var tempDelay time.Duration // how long to sleep on accept failure + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. 
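+// It completes the credentials and HTTP/2 handshake via newHTTP2Transport and
+// then hands the resulting transport to serveStreams on its own goroutine.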
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) + if st == nil { + return + } + + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(context.Background(), st, rawConn) + s.removeConn(lisAddr, st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, + StatsHandlers: s.opts.statsHandlers, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParent: s.channelz, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } + return nil + } + + return st +} + +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer s.handlersWG.Done() + s.handleStream(st, stream) + } + + if s.opts.numServerWorkers > 0 { + select { + case s.serverWorkerChannel <- f: + return + default: + // If all stream workers are busy, fallback to the default code path. + } + } + go f() + }) +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. 
When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + if err != nil { + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(r.Context(), st, nil) +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close(errors.New("Server.addConn called when server has already been stopped")) + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain("") + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) incrCallsStarted() { + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + s.channelz.ServerMetrics.CallsSucceeded.Add(1) +} + +func (s *Server) incrCallsFailed() { + s.channelz.ServerMetrics.CallsFailed.Add(1) +} + +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) + return err + } + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) + if err != nil { + data.Free() + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) + return err + } + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. 
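+		// (Note: Free on a nil or empty BufferSlice is a no-op, so these
+		// deferred calls are safe even when no compression was applied.)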
+ }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil { + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } + } + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(ctx, end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + var sendCompressorName string + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + sendCompressorName = cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
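+		// For example, a request received with grpc-encoding: gzip is
+		// answered with a gzip-compressed response, provided the gzip
+		// compressor is registered on the server.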
+ comp = encoding.GetCompressor(rc) + if comp != nil { + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + var payInfo *payloadInfo + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + defer payInfo.free() + } + + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return err + } + defer d.Free() + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v any) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + + for _, sh := range shs { + sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: d.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: d.Materialize(), + } + for _, binlog := range binlogs { + binlog.Log(ctx, cm) + } + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if len(binlogs) != 0 { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + sh := &binarylog.ServerHeader{ + Header: h, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + } + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
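+				// A connection-level error means the transport is going
+				// away; there is no usable stream left to write a status to.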
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, st) + } + } + return err + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + sm := &binarylog.ServerMessage{ + Message: reply, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) + } + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } + } + return t.WriteStatus(stream, statusOK) +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt +} + +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + shs := s.opts.statsHandlers + var statsBegin *stats.Begin + if len(shs) != 0 { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(ctx, statsBegin) + } + } + ctx = NewContextWithServerTransportStream(ctx, stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: shs, + } + + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See comment in 
processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + for _, sh := range shs { + sh.HandleRPC(ctx, end) + } + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, logEntry) + } + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + ss.sendCompressorName = s.opts.cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server any + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
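+			// status.FromContextError maps context.Canceled and
+			// context.DeadlineExceeded to the corresponding gRPC codes, and
+			// anything else to Unknown.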
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, st) + } + } + return t.WriteStatus(ss.s, statusOK) +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = newTraceContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() + } + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) + } + if ti != nil { + ti.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(ctx, t, stream, srv, md, ti) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + return + } + } + // Unknown service, or known server unknown method. 
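+	// If a catch-all handler was installed via UnknownServiceHandler, route
+	// the stream there instead of failing with codes.Unimplemented.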
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() + } + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) + } + if ti != nil { + ti.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.stop(false) +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) + s.mu.Lock() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
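+	// The mutex is released first because Serve's deferred cleanup re-acquires
+	// s.mu; holding it across serveWG.Wait() would deadlock.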
+ s.mu.Unlock() + s.serveWG.Wait() + + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() + } + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + + if s.opts.numServerWorkers > 0 { + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() + } + + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } + } +} + +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } + } + s.drain = true + } +} + +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() + } + s.lis = nil +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return getCodec(proto.Name) + } + codec := getCodec(contentSubtype) + if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) + return getCodec(proto.Name) + } + return codec +} + +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. 
For unary handlers, this occurs when
+// the handler returns; for streaming handlers, this can happen when the
+// stream's SendMsg method is called.
+// - An RPC status is sent out (error or success). This occurs when the handler
+// returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetHeader(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetHeader(md)
+}
+
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list). The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	if err := stream.SendHeader(md); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+// SetSendCompressor sets a compressor for outbound messages from the server.
+// It must not be called after any event that causes headers to be sent
+// (see ServerStream.SetHeader for the complete list). The provided compressor
+// is used when the following conditions are met:
+//
+//   - The compressor is registered via encoding.RegisterCompressor.
+//   - The compressor name exists in the compressor names advertised by the
+//     client in the grpc-accept-encoding header. Use ClientSupportedCompressors
+//     to get the client supported compressor names.
+//
+// The context provided must be the context passed to the server's handler.
+// Note that the compressor name encoding.Identity disables outbound
+// compression.
+// By default, server messages are sent using the same compressor with which
+// request messages were sent.
+//
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
+// SendMsg.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SetSendCompressor(ctx context.Context, name string) error {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	if !ok || stream == nil {
+		return fmt.Errorf("failed to fetch the stream from the given context")
+	}
+
+	if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
+		return fmt.Errorf("unable to set send compressor: %w", err)
+	}
+
+	return stream.SetSendCompress(name)
+}
+
+// ClientSupportedCompressors returns compressor names advertised by the client
+// via the grpc-accept-encoding header.
+//
+// The context provided must be the context passed to the server's handler.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
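+//
+// A minimal sketch of how a handler might combine it with SetSendCompressor
+// (illustrative only; assumes the encoding/gzip package is imported for its
+// Name constant):
+//
+//	comps, err := grpc.ClientSupportedCompressors(ctx)
+//	if err == nil && slices.Contains(comps, gzip.Name) {
+//		_ = grpc.SetSendCompressor(ctx, gzip.Name)
+//	}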
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return stream.ClientAdvertisedCompressors(), nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name string, clientCompressors []string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range clientCompressors { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 000000000..2671c5ef6 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,356 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig = internalserviceconfig.MethodConfig + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig serviceconfig.LoadBalancingConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. 
+ ServiceName string +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *internalserviceconfig.Duration + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
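+// An illustrative service config JSON that unmarshals into this struct (the
+// service and method names here are hypothetical):
+//
+//	{
+//	  "loadBalancingPolicy": "round_robin",
+//	  "methodConfig": [{
+//	    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
+//	    "waitForReady": true,
+//	    "timeout": "1s",
+//	    "retryPolicy": {
+//	      "maxAttempts": 3,
+//	      "initialBackoff": "0.1s",
+//	      "maxBackoff": "1s",
+//	      "backoffMultiplier": 2,
+//	      "retryableStatusCodes": ["UNAVAILABLE"]
+//	    }
+//	  }]
+//	}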
+type jsonSC struct {
+	LoadBalancingPolicy *string
+	LoadBalancingConfig *json.RawMessage
+	MethodConfig        *[]jsonMC
+	RetryThrottling     *retryThrottlingPolicy
+	HealthCheckConfig   *healthCheckConfig
+}
+
+func init() {
+	internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+		return parseServiceConfig(js, defaultMaxCallAttempts)
+	}
+}
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
+	if len(js) == 0 {
+		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
+	}
+	var rsc jsonSC
+	err := json.Unmarshal([]byte(js), &rsc)
+	if err != nil {
+		logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc := ServiceConfig{
+		Methods:           make(map[string]MethodConfig),
+		retryThrottling:   rsc.RetryThrottling,
+		healthCheckConfig: rsc.HealthCheckConfig,
+		rawJSONString:     js,
+	}
+	c := rsc.LoadBalancingConfig
+	if c == nil {
+		name := pickfirst.Name
+		if rsc.LoadBalancingPolicy != nil {
+			name = *rsc.LoadBalancingPolicy
+		}
+		if balancer.Get(name) == nil {
+			name = pickfirst.Name
+		}
+		cfg := []map[string]any{{name: struct{}{}}}
+		strCfg, err := json.Marshal(cfg)
+		if err != nil {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
+		}
+		r := json.RawMessage(strCfg)
+		c = &r
+	}
+	cfg, err := gracefulswitch.ParseConfig(*c)
+	if err != nil {
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc.lbConfig = cfg
+
+	if rsc.MethodConfig == nil {
+		return &serviceconfig.ParseResult{Config: &sc}
+	}
+
+	paths := map[string]struct{}{}
+	for _, m := range *rsc.MethodConfig {
+		if m.Name == nil {
+			continue
+		}
+
+		mc := MethodConfig{
+			WaitForReady: m.WaitForReady,
+			Timeout:      (*time.Duration)(m.Timeout),
+		}
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
+			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		if m.MaxRequestMessageBytes != nil {
+			if *m.MaxRequestMessageBytes > int64(maxInt) {
+				mc.MaxReqSize = newInt(maxInt)
+			} else {
+				mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+			}
+		}
+		if m.MaxResponseMessageBytes != nil {
+			if *m.MaxResponseMessageBytes > int64(maxInt) {
+				mc.MaxRespSize = newInt(maxInt)
+			} else {
+				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+			}
+		}
+		for i, n := range *m.Name {
+			path, err := n.generatePath()
+			if err != nil {
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+
+			if _, ok := paths[path]; ok {
+				err = errDuplicatedName
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+			paths[path] = struct{}{}
+			sc.Methods[path] = mc
+		}
+	}
+
+	if sc.retryThrottling != nil {
+		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
+		}
+		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) must be greater than 0", tr)}
+		}
+	}
+	return &serviceconfig.ParseResult{Config: &sc}
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
+	if jrp == nil {
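+		// A nil JSON retry policy simply means no retry policy was
+		// configured for the method; that is not an error.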
return nil, nil + } + + if jrp.MaxAttempts <= 1 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: maxAttempts, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } + for _, code := range jrp.RetryableStatusCodes { + rp.RetryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..35e7a20a0 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. 
+type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 000000000..dc03731e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). +type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 000000000..71195c494 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,343 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purposes only. All fields are read-only.
+// All APIs are experimental.
+package stats // import "google.golang.org/grpc/stats"
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// RPCStats contains stats information about RPCs.
+type RPCStats interface {
+	isRPCStats()
+	// IsClient returns true if this RPCStats is from client side.
+	IsClient() bool
+}
+
+// Begin contains stats when an RPC attempt begins.
+// FailFast is only valid if this Begin is from client side.
+type Begin struct {
+	// Client is true if this Begin is from client side.
+	Client bool
+	// BeginTime is the time when the RPC attempt begins.
+	BeginTime time.Time
+	// FailFast indicates if this RPC is failfast.
+	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *Begin) IsClient() bool { return s.Client }
+
+func (s *Begin) isRPCStats() {}
+
+// PickerUpdated indicates that the LB policy provided a new picker while the
+// RPC was waiting for one.
+type PickerUpdated struct{}
+
+// IsClient indicates if the stats information is from client side. Only the
+// client side interfaces with a Picker, so this always returns true.
+func (*PickerUpdated) IsClient() bool { return true }
+
+func (*PickerUpdated) isRPCStats() {}
+
+// InPayload contains the information for an incoming payload.
+type InPayload struct {
+	// Client is true if this InPayload is from client side.
+	Client bool
+	// Payload is the payload with original type. It may be modified after
+	// the call to HandleRPC that receives this InPayload returns, and must
+	// be copied if it is needed later.
+	Payload any
+
+	// Length is the size of the uncompressed payload data. Does not include any
+	// framing (gRPC or HTTP/2).
+	Length int
+	// CompressedLength is the size of the compressed payload data. Does not
+	// include any framing (gRPC or HTTP/2). Same as Length if compression not
+	// enabled.
+	CompressedLength int
+	// WireLength is the size of the compressed payload data plus gRPC framing.
+	// Does not include HTTP/2 framing.
+	WireLength int
+
+	// RecvTime is the time when the payload is received.
+	RecvTime time.Time
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InPayload) IsClient() bool { return s.Client }
+
+func (s *InPayload) isRPCStats() {}
+
+// InHeader contains stats when a header is received.
+type InHeader struct {
+	// Client is true if this InHeader is from client side.
+	Client bool
+	// WireLength is the wire length of the header.
+	WireLength int
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is false.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InHeader) IsClient() bool { return s.Client }
+
+func (s *InHeader) isRPCStats() {}
+
+// InTrailer contains stats when a trailer is received.
+type InTrailer struct {
+	// Client is true if this InTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of the trailer.
+	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InTrailer) IsClient() bool { return s.Client }
+
+func (s *InTrailer) isRPCStats() {}
+
+// OutPayload contains the information for an outgoing payload.
+type OutPayload struct {
+	// Client is true if this OutPayload is from client side.
+	Client bool
+	// Payload is the payload with original type. It may be modified after
+	// the call to HandleRPC that receives this OutPayload returns, and must
+	// be copied if it is needed later.
+	Payload any
+	// Length is the size of the uncompressed payload data. Does not include any
+	// framing (gRPC or HTTP/2).
+	Length int
+	// CompressedLength is the size of the compressed payload data. Does not
+	// include any framing (gRPC or HTTP/2). Same as Length if compression not
+	// enabled.
+	CompressedLength int
+	// WireLength is the size of the compressed payload data plus gRPC framing.
+	// Does not include HTTP/2 framing.
+	WireLength int
+	// SentTime is the time when the payload is sent.
+	SentTime time.Time
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutPayload) IsClient() bool { return s.Client }
+
+func (s *OutPayload) isRPCStats() {}
+
+// OutHeader contains stats when a header is sent.
+type OutHeader struct {
+	// Client is true if this OutHeader is from client side.
+	Client bool
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is true.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutHeader) IsClient() bool { return s.Client }
+
+func (s *OutHeader) isRPCStats() {}
+
+// OutTrailer contains stats when a trailer is sent.
+type OutTrailer struct {
+	// Client is true if this OutTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of the trailer.
+	//
+	// Deprecated: This field is never set. The length is not known when this
+	// message is emitted because the trailer fields are compressed with hpack
+	// after that.
+	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutTrailer) IsClient() bool { return s.Client }
+
+func (s *OutTrailer) isRPCStats() {}
+
+// End contains stats when an RPC ends.
+type End struct {
+	// Client is true if this End is from client side.
+	Client bool
+	// BeginTime is the time when the RPC began.
+ BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. 
New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..a93360efb --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. 
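+//
+// A typical use from a service handler (an illustrative sketch; the message
+// text is made up):
+//
+//	return nil, status.Error(codes.NotFound, "resource not found")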
+func Error(c codes.Code, msg string) error {
+	return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...any) error {
+	return Error(c, fmt.Sprintf(format, a...))
+}
+
+// ErrorProto returns an error representing s. If s.Code is OK, returns nil.
+func ErrorProto(s *spb.Status) error {
+	return FromProto(s).Err()
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+	return status.FromProto(s)
+}
+
+// FromError returns a Status representation of err.
+//
+// - If err was produced by this package or implements the method `GRPCStatus()
+//   *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+//   satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
+//   errors, the message returned contains the entire err.Error() text and not
+//   just the wrapped status. In that case, ok is true.
+//
+// - If err is nil, a Status is returned with codes.OK and no message, and ok
+//   is true.
+//
+// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+//   returns nil (which maps to codes.OK), or if err wraps a type
+//   satisfying this, a Status is returned with codes.Unknown and err's
+//   Error() message, and ok is false.
+//
+// - Otherwise, err is an error not compatible with this package. In this
+//   case, a Status is returned with codes.Unknown and err's Error() message,
+//   and ok is false.
+func FromError(err error) (s *Status, ok bool) {
+	if err == nil {
+		return nil, true
+	}
+	type grpcstatus interface{ GRPCStatus() *Status }
+	if gs, ok := err.(grpcstatus); ok {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
+			// Error has status nil, which maps to codes.OK. There
+			// is no sensible behavior for this, so we turn it into
+			// an error with codes.Unknown and discard the existing
+			// status.
+			return New(codes.Unknown, err.Error()), false
+		}
+		return grpcStatus, true
+	}
+	var gs grpcstatus
+	if errors.As(err, &gs) {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
+			// Error wraps an error that has status nil, which maps
+			// to codes.OK. There is no sensible behavior for this,
+			// so we turn it into an error with codes.Unknown and
+			// discard the existing status.
+			return New(codes.Unknown, err.Error()), false
+		}
+		p := grpcStatus.Proto()
+		p.Message = err.Error()
+		return status.FromProto(p), true
+	}
+	return New(codes.Unknown, err.Error()), false
+}
+
+// Convert is a convenience function which removes the need to handle the
+// boolean return value from FromError.
+func Convert(err error) *Status {
+	s, _ := FromError(err)
+	return s
+}
+
+// Code returns the Code of the error if it is a Status error or if it wraps a
+// Status error. If that is not the case, it returns codes.OK if err is nil, or
+// codes.Unknown otherwise.
+func Code(err error) codes.Code {
+	// Don't use FromError to avoid allocation of OK status.
+	if err == nil {
+		return codes.OK
+	}
+
+	return Convert(err).Code()
+}
+
+// FromContextError converts a context error or wrapped context error into a
+// Status. It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
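+//
+// A small sketch of translating a context error before surfacing it:
+//
+//	if err := ctx.Err(); err != nil {
+//		return status.FromContextError(err).Err()
+//	}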
+func FromContextError(err error) *Status { + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) + } + if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) + } + return New(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 000000000..bb2b2a216 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1841 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "strconv" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv any, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. +type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. 
+type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m any) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m any) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. 
A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
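+			// (gRFC A54 reserves certain status codes for the data plane;
+			// a config selector that returns one of them is reported to the
+			// application as codes.Internal instead.)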
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } + + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. 
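+	// (withRetry buffers this op for replay, so transport selection and
+	// stream creation are re-run on each retry attempt.)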
+ op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { + return nil, err + } + + if len(cs.binlogs) != 0 { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: newTrace("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = newTraceContext(ctx, trInfo.tr) + } + + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. 
+		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+
+	return &csAttempt{
+		ctx:           ctx,
+		beginTime:     beginTime,
+		cs:            cs,
+		dc:            cs.cc.dopts.dc,
+		statsHandlers: shs,
+		trInfo:        trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
+		return err
+	}
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+	}
+	return nil
+}
+
+func (a *csAttempt) newStream() error {
+	cs := a.cs
+	cs.callHdr.PreviousAttempts = cs.numRetries
+
+	// Merge metadata stored in PickResult, if any, with existing call metadata.
+	// It is safe to overwrite the csAttempt's context here, since all state
+	// maintained in it is local to the attempt. When the attempt has to be
+	// retried, a new instance of csAttempt will be created.
+	if a.pickResult.Metadata != nil {
+		// We currently do not have a function in the metadata package which
+		// merges given metadata with existing metadata in a context. Existing
+		// function `AppendToOutgoingContext()` takes a variadic argument of key
+		// value pairs.
+		//
+		// TODO: Make it possible to retrieve key value pairs from metadata.MD
+		// in a form passable to AppendToOutgoingContext(), or create a version
+		// of AppendToOutgoingContext() that accepts a metadata.MD.
+		md, _ := metadata.FromOutgoingContext(a.ctx)
+		md = metadata.Join(md, a.pickResult.Metadata)
+		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+	}
+
+	s, err := a.t.NewStream(a.ctx, cs.callHdr)
+	if err != nil {
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+			return err
+		}
+
+		if nse.AllowTransparentRetry {
+			a.allowTransparentRetry = true
+		}
+
+		// Unwrap and convert error.
+		return toRPCErr(nse.Err)
+	}
+	a.s = s
+	a.ctx = s.Context()
+	a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+	return nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+	callHdr  *transport.CallHdr
+	opts     []CallOption
+	callInfo *callInfo
+	cc       *ClientConn
+	desc     *StreamDesc
+
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+
+	cancel context.CancelFunc // cancels all attempts
+
+	sentLast bool // sent an end stream
+
+	methodConfig *MethodConfig
+
+	ctx context.Context // the application's context, wrapped by stats/tracing
+
+	retryThrottler *retryThrottler // The throttler active when the RPC began.
+
+	binlogs []binarylog.MethodLogger
+	// serverHeaderBinlogged is a boolean for whether the server header has
+	// been logged. The server header is logged the first time one of these
+	// happens: stream.Header() or stream.Recv().
+	//
+	// It's only read and used by Recv() and Header(), so it doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu                      sync.Mutex
+	firstAttempt            bool // if true, transparent retry is valid
+	numRetries              int  // exclusive of transparent retry attempt(s)
+	numRetriesSincePushback int  // retries since pushback; to reset backoff
+	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
+	// attempt is the active client stream attempt.
+	// The only place where it is written is the newAttemptLocked method and this method never writes nil.
+	// So, attempt can be nil only inside the newClientStream function when clientStream is first created.
+	// One of the first things done after the clientStream's creation is to call newAttemptLocked, which either
+	// assigns a non-nil value to the attempt or returns an error. If an error is returned from newAttemptLocked,
+	// then newClientStream calls finish on the clientStream and returns. So, the finish method is the only
+	// place where we need to check if the attempt is nil.
+	attempt *csAttempt
+	// TODO(hedging): hedging will have multiple attempts simultaneously.
+	committed        bool // active attempt committed for retry?
+	onCommit         func()
+	replayBuffer     []replayOp // operations to replay on retry
+	replayBufferSize int        // current size of replayBuffer
+}
+
+type replayOp struct {
+	op      func(a *csAttempt) error
+	cleanup func()
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+	ctx        context.Context
+	cs         *clientStream
+	t          transport.ClientTransport
+	s          *transport.Stream
+	p          *parser
+	pickResult balancer.PickResult
+
+	finished  bool
+	dc        Decompressor
+	decomp    encoding.Compressor
+	decompSet bool
+
+	mu sync.Mutex // guards trInfo.tr
+	// trInfo may be nil (if EnableTracing is false).
+	// trInfo.tr is set when created (if EnableTracing is true),
+	// and cleared when the finish method is called.
+	trInfo *traceInfo
+
+	statsHandlers []stats.Handler
+	beginTime     time.Time
+
+	// set for newStream errors that may be transparently retried
+	allowTransparentRetry bool
+	// set for pick errors that are returned as a status
+	drop bool
+}
+
+func (cs *clientStream) commitAttemptLocked() {
+	if !cs.committed && cs.onCommit != nil {
+		cs.onCommit()
+	}
+	cs.committed = true
+	for _, op := range cs.replayBuffer {
+		if op.cleanup != nil {
+			op.cleanup()
+		}
+	}
+	cs.replayBuffer = nil
+}
+
+func (cs *clientStream) commitAttempt() {
+	cs.mu.Lock()
+	cs.commitAttemptLocked()
+	cs.mu.Unlock()
+}
+
+// shouldRetry returns a nil error if the RPC should be retried; otherwise it
+// returns the error that should be returned to the caller. If the RPC should
+// be retried, the returned bool indicates whether the retry is transparent.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+	cs := a.cs
+
+	if cs.finished || cs.committed || a.drop {
+		// RPC is finished or committed or was dropped by the picker; cannot retry.
+		return false, err
+	}
+	if a.s == nil && a.allowTransparentRetry {
+		return true, nil
+	}
+	// Wait for the trailers.
+	unprocessed := false
+	if a.s != nil {
+		<-a.s.Done()
+		unprocessed = a.s.Unprocessed()
+	}
+	if cs.firstAttempt && unprocessed {
+		// First attempt, stream unprocessed: transparently retry.
+		return true, nil
+	}
+	if cs.cc.dopts.disableRetry {
+		return false, err
+	}
+
+	pushback := 0
+	hasPushback := false
+	if a.s != nil {
+		if !a.s.TrailersOnly() {
+			return false, err
+		}
+
+		// TODO(retry): Move down if the spec changes to not check server pushback
+		// before considering this a failure for throttling.
+		sps := a.s.Trailer()["grpc-retry-pushback-ms"]
+		if len(sps) == 1 {
+			var e error
+			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
+				channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
+				cs.retryThrottler.throttle() // This counts as a failure for throttling.
+				return false, err
+			}
+			hasPushback = true
+		} else if len(sps) > 1 {
+			channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
+			cs.retryThrottler.throttle() // This counts as a failure for throttling.
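
// The RetryPolicy consulted below comes from the per-method service config. A
// minimal sketch of enabling retries on the client side; the target and
// service name are hypothetical, while the JSON shape follows the gRPC service
// config spec ("maxAttempts" is the cap compared against cs.numRetries+1
// below). Assumes imports of google.golang.org/grpc and
// google.golang.org/grpc/credentials/insecure:

conn, err := grpc.NewClient("dns:///example.com:443",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithDefaultServiceConfig(`{
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo"}],
	    "retryPolicy": {
	      "maxAttempts": 4,
	      "initialBackoff": "0.1s",
	      "maxBackoff": "1s",
	      "backoffMultiplier": 2.0,
	      "retryableStatusCodes": ["UNAVAILABLE"]
	    }
	  }]
	}`))
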
+ return false, err + } + } + + var code codes.Code + if a.s != nil { + code = a.s.Status().Code() + } else { + code = status.Code(err) + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(rand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return false, nil + case <-cs.ctx.Done(): + t.Stop() + return false, status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { + for { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. + return err + } + // Note that the first op in replayBuffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.replayBuffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. 
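
// The replay machinery above can be switched off wholesale at dial time
// (shouldRetry checks cs.cc.dopts.disableRetry before consulting the policy);
// transparent retries of unprocessed streams still apply. A one-line sketch
// with a hypothetical target, using the real DialOption:

conn, err := grpc.NewClient(target, grpc.WithDisableRetry())
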
+ continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(a, err); err != nil { + cs.mu.Unlock() + return err + } + } +} + +func (cs *clientStream) Header() (metadata.MD, error) { + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) + + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + + if err != nil { + cs.finish(err) + // Do not return the error. The user should get it by calling Recv(). + return nil, nil + } + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. + logEntry := &binarylog.ServerHeader{ + OnClientSide: true, + Header: m, + PeerAddr: nil, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + + return m, nil +} + +func (cs *clientStream) Trailer() metadata.MD { + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + cleanup() + return + } + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) +} + +func (cs *clientStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) + if err != nil { + return err + } + + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? 
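
// Two limits in play around here are per-call tunables: the replay-buffer cap
// enforced by bufferForRetryLocked above (cs.callInfo.maxRetryRPCBufferSize)
// and the outbound size check just below. A sketch with a hypothetical
// generated stub named client, using the real CallOptions:

resp, err := client.UnaryCall(ctx, req,
	grpc.MaxRetryRPCBufferSize(1<<20), // commit (stop retrying) once 1 MiB of request data is buffered
	grpc.MaxCallSendMsgSize(4<<20),    // reject outbound messages larger than 4 MiB
)
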
+ if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) + } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() + op := func(a *csAttempt) error { + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. + onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() + } + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ + OnClientSide: true, + Message: data.Materialize(), + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } + } + return err +} + +func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if len(cs.binlogs) != 0 { + recvInfo = &payloadInfo{} + defer recvInfo.free() + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes.Materialize(), + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + + cs.mu.Unlock() + // Only one of cancel or trailer needs to be logged. 
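
// Entries such as the Cancel/ServerTrailer records below (and the
// ClientHeader, ClientMessage, and ClientHalfClose records earlier in this
// file) are produced only when binary logging is enabled. A sketch of turning
// it on, assuming the GRPC_BINARY_LOG_FILTER environment variable is set (for
// example to "*") and using the public google.golang.org/grpc/binarylog
// package:

func enableBinaryLogging() error {
	sink, err := binarylog.NewTempFileSink() // entries are written to a temp file
	if err != nil {
		return err
	}
	binarylog.SetSink(sink)
	return nil
}
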
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { + cs := a.cs + if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} + defer payInfo.free() + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: payInfo.uncompressedBytes.Len(), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
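
// The outPayload/InPayload events emitted by sendMsg and recvMsg above, plus
// the Begin/End pair from newAttemptLocked and csAttempt.finish, are delivered
// to every registered stats.Handler. A minimal sketch; the type name
// loggingHandler is hypothetical, the interface and event types are the ones
// used in this file. Wire it up at dial time with
// grpc.WithStatsHandler(loggingHandler{}); assumes imports of context, log,
// and google.golang.org/grpc/stats:

type loggingHandler struct{}

func (loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch s := s.(type) {
	case *stats.Begin:
		log.Printf("attempt began (transparent retry: %v)", s.IsTransparentRetryAttempt)
	case *stats.InPayload:
		log.Printf("received %d bytes on the wire", s.WireLength)
	case *stats.End:
		log.Printf("attempt ended after %v: err=%v", s.EndTime.Sub(s.BeginTime), s.Error)
	}
}

func (loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (loggingHandler) HandleConn(context.Context, stats.ConnStats)                       {}
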
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
+	}
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	var tr metadata.MD
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+		tr = a.s.Trailer()
+	}
+
+	if a.pickResult.Done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
+		a.pickResult.Done(balancer.DoneInfo{
+			Err:           err,
+			Trailer:       tr,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+			ServerLoad:    balancerload.Parse(tr),
+		})
+	}
+	for _, sh := range a.statsHandlers {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.beginTime,
+			EndTime:   time.Now(),
+			Trailer:   tr,
+			Error:     err,
+		}
+		sh.HandleRPC(a.ctx, end)
+	}
+	if a.trInfo != nil && a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid a race, the transport is specified separately, instead
+// of using ac.transport.
+//
+// Main differences between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+	if t == nil {
+		// TODO: return RPC error here?
+		return nil, errors.New("transport provided is nil")
+	}
+	// defaultCallInfo contains unnecessary info (i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+	c := &callInfo{}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           ac.cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set. In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
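
// Per the comment above, the sending side picks a compressor either per call
// (UseCompressor) or per connection (the deprecated WithCompressor DialOption).
// A sketch of the per-call form, assuming a generated stub named client; the
// blank import registers the "gzip" codec with the encoding package:
//
//	import _ "google.golang.org/grpc/encoding/gzip"

resp, err := client.UnaryCall(ctx, req, grpc.UseCompressor("gzip"))
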
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. + go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() + select { + case <-acCtx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
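
// The error contract described above also holds for the retrying clientStream:
// a failed Send surfaces as io.EOF, and the real status is recovered by the
// closing receive. The resulting calling pattern, sketched with a hypothetical
// generated client-streaming stub:

func recordRoute(ctx context.Context, client pb.RouteGuideClient, points []*pb.Point) (*pb.RouteSummary, error) {
	stream, err := client.RecordRoute(ctx)
	if err != nil {
		return nil, err
	}
	for _, p := range points {
		if err := stream.Send(p); err != nil {
			break // typically io.EOF; the real status comes from CloseAndRecv
		}
	}
	return stream.CloseAndRecv()
}
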
+			as.finish(err)
+		}
+	}()
+	if as.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !as.desc.ClientStreams {
+		as.sentLast = true
+	}
+
+	// load hdr, payload, data
+	hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if payload.Len() > *as.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
+	}
+
+	if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+		if !as.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it. finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (as *addrConnStream) RecvMsg(m any) (err error) {
+	defer func() {
+		if err != nil || !as.desc.ServerStreams {
+			// err != nil or non-server-streaming indicates end of stream.
+			as.finish(err)
+		}
+	}()
+
+	if !as.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if as.dc == nil || as.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				as.dc = nil
+				as.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			as.dc = nil
+		}
+		// Only initialize this state once per stream.
+		as.decompSet = true
+	}
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
+		if err == io.EOF {
+			if statusErr := as.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgRecv()
+	}
+	if as.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
+		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
+	}
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+}
+
+func (as *addrConnStream) finish(err error) {
+	as.mu.Lock()
+	if as.finished {
+		as.mu.Unlock()
+		return
+	}
+	as.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if as.s != nil {
+		as.t.CloseStream(as.s, err)
+	}
+
+	if err != nil {
+		as.ac.incrCallsFailed()
+	} else {
+		as.ac.incrCallsSucceeded()
+	}
+	as.cancel()
+	as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+	//
+	// SendMsg blocks until:
+	//  - There is sufficient flow control to schedule m with the transport, or
+	//  - The stream is done, or
+	//  - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the client. An
+	// untimely stream closure may result in lost messages.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines.
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m any) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the client has performed a CloseSend. On
+	// any non-EOF error, the stream is aborted and the error contains the
+	// RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m any) error
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
+	sendCompressorName string
+
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	trInfo                *traceInfo
+
+	statsHandler []stats.Handler
+
+	binlogs []binarylog.MethodLogger
+	// serverHeaderBinlogged indicates whether the server header has been
+	// logged. It is set when either of the following happens:
+	// stream.SendHeader() or stream.Send().
+	//
+	// It's only checked in send and sendHeader, so it doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + + // load hdr, payload, data + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + if err != nil { + return err + } + + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + + // TODO(dfawley): should we be checking len(data) instead? + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + + if len(ss.binlogs) != 0 { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + sm := &binarylog.ServerMessage{ + Message: data.Materialize(), + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) + } + } + return nil +} + +func (ss *serverStream) RecvMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + payInfo = &payloadInfo{} + defer payInfo.free() + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err == io.EOF { + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } + } + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes.Materialize(), + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. 
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, 0, err + } + compData, pf, err := compress(data, cp, comp, pool) + if err != nil { + data.Free() + return nil, nil, nil, 0, err + } + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil +} diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 000000000..0037fee0b --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,238 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. + Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. +type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. + Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. 
It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). + Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. + CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. +type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. + Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. + SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). + Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. + Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. 
+ ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. +type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. + Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. + Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. +type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. 
+type GenericServerStream[Req any, Res any] struct {
+	ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+	m := new(Req)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
new file mode 100644
index 000000000..07f012576
--- /dev/null
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package tap defines the function handles which are executed on the transport
+// layer of gRPC-Go and related information.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package tap
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+	// FullMethodName is the string of the gRPC method (in the format of
+	// /package.service/method).
+	FullMethodName string
+
+	// Header contains the header metadata received.
+	Header metadata.MD
+
+	// TODO: More to be added.
+}
+
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client. If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
+//
+// It's intended to be used in situations where you don't want to waste the
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) instead of
+// per-RPC goroutine.
Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 000000000..10f4f798f --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceEventLog mirrors golang.org/x/net/trace.EventLog. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceEventLog interface { + Printf(format string, a ...any) + Errorf(format string, a ...any) + Finish() +} + +// traceLog mirrors golang.org/x/net/trace.Trace. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceLog interface { + LazyLog(x fmt.Stringer, sensitive bool) + LazyPrintf(format string, a ...any) + SetError() + SetRecycler(f func(any)) + SetTraceInfo(traceID, spanID uint64) + SetMaxEvents(m int) + Finish() +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr traceLog + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. 
+type payload struct { + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []any +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go new file mode 100644 index 000000000..1da3a2308 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_notrace.go @@ -0,0 +1,52 @@ +//go:build grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in +// turn enables binaries using gRPC-Go for dead code elimination, which can +// yield 10-15% improvements in binary size when tracing is not needed. + +import ( + "context" + "fmt" +) + +type notrace struct{} + +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} +func (notrace) LazyPrintf(format string, a ...any) {} +func (notrace) SetError() {} +func (notrace) SetRecycler(f func(any)) {} +func (notrace) SetTraceInfo(traceID, spanID uint64) {} +func (notrace) SetMaxEvents(m int) {} +func (notrace) Finish() {} + +func newTrace(family, title string) traceLog { + return notrace{} +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return ctx +} + +func newTraceEventLog(family, title string) traceEventLog { + return nil +} diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go new file mode 100644 index 000000000..88d6e8571 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_withtrace.go @@ -0,0 +1,39 @@ +//go:build !grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + + t "golang.org/x/net/trace" +) + +func newTrace(family, title string) traceLog { + return t.New(family, title) +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return t.NewContext(ctx, tr) +} + +func newTraceEventLog(family, title string) traceEventLog { + return t.NewEventLog(family, title) +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 000000000..a96b6a6bf --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.67.1" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 000000000..8f9e592f8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,685 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields and enum name values are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. 
+// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. 
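+ // For example (illustrative), an extension field registered as my.pkg.my_ext appears here as the JSON key "[my.pkg.my_ext]".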
+ extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + if val.IsValid() { + m.Set(fd, val) + } + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
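+// Per the proto3 JSON mapping, 64-bit integer fields accept both JSON numbers and quoted strings, e.g. 123 or "123" (illustrative values); see unmarshalInt below.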
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
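+ // For example (illustrative), "42" parses successfully, while " 42" fails the length check below because TrimSpace shortens it.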
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + if val.IsValid() { + list.Append(val) + } + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + if pval.IsValid() { + mmap.Set(pkey, pval) + } + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
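+// JSON object keys are always strings, so a map<int32, string> entry such as {"42": "x"} (illustrative) has its key parsed back into an integer below.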
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 000000000..ae71007c1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard [encoding/json] +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 000000000..0e72d8537 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,380 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given [proto.Message] in JSON format using default options. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. 
+ Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given [proto.Message] in the JSON format using options in +// the MarshalOptions object. Do not depend on the output being stable. Its +// output will change across different builds of your program, even when using +// the same version of the protobuf module. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(b, o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output is an empty JSON object. + if m == nil { + return append(b, '{', '}'), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields.
+type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneof + } + + v := m.Get(fd) + if fd.HasPresence() { + if m.skipNull { + continue + } + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON strings. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinities. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinities.
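+ // For example, math.NaN() is emitted as the JSON string "NaN" and math.Inf(+1) as "Infinity", per the proto3 JSON mapping.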
+ e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 000000000..4b177c820 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,876 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
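+// For example, google.protobuf.Duration marshals to a string such as "3.500s" (illustrative value) rather than an object with seconds and nanos fields.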
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. 
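+ // For example (illustrative), the type URL "type.googleapis.com/google.protobuf.Duration" resolves to the Duration message type.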
+ typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. 
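+ // Deterministic marshaling keeps map entries in a stable order, so the re-encoded Any.value bytes are reproducible.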
+ Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty, or errMissingType if +// the @type field does not exist. It returns an error if the @type field is +// not valid or there are other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + var open int + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") + } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil + } + } +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already.
+ d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is a JSON array that contains the +// encoded ListValue.values repeated field and follows the serialization rules +// for a repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the fields in the oneof has its own custom serialization rule. A +// Value message needs to have one oneof field set, else it is an error.
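+// For example (illustrative): a Value holding number_value 1.5 encodes as 1.5, string_value "hi" as "hi", and struct_value as a nested JSON object such as {"a": 1}.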
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
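+// For example (illustrative): seconds=3, nanos=500000000 encodes as "3.500s", and seconds=-1, nanos=-500000000 encodes as "-1.500s".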
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
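+ // For example (illustrative), "2000-01-01T00:00:00.1234567890Z" has a 10-digit fraction, so j-i exceeds len(".999999999") and the value is rejected.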
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 000000000..ea1d3e65a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. 
+ lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind&(Name|comma) != 0 || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
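+	// (Added note: consume(0) removes no input bytes, only the whitespace
+	// that follows the current position.)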
+	d.consume(0)
+
+	in := d.in
+	if len(in) == 0 {
+		return d.consumeToken(EOF, 0), nil
+	}
+
+	switch in[0] {
+	case 'n':
+		if n := matchWithDelim("null", in); n != 0 {
+			return d.consumeToken(Null, n), nil
+		}
+
+	case 't':
+		if n := matchWithDelim("true", in); n != 0 {
+			return d.consumeBoolToken(true, n), nil
+		}
+
+	case 'f':
+		if n := matchWithDelim("false", in); n != 0 {
+			return d.consumeBoolToken(false, n), nil
+		}
+
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		if n, ok := parseNumber(in); ok {
+			return d.consumeToken(Number, n), nil
+		}
+
+	case '"':
+		s, n, err := d.parseString(in)
+		if err != nil {
+			return Token{}, err
+		}
+		return d.consumeStringToken(s, n), nil
+
+	case '{':
+		return d.consumeToken(ObjectOpen, 1), nil
+
+	case '}':
+		return d.consumeToken(ObjectClose, 1), nil
+
+	case '[':
+		return d.consumeToken(ArrayOpen, 1), nil
+
+	case ']':
+		return d.consumeToken(ArrayClose, 1), nil
+
+	case ',':
+		return d.consumeToken(comma, 1), nil
+	}
+	return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
+}
+
+// newSyntaxError returns an error with line and column information useful for
+// syntax errors.
+func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
+	e := errors.New(f, x...)
+	line, column := d.Position(pos)
+	return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns line and column number of given index of the original input.
+// It will panic if index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+	b := d.orig[:idx]
+	line = bytes.Count(b, []byte("\n")) + 1
+	if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+		b = b[i+1:]
+	}
+	column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+	return line, column
+}
+
+// currPos returns the current index position of d.in from d.orig.
+func (d *Decoder) currPos() int {
+	return len(d.orig) - len(d.in)
+}
+
+// matchWithDelim matches s with the input b and verifies that the match
+// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
+// As a special case, EOF is considered a delimiter. It returns the length of s
+// if there is a match, else 0.
+func matchWithDelim(s string, b []byte) int {
+	if !bytes.HasPrefix(b, []byte(s)) {
+		return 0
+	}
+
+	n := len(s)
+	if n < len(b) && isNotDelim(b[n]) {
+		return 0
+	}
+	return n
+}
+
+// isNotDelim returns true if the given byte is not a delimiter character.
+func isNotDelim(c byte) bool {
+	return (c == '-' || c == '+' || c == '.' || c == '_' ||
+		('a' <= c && c <= 'z') ||
+		('A' <= c && c <= 'Z') ||
+		('0' <= c && c <= '9'))
+}
+
+// consume consumes n bytes of input and any subsequent whitespace.
+func (d *Decoder) consume(n int) {
+	d.in = d.in[n:]
+	for len(d.in) > 0 {
+		switch d.in[0] {
+		case ' ', '\n', '\r', '\t':
+			d.in = d.in[1:]
+		default:
+			return
+		}
+	}
+}
+
+// isValueNext returns true if next type should be a JSON value: Null,
+// Number, String or Bool.
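+//
+// Illustrative summary (added, not upstream text): at the top level only the
+// very first token may be a value; inside an object a value may only follow a
+// Name token; inside an array a value may follow the opening '[' or a comma.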
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 000000000..2999d7133 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
+	if n < len(input) && isNotDelim(input[n]) {
+		return 0, false
+	}
+
+	return n, true
+}
+
+// numberParts is the result of parsing out a valid JSON number. It contains
+// the parts of a number. The parts are used for integer conversion.
+type numberParts struct {
+	neg  bool
+	intp []byte
+	frac []byte
+	exp  []byte
+}
+
+// parseNumberParts constructs numberParts from given []byte. The logic here is
+// similar to parseNumber above with the difference of having to construct
+// numberParts. The slice fields in numberParts are subslices of the input.
+func parseNumberParts(input []byte) (numberParts, bool) {
+	var neg bool
+	var intp []byte
+	var frac []byte
+	var exp []byte
+
+	s := input
+	if len(s) == 0 {
+		return numberParts{}, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+		if len(s) == 0 {
+			return numberParts{}, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		// Skip first 0 and no need to store.
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		intp = s
+		n := 1
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		intp = intp[:n]
+
+	default:
+		return numberParts{}, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		frac = s[1:]
+		n := 1
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		frac = frac[:n]
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		exp = s
+		n := 0
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return numberParts{}, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		exp = exp[:n]
+	}
+
+	return numberParts{
+		neg:  neg,
+		intp: intp,
+		frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
+		exp:  exp,
+	}, true
+}
+
+// normalizeToIntString returns an integer string in normal form without the
+// E-notation for given numberParts. It will return false if it is not an
+// integer or if the exponent exceeds the max/min int value.
+func normalizeToIntString(n numberParts) (string, bool) {
+	intpSize := len(n.intp)
+	fracSize := len(n.frac)
+
+	if intpSize == 0 && fracSize == 0 {
+		return "0", true
+	}
+
+	var exp int
+	if len(n.exp) > 0 {
+		i, err := strconv.ParseInt(string(n.exp), 10, 32)
+		if err != nil {
+			return "", false
+		}
+		exp = int(i)
+	}
+
+	var num []byte
+	if exp >= 0 {
+		// For positive E, shift fraction digits into integer part and also pad
+		// with zeroes as needed.
+
+		// If there are more digits in fraction than the E value, then the
+		// number is not an integer.
+		if fracSize > exp {
+			return "", false
+		}
+
+		// Make sure resulting digits are within max value limit to avoid
+		// unnecessarily constructing a large byte slice that may simply fail
+		// later on.
+		const maxDigits = 20 // Max uint64 value has 20 decimal digits.
+		if intpSize+exp > maxDigits {
+			return "", false
+		}
+
+		// Set cap to make a copy of integer part when appended.
+		num = n.intp[:len(n.intp):len(n.intp)]
+		num = append(num, n.frac...)
+		for i := 0; i < exp-fracSize; i++ {
+			num = append(num, '0')
+		}
+	} else {
+		// For negative E, shift digits in integer part out.
+
+		// If there are fractions, then the number is not an integer.
+		if fracSize > 0 {
+			return "", false
+		}
+
+		// index is where the decimal point will be after adjusting for negative
+		// exponent.
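+		// (Worked example, added for clarity: for "1200e-2", intp is "1200"
+		// and exp is -2, so index is 2; the shifted-out digits "00" are all
+		// zero and the normalized integer string is "12".)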
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 000000000..f7fea7d8d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
new file mode 100644
index 000000000..50578d659
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
@@ -0,0 +1,192 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// Kind represents a token kind expressible in the JSON format.
+type Kind uint16
+
+const (
+	Invalid Kind = (1 << iota) / 2
+	EOF
+	Null
+	Bool
+	Number
+	String
+	Name
+	ObjectOpen
+	ObjectClose
+	ArrayOpen
+	ArrayClose
+
+	// comma is only for parsing in between tokens and
+	// does not need to be exported.
+	comma
)
+
+func (k Kind) String() string {
+	switch k {
+	case EOF:
+		return "eof"
+	case Null:
+		return "null"
+	case Bool:
+		return "bool"
+	case Number:
+		return "number"
+	case String:
+		return "string"
+	case ObjectOpen:
+		return "{"
+	case ObjectClose:
+		return "}"
+	case Name:
+		return "name"
+	case ArrayOpen:
+		return "["
+	case ArrayClose:
+		return "]"
+	case comma:
+		return ","
+	}
+	return ""
+}
+
+// Token provides a parsed token kind and value.
+//
+// Values are provided by the different accessor methods. The accessor methods
+// Name, Bool, and ParsedString will panic if called on the wrong kind. There
+// are different accessor methods for the Number kind for converting to the
+// appropriate Go numeric type and those methods have the ok return value.
+type Token struct {
+	// Token kind.
+	kind Kind
+	// pos provides the position of the token in the original input.
+	pos int
+	// raw bytes of the serialized token.
+	// This is a subslice into the original input.
+	raw []byte
+	// boo is parsed boolean value.
+	boo bool
+	// str is parsed string value.
+	str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+	return t.kind
+}
+
+// RawString returns the read value as a string.
+func (t Token) RawString() string {
+	return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+	return t.pos
+}
+
+// Name returns the object name if token is Name, else it panics.
+func (t Token) Name() string {
+	if t.kind == Name {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
+}
+
+// Bool returns the bool value if token kind is Bool, else it panics.
+func (t Token) Bool() bool {
+	if t.kind == Bool {
+		return t.boo
+	}
+	panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
+}
+
+// ParsedString returns the string value for a JSON string token; it panics if
+// the token is not a String.
+func (t Token) ParsedString() string {
+	if t.kind == String {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
+}
+
+// Float returns the floating-point number if token kind is Number.
+//
+// The floating-point precision is specified by the bitSize parameter: 32 for
+// float32 or 64 for float64. If bitSize=32, the result still has type float64,
+// but it will be convertible to float32 without changing its value. It will
+// return false if the number exceeds the floating point limits for given
+// bitSize.
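+//
+// A minimal usage sketch (added for illustration; tok is assumed to hold a
+// Number token):
+//
+//	if f, ok := tok.Float(64); ok {
+//		_ = f // f holds the parsed float64
+//	}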
+func (t Token) Float(bitSize int) (float64, bool) {
+	if t.kind != Number {
+		return 0, false
+	}
+	f, err := strconv.ParseFloat(t.RawString(), bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// Int returns the signed integer number if token is Number.
+//
+// The given bitSize specifies the integer type that the result must fit into.
+// It returns false if the number is not an integer value or if the result
+// exceeds the limits for given bitSize.
+func (t Token) Int(bitSize int) (int64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+// Uint returns the unsigned integer number if token is Number.
+//
+// The given bitSize specifies the unsigned integer type that the result must
+// fit into. It returns false if the number is not an unsigned integer value
+// or if the result exceeds the limits for given bitSize.
+func (t Token) Uint(bitSize int) (uint64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+func (t Token) getIntStr() (string, bool) {
+	if t.kind != Number {
+		return "", false
+	}
+	parts, ok := parseNumberParts(t.raw)
+	if !ok {
+		return "", false
+	}
+	return normalizeToIntString(parts)
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+	return x.kind == y.kind &&
+		x.pos == y.pos &&
+		bytes.Equal(x.raw, y.raw) &&
+		x.boo == y.boo &&
+		x.str == y.str
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
new file mode 100644
index 000000000..934f2dcb3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
@@ -0,0 +1,278 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"math"
+	"math/bits"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/detrand"
+	"google.golang.org/protobuf/internal/errors"
+)
+
+// kind represents an encoding type.
+type kind uint8
+
+const (
+	_ kind = (1 << iota) / 2
+	name
+	scalar
+	objectOpen
+	objectClose
+	arrayOpen
+	arrayClose
+)
+
+// Encoder provides methods to write out JSON constructs and values. The user is
+// responsible for producing valid sequences of JSON constructs and values.
+type Encoder struct {
+	indent   string
+	lastKind kind
+	indents  []byte
+	out      []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry for an Array or Object
+// to be preceded by the indent and trailed by a newline.
+func NewEncoder(buf []byte, indent string) (*Encoder, error) {
+	e := &Encoder{
+		out: buf,
+	}
+	if len(indent) > 0 {
+		if strings.Trim(indent, " \t") != "" {
+			return nil, errors.New("indent may only be composed of space or tab characters")
+		}
+		e.indent = indent
+	}
+	return e, nil
+}
+
+// Bytes returns the content of the written bytes.
+func (e *Encoder) Bytes() []byte {
+	return e.out
+}
+
+// WriteNull writes out the null value.
+func (e *Encoder) WriteNull() {
+	e.prepareNext(scalar)
+	e.out = append(e.out, "null"...)
+}
+
+// WriteBool writes out the given boolean value.
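+// For example (added, illustrative): e.WriteBool(true) appends the literal
+// "true" to the encoder output, preceded by a comma if one is needed.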
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
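+//
+// A hedged usage sketch (added; assumes e, _ := NewEncoder(nil, "")):
+//
+//	e.StartObject()
+//	_ = e.WriteName("id") // errors only on invalid UTF-8
+//	e.WriteInt(1)
+//	e.EndObject() // string(e.Bytes()) == `{"id":1}`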
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 000000000..ea276d15a --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoadapt bridges the original and new proto APIs. +package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. 
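+//
+// Round-trip sketch (added, illustrative; m2 stands for any MessageV2):
+//
+//	v1 := protoadapt.MessageV1Of(m2)
+//	m2 = protoadapt.MessageV2Of(v1)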
+func MessageV1Of(m MessageV2) MessageV1 {
+	return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2Of converts a v1 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2Of(m MessageV1) MessageV2 {
+	return protoimpl.X.ProtoMessageV2Of(m)
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
new file mode 100644
index 000000000..19de8d371
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -0,0 +1,572 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+// # Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of fm
+//
+// The "field.name" and "field.number" paths are valid paths according to the
+// google.protobuf.DescriptorProto message. Use of a path that does not correlate
+// to valid fields reachable from DescriptorProto would result in an error.
+//
+// Once a FieldMask message has been constructed,
+// the Append method can be used to insert additional paths to the path set:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if err := fm.Append(messageType, "options"); err != nil {
+//		...
// handle error +// } +// +// # Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for. +package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. 
+// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. 
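+//
+// For example (added for illustration): a FieldMask message has a single
+// field, "paths", so the following succeeds, while any other path would
+// return an error:
+//
+//	fm, err := fieldmaskpb.New(new(fieldmaskpb.FieldMask), "paths")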
+func New(m proto.Message, paths ...string) (*FieldMask, error) {
+	x := new(FieldMask)
+	return x, x.Append(m, paths...)
+}
+
+// Union returns the union of all the paths in the input field masks.
+func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var out []string
+	out = append(out, mx.GetPaths()...)
+	out = append(out, my.GetPaths()...)
+	for _, m := range ms {
+		out = append(out, m.GetPaths()...)
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// Intersect returns the intersection of all the paths in the input field masks.
+func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var ss1, ss2 []string // reused buffers for performance
+	intersect := func(out, in []string) []string {
+		ss1 = normalizePaths(append(ss1[:0], in...))
+		ss2 = normalizePaths(append(ss2[:0], out...))
+		out = out[:0]
+		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
+			switch s1, s2 := ss1[i1], ss2[i2]; {
+			case hasPathPrefix(s1, s2):
+				out = append(out, s1)
+				i1++
+			case hasPathPrefix(s2, s1):
+				out = append(out, s2)
+				i2++
+			case lessPath(s1, s2):
+				i1++
+			case lessPath(s2, s1):
+				i2++
+			}
+		}
+		return out
+	}
+
+	out := Union(mx, my, ms...).GetPaths()
+	out = intersect(out, mx.GetPaths())
+	out = intersect(out, my.GetPaths())
+	for _, m := range ms {
+		out = intersect(out, m.GetPaths())
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// IsValid reports whether all the paths are syntactically valid and
+// refer to known fields in the specified message type.
+// It reports false for a nil FieldMask.
+func (x *FieldMask) IsValid(m proto.Message) bool {
+	paths := x.GetPaths()
+	return x != nil && numValidPaths(m, paths) == len(paths)
+}
+
+// Append appends a list of paths to the mask and verifies that each one
+// is valid according to the specified message type.
+// An invalid path is not appended and halts insertion of any subsequent paths.
+func (x *FieldMask) Append(m proto.Message, paths ...string) error {
+	numValid := numValidPaths(m, paths)
+	x.Paths = append(x.Paths, paths[:numValid]...)
+	paths = paths[numValid:]
+	if len(paths) > 0 {
+		name := m.ProtoReflect().Descriptor().FullName()
+		return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
+	}
+	return nil
+}
+
+func numValidPaths(m proto.Message, paths []string) int {
+	md0 := m.ProtoReflect().Descriptor()
+	for i, path := range paths {
+		md := md0
+		if !rangeFields(path, func(field string) bool {
+			// Search the field within the message.
+			if md == nil {
+				return false // not within a message
+			}
+			fd := md.Fields().ByName(protoreflect.Name(field))
+			// The real field name of a group is the message name.
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
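+//
+// For example (added for illustration): the paths ["f.b.d", "f.b", "f"]
+// normalize to just ["f"], since "f" is a dot-delimited prefix of the
+// other two.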
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []any{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + 
file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go new file mode 100644 index 000000000..8f206a661 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -0,0 +1,782 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent. +// +// # Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by any, map[string]any, and []any. +// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. 
+// +// In order to convert the any, map[string]any, and []any +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. +// +// # Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]any{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]any{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []any{ +// map[string]any{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]any{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []any{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... // make use of m as a *structpb.Value +package structpb + +import ( + base64 "encoding/base64" + json "encoding/json" + protojson "google.golang.org/protobuf/encoding/protojson" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + utf8 "unicode/utf8" +) + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +// Enum value maps for NullValue. +var ( + NullValue_name = map[int32]string{ + 0: "NULL_VALUE", + } + NullValue_value = map[string]int32{ + "NULL_VALUE": 0, + } +) + +func (x NullValue) Enum() *NullValue { + p := new(NullValue) + *p = x + return p +} + +func (x NullValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NullValue) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() +} + +func (NullValue) Type() protoreflect.EnumType { + return &file_google_protobuf_struct_proto_enumTypes[0] +} + +func (x NullValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NullValue.Descriptor instead. +func (NullValue) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unordered map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func NewStruct(v map[string]any) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. +func (x *Struct) AsMap() map[string]any { + f := x.GetFields() + vs := make(map[string]any, len(f)) + for k, v := range f { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Struct) Reset() { + *x = Struct{} + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Struct) ProtoMessage() {} + +func (x *Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Struct.ProtoReflect.Descriptor instead. +func (*Struct) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *Struct) GetFields() map[string]*Value { + if x != nil { + return x.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants. Absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of value. + // + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +// NewValue constructs a Value from a general-purpose Go interface. 
+// +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. +func NewValue(v any) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]any: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []any: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. +func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. +func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. 
+// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. +func (x *Value) AsInterface() any { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Value) Reset() { + *x = Value{} + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. +func (*Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (x *Value) GetNumberValue() float64 { + if x, ok := x.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetStructValue() *Struct { + if x, ok := x.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Represents a null value. + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + // Represents a double value. + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a string value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + // Represents a boolean value. 
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + // Represents a structured value. + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + // Represents a repeated `Value`. + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []any) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. +func (x *ListValue) AsSlice() []any { + vals := x.GetValues() + vs := make([]any, len(vals)) + for i, v := range vals { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *ListValue) Reset() { + *x = ListValue{} + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +var File_google_protobuf_struct_proto protoreflect.FileDescriptor + +var file_google_protobuf_struct_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, + 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, + 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, + 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x12, 0x2e, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, + 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, + 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_protobuf_struct_proto_rawDescOnce sync.Once + file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc +) + +func file_google_protobuf_struct_proto_rawDescGZIP() []byte { + file_google_protobuf_struct_proto_rawDescOnce.Do(func() { + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + }) + return file_google_protobuf_struct_proto_rawDescData +} + +var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_struct_proto_goTypes = []any{ + (NullValue)(0), // 0: google.protobuf.NullValue + (*Struct)(nil), // 1: google.protobuf.Struct + (*Value)(nil), // 2: google.protobuf.Value + (*ListValue)(nil), // 3: google.protobuf.ListValue + nil, // 4: google.protobuf.Struct.FieldsEntry +} +var file_google_protobuf_struct_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry + 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct + 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue + 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value + 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_protobuf_struct_proto_init() } +func file_google_protobuf_struct_proto_init() { + if File_google_protobuf_struct_proto != nil { + return + } + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + 
File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_struct_proto_goTypes, + DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, + EnumInfos: file_google_protobuf_struct_proto_enumTypes, + MessageInfos: file_google_protobuf_struct_proto_msgTypes, + }.Build() + File_google_protobuf_struct_proto = out.File + file_google_protobuf_struct_proto_rawDesc = nil + file_google_protobuf_struct_proto_goTypes = nil + file_google_protobuf_struct_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go new file mode 100644 index 000000000..006060e56 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -0,0 +1,632 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// Wrapper message for `double`. 
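+//
+// A wrapper's presence is what distinguishes "explicitly set to the zero
+// value" from "not set at all". For instance (an illustrative sketch):
+//
+//	var d *wrapperspb.DoubleValue // nil: the field is absent
+//	d = wrapperspb.Double(0)      // non-nil: present, with value 0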
+// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Double stores v in a new DoubleValue and returns a pointer to it. +func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + +func (x *DoubleValue) Reset() { + *x = DoubleValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoubleValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleValue) ProtoMessage() {} + +func (x *DoubleValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead. +func (*DoubleValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0} +} + +func (x *DoubleValue) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Float stores v in a new FloatValue and returns a pointer to it. +func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + +func (x *FloatValue) Reset() { + *x = FloatValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FloatValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatValue) ProtoMessage() {} + +func (x *FloatValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead. +func (*FloatValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatValue) GetValue() float32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int64 stores v in a new Int64Value and returns a pointer to it. 
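+// Note that, per the JSON mapping above, a message built with Int64(42)
+// serializes to the JSON string "42" rather than the number 42.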
+func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + +func (x *Int64Value) Reset() { + *x = Int64Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Value) ProtoMessage() {} + +func (x *Int64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead. +func (*Int64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2} +} + +func (x *Int64Value) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt64 stores v in a new UInt64Value and returns a pointer to it. +func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + +func (x *UInt64Value) Reset() { + *x = UInt64Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Value) ProtoMessage() {} + +func (x *UInt64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead. +func (*UInt64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3} +} + +func (x *UInt64Value) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int32 stores v in a new Int32Value and returns a pointer to it. +func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + +func (x *Int32Value) Reset() { + *x = Int32Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Value) ProtoMessage() {} + +func (x *Int32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead. 
+func (*Int32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4} +} + +func (x *Int32Value) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt32 stores v in a new UInt32Value and returns a pointer to it. +func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + +func (x *UInt32Value) Reset() { + *x = UInt32Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Value) ProtoMessage() {} + +func (x *UInt32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead. +func (*UInt32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Value) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bool stores v in a new BoolValue and returns a pointer to it. +func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + +func (x *BoolValue) Reset() { + *x = BoolValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolValue) ProtoMessage() {} + +func (x *BoolValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead. +func (*BoolValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6} +} + +func (x *BoolValue) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// String stores v in a new StringValue and returns a pointer to it. 
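+// (This package-level constructor is distinct from the String method on
+// *StringValue below, which implements fmt.Stringer for debug output.)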
+func String(v string) *StringValue { + return &StringValue{Value: v} +} + +func (x *StringValue) Reset() { + *x = StringValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringValue) ProtoMessage() {} + +func (x *StringValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringValue.ProtoReflect.Descriptor instead. +func (*StringValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7} +} + +func (x *StringValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bytes stores v in a new BytesValue and returns a pointer to it. +func Bytes(v []byte) *BytesValue { + return &BytesValue{Value: v} +} + +func (x *BytesValue) Reset() { + *x = BytesValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BytesValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesValue) ProtoMessage() {} + +func (x *BytesValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead. 
+func (*BytesValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8} +} + +func (x *BytesValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor + +var file_google_protobuf_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, + 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once + file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc +) + +func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { + file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + }) + return file_google_protobuf_wrappers_proto_rawDescData +} + +var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_google_protobuf_wrappers_proto_goTypes = []any{ + (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue + (*FloatValue)(nil), // 1: google.protobuf.FloatValue + (*Int64Value)(nil), // 2: google.protobuf.Int64Value + (*UInt64Value)(nil), // 3: google.protobuf.UInt64Value + (*Int32Value)(nil), // 4: google.protobuf.Int32Value + (*UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*BoolValue)(nil), // 6: google.protobuf.BoolValue + (*StringValue)(nil), // 7: google.protobuf.StringValue + (*BytesValue)(nil), // 8: google.protobuf.BytesValue +} +var file_google_protobuf_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_wrappers_proto_init() } +func file_google_protobuf_wrappers_proto_init() { + if File_google_protobuf_wrappers_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_wrappers_proto_goTypes, + DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs, + MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, + }.Build() + File_google_protobuf_wrappers_proto = out.File + file_google_protobuf_wrappers_proto_rawDesc = nil + file_google_protobuf_wrappers_proto_goTypes = nil + file_google_protobuf_wrappers_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/gcfg.v1/LICENSE b/vendor/gopkg.in/gcfg.v1/LICENSE new file mode 100644 index 000000000..87a5cede3 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/gcfg.v1/README b/vendor/gopkg.in/gcfg.v1/README
new file mode 100644
index 000000000..1ff233a52
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/README
@@ -0,0 +1,4 @@
+Gcfg reads INI-style configuration files into Go structs;
+supports user-defined types and subsections.
+
+Package docs: https://godoc.org/gopkg.in/gcfg.v1
diff --git a/vendor/gopkg.in/gcfg.v1/doc.go b/vendor/gopkg.in/gcfg.v1/doc.go
new file mode 100644
index 000000000..32f3e9d69
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/doc.go
@@ -0,0 +1,145 @@
+// Package gcfg reads "INI-style" text-based configuration files with
+// "name=value" pairs grouped into sections (gcfg files).
+//
+// This package is still a work in progress; see the sections below for planned
+// changes.
+//
+// Syntax
+//
+// The syntax is based on that used by git config:
+// http://git-scm.com/docs/git-config#_syntax .
+// There are some (planned) differences compared to the git config format:
+//  - improve data portability:
+//    - must be encoded in UTF-8 (for now) and must not contain the 0 byte
+//    - the include directive and the "path" type are not supported
+//      (path type may be implementable as a user-defined type)
+//  - internationalization
+//    - section and variable names can contain unicode letters, unicode digits
+//      (as defined in http://golang.org/ref/spec#Characters ) and hyphens
+//      (U+002D), starting with a unicode letter
+//  - disallow potentially ambiguous or misleading definitions:
+//    - `[sec.sub]` format is not allowed (deprecated in gitconfig)
+//    - `[sec ""]` is not allowed
+//      - use `[sec]` for section name "sec" and empty subsection name
+//    - (planned) within a single file, definitions must be contiguous for each:
+//      - section: '[secA]' -> '[secB]' -> '[secA]' is an error
+//      - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
+//      - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
+//
+// Data structure
+//
+// The functions in this package read values into a user-defined struct.
+// Each section corresponds to a struct field in the config struct, and each
+// variable in a section corresponds to a data field in the section struct.
+// The mapping of each section or variable name to fields is done either based
+// on the "gcfg" struct tag or by matching the name of the section or variable,
+// ignoring case. In the latter case, hyphens '-' in section and variable names
+// correspond to underscores '_' in field names.
+// Fields must be exported; to use a section or variable name starting with a
+// letter that is neither upper- nor lower-case, prefix the field name with 'X'.
+// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
+//
+// For sections with subsections, the corresponding field in config must be a
+// map, rather than a struct, with string keys and pointer-to-struct values.
+// Values for subsection variables are stored in the map with the subsection
+// name used as the map key.
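+// For illustration, with a hypothetical config struct such as
+//
+//	type Profile struct{ Email string }
+//	type Config struct {
+//	    Profile map[string]*Profile
+//	}
+//
+// the input
+//
+//	[profile "alice"]
+//	email = alice@example.com
+//
+// is stored under the map key "alice".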
+// (Note that unlike section and variable names, subsection names are case
+// sensitive.)
+// When using a map, and there is a section with the same section name but
+// without a subsection name, its values are stored with the empty string used
+// as the key.
+// It is possible to provide default values for subsections in the section
+// "default-<sectionname>" (or by setting values in the corresponding struct
+// field "Default_<sectionname>").
+//
+// The functions in this package panic if config is not a pointer to a struct,
+// or when a field is not of a suitable type (either a struct or a map with
+// string keys and pointer-to-struct values).
+//
+// Parsing of values
+//
+// The section structs in the config struct may contain single-valued or
+// multi-valued variables. Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-value; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// a case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (if they have a '0x' prefix). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively).
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is, any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// Error handling
+//
+// There are 3 types of errors:
+//
+//  - programmer errors / panics:
+//    - invalid configuration structure
+//  - data errors:
+//    - fatal errors:
+//      - invalid configuration syntax
+//    - warnings:
+//      - data that doesn't belong to any part of the config structure
+//
+// Programmer errors trigger panics. These should be fixed by the programmer
+// before releasing code that uses gcfg.
+//
+// Data errors cause gcfg to return a non-nil error value. This includes the
+// case when there are extra unknown key-value definitions in the configuration
+// data (extra data).
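+//
+// For instance, with a hypothetical section struct declaring only a Known
+// field, the input
+//
+//	[section]
+//	known=1
+//	unknown=2
+//
+// yields an extra-data warning for the "unknown" variable.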
+// However, on some occasions it is desirable to be able to proceed in
+// situations when the only data error is that of extra data.
+// These errors are handled at a different (warning) priority and can be
+// filtered out programmatically. To ignore extra data warnings, wrap the
+// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+//  - documentation
+//    - self-contained syntax documentation
+//    - more practical examples
+//    - move TODOs to issue tracker (eventually)
+//  - syntax
+//    - reconsider valid escape sequences
+//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+//  - reading / parsing gcfg files
+//    - define internal representation structure
+//    - support multiple inputs (readers, strings, files)
+//    - support declaring encoding (?)
+//    - support varying field sets for subsections (?)
+//  - writing gcfg files
+//  - error handling
+//    - make error context accessible programmatically?
+//    - limit input size?
+//
+package gcfg // import "gopkg.in/gcfg.v1"
diff --git a/vendor/gopkg.in/gcfg.v1/errors.go b/vendor/gopkg.in/gcfg.v1/errors.go
new file mode 100644
index 000000000..83a591dac
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/errors.go
@@ -0,0 +1,57 @@
+package gcfg
+
+import warnings "gopkg.in/warnings.v0"
+
+// FatalOnly filters the results of a Read*Into invocation and returns only
+// fatal errors. That is, errors (warnings) indicating data for unknown
+// sections / variables are ignored. Example invocation:
+//
+//	err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
+//	if err != nil {
+//	    ...
+//
+func FatalOnly(err error) error {
+	return warnings.FatalOnly(err)
+}
+
+func isFatal(err error) bool {
+	_, ok := err.(extraData)
+	return !ok
+}
+
+type loc struct {
+	section    string
+	subsection *string
+	variable   *string
+}
+
+type extraData struct {
+	loc
+}
+
+type locErr struct {
+	msg string
+	loc
+}
+
+func (l loc) String() string {
+	s := "section \"" + l.section + "\""
+	if l.subsection != nil {
+		s += ", subsection \"" + *l.subsection + "\""
+	}
+	if l.variable != nil {
+		s += ", variable \"" + *l.variable + "\""
+	}
+	return s
+}
+
+func (e extraData) Error() string {
+	return "can't store data at " + e.loc.String()
+}
+
+func (e locErr) Error() string {
+	return e.msg + " at " + e.loc.String()
+}
+
+var _ error = extraData{}
+var _ error = locErr{}
diff --git a/vendor/gopkg.in/gcfg.v1/read.go b/vendor/gopkg.in/gcfg.v1/read.go
new file mode 100644
index 000000000..06796653c
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/read.go
@@ -0,0 +1,257 @@
+package gcfg
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"gopkg.in/gcfg.v1/scanner"
+	"gopkg.in/gcfg.v1/token"
+	"gopkg.in/warnings.v0"
+)
+
+var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
+var utf8Bom = []byte("\ufeff")
+
+// no error: invalid literals should be caught by scanner
+func unquote(s string) string {
+	u, q, esc := make([]rune, 0, len(s)), false, false
+	for _, c := range s {
+		if esc {
+			uc, ok := unescape[c]
+			switch {
+			case ok:
+				u = append(u, uc)
+				fallthrough
+			case !q && c == '\n':
+				esc = false
+				continue
+			}
+			panic("invalid escape sequence")
+		}
+		switch c {
+		case '"':
+			q = !q
+		case '\\':
+			esc = true
+		default:
+			u = append(u, c)
+		}
+	}
+	if q {
+		panic("missing end quote")
+	}
+	if esc {
+		panic("invalid escape sequence")
+	}
+	return string(u)
+}
+
+func readIntoPass(c *warnings.Collector, config interface{}, fset *token.FileSet,
+	
file *token.File, src []byte, subsectPass bool) error { + // + var s scanner.Scanner + var errs scanner.ErrorList + s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) + sect, sectsub := "", "" + pos, tok, lit := s.Scan() + errfn := func(msg string) error { + return fmt.Errorf("%s: %s", fset.Position(pos), msg) + } + for { + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + switch tok { + case token.EOF: + return nil + case token.EOL, token.COMMENT: + pos, tok, lit = s.Scan() + case token.LBRACK: + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.IDENT { + if err := c.Collect(errfn("expected section name")); err != nil { + return err + } + } + sect, sectsub = lit, "" + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok == token.STRING { + sectsub = unquote(lit) + if sectsub == "" { + if err := c.Collect(errfn("empty subsection name")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + } + if tok != token.RBRACK { + if sectsub == "" { + if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected right bracket")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + // If a section/subsection header was found, ensure a + // container object is created, even if there are no + // variables further down. 
+ err := c.Collect(set(c, config, sect, sectsub, "", true, "", subsectPass)) + if err != nil { + return err + } + case token.IDENT: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + n := lit + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" + if !blank { + if tok != token.ASSIGN { + if err := c.Collect(errfn("expected '='")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.STRING { + if err := c.Collect(errfn("expected value")); err != nil { + return err + } + } + v = unquote(lit) + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + } + err := set(c, config, sect, sectsub, n, blank, v, subsectPass) + if err != nil { + return err + } + default: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { + return err + } + } + } +} + +func readInto(config interface{}, fset *token.FileSet, file *token.File, + src []byte) error { + // + c := warnings.NewCollector(isFatal) + err := readIntoPass(c, config, fset, file, src, false) + if err != nil { + return err + } + err = readIntoPass(c, config, fset, file, src, true) + if err != nil { + return err + } + return c.Done() +} + +// ReadInto reads gcfg formatted data from reader and sets the values into the +// corresponding fields in config. +func ReadInto(config interface{}, reader io.Reader) error { + src, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + return readInto(config, fset, file, src) +} + +// ReadStringInto reads gcfg formatted data from str and sets the values into +// the corresponding fields in config. +func ReadStringInto(config interface{}, str string) error { + r := strings.NewReader(str) + return ReadInto(config, r) +} + +// ReadFileInto reads gcfg formatted data from the file filename and sets the +// values into the corresponding fields in config. +// +// For compatibility with files created on Windows, the ReadFileInto skips a +// single leading UTF8 BOM sequence if it exists. +func ReadFileInto(config interface{}, filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + + // Skips a single leading UTF8 BOM sequence if it exists. + src = skipLeadingUtf8Bom(src) + + fset := token.NewFileSet() + file := fset.AddFile(filename, fset.Base(), len(src)) + return readInto(config, fset, file, src) +} + +func skipLeadingUtf8Bom(src []byte) []byte { + lengthUtf8Bom := len(utf8Bom) + + if len(src) >= lengthUtf8Bom { + if bytes.Equal(src[:lengthUtf8Bom], utf8Bom) { + return src[lengthUtf8Bom:] + } + } + return src +} diff --git a/vendor/gopkg.in/gcfg.v1/scanner/errors.go b/vendor/gopkg.in/gcfg.v1/scanner/errors.go new file mode 100644 index 000000000..1a3c0f656 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/scanner/errors.go @@ -0,0 +1,121 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scanner + +import ( + "fmt" + "io" + "sort" +) + +import ( + "gopkg.in/gcfg.v1/token" +) + +// In an ErrorList, an error is represented by an *Error. +// The position Pos, if valid, points to the beginning of +// the offending token, and the error condition is described +// by Msg. +// +type Error struct { + Pos token.Position + Msg string +} + +// Error implements the error interface. +func (e Error) Error() string { + if e.Pos.Filename != "" || e.Pos.IsValid() { + // don't print "" + // TODO(gri) reconsider the semantics of Position.IsValid + return e.Pos.String() + ": " + e.Msg + } + return e.Msg +} + +// ErrorList is a list of *Errors. +// The zero value for an ErrorList is an empty ErrorList ready to use. +// +type ErrorList []*Error + +// Add adds an Error with given position and error message to an ErrorList. +func (p *ErrorList) Add(pos token.Position, msg string) { + *p = append(*p, &Error{pos, msg}) +} + +// Reset resets an ErrorList to no errors. +func (p *ErrorList) Reset() { *p = (*p)[0:0] } + +// ErrorList implements the sort Interface. +func (p ErrorList) Len() int { return len(p) } +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p ErrorList) Less(i, j int) bool { + e := &p[i].Pos + f := &p[j].Pos + if e.Filename < f.Filename { + return true + } + if e.Filename == f.Filename { + return e.Offset < f.Offset + } + return false +} + +// Sort sorts an ErrorList. *Error entries are sorted by position, +// other errors are sorted by error message, and before any *Error +// entry. +// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. +func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/gopkg.in/gcfg.v1/scanner/scanner.go b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go new file mode 100644 index 000000000..6d0eee916 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/scanner/scanner.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner implements a scanner for gcfg configuration text. +// It takes a []byte as source which can then be tokenized +// through repeated calls to the Scan method. 
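
For a sense of how this vendored scanner is driven, here is a minimal, hypothetical sketch (not part of the vendored sources) that uses only the `Scanner`, `ErrorList`, and `token` APIs added in this diff; the file name and input text are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/gcfg.v1/scanner"
	"gopkg.in/gcfg.v1/token"
)

func main() {
	src := []byte("[section \"sub\"]\nname = value ; comment\n")

	// A File must be registered with a FileSet before scanning so that
	// token positions can later be resolved to line/column information.
	fset := token.NewFileSet()
	file := fset.AddFile("example.gcfg", fset.Base(), len(src))

	var errs scanner.ErrorList
	var s scanner.Scanner
	s.Init(file, src, func(pos token.Position, msg string) { errs.Add(pos, msg) },
		scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
	}
	if errs.Len() > 0 {
		scanner.PrintError(os.Stderr, errs.Err())
	}
}
```

Note that Scan keeps producing valid tokens after a syntax error, so a client still has to check the collected `ErrorList` (or the scanner's `ErrorCount`) at the end, exactly as `read.go` does.
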
+// +// Note that the API for the scanner package may change to accommodate new +// features or implementation changes in gcfg. +// +package scanner + +import ( + "fmt" + "path/filepath" + "unicode" + "unicode/utf8" +) + +import ( + "gopkg.in/gcfg.v1/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. +// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + nextVal bool // next token is expected to be a value + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. +// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= 0x80: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. 
+ if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.ErrorCount = 0 + s.nextVal = false + + s.next() +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial [;#] already consumed + offs := s.offset - 1 // position of initial [;#] + + for s.ch != '\n' && s.ch >= 0 { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanEscape(val bool) { + offs := s.offset + ch := s.ch + s.next() // always make progress + switch ch { + case '\\', '"': + // ok + case 'n', 't': + if val { + break // ok + } + fallthrough + default: + s.error(offs, "unknown escape sequence") + } +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for s.ch != '"' { + ch := s.ch + s.next() + if ch == '\n' || ch < 0 { + s.error(offs, "string not terminated") + break + } + if ch == '\\' { + s.scanEscape(false) + } + } + + s.next() + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanValString() string { + offs := s.offset + + hasCR := false + end := offs + inQuote := false +loop: + for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { + ch := s.ch + s.next() + switch { + case inQuote && ch == '\\': + s.scanEscape(true) + case !inQuote && ch == '\\': + if s.ch == '\r' { + hasCR = true + s.next() + } + if s.ch != '\n' && s.ch != '"' { + s.error(offs, "unquoted '\\' must be followed by new line or double quote") + break loop + } + s.next() + case ch == '"': + inQuote = !inQuote + case ch == '\r': + hasCR = true + case ch < 0 || inQuote && ch == '\n': + s.error(offs, "string not terminated") + break loop + } + if inQuote || !isWhiteSpace(ch) { + end = s.offset + } + } + + lit := s.src[offs:end] + if hasCR { + lit = stripCR(lit) + } + + return string(lit) +} + +func isWhiteSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\r' +} + +func (s *Scanner) skipWhitespace() { + for isWhiteSpace(s.ch) { + s.next() + } +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.STRING) or +// token.COMMENT, the literal string has the corresponding value. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. 
Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. +// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + switch ch := s.ch; { + case s.nextVal: + lit = s.scanValString() + tok = token.STRING + s.nextVal = false + case isLetter(ch): + lit = s.scanIdentifier() + tok = token.IDENT + default: + s.next() // always make progress + switch ch { + case -1: + tok = token.EOF + case '\n': + tok = token.EOL + case '"': + tok = token.STRING + lit = s.scanString() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case ';', '#': + // comment + lit = s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + goto scanAgain + } + tok = token.COMMENT + case '=': + tok = token.ASSIGN + s.nextVal = true + default: + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + tok = token.ILLEGAL + lit = string(ch) + } + } + + return +} diff --git a/vendor/gopkg.in/gcfg.v1/set.go b/vendor/gopkg.in/gcfg.v1/set.go new file mode 100644 index 000000000..73aee5003 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/set.go @@ -0,0 +1,329 @@ +package gcfg + +import ( + "bytes" + "encoding" + "encoding/gob" + "fmt" + "math/big" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "gopkg.in/gcfg.v1/types" + "gopkg.in/warnings.v0" +) + +type tag struct { + ident string + intMode string +} + +func newTag(ts string) tag { + t := tag{} + s := strings.Split(ts, ",") + t.ident = s[0] + for _, tse := range s[1:] { + if strings.HasPrefix(tse, "int=") { + t.intMode = tse[len("int="):] + } + } + return t +} + +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { + var n string + r0, _ := utf8.DecodeRuneInString(name) + if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { + n = "X" + } + n += strings.Replace(name, "-", "_", -1) + f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { + if !v.FieldByName(fieldName).CanSet() { + return false + } + f, _ := v.Type().FieldByName(fieldName) + t := newTag(f.Tag.Get("gcfg")) + if t.ident != "" { + return strings.EqualFold(t.ident, name) + } + return strings.EqualFold(n, fieldName) + }) + if !ok { + return reflect.Value{}, tag{} + } + return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) +} + +type setter func(destp interface{}, blank bool, val string, t tag) error + +var errUnsupportedType = fmt.Errorf("unsupported type") +var errBlankUnsupported = fmt.Errorf("blank value not supported for type") + +var setters = []setter{ + typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, +} + +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { + dtu, ok := d.(encoding.TextUnmarshaler) + if !ok { + return errUnsupportedType + } + if blank { + return errBlankUnsupported + } + return dtu.UnmarshalText([]byte(val)) +} + +func boolSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) + return nil + } + b, err := types.ParseBool(val) + if err == nil { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) + } + 
return err +} + +func intMode(mode string) types.IntMode { + var m types.IntMode + if strings.ContainsAny(mode, "dD") { + m |= types.Dec + } + if strings.ContainsAny(mode, "hH") { + m |= types.Hex + } + if strings.ContainsAny(mode, "oO") { + m |= types.Oct + } + return m +} + +var typeModes = map[reflect.Type]types.IntMode{ + reflect.TypeOf(int(0)): types.Dec | types.Hex, + reflect.TypeOf(int8(0)): types.Dec | types.Hex, + reflect.TypeOf(int16(0)): types.Dec | types.Hex, + reflect.TypeOf(int32(0)): types.Dec | types.Hex, + reflect.TypeOf(int64(0)): types.Dec | types.Hex, + reflect.TypeOf(uint(0)): types.Dec | types.Hex, + reflect.TypeOf(uint8(0)): types.Dec | types.Hex, + reflect.TypeOf(uint16(0)): types.Dec | types.Hex, + reflect.TypeOf(uint32(0)): types.Dec | types.Hex, + reflect.TypeOf(uint64(0)): types.Dec | types.Hex, + // use default mode (allow dec/hex/oct) for uintptr type + reflect.TypeOf(big.Int{}): types.Dec | types.Hex, +} + +func intModeDefault(t reflect.Type) types.IntMode { + m, ok := typeModes[t] + if !ok { + m = types.Dec | types.Hex | types.Oct + } + return m +} + +func intSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + mode := intMode(t.intMode) + if mode == 0 { + mode = intModeDefault(reflect.TypeOf(d).Elem()) + } + return types.ParseInt(d, val, mode) +} + +func stringSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + dsp, ok := d.(*string) + if !ok { + return errUnsupportedType + } + *dsp = val + return nil +} + +var kindSetters = map[reflect.Kind]setter{ + reflect.String: stringSetter, + reflect.Bool: boolSetter, + reflect.Int: intSetter, + reflect.Int8: intSetter, + reflect.Int16: intSetter, + reflect.Int32: intSetter, + reflect.Int64: intSetter, + reflect.Uint: intSetter, + reflect.Uint8: intSetter, + reflect.Uint16: intSetter, + reflect.Uint32: intSetter, + reflect.Uint64: intSetter, + reflect.Uintptr: intSetter, +} + +var typeSetters = map[reflect.Type]setter{ + reflect.TypeOf(big.Int{}): intSetter, +} + +func typeSetter(d interface{}, blank bool, val string, tt tag) error { + t := reflect.ValueOf(d).Type().Elem() + setter, ok := typeSetters[t] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func kindSetter(d interface{}, blank bool, val string, tt tag) error { + k := reflect.ValueOf(d).Type().Elem().Kind() + setter, ok := kindSetters[k] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func scanSetter(d interface{}, blank bool, val string, tt tag) error { + if blank { + return errBlankUnsupported + } + return types.ScanFully(d, val, 'v') +} + +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, + vType reflect.Type) (reflect.Value, error) { + // + pv := reflect.New(vType) + dfltName := "default-" + sect + dfltField, _ := fieldFold(vCfg, dfltName) + var err error + if dfltField.IsValid() { + b := bytes.NewBuffer(nil) + ge := gob.NewEncoder(b) + if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { + return pv, err + } + gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) + if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { + return pv, err + } + } + return pv, nil +} + +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, + blank bool, value string, subsectPass bool) error { + // + vPCfg := reflect.ValueOf(cfg) + if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("config must be a pointer 
to a struct")) + } + vCfg := vPCfg.Elem() + vSect, _ := fieldFold(vCfg, sect) + l := loc{section: sect} + if !vSect.IsValid() { + err := extraData{loc: l} + return c.Collect(err) + } + isSubsect := vSect.Kind() == reflect.Map + if subsectPass != isSubsect { + return nil + } + if isSubsect { + l.subsection = &sub + vst := vSect.Type() + if vst.Key().Kind() != reflect.String || + vst.Elem().Kind() != reflect.Ptr || + vst.Elem().Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("map field for section must have string keys and "+ + " pointer-to-struct values: section %q", sect)) + } + if vSect.IsNil() { + vSect.Set(reflect.MakeMap(vst)) + } + k := reflect.ValueOf(sub) + pv := vSect.MapIndex(k) + if !pv.IsValid() { + vType := vSect.Type().Elem().Elem() + var err error + if pv, err = newValue(c, sect, vCfg, vType); err != nil { + return err + } + vSect.SetMapIndex(k, pv) + } + vSect = pv.Elem() + } else if vSect.Kind() != reflect.Struct { + panic(fmt.Errorf("field for section must be a map or a struct: "+ + "section %q", sect)) + } else if sub != "" { + return c.Collect(extraData{loc: l}) + } + // Empty name is a special value, meaning that only the + // section/subsection object is to be created, with no values set. + if name == "" { + return nil + } + vVar, t := fieldFold(vSect, name) + l.variable = &name + if !vVar.IsValid() { + return c.Collect(extraData{loc: l}) + } + // vVal is either single-valued var, or newly allocated value within multi-valued var + var vVal reflect.Value + // multi-value if unnamed slice type + isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || + vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice + if isMulti && vVar.Kind() == reflect.Ptr { + if vVar.IsNil() { + vVar.Set(reflect.New(vVar.Type().Elem())) + } + vVar = vVar.Elem() + } + if isMulti && blank { + vVar.Set(reflect.Zero(vVar.Type())) + return nil + } + if isMulti { + vVal = reflect.New(vVar.Type().Elem()).Elem() + } else { + vVal = vVar + } + isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr + isNew := isDeref && vVal.IsNil() + // vAddr is address of value to set (dereferenced & allocated as needed) + var vAddr reflect.Value + switch { + case isNew: + vAddr = reflect.New(vVal.Type().Elem()) + case isDeref && !isNew: + vAddr = vVal + default: + vAddr = vVal.Addr() + } + vAddrI := vAddr.Interface() + err, ok := error(nil), false + for _, s := range setters { + err = s(vAddrI, blank, value, t) + if err == nil { + ok = true + break + } + if err != errUnsupportedType { + return locErr{msg: err.Error(), loc: l} + } + } + if !ok { + // in case all setters returned errUnsupportedType + return locErr{msg: err.Error(), loc: l} + } + if isNew { // set reference if it was dereferenced and newly allocated + vVal.Set(vAddr) + } + if isMulti { // append if multi-valued + vVar.Set(reflect.Append(vVar, vVal)) + } + return nil +} diff --git a/vendor/gopkg.in/gcfg.v1/token/position.go b/vendor/gopkg.in/gcfg.v1/token/position.go new file mode 100644 index 000000000..fc45c1e76 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/position.go @@ -0,0 +1,435 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri) consider making this a separate package outside the go directory. 
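
Putting the pieces above together, the following hypothetical sketch shows how the field-mapping rules in `set.go` surface through the public `ReadStringInto`/`FatalOnly` API driven by the two-pass reader: sections bind to struct fields by folded name, subsections require a map with string keys and pointer-to-struct values, a bare variable sets a bool to true, and an unnamed slice type collects repeated variables. The `Config` type and config text are illustrative only:

```go
package main

import (
	"fmt"

	gcfg "gopkg.in/gcfg.v1"
)

type Config struct {
	Section struct {
		Name    string
		Flag    bool     // a bare "flag" line sets this to true
		Targets []string // unnamed slice type: collects repeated variables
	}
	// Subsections need a map of string keys to pointer-to-struct values.
	Profile map[string]*struct {
		Dir string
	}
}

func main() {
	cfgText := `
[section]
name = example
flag
targets = a
targets = b

[profile "dev"]
dir = /tmp
`
	var cfg Config
	if err := gcfg.FatalOnly(gcfg.ReadStringInto(&cfg, cfgText)); err != nil {
		fmt.Println("fatal:", err)
		return
	}
	fmt.Println(cfg.Section.Name, cfg.Section.Flag, cfg.Section.Targets)
	fmt.Println(cfg.Profile["dev"].Dir)
}
```

Because the result is wrapped in `FatalOnly`, data for unknown sections or variables would surface only as filtered-out warnings rather than as an error.
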
+ +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. +// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset, first add +// the respective file to the current file set (via FileSet.AddFile) +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid returns true if the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []int + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. 
+func (f *File) LineCount() int { + f.set.mutex.RLock() + n := len(f.lines) + f.set.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + f.set.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.set.mutex.Unlock() +} + +// SetLines sets the line offsets for a file and returns true if successful. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.set.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.set.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. 
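
A small, hypothetical sketch of the `Pos`/`Position` round trip defined above (the file name and offsets are illustrative; only `NewFileSet`, `AddFile`, `File.Pos`, `File.Offset`, and `FileSet.Position` from this file are used):

```go
package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/token"
)

func main() {
	fset := token.NewFileSet()
	src := "name = value\n"
	f := fset.AddFile("demo.gcfg", fset.Base(), len(src))

	p := f.Pos(7)                 // offset 7 is the 'v' of "value"
	fmt.Println(fset.Position(p)) // demo.gcfg:1:8
	fmt.Println(f.Offset(p))      // 7: round-trips back to the offset
}
```
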
+// +func (f *File) Line(p Pos) int { + // TODO(gri) this can be implemented much more efficiently + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// info returns the file name, line, and column number for a file offset. +func (f *File) info(offset int) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.info(offset) + return +} + +// Position returns the Position value for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Position(p Pos) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p) + } + return +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. +// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + s := new(FileSet) + s.base = 1 // 0 == NoPos + return s +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. +// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{s, filename, base, size, []int{0}, nil} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. 
+// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.last = f + return f + } + } + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + s.mutex.RLock() + f = s.file(p) + s.mutex.RUnlock() + } + return +} + +// Position converts a Pos in the fileset into a general Position. +func (s *FileSet) Position(p Pos) (pos Position) { + if p != NoPos { + s.mutex.RLock() + if f := s.file(p); f != nil { + pos = f.position(p) + } + s.mutex.RUnlock() + } + return +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/gopkg.in/gcfg.v1/token/serialize.go b/vendor/gopkg.in/gcfg.v1/token/serialize.go new file mode 100644 index 000000000..4adc8f9e3 --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/serialize.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. 
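
Because `Read` and `Write` accept generic decode/encode callbacks, a `FileSet` can be round-tripped through `encoding/gob` by passing method values; a hypothetical sketch:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"gopkg.in/gcfg.v1/token"
)

func main() {
	fset := token.NewFileSet()
	fset.AddFile("a.gcfg", fset.Base(), 10)

	// Serialize: Write drives the provided encode callback.
	var buf bytes.Buffer
	if err := fset.Write(gob.NewEncoder(&buf).Encode); err != nil {
		panic(err)
	}

	// Deserialize into a fresh set: Read drives the decode callback.
	restored := token.NewFileSet()
	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
		panic(err)
	}
	fmt.Println(restored.Base()) // same as fset.Base() after AddFile
}
```
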
+func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/gopkg.in/gcfg.v1/token/token.go b/vendor/gopkg.in/gcfg.v1/token/token.go new file mode 100644 index 000000000..b3c7c83fa --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/token/token.go @@ -0,0 +1,83 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token defines constants representing the lexical tokens of the gcfg +// configuration syntax and basic operations on tokens (printing, predicates). +// +// Note that the API for the token package may change to accommodate new +// features or implementation changes in gcfg. +// +package token + +import "strconv" + +// Token is the set of lexical tokens of the gcfg configuration syntax. +type Token int + +// The list of tokens. +const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // section-name, variable-name + STRING // "subsection-name", variable value + literal_end + + operator_beg + // Operators and delimiters + ASSIGN // = + LBRACK // [ + RBRACK // ] + EOL // \n + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + STRING: "STRING", + + ASSIGN: "=", + LBRACK: "[", + RBRACK: "]", + EOL: "\n", +} + +// String returns the string corresponding to the token tok. +// For operators and delimiters, the string is the actual token character +// sequence (e.g., for the token ASSIGN, the string is "="). For all other +// tokens the string corresponds to the token constant name (e.g. for the +// token IDENT, the string is "IDENT"). +// +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +// +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +// +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/vendor/gopkg.in/gcfg.v1/types/bool.go b/vendor/gopkg.in/gcfg.v1/types/bool.go new file mode 100644 index 000000000..8dcae0d8c --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/bool.go @@ -0,0 +1,23 @@ +package types + +// BoolValues defines the name and value mappings for ParseBool. +var BoolValues = map[string]interface{}{ + "true": true, "yes": true, "on": true, "1": true, + "false": false, "no": false, "off": false, "0": false, +} + +var boolParser = func() *EnumParser { + ep := &EnumParser{} + ep.AddVals(BoolValues) + return ep +}() + +// ParseBool parses bool values according to the definitions in BoolValues. +// Parsing is case-insensitive. 
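
A brief, hypothetical sketch of `ParseBool` behavior as defined by the `BoolValues` table above (the inputs are illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/types"
)

func main() {
	for _, s := range []string{"YES", "off", "1", "maybe"} {
		v, err := types.ParseBool(s)
		fmt.Println(s, v, err)
	}
	// YES true <nil>
	// off false <nil>
	// 1 true <nil>
	// maybe false failed to parse bool `maybe`
}
```
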
+func ParseBool(s string) (bool, error) {
+	v, err := boolParser.Parse(s)
+	if err != nil {
+		return false, err
+	}
+	return v.(bool), nil
+}
diff --git a/vendor/gopkg.in/gcfg.v1/types/doc.go b/vendor/gopkg.in/gcfg.v1/types/doc.go
new file mode 100644
index 000000000..9f9c345f6
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/types/doc.go
@@ -0,0 +1,4 @@
+// Package types defines helpers for type conversions.
+//
+// The API for this package is not finalized yet.
+package types
diff --git a/vendor/gopkg.in/gcfg.v1/types/enum.go b/vendor/gopkg.in/gcfg.v1/types/enum.go
new file mode 100644
index 000000000..1a0c7ef45
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/types/enum.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// EnumParser parses "enum" values; i.e. a predefined set of strings to
+// predefined values.
+type EnumParser struct {
+	Type      string // type name; if not set, use type of first value added
+	CaseMatch bool   // if true, matching of strings is case-sensitive
+	// PrefixMatch bool
+	vals map[string]interface{}
+}
+
+// AddVals adds strings and values to an EnumParser.
+func (ep *EnumParser) AddVals(vals map[string]interface{}) {
+	if ep.vals == nil {
+		ep.vals = make(map[string]interface{})
+	}
+	for k, v := range vals {
+		if ep.Type == "" {
+			ep.Type = reflect.TypeOf(v).Name()
+		}
+		if !ep.CaseMatch {
+			k = strings.ToLower(k)
+		}
+		ep.vals[k] = v
+	}
+}
+
+// Parse parses the string and returns the value or an error.
+func (ep EnumParser) Parse(s string) (interface{}, error) {
+	if !ep.CaseMatch {
+		s = strings.ToLower(s)
+	}
+	v, ok := ep.vals[s]
+	if !ok {
+		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
+	}
+	return v, nil
+}
diff --git a/vendor/gopkg.in/gcfg.v1/types/int.go b/vendor/gopkg.in/gcfg.v1/types/int.go
new file mode 100644
index 000000000..af7e75c12
--- /dev/null
+++ b/vendor/gopkg.in/gcfg.v1/types/int.go
@@ -0,0 +1,86 @@
+package types
+
+import (
+	"fmt"
+	"strings"
+)
+
+// An IntMode is a mode for parsing integer values, representing a set of
+// accepted bases.
+type IntMode uint8
+
+// IntMode values for ParseInt; can be combined using binary or.
+const (
+	Dec IntMode = 1 << iota
+	Hex
+	Oct
+)
+
+// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
+func (m IntMode) String() string {
+	var modes []string
+	if m&Dec != 0 {
+		modes = append(modes, "Dec")
+	}
+	if m&Hex != 0 {
+		modes = append(modes, "Hex")
+	}
+	if m&Oct != 0 {
+		modes = append(modes, "Oct")
+	}
+	return "IntMode(" + strings.Join(modes, "|") + ")"
+}
+
+var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
+
+func prefix0(val string) bool {
+	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
+}
+
+func prefix0x(val string) bool {
+	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
+}
+
+// ParseInt parses val using mode into intptr, which must be a pointer to an
+// integer kind type. Non-decimal values require a `0` or `0x` prefix in the
+// cases where the mode permits ambiguity of base; otherwise the prefix can be
+// omitted.
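
A hypothetical sketch of how the mode flags interact with those prefixes (the values are illustrative):

```go
package main

import (
	"fmt"

	"gopkg.in/gcfg.v1/types"
)

func main() {
	var n int

	// Dec|Hex: a plain number is decimal, an explicit 0x prefix is hex.
	fmt.Println(types.ParseInt(&n, "26", types.Dec|types.Hex), n)   // <nil> 26
	fmt.Println(types.ParseInt(&n, "0x1a", types.Dec|types.Hex), n) // <nil> 26

	// Hex|Oct without a disambiguating 0 prefix is rejected as ambiguous.
	fmt.Println(types.ParseInt(&n, "26", types.Hex|types.Oct))
}
```
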
+func ParseInt(intptr interface{}, val string, mode IntMode) error { + val = strings.TrimSpace(val) + verb := byte(0) + switch mode { + case Dec: + verb = 'd' + case Dec + Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Oct: + if prefix0(val) && !prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Hex + Oct: + verb = 'v' + case Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'x' + } + case Oct: + verb = 'o' + case Hex + Oct: + if prefix0(val) { + verb = 'v' + } else { + return errIntAmbig + } + } + if verb == 0 { + panic("unsupported mode") + } + return ScanFully(intptr, val, verb) +} diff --git a/vendor/gopkg.in/gcfg.v1/types/scan.go b/vendor/gopkg.in/gcfg.v1/types/scan.go new file mode 100644 index 000000000..db2f6ed3c --- /dev/null +++ b/vendor/gopkg.in/gcfg.v1/types/scan.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "io" + "reflect" +) + +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. +func ScanFully(ptr interface{}, val string, verb byte) error { + t := reflect.ValueOf(ptr).Elem().Type() + // attempt to read extra bytes to make sure the value is consumed + var b []byte + n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) + switch { + case n < 1 || n == 1 && err != io.EOF: + return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) + case n > 1: + return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) + } + // n == 1 && err == io.EOF + return nil +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml new file mode 100644 index 000000000..21166f5c7 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - tip + - 1.15.x + - 1.14.x + - 1.13.x + - 1.12.x + +env: + - GO111MODULE=on diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE new file mode 100644 index 000000000..c3d4cc307 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md new file mode 100644 index 000000000..060eae52a --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md @@ -0,0 +1,179 @@ +# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0) + +### Lumberjack is a Go package for writing logs to rolling files. + +Package lumberjack provides a rolling logger. + +Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +thusly: + + import "gopkg.in/natefinch/lumberjack.v2" + +The package name remains simply lumberjack, and the code resides at +https://github.com/natefinch/lumberjack under the v2.0 branch. + +Lumberjack is intended to be one part of a logging infrastructure. +It is not an all-in-one solution, but instead is a pluggable +component at the bottom of the logging stack that simply controls the files +to which logs are written. + +Lumberjack plays well with any logging package that can write to an +io.Writer, including the standard library's log package. + +Lumberjack assumes that only one process is writing to the output files. +Using the same lumberjack configuration from multiple processes on the same +machine will result in improper behavior. + + +**Example** + +To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts. + +Code: + +```go +log.SetOutput(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, //days + Compress: true, // disabled by default +}) +``` + + + +## type Logger +``` go +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. 
The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + // Compress determines if the rotated log files should be compressed + // using gzip. The default is not to perform compression. + Compress bool `json:"compress" yaml:"compress"` + // contains filtered or unexported fields +} +``` +Logger is an io.WriteCloser that writes to the specified filename. + +Logger opens or creates the logfile on first Write. If the file exists and +is less than MaxSize megabytes, lumberjack will open and append to that file. +If the file exists and its size is >= MaxSize megabytes, the file is renamed +by putting the current time in a timestamp in the name immediately before the +file's extension (or the end of the filename if there's no extension). A new +log file is then created using original filename. + +Whenever a write would cause the current log file exceed MaxSize megabytes, +the current file is closed, renamed, and a new log file created with the +original name. Thus, the filename you give Logger is always the "current" log +file. + +Backups use the log file name given to Logger, in the form `name-timestamp.ext` +where name is the filename without the extension, timestamp is the time at which +the log was rotated formatted with the time.Time format of +`2006-01-02T15-04-05.000` and the extension is the original extension. For +example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created +at 6:30pm on Nov 11 2016 would use the filename +`/var/log/foo/server-2016-11-04T18-30-00.000.log` + +### Cleaning Up Old Log Files +Whenever a new logfile gets created, old log files may be deleted. The most +recent files according to the encoded timestamp will be retained, up to a +number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +with an encoded timestamp older than MaxAge days are deleted, regardless of +MaxBackups. Note that the time encoded in the timestamp is the rotation +time, which may differ from the last time that file was written to. + +If MaxBackups and MaxAge are both 0, no old log files will be deleted. + + + + + + + + + + + +### func (\*Logger) Close +``` go +func (l *Logger) Close() error +``` +Close implements io.Closer, and closes the current logfile. + + + +### func (\*Logger) Rotate +``` go +func (l *Logger) Rotate() error +``` +Rotate causes Logger to close the existing log file and immediately create a +new one. This is a helper function for applications that want to initiate +rotations outside of the normal rotation rules, such as in response to +SIGHUP. After rotating, this initiates a cleanup of old log files according +to the normal rules. + +**Example** + +Example of how to rotate in response to SIGHUP. + +Code: + +```go +l := &lumberjack.Logger{} +log.SetOutput(l) +c := make(chan os.Signal, 1) +signal.Notify(c, syscall.SIGHUP) + +go func() { + for { + <-c + l.Rotate() + } +}() +``` + +### func (\*Logger) Write +``` go +func (l *Logger) Write(p []byte) (n int, err error) +``` +Write implements io.Writer. If a write would cause the log file to be larger +than MaxSize, the file is closed, renamed to include a timestamp of the +current time, and a new log file is created using the original log file name. +If the length of the write is greater than MaxSize, an error is returned. 
+ + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go new file mode 100644 index 000000000..11d066972 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go @@ -0,0 +1,11 @@ +// +build !linux + +package lumberjack + +import ( + "os" +) + +func chown(_ string, _ os.FileInfo) error { + return nil +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go new file mode 100644 index 000000000..465f56927 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go @@ -0,0 +1,19 @@ +package lumberjack + +import ( + "os" + "syscall" +) + +// osChown is a var so we can mock it out during tests. +var osChown = os.Chown + +func chown(name string, info os.FileInfo) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + f.Close() + stat := info.Sys().(*syscall.Stat_t) + return osChown(name, int(stat.Uid), int(stat.Gid)) +} diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go new file mode 100644 index 000000000..3447cdc05 --- /dev/null +++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go @@ -0,0 +1,541 @@ +// Package lumberjack provides a rolling logger. +// +// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +// thusly: +// +// import "gopkg.in/natefinch/lumberjack.v2" +// +// The package name remains simply lumberjack, and the code resides at +// https://github.com/natefinch/lumberjack under the v2.0 branch. +// +// Lumberjack is intended to be one part of a logging infrastructure. +// It is not an all-in-one solution, but instead is a pluggable +// component at the bottom of the logging stack that simply controls the files +// to which logs are written. +// +// Lumberjack plays well with any logging package that can write to an +// io.Writer, including the standard library's log package. +// +// Lumberjack assumes that only one process is writing to the output files. +// Using the same lumberjack configuration from multiple processes on the same +// machine will result in improper behavior. +package lumberjack + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +const ( + backupTimeFormat = "2006-01-02T15-04-05.000" + compressSuffix = ".gz" + defaultMaxSize = 100 +) + +// ensure we always implement io.WriteCloser +var _ io.WriteCloser = (*Logger)(nil) + +// Logger is an io.WriteCloser that writes to the specified filename. +// +// Logger opens or creates the logfile on first Write. If the file exists and +// is less than MaxSize megabytes, lumberjack will open and append to that file. +// If the file exists and its size is >= MaxSize megabytes, the file is renamed +// by putting the current time in a timestamp in the name immediately before the +// file's extension (or the end of the filename if there's no extension). A new +// log file is then created using original filename. +// +// Whenever a write would cause the current log file exceed MaxSize megabytes, +// the current file is closed, renamed, and a new log file created with the +// original name. Thus, the filename you give Logger is always the "current" log +// file. 
+//
+// Backups use the log file name given to Logger, in the form
+// `name-timestamp.ext` where name is the filename without the extension,
+// timestamp is the time at which the log was rotated formatted with the
+// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
+// original extension. For example, if your Logger.Filename is
+// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 4 2016 would
+// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
+//
+// Cleaning Up Old Log Files
+//
+// Whenever a new logfile gets created, old log files may be deleted. The most
+// recent files according to the encoded timestamp will be retained, up to a
+// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+// with an encoded timestamp older than MaxAge days are deleted, regardless of
+// MaxBackups. Note that the time encoded in the timestamp is the rotation
+// time, which may differ from the last time that file was written to.
+//
+// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+type Logger struct {
+	// Filename is the file to write logs to. Backup log files will be retained
+	// in the same directory. It uses <processname>-lumberjack.log in
+	// os.TempDir() if empty.
+	Filename string `json:"filename" yaml:"filename"`
+
+	// MaxSize is the maximum size in megabytes of the log file before it gets
+	// rotated. It defaults to 100 megabytes.
+	MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+	// MaxAge is the maximum number of days to retain old log files based on the
+	// timestamp encoded in their filename. Note that a day is defined as 24
+	// hours and may not exactly correspond to calendar days due to daylight
+	// savings, leap seconds, etc. The default is not to remove old log files
+	// based on age.
+	MaxAge int `json:"maxage" yaml:"maxage"`
+
+	// MaxBackups is the maximum number of old log files to retain. The default
+	// is to retain all old log files (though MaxAge may still cause them to get
+	// deleted.)
+	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+	// LocalTime determines if the time used for formatting the timestamps in
+	// backup files is the computer's local time. The default is to use UTC
+	// time.
+	LocalTime bool `json:"localtime" yaml:"localtime"`
+
+	// Compress determines if the rotated log files should be compressed
+	// using gzip. The default is not to perform compression.
+	Compress bool `json:"compress" yaml:"compress"`
+
+	size int64
+	file *os.File
+	mu   sync.Mutex
+
+	millCh    chan bool
+	startMill sync.Once
+}
+
+var (
+	// currentTime exists so it can be mocked out by tests.
+	currentTime = time.Now
+
+	// osStat exists so it can be mocked out by tests.
+	osStat = os.Stat
+
+	// megabyte is the conversion factor between MaxSize and bytes. It is a
+	// variable so tests can mock it out and not need to write megabytes of data
+	// to disk.
+	megabyte = 1024 * 1024
+)
+
+// Write implements io.Writer. If a write would cause the log file to be larger
+// than MaxSize, the file is closed, renamed to include a timestamp of the
+// current time, and a new log file is created using the original log file name.
+// If the length of the write is greater than MaxSize, an error is returned.
+func (l *Logger) Write(p []byte) (n int, err error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	writeLen := int64(len(p))
+	if writeLen > l.max() {
+		return 0, fmt.Errorf(
+			"write length %d exceeds maximum file size %d", writeLen, l.max(),
+		)
+	}
+
+	if l.file == nil {
+		if err = l.openExistingOrNew(len(p)); err != nil {
+			return 0, err
+		}
+	}
+
+	if l.size+writeLen > l.max() {
+		if err := l.rotate(); err != nil {
+			return 0, err
+		}
+	}
+
+	n, err = l.file.Write(p)
+	l.size += int64(n)
+
+	return n, err
+}
+
+// Close implements io.Closer, and closes the current logfile.
+func (l *Logger) Close() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.close()
+}
+
+// close closes the file if it is open.
+func (l *Logger) close() error {
+	if l.file == nil {
+		return nil
+	}
+	err := l.file.Close()
+	l.file = nil
+	return err
+}
+
+// Rotate causes Logger to close the existing log file and immediately create a
+// new one. This is a helper function for applications that want to initiate
+// rotations outside of the normal rotation rules, such as in response to
+// SIGHUP. After rotating, this initiates compression and removal of old log
+// files according to the configuration.
+func (l *Logger) Rotate() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.rotate()
+}
+
+// rotate closes the current file, moves it aside with a timestamp in the name
+// (if it exists), opens a new file with the original filename, and then runs
+// post-rotation processing and removal.
+func (l *Logger) rotate() error {
+	if err := l.close(); err != nil {
+		return err
+	}
+	if err := l.openNew(); err != nil {
+		return err
+	}
+	l.mill()
+	return nil
+}
+
+// openNew opens a new log file for writing, moving any old log file out of the
+// way. This method assumes the file has already been closed.
+func (l *Logger) openNew() error {
+	err := os.MkdirAll(l.dir(), 0755)
+	if err != nil {
+		return fmt.Errorf("can't make directories for new logfile: %s", err)
+	}
+
+	name := l.filename()
+	mode := os.FileMode(0600)
+	info, err := osStat(name)
+	if err == nil {
+		// Copy the mode off the old logfile.
+		mode = info.Mode()
+		// move the existing file
+		newname := backupName(name, l.LocalTime)
+		if err := os.Rename(name, newname); err != nil {
+			return fmt.Errorf("can't rename log file: %s", err)
+		}
+
+		// this is a no-op anywhere but linux
+		if err := chown(name, info); err != nil {
+			return err
+		}
+	}
+
+	// we use truncate here because this should only get called when we've moved
+	// the file ourselves. if someone else creates the file in the meantime,
+	// just wipe out the contents.
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
+	if err != nil {
+		return fmt.Errorf("can't open new logfile: %s", err)
+	}
+	l.file = f
+	l.size = 0
+	return nil
+}
+
+// backupName creates a new filename from the given name, inserting a timestamp
+// between the filename and the extension, using the local time if requested
+// (otherwise UTC).
+func backupName(name string, local bool) string {
+	dir := filepath.Dir(name)
+	filename := filepath.Base(name)
+	ext := filepath.Ext(filename)
+	prefix := filename[:len(filename)-len(ext)]
+	t := currentTime()
+	if !local {
+		t = t.UTC()
+	}
+
+	timestamp := t.Format(backupTimeFormat)
+	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
+}
+
+// openExistingOrNew opens the logfile if it exists and if the current write
+// would not put it over MaxSize.
If there is no such file or the write would +// put it over the MaxSize, a new file is created. +func (l *Logger) openExistingOrNew(writeLen int) error { + l.mill() + + filename := l.filename() + info, err := osStat(filename) + if os.IsNotExist(err) { + return l.openNew() + } + if err != nil { + return fmt.Errorf("error getting log file info: %s", err) + } + + if info.Size()+int64(writeLen) >= l.max() { + return l.rotate() + } + + file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + // if we fail to open the old log file for some reason, just ignore + // it and open a new log file. + return l.openNew() + } + l.file = file + l.size = info.Size() + return nil +} + +// filename generates the name of the logfile from the current time. +func (l *Logger) filename() string { + if l.Filename != "" { + return l.Filename + } + name := filepath.Base(os.Args[0]) + "-lumberjack.log" + return filepath.Join(os.TempDir(), name) +} + +// millRunOnce performs compression and removal of stale log files. +// Log files are compressed if enabled via configuration and old log +// files are removed, keeping at most l.MaxBackups files, as long as +// none of them are older than MaxAge. +func (l *Logger) millRunOnce() error { + if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress { + return nil + } + + files, err := l.oldLogFiles() + if err != nil { + return err + } + + var compress, remove []logInfo + + if l.MaxBackups > 0 && l.MaxBackups < len(files) { + preserved := make(map[string]bool) + var remaining []logInfo + for _, f := range files { + // Only count the uncompressed log file or the + // compressed log file, not both. + fn := f.Name() + if strings.HasSuffix(fn, compressSuffix) { + fn = fn[:len(fn)-len(compressSuffix)] + } + preserved[fn] = true + + if len(preserved) > l.MaxBackups { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + if l.MaxAge > 0 { + diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) + cutoff := currentTime().Add(-1 * diff) + + var remaining []logInfo + for _, f := range files { + if f.timestamp.Before(cutoff) { + remove = append(remove, f) + } else { + remaining = append(remaining, f) + } + } + files = remaining + } + + if l.Compress { + for _, f := range files { + if !strings.HasSuffix(f.Name(), compressSuffix) { + compress = append(compress, f) + } + } + } + + for _, f := range remove { + errRemove := os.Remove(filepath.Join(l.dir(), f.Name())) + if err == nil && errRemove != nil { + err = errRemove + } + } + for _, f := range compress { + fn := filepath.Join(l.dir(), f.Name()) + errCompress := compressLogFile(fn, fn+compressSuffix) + if err == nil && errCompress != nil { + err = errCompress + } + } + + return err +} + +// millRun runs in a goroutine to manage post-rotation compression and removal +// of old log files. +func (l *Logger) millRun() { + for range l.millCh { + // what am I going to do, log this? + _ = l.millRunOnce() + } +} + +// mill performs post-rotation compression and removal of stale log files, +// starting the mill goroutine if necessary. 
+func (l *Logger) mill() {
+	l.startMill.Do(func() {
+		l.millCh = make(chan bool, 1)
+		go l.millRun()
+	})
+	select {
+	case l.millCh <- true:
+	default:
+	}
+}
+
+// oldLogFiles returns the list of backup log files stored in the same
+// directory as the current log file, sorted by the timestamp encoded in
+// their names, newest first.
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
+	files, err := ioutil.ReadDir(l.dir())
+	if err != nil {
+		return nil, fmt.Errorf("can't read log file directory: %s", err)
+	}
+	logFiles := []logInfo{}
+
+	prefix, ext := l.prefixAndExt()
+
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+			continue
+		}
+		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+			continue
+		}
+		// error parsing means that the suffix at the end was not generated
+		// by lumberjack, and therefore it's not a backup file.
+	}
+
+	sort.Sort(byFormatTime(logFiles))
+
+	return logFiles, nil
+}
+
+// timeFromName extracts the formatted time from the filename by stripping off
+// the filename's prefix and extension. This prevents someone's filename from
+// confusing time.Parse.
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
+	if !strings.HasPrefix(filename, prefix) {
+		return time.Time{}, errors.New("mismatched prefix")
+	}
+	if !strings.HasSuffix(filename, ext) {
+		return time.Time{}, errors.New("mismatched extension")
+	}
+	ts := filename[len(prefix) : len(filename)-len(ext)]
+	return time.Parse(backupTimeFormat, ts)
+}
+
+// max returns the maximum size in bytes of log files before rolling.
+func (l *Logger) max() int64 {
+	if l.MaxSize == 0 {
+		return int64(defaultMaxSize * megabyte)
+	}
+	return int64(l.MaxSize) * int64(megabyte)
+}
+
+// dir returns the directory for the current filename.
+func (l *Logger) dir() string {
+	return filepath.Dir(l.filename())
+}
+
+// prefixAndExt returns the filename part and extension part from the Logger's
+// filename.
+func (l *Logger) prefixAndExt() (prefix, ext string) {
+	filename := filepath.Base(l.filename())
+	ext = filepath.Ext(filename)
+	prefix = filename[:len(filename)-len(ext)] + "-"
+	return prefix, ext
+}
+
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+	f, err := os.Open(src)
+	if err != nil {
+		return fmt.Errorf("failed to open log file: %v", err)
+	}
+	defer f.Close()
+
+	fi, err := osStat(src)
+	if err != nil {
+		return fmt.Errorf("failed to stat log file: %v", err)
+	}
+
+	if err := chown(dst, fi); err != nil {
+		return fmt.Errorf("failed to chown compressed log file: %v", err)
+	}
+
+	// If this file already exists, we presume it was created by
+	// a previous attempt to compress the log file.
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + return fmt.Errorf("failed to open compressed log file: %v", err) + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + defer func() { + if err != nil { + os.Remove(dst) + err = fmt.Errorf("failed to compress log file: %v", err) + } + }() + + if _, err := io.Copy(gz, f); err != nil { + return err + } + if err := gz.Close(); err != nil { + return err + } + if err := gzf.Close(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + if err := os.Remove(src); err != nil { + return err + } + + return nil +} + +// logInfo is a convenience struct to return the filename and its embedded +// timestamp. +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +// byFormatTime sorts by newest time formatted in the name. +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/vendor/gopkg.in/warnings.v0/LICENSE b/vendor/gopkg.in/warnings.v0/LICENSE new file mode 100644 index 000000000..d65f7e9d8 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Péter Surányi. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/warnings.v0/README b/vendor/gopkg.in/warnings.v0/README new file mode 100644 index 000000000..974212ba1 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/README @@ -0,0 +1,77 @@ +Package warnings implements error handling with non-fatal errors (warnings). + +import path: "gopkg.in/warnings.v0" +package docs: https://godoc.org/gopkg.in/warnings.v0 +issues: https://github.com/go-warnings/warnings/issues +pull requests: https://github.com/go-warnings/warnings/pulls + +A recurring pattern in Go programming is the following: + + func myfunc(params) error { + if err := doSomething(...); err != nil { + return err + } + if err := doSomethingElse(...); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + return errors.New("my error") + } + ... + return nil + } + +This pattern allows interrupting the flow on any received error. 
But what if +there are errors that should be noted but still not fatal, for which the flow +should not be interrupted? Implementing such logic at each if statement would +make the code complex and the flow much harder to follow. + +Package warnings provides the Collector type and a clean and simple pattern +for achieving such logic. The Collector takes care of deciding when to break +the flow and when to continue, collecting any non-fatal errors (warnings) +along the way. The only requirement is that fatal and non-fatal errors can be +distinguished programmatically; that is a function such as + + IsFatal(error) bool + +must be implemented. The following is an example of what the above snippet +could look like using the warnings package: + + import "gopkg.in/warnings.v0" + + func isFatal(err error) bool { + _, ok := err.(WarningType) + return !ok + } + + func myfunc(params) error { + c := warnings.NewCollector(isFatal) + c.FatalWithWarnings = true + if err := c.Collect(doSomething()); err != nil { + return err + } + if err := c.Collect(doSomethingElse(...)); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + if err := c.Collect(errors.New("my error")); err != nil { + return err + } + } + ... + return c.Done() + } + +For an example of a non-trivial code base using this library, see +gopkg.in/gcfg.v1 + +Rules for using warnings + + - ensure that warnings are programmatically distinguishable from fatal + errors (i.e. implement an isFatal function and any necessary error types) + - ensure that there is a single Collector instance for a call of each + exported function + - ensure that all errors (fatal or warning) are fed through Collect + - ensure that every time an error is returned, it is one returned by a + Collector (from Collect or Done) + - ensure that Collect is never called after Done diff --git a/vendor/gopkg.in/warnings.v0/warnings.go b/vendor/gopkg.in/warnings.v0/warnings.go new file mode 100644 index 000000000..b849d1e3d --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/warnings.go @@ -0,0 +1,194 @@ +// Package warnings implements error handling with non-fatal errors (warnings). +// +// A recurring pattern in Go programming is the following: +// +// func myfunc(params) error { +// if err := doSomething(...); err != nil { +// return err +// } +// if err := doSomethingElse(...); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// return errors.New("my error") +// } +// ... +// return nil +// } +// +// This pattern allows interrupting the flow on any received error. But what if +// there are errors that should be noted but still not fatal, for which the flow +// should not be interrupted? Implementing such logic at each if statement would +// make the code complex and the flow much harder to follow. +// +// Package warnings provides the Collector type and a clean and simple pattern +// for achieving such logic. The Collector takes care of deciding when to break +// the flow and when to continue, collecting any non-fatal errors (warnings) +// along the way. The only requirement is that fatal and non-fatal errors can be +// distinguished programmatically; that is a function such as +// +// IsFatal(error) bool +// +// must be implemented. 
The following is an example of what the above snippet +// could look like using the warnings package: +// +// import "gopkg.in/warnings.v0" +// +// func isFatal(err error) bool { +// _, ok := err.(WarningType) +// return !ok +// } +// +// func myfunc(params) error { +// c := warnings.NewCollector(isFatal) +// c.FatalWithWarnings = true +// if err := c.Collect(doSomething()); err != nil { +// return err +// } +// if err := c.Collect(doSomethingElse(...)); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// if err := c.Collect(errors.New("my error")); err != nil { +// return err +// } +// } +// ... +// return c.Done() +// } +// +// For an example of a non-trivial code base using this library, see +// gopkg.in/gcfg.v1 +// +// Rules for using warnings +// +// - ensure that warnings are programmatically distinguishable from fatal +// errors (i.e. implement an isFatal function and any necessary error types) +// - ensure that there is a single Collector instance for a call of each +// exported function +// - ensure that all errors (fatal or warning) are fed through Collect +// - ensure that every time an error is returned, it is one returned by a +// Collector (from Collect or Done) +// - ensure that Collect is never called after Done +// +// TODO +// +// - optionally limit the number of warnings (e.g. stop after 20 warnings) (?) +// - consider interaction with contexts +// - go vet-style invocations verifier +// - semi-automatic code converter +// +package warnings // import "gopkg.in/warnings.v0" + +import ( + "bytes" + "fmt" +) + +// List holds a collection of warnings and optionally one fatal error. +type List struct { + Warnings []error + Fatal error +} + +// Error implements the error interface. +func (l List) Error() string { + b := bytes.NewBuffer(nil) + if l.Fatal != nil { + fmt.Fprintln(b, "fatal:") + fmt.Fprintln(b, l.Fatal) + } + switch len(l.Warnings) { + case 0: + // nop + case 1: + fmt.Fprintln(b, "warning:") + default: + fmt.Fprintln(b, "warnings:") + } + for _, err := range l.Warnings { + fmt.Fprintln(b, err) + } + return b.String() +} + +// A Collector collects errors up to the first fatal error. +type Collector struct { + // IsFatal distinguishes between warnings and fatal errors. + IsFatal func(error) bool + // FatalWithWarnings set to true means that a fatal error is returned as + // a List together with all warnings so far. The default behavior is to + // only return the fatal error and discard any warnings that have been + // collected. + FatalWithWarnings bool + + l List + done bool +} + +// NewCollector returns a new Collector; it uses isFatal to distinguish between +// warnings and fatal errors. +func NewCollector(isFatal func(error) bool) *Collector { + return &Collector{IsFatal: isFatal} +} + +// Collect collects a single error (warning or fatal). It returns nil if +// collection can continue (only warnings so far), or otherwise the errors +// collected. Collect mustn't be called after the first fatal error or after +// Done has been called. +func (c *Collector) Collect(err error) error { + if c.done { + panic("warnings.Collector already done") + } + if err == nil { + return nil + } + if c.IsFatal(err) { + c.done = true + c.l.Fatal = err + } else { + c.l.Warnings = append(c.l.Warnings, err) + } + if c.l.Fatal != nil { + return c.erorr() + } + return nil +} + +// Done ends collection and returns the collected error(s). 
+func (c *Collector) Done() error { + c.done = true + return c.erorr() +} + +func (c *Collector) erorr() error { + if !c.FatalWithWarnings && c.l.Fatal != nil { + return c.l.Fatal + } + if c.l.Fatal == nil && len(c.l.Warnings) == 0 { + return nil + } + // Note that a single warning is also returned as a List. This is to make it + // easier to determine fatal-ness of the returned error. + return c.l +} + +// FatalOnly returns the fatal error, if any, **in an error returned by a +// Collector**. It returns nil if and only if err is nil or err is a List +// with err.Fatal == nil. +func FatalOnly(err error) error { + l, ok := err.(List) + if !ok { + return err + } + return l.Fatal +} + +// WarningsOnly returns the warnings **in an error returned by a Collector**. +func WarningsOnly(err error) []error { + l, ok := err.(List) + if !ok { + return nil + } + return l.Warnings +} diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go new file mode 100644 index 000000000..ff3537e98 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatainformer/informer.go @@ -0,0 +1,215 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatainformer + +import ( + "context" + "sync" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/informers" + "k8s.io/client-go/metadata" + "k8s.io/client-go/metadata/metadatalister" + "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for metadataSharedInformerFactory. +type SharedInformerOption func(*metadataSharedInformerFactory) *metadataSharedInformerFactory + +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *metadataSharedInformerFactory) *metadataSharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of metadataSharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client metadata.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, metav1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of metadataSharedInformerFactory. +// Listers obtained via this factory will be subject to the same filters as specified here. 
+func NewFilteredSharedInformerFactory(client metadata.Interface, defaultResync time.Duration, namespace string, tweakListOptions TweakListOptionsFunc) SharedInformerFactory {
+	return &metadataSharedInformerFactory{
+		client:           client,
+		defaultResync:    defaultResync,
+		namespace:        namespace,
+		informers:        map[schema.GroupVersionResource]informers.GenericInformer{},
+		startedInformers: make(map[schema.GroupVersionResource]bool),
+		tweakListOptions: tweakListOptions,
+	}
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of metadataSharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client metadata.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+	factory := &metadataSharedInformerFactory{
+		client:           client,
+		namespace:        v1.NamespaceAll,
+		defaultResync:    defaultResync,
+		informers:        map[schema.GroupVersionResource]informers.GenericInformer{},
+		startedInformers: make(map[schema.GroupVersionResource]bool),
+	}
+
+	// Apply all options
+	for _, opt := range options {
+		factory = opt(factory)
+	}
+
+	return factory
+}
+
+type metadataSharedInformerFactory struct {
+	client        metadata.Interface
+	defaultResync time.Duration
+	namespace     string
+	transform     cache.TransformFunc
+
+	lock      sync.Mutex
+	informers map[schema.GroupVersionResource]informers.GenericInformer
+	// startedInformers is used for tracking which informers have been started.
+	// This allows Start() to be called multiple times safely.
+	startedInformers map[schema.GroupVersionResource]bool
+	tweakListOptions TweakListOptionsFunc
+	// wg tracks how many goroutines were started.
+	wg sync.WaitGroup
+	// shuttingDown is true when Shutdown has been called. It may still be running
+	// because it needs to wait for goroutines.
+	shuttingDown bool
+}
+
+var _ SharedInformerFactory = &metadataSharedInformerFactory{}
+
+func (f *metadataSharedInformerFactory) ForResource(gvr schema.GroupVersionResource) informers.GenericInformer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	key := gvr
+	informer, exists := f.informers[key]
+	if exists {
+		return informer
+	}
+
+	informer = NewFilteredMetadataInformer(f.client, gvr, f.namespace, f.defaultResync, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+	informer.Informer().SetTransform(f.transform)
+	f.informers[key] = informer
+
+	return informer
+}
+
+// Start initializes all requested informers.
+func (f *metadataSharedInformerFactory) Start(stopCh <-chan struct{}) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.shuttingDown {
+		return
+	}
+
+	for informerType, informer := range f.informers {
+		if !f.startedInformers[informerType] {
+			f.wg.Add(1)
+			// We need a new variable in each loop iteration,
+			// otherwise the goroutine would use the loop variable
+			// and that keeps changing.
+			informer := informer.Informer()
+			go func() {
+				defer f.wg.Done()
+				informer.Run(stopCh)
+			}()
+			f.startedInformers[informerType] = true
+		}
+	}
+}
+
+// WaitForCacheSync waits for all started informers' caches to be synced.
+func (f *metadataSharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool {
+	informers := func() map[schema.GroupVersionResource]cache.SharedIndexInformer {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+
+		informers := map[schema.GroupVersionResource]cache.SharedIndexInformer{}
+		for informerType, informer := range f.informers {
+			if f.startedInformers[informerType] {
+				informers[informerType] = informer.Informer()
+			}
+		}
+		return informers
+	}()
+
+	res := map[schema.GroupVersionResource]bool{}
+	for informType, informer := range informers {
+		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+	}
+	return res
+}
+
+func (f *metadataSharedInformerFactory) Shutdown() {
+	// Will return immediately if there is nothing to wait for.
+	defer f.wg.Wait()
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.shuttingDown = true
+}
+
+// NewFilteredMetadataInformer constructs a new informer for a metadata type.
+func NewFilteredMetadataInformer(client metadata.Interface, gvr schema.GroupVersionResource, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions TweakListOptionsFunc) informers.GenericInformer {
+	return &metadataInformer{
+		gvr: gvr,
+		informer: cache.NewSharedIndexInformer(
+			&cache.ListWatch{
+				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+					if tweakListOptions != nil {
+						tweakListOptions(&options)
+					}
+					return client.Resource(gvr).Namespace(namespace).List(context.TODO(), options)
+				},
+				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+					if tweakListOptions != nil {
+						tweakListOptions(&options)
+					}
+					return client.Resource(gvr).Namespace(namespace).Watch(context.TODO(), options)
+				},
+			},
+			&metav1.PartialObjectMetadata{},
+			resyncPeriod,
+			indexers,
+		),
+	}
+}
+
+type metadataInformer struct {
+	informer cache.SharedIndexInformer
+	gvr      schema.GroupVersionResource
+}
+
+var _ informers.GenericInformer = &metadataInformer{}
+
+func (d *metadataInformer) Informer() cache.SharedIndexInformer {
+	return d.informer
+}
+
+func (d *metadataInformer) Lister() cache.GenericLister {
+	return metadatalister.NewRuntimeObjectShim(metadatalister.New(d.informer.GetIndexer(), d.gvr))
+}
diff --git a/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go
new file mode 100644
index 000000000..9f61706cd
--- /dev/null
+++ b/vendor/k8s.io/client-go/metadata/metadatainformer/interface.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metadatainformer
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/informers"
+)
+
+// SharedInformerFactory provides access to a shared informer and lister for the
+// metadata client.
+type SharedInformerFactory interface {
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
+ Start(stopCh <-chan struct{}) + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(gvr schema.GroupVersionResource) informers.GenericInformer + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[schema.GroupVersionResource]bool + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() +} + +// TweakListOptionsFunc defines the signature of a helper function +// that wants to provide more listing options to API +type TweakListOptionsFunc func(*metav1.ListOptions) diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/interface.go b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go new file mode 100644 index 000000000..bb3548589 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/interface.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// Lister helps list resources. +type Lister interface { + // List lists all resources in the indexer. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer with the given name + Get(name string) (*metav1.PartialObjectMetadata, error) + // Namespace returns an object that can list and get resources in a given namespace. + Namespace(namespace string) NamespaceLister +} + +// NamespaceLister helps list and get resources. +type NamespaceLister interface { + // List lists all resources in the indexer for a given namespace. + List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) + // Get retrieves a resource from the indexer for a given namespace and name. + Get(name string) (*metav1.PartialObjectMetadata, error) +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/lister.go b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go new file mode 100644 index 000000000..faeccc0fc --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/lister.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +var _ Lister = &metadataLister{} +var _ NamespaceLister = &metadataNamespaceLister{} + +// metadataLister implements the Lister interface. +type metadataLister struct { + indexer cache.Indexer + gvr schema.GroupVersionResource +} + +// New returns a new Lister. +func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { + return &metadataLister{indexer: indexer, gvr: gvr} +} + +// List lists all resources in the indexer. +func (l *metadataLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAll(l.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer with the given name +func (l *metadataLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} + +// Namespace returns an object that can list and get resources from a given namespace. +func (l *metadataLister) Namespace(namespace string) NamespaceLister { + return &metadataNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} +} + +// metadataNamespaceLister implements the NamespaceLister interface. +type metadataNamespaceLister struct { + indexer cache.Indexer + namespace string + gvr schema.GroupVersionResource +} + +// List lists all resources in the indexer for a given namespace. +func (l *metadataNamespaceLister) List(selector labels.Selector) (ret []*metav1.PartialObjectMetadata, err error) { + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*metav1.PartialObjectMetadata)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer for a given namespace and name. +func (l *metadataNamespaceLister) Get(name string) (*metav1.PartialObjectMetadata, error) { + obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*metav1.PartialObjectMetadata), nil +} diff --git a/vendor/k8s.io/client-go/metadata/metadatalister/shim.go b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go new file mode 100644 index 000000000..f31c60725 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadatalister/shim.go @@ -0,0 +1,87 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadatalister + +import ( + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +var _ cache.GenericLister = &metadataListerShim{} +var _ cache.GenericNamespaceLister = &metadataNamespaceListerShim{} + +// metadataListerShim implements the cache.GenericLister interface. +type metadataListerShim struct { + lister Lister +} + +// NewRuntimeObjectShim returns a new shim for Lister. +// It wraps Lister so that it implements cache.GenericLister interface +func NewRuntimeObjectShim(lister Lister) cache.GenericLister { + return &metadataListerShim{lister: lister} +} + +// List will return all objects across namespaces +func (s *metadataListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := s.lister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve assuming that name==key +func (s *metadataListerShim) Get(name string) (runtime.Object, error) { + return s.lister.Get(name) +} + +func (s *metadataListerShim) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &metadataNamespaceListerShim{ + namespaceLister: s.lister.Namespace(namespace), + } +} + +// metadataNamespaceListerShim implements the NamespaceLister interface. +// It wraps NamespaceLister so that it implements cache.GenericNamespaceLister interface +type metadataNamespaceListerShim struct { + namespaceLister NamespaceLister +} + +// List will return all objects in this namespace +func (ns *metadataNamespaceListerShim) List(selector labels.Selector) (ret []runtime.Object, err error) { + objs, err := ns.namespaceLister.List(selector) + if err != nil { + return nil, err + } + + ret = make([]runtime.Object, len(objs)) + for index, obj := range objs { + ret[index] = obj + } + return ret, err +} + +// Get will attempt to retrieve by namespace and name +func (ns *metadataNamespaceListerShim) Get(name string) (runtime.Object, error) { + return ns.namespaceLister.Get(name) +} diff --git a/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go new file mode 100644 index 000000000..40ec27d87 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go @@ -0,0 +1,303 @@ +/* +Copyright 2013 Google Inc. All Rights Reserved. +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package verbosity
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// New returns a struct that implements -v and -vmodule support. Changing and
+// checking these settings is thread-safe, with all concurrency issues handled
+// internally.
+func New() *VState {
+	vs := new(VState)
+
+	// The two fields must have a pointer to the overall struct for their
+	// implementation of Set.
+	vs.vmodule.vs = vs
+	vs.verbosity.vs = vs
+
+	return vs
+}
+
+// Value is an extension that makes it possible to use the values in pflag.
+type Value interface {
+	flag.Value
+	Type() string
+}
+
+func (vs *VState) V() Value {
+	return &vs.verbosity
+}
+
+func (vs *VState) VModule() Value {
+	return &vs.vmodule
+}
+
+// VState contains settings and state. Some of its fields can be accessed
+// through atomic read/writes; in other cases a mutex must be held.
+type VState struct {
+	mu sync.Mutex
+
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity levelSpec  // V logging level, the value of the -v flag.
+
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using atomic.LoadInt32, but is only modified under mu.
+	filterLength int32
+}
+
+// Level must be an int32 to support atomic read/writes.
+type Level int32
+
+type levelSpec struct {
+	vs *VState
+	l  Level
+}
+
+// get returns the value of the level.
+func (l *levelSpec) get() Level {
+	return Level(atomic.LoadInt32((*int32)(&l.l)))
+}
+
+// set sets the value of the level.
+func (l *levelSpec) set(val Level) {
+	atomic.StoreInt32((*int32)(&l.l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *levelSpec) String() string {
+	return strconv.FormatInt(int64(l.l), 10)
+}
+
+// Get is part of the flag.Getter interface. It returns the
+// verbosity level as int32.
+func (l *levelSpec) Get() interface{} {
+	return int32(l.l)
+}
+
+// Type is part of pflag.Value.
+func (l *levelSpec) Type() string {
+	return "Level"
+}
+
+// Set is part of the flag.Value interface.
+func (l *levelSpec) Set(value string) error {
+	v, err := strconv.ParseInt(value, 10, 32)
+	if err != nil {
+		return err
+	}
+	l.vs.mu.Lock()
+	defer l.vs.mu.Unlock()
+	l.vs.set(Level(v), l.vs.vmodule.filter, false)
+	return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+	vs     *VState
+	filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+	pattern string
+	literal bool // The pattern is a literal string
+	level   Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	// Empty instances don't have and don't need a lock (can
+	// happen when flag uses introspection).
+	if m.vs != nil {
+		m.vs.mu.Lock()
+		defer m.vs.mu.Unlock()
+	}
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil
+// for this flag type since the struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+	return nil
+}
+
+// Type is part of pflag.Value.
+func (m *moduleSpec) Type() string {
+	return "pattern=N,..."
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Set sets the vmodule value.
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.ParseInt(patLev[1], 10, 32)
+		if err != nil {
+			return errVmoduleSyntax
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	m.vs.mu.Lock()
+	defer m.vs.mu.Unlock()
+	m.vs.set(m.vs.verbosity.l, filter, true)
+	return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// set sets a consistent state for V logging.
+// The mutex must be held.
+func (vs *VState) set(l Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	vs.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&vs.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		vs.vmodule.filter = filter
+		vs.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&vs.filterLength, int32(len(filter)))
+	vs.verbosity.set(l)
+}
+
+// Enabled checks whether logging is enabled at the given level. This must be
+// called with depth=0 when the caller of enabled will do the logging and
+// higher values when more stack levels need to be skipped.
+//
+// The mutex will be locked only if needed.
+func (vs *VState) Enabled(level Level, depth int) bool {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if vs.verbosity.get() >= level {
+		return true
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&vs.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it.
This is fairly expensive, + // but if V logging is enabled we're slow anyway. + vs.mu.Lock() + defer vs.mu.Unlock() + if runtime.Callers(depth+2, vs.pcs[:]) == 0 { + return false + } + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := vs.pcs[0] - 1 + v, ok := vs.vmap[pc] + if !ok { + v = vs.setV(pc) + } + return v >= level + } + return false +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// Mutex is held. +func (vs *VState) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + file = strings.TrimSuffix(file, ".go") + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range vs.vmodule.filter { + if filter.match(file) { + vs.vmap[pc] = filter.level + return filter.level + } + } + vs.vmap[pc] = 0 + return 0 +} diff --git a/vendor/k8s.io/klog/v2/textlogger/options.go b/vendor/k8s.io/klog/v2/textlogger/options.go new file mode 100644 index 000000000..b1c4eefb3 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/options.go @@ -0,0 +1,154 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "flag" + "io" + "os" + "strconv" + "time" + + "k8s.io/klog/v2/internal/verbosity" +) + +// Config influences logging in a text logger. To make this configurable via +// command line flags, instantiate this once per program and use AddFlags to +// bind command line flags to the instance before passing it to NewTestContext. +// +// Must be constructed with NewConfig. +type Config struct { + vstate *verbosity.VState + co configOptions +} + +// Verbosity returns a value instance that can be used to query (via String) or +// modify (via Set) the verbosity threshold. This is thread-safe and can be +// done at runtime. +func (c *Config) Verbosity() flag.Value { + return c.vstate.V() +} + +// VModule returns a value instance that can be used to query (via String) or +// modify (via Set) the vmodule settings. This is thread-safe and can be done +// at runtime. +func (c *Config) VModule() flag.Value { + return c.vstate.VModule() +} + +// ConfigOption implements functional parameters for NewConfig. +type ConfigOption func(co *configOptions) + +type configOptions struct { + verbosityFlagName string + vmoduleFlagName string + verbosityDefault int + fixedTime *time.Time + unwind func(int) (string, int) + output io.Writer +} + +// VerbosityFlagName overrides the default -v for the verbosity level. 
+func VerbosityFlagName(name string) ConfigOption {
+	return func(co *configOptions) {
+		co.verbosityFlagName = name
+	}
+}
+
+// VModuleFlagName overrides the default -vmodule for the per-module
+// verbosity levels.
+func VModuleFlagName(name string) ConfigOption {
+	return func(co *configOptions) {
+		co.vmoduleFlagName = name
+	}
+}
+
+// Verbosity overrides the default verbosity level of 0.
+// See https://github.com/kubernetes/community/blob/9406b4352fe2d5810cb21cc3cb059ce5886de157/contributors/devel/sig-instrumentation/logging.md#logging-conventions
+// for log level conventions in Kubernetes.
+func Verbosity(level int) ConfigOption {
+	return func(co *configOptions) {
+		co.verbosityDefault = level
+	}
+}
+
+// Output overrides stderr as the output stream.
+func Output(output io.Writer) ConfigOption {
+	return func(co *configOptions) {
+		co.output = output
+	}
+}
+
+// FixedTime overrides the actual time with a fixed time. Useful only for testing.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FixedTime(ts time.Time) ConfigOption {
+	return func(co *configOptions) {
+		co.fixedTime = &ts
+	}
+}
+
+// Backtrace overrides the default mechanism for determining the call site.
+// The callback is invoked with the number of function calls between itself
+// and the call site. It must return the file name and line number. An empty
+// file name indicates that the information is unknown.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func Backtrace(unwind func(skip int) (filename string, line int)) ConfigOption {
+	return func(co *configOptions) {
+		co.unwind = unwind
+	}
+}
+
+// NewConfig returns a configuration with recommended defaults and optional
+// modifications. Command line flags are not bound to any FlagSet yet.
+func NewConfig(opts ...ConfigOption) *Config {
+	c := &Config{
+		vstate: verbosity.New(),
+		co: configOptions{
+			verbosityFlagName: "v",
+			vmoduleFlagName:   "vmodule",
+			verbosityDefault:  0,
+			unwind:            runtimeBacktrace,
+			output:            os.Stderr,
+		},
+	}
+	for _, opt := range opts {
+		opt(&c.co)
+	}
+
+	// Cannot fail for this input.
+	_ = c.Verbosity().Set(strconv.FormatInt(int64(c.co.verbosityDefault), 10))
+	return c
+}
+
+// AddFlags registers the command line flags that control the configuration.
+//
+// The default flag names are the same as in klog, so unless those defaults
+// are changed, either klog.InitFlags or Config.AddFlags can be used for the
+// same flag set, but not both.
+func (c *Config) AddFlags(fs *flag.FlagSet) {
+	fs.Var(c.Verbosity(), c.co.verbosityFlagName, "number for the log level verbosity of the testing logger")
+	fs.Var(c.VModule(), c.co.vmoduleFlagName, "comma-separated list of pattern=N log level settings for files matching the patterns")
+}
diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger.go b/vendor/k8s.io/klog/v2/textlogger/textlogger.go
new file mode 100644
index 000000000..0b55a2994
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/textlogger/textlogger.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+Copyright 2020 Intel Corporation.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package textlogger contains an implementation of the logr interface which is +// producing the exact same output as klog. It does not route output through +// klog (i.e. ignores [k8s.io/klog/v2.InitFlags]). Instead, all settings must be +// configured through its own [NewConfig] and [Config.AddFlags]. +package textlogger + +import ( + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/verbosity" +) + +var ( + // TimeNow is used to retrieve the current time. May be changed for testing. + TimeNow = time.Now +) + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + +// NewLogger constructs a new logger. +// +// Verbosity can be modified at any time through the Config.V and +// Config.VModule API. +func NewLogger(c *Config) logr.Logger { + return logr.New(&tlogger{ + values: nil, + config: c, + }) +} + +type tlogger struct { + callDepth int + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string + config *Config +} + +func (l *tlogger) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *tlogger) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *tlogger) Enabled(level int) bool { + return l.config.vstate.Enabled(verbosity.Level(level), 1+l.callDepth) +} + +func (l *tlogger) Info(_ int, msg string, kvList ...interface{}) { + l.print(nil, severity.InfoLog, msg, kvList) +} + +func (l *tlogger) Error(err error, msg string, kvList ...interface{}) { + l.print(err, severity.ErrorLog, msg, kvList) +} + +func (l *tlogger) print(err error, s severity.Severity, msg string, kvList []interface{}) { + // Determine caller. + // +1 for this frame, +1 for Info/Error. + skip := l.callDepth + 2 + file, line := l.config.co.unwind(skip) + if file == "" { + file = "???" + line = 1 + } else if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + l.printWithInfos(file, line, time.Now(), err, s, msg, kvList) +} + +func runtimeBacktrace(skip int) (string, int) { + _, file, line, ok := runtime.Caller(skip + 1) + if !ok { + return "", 0 + } + return file, line +} + +func (l *tlogger) printWithInfos(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + defer buffer.PutBuffer(b) + + // Format header. + if l.config.co.fixedTime != nil { + now = *l.config.co.fixedTime + } + b.FormatHeader(s, file, line, now) + + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. 
+	b.WriteString(strconv.Quote(msg))
+	if err != nil {
+		serialize.KVFormat(&b.Buffer, "err", err)
+	}
+	serialize.MergeAndFormatKVs(&b.Buffer, l.values, kvList)
+	if b.Len() == 0 || b.Bytes()[b.Len()-1] != '\n' {
+		b.WriteByte('\n')
+	}
+	_, _ = l.config.co.output.Write(b.Bytes())
+}
+
+func (l *tlogger) WriteKlogBuffer(data []byte) {
+	_, _ = l.config.co.output.Write(data)
+}
+
+// WithName returns a new logr.Logger with the specified name appended. klogr
+// uses '/' characters to separate name elements. Callers should not pass '/'
+// in the provided name string, but this library does not actually enforce that.
+func (l *tlogger) WithName(name string) logr.LogSink {
+	clone := *l
+	if l.hasPrefix {
+		// Copy slice and modify value. No length checks and type
+		// assertions are needed because hasPrefix is only true if the
+		// first two elements exist and are key/value strings.
+		v := make([]interface{}, 0, len(l.values))
+		v = append(v, l.values...)
+		prefix, _ := v[1].(string)
+		v[1] = prefix + "." + name
+		clone.values = v
+	} else {
+		// Prepend new key/value pair.
+		v := make([]interface{}, 0, 2+len(l.values))
+		v = append(v, nameKey, name)
+		v = append(v, l.values...)
+		clone.values = v
+		clone.hasPrefix = true
+	}
+	return &clone
+}
+
+func (l *tlogger) WithValues(kvList ...interface{}) logr.LogSink {
+	clone := *l
+	clone.values = serialize.WithValues(l.values, kvList)
+	return &clone
+}
+
+// KlogBufferWriter is implemented by the textlogger LogSink.
+type KlogBufferWriter interface {
+	// WriteKlogBuffer takes a pre-formatted buffer prepared by klog and
+	// writes it unchanged to the output stream. Can be used with
+	// klog.WriteKlogBuffer when setting a logger through
+	// klog.SetLoggerWithOptions.
+	WriteKlogBuffer([]byte)
+}
+
+var _ logr.LogSink = &tlogger{}
+var _ logr.CallDepthLogSink = &tlogger{}
+var _ KlogBufferWriter = &tlogger{}
diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go
new file mode 100644
index 000000000..c888ef8a6
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go
@@ -0,0 +1,52 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package textlogger
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/go-logr/logr"
+
+	"k8s.io/klog/v2/internal/serialize"
+	"k8s.io/klog/v2/internal/sloghandler"
+)
+
+func (l *tlogger) Handle(ctx context.Context, record slog.Record) error {
+	return sloghandler.Handle(ctx, record, l.groups, l.printWithInfos)
+}
+
+func (l *tlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+	clone := *l
+	clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs))
+	return &clone
+}
+
+func (l *tlogger) WithGroup(name string) logr.SlogSink {
+	clone := *l
+	if clone.groups != "" {
+		clone.groups += "." + name
+	} else {
+		clone.groups = name
+	}
+	return &clone
+}
+
+var _ logr.SlogSink = &tlogger{}
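Since `tlogger` now implements `logr.SlogSink` (Go 1.21 and later), it can back a standard `log/slog` logger. A hedged sketch, assuming the `logr.ToSlogHandler` bridge available in the vendored go-logr v1.4.x:

```go
//go:build go1.21

package main

import (
	"log/slog"

	"github.com/go-logr/logr"

	"k8s.io/klog/v2/textlogger"
)

func main() {
	logger := textlogger.NewLogger(textlogger.NewConfig())

	// Because the sink implements logr.SlogSink, the bridge keeps
	// call-site information and WithGroup/WithAttrs semantics intact.
	sl := slog.New(logr.ToSlogHandler(logger))
	sl.Info("hello from slog", "key", "value")
}
```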
diff --git a/vendor/k8s.io/utils/exec/README.md b/vendor/k8s.io/utils/exec/README.md
new file mode 100644
index 000000000..7944e8dd3
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/README.md
@@ -0,0 +1,5 @@
+# Exec
+
+This package provides an interface for `os/exec`. It makes it easier to mock
+and replace in tests, especially with the [FakeExec](testing/fake_exec.go)
+struct.
diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go
new file mode 100644
index 000000000..cbb44bdb5
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package exec provides an injectable interface and implementations for running commands.
+package exec // import "k8s.io/utils/exec"
diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go
new file mode 100644
index 000000000..d9c91e3ca
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/exec.go
@@ -0,0 +1,256 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"context"
+	"io"
+	"io/fs"
+	osexec "os/exec"
+	"syscall"
+	"time"
+)
+
+// ErrExecutableNotFound is returned if the executable is not found.
+var ErrExecutableNotFound = osexec.ErrNotFound
+
+// Interface is an interface that presents a subset of the os/exec API. Use this
+// when you want to inject fakeable/mockable exec behavior.
+type Interface interface {
+	// Command returns a Cmd instance which can be used to run a single command.
+	// This follows the pattern of package os/exec.
+	Command(cmd string, args ...string) Cmd
+
+	// CommandContext returns a Cmd instance which can be used to run a single command.
+	//
+	// The provided context is used to kill the process if the context becomes done
+	// before the command completes on its own. For example, a timeout can be set in
+	// the context.
+	CommandContext(ctx context.Context, cmd string, args ...string) Cmd
+
+	// LookPath wraps os/exec.LookPath.
+	LookPath(file string) (string, error)
+}
+
+// Cmd is an interface that presents an API that is very similar to Cmd from os/exec.
+// As more functionality is needed, this can grow. Since Cmd is a struct, we will have
+// to replace fields with get/set method pairs.
+type Cmd interface {
+	// Run runs the command to completion.
+	Run() error
+	// CombinedOutput runs the command and returns its combined standard output
+	// and standard error. This follows the pattern of package os/exec.
+	CombinedOutput() ([]byte, error)
+	// Output runs the command and returns standard output, but not standard error.
+	Output() ([]byte, error)
+	SetDir(dir string)
+	SetStdin(in io.Reader)
+	SetStdout(out io.Writer)
+	SetStderr(out io.Writer)
+	SetEnv(env []string)
+
+	// StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as
+	// Readers.
+	StdoutPipe() (io.ReadCloser, error)
+	StderrPipe() (io.ReadCloser, error)
+
+	// Start and Wait are for running a process non-blocking.
+	Start() error
+	Wait() error
+
+	// Stop stops the command by sending SIGTERM. It is not guaranteed the
+	// process will stop before this function returns. If the process is not
+	// responding, an internal timer function will send a SIGKILL to force
+	// terminate after 10 seconds.
+	Stop()
+}
+
+// ExitError is an interface that presents an API similar to os.ProcessState, which is
+// what ExitError from os/exec is. This is designed to make testing a bit easier and
+// probably loses some of the cross-platform properties of the underlying library.
+type ExitError interface {
+	String() string
+	Error() string
+	Exited() bool
+	ExitStatus() int
+}
+
+// Implements Interface in terms of really exec()ing.
+type executor struct{}
+
+// New returns a new Interface which will use os/exec to run commands.
+func New() Interface {
+	return &executor{}
+}
+
+// Command is part of the Interface interface.
+func (executor *executor) Command(cmd string, args ...string) Cmd {
+	return (*cmdWrapper)(maskErrDotCmd(osexec.Command(cmd, args...)))
+}
+
+// CommandContext is part of the Interface interface.
+func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd {
+	return (*cmdWrapper)(maskErrDotCmd(osexec.CommandContext(ctx, cmd, args...)))
+}
+
+// LookPath is part of the Interface interface.
+func (executor *executor) LookPath(file string) (string, error) {
+	path, err := osexec.LookPath(file)
+	return path, handleError(maskErrDot(err))
+}
+
+// Wraps exec.Cmd so we can capture errors.
+type cmdWrapper osexec.Cmd
+
+var _ Cmd = &cmdWrapper{}
+
+func (cmd *cmdWrapper) SetDir(dir string) {
+	cmd.Dir = dir
+}
+
+func (cmd *cmdWrapper) SetStdin(in io.Reader) {
+	cmd.Stdin = in
+}
+
+func (cmd *cmdWrapper) SetStdout(out io.Writer) {
+	cmd.Stdout = out
+}
+
+func (cmd *cmdWrapper) SetStderr(out io.Writer) {
+	cmd.Stderr = out
+}
+
+func (cmd *cmdWrapper) SetEnv(env []string) {
+	cmd.Env = env
+}
+
+func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) {
+	r, err := (*osexec.Cmd)(cmd).StdoutPipe()
+	return r, handleError(err)
+}
+
+func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) {
+	r, err := (*osexec.Cmd)(cmd).StderrPipe()
+	return r, handleError(err)
+}
+
+func (cmd *cmdWrapper) Start() error {
+	err := (*osexec.Cmd)(cmd).Start()
+	return handleError(err)
+}
+
+func (cmd *cmdWrapper) Wait() error {
+	err := (*osexec.Cmd)(cmd).Wait()
+	return handleError(err)
+}
+
+// Run is part of the Cmd interface.
+func (cmd *cmdWrapper) Run() error {
+	err := (*osexec.Cmd)(cmd).Run()
+	return handleError(err)
+}
+
+// CombinedOutput is part of the Cmd interface.
+func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) {
+	out, err := (*osexec.Cmd)(cmd).CombinedOutput()
+	return out, handleError(err)
+}
+
+func (cmd *cmdWrapper) Output() ([]byte, error) {
+	out, err := (*osexec.Cmd)(cmd).Output()
+	return out, handleError(err)
+}
+
+// Stop is part of the Cmd interface.
+func (cmd *cmdWrapper) Stop() {
+	c := (*osexec.Cmd)(cmd)
+
+	if c.Process == nil {
+		return
+	}
+
+	c.Process.Signal(syscall.SIGTERM)
+
+	time.AfterFunc(10*time.Second, func() {
+		if !c.ProcessState.Exited() {
+			c.Process.Signal(syscall.SIGKILL)
+		}
+	})
+}
+
+func handleError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	switch e := err.(type) {
+	case *osexec.ExitError:
+		return &ExitErrorWrapper{e}
+	case *fs.PathError:
+		return ErrExecutableNotFound
+	case *osexec.Error:
+		if e.Err == osexec.ErrNotFound {
+			return ErrExecutableNotFound
+		}
+	}
+
+	return err
+}
+
+// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError.
+// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited().
+type ExitErrorWrapper struct {
+	*osexec.ExitError
+}
+
+var _ ExitError = &ExitErrorWrapper{}
+
+// ExitStatus is part of the ExitError interface.
+func (eew ExitErrorWrapper) ExitStatus() int {
+	ws, ok := eew.Sys().(syscall.WaitStatus)
+	if !ok {
+		panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper")
+	}
+	return ws.ExitStatus()
+}
+
+// CodeExitError is an implementation of ExitError consisting of an error object
+// and an exit code (the upper bits of the os/exec ExitStatus).
+type CodeExitError struct {
+	Err  error
+	Code int
+}
+
+var _ ExitError = CodeExitError{}
+
+func (e CodeExitError) Error() string {
+	return e.Err.Error()
+}
+
+func (e CodeExitError) String() string {
+	return e.Err.Error()
+}
+
+// Exited reports whether the process has finished.
+func (e CodeExitError) Exited() bool {
+	return true
+}
+
+// ExitStatus returns the stored exit code.
+func (e CodeExitError) ExitStatus() int {
+	return e.Code
+}
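A usage sketch for the interface vendored above; the commands are illustrative, and in tests the same call sites can be handed the package's fake implementations instead of `exec.New()`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/utils/exec"
)

func main() {
	execer := exec.New()

	// CombinedOutput surfaces failures as exec.ExitError.
	out, err := execer.Command("echo", "hello").CombinedOutput()
	if exitErr, ok := err.(exec.ExitError); ok {
		fmt.Println("exit status:", exitErr.ExitStatus())
	}
	fmt.Printf("%s", out)

	// CommandContext kills the process once the deadline passes.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := execer.CommandContext(ctx, "sleep", "5").Run(); err != nil {
		fmt.Println("run:", err)
	}
}
```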
diff --git a/vendor/k8s.io/utils/exec/fixup_go118.go b/vendor/k8s.io/utils/exec/fixup_go118.go
new file mode 100644
index 000000000..acf45f1cd
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/fixup_go118.go
@@ -0,0 +1,32 @@
+//go:build !go1.19
+// +build !go1.19
+
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	osexec "os/exec"
+)
+
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd {
+	return cmd
+}
+
+func maskErrDot(err error) error {
+	return err
+}
diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go
new file mode 100644
index 000000000..55874c929
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/fixup_go119.go
@@ -0,0 +1,40 @@
+//go:build go1.19
+// +build go1.19
+
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"errors"
+	osexec "os/exec"
+)
+
+// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19;
+// specifically, it sets the Err field to nil (LookPath returns a new error when
+// the file is resolved relative to the current directory).
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd {
+	cmd.Err = maskErrDot(cmd.Err)
+	return cmd
+}
+
+func maskErrDot(err error) error {
+	if err != nil && errors.Is(err, osexec.ErrDot) {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/lukechampine.com/uint128/LICENSE b/vendor/lukechampine.com/uint128/LICENSE
new file mode 100644
index 000000000..a14c6cf2a
--- /dev/null
+++ b/vendor/lukechampine.com/uint128/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2019 Luke Champine
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/lukechampine.com/uint128/README.md b/vendor/lukechampine.com/uint128/README.md
new file mode 100644
index 000000000..1ea5d7df1
--- /dev/null
+++ b/vendor/lukechampine.com/uint128/README.md
@@ -0,0 +1,46 @@
+uint128
+-------
+
+[![GoDoc](https://godoc.org/github.com/lukechampine/uint128?status.svg)](https://godoc.org/github.com/lukechampine/uint128)
+[![Go Report Card](http://goreportcard.com/badge/github.com/lukechampine/uint128)](https://goreportcard.com/report/github.com/lukechampine/uint128)
+
+```
+go get lukechampine.com/uint128
+```
+
+`uint128` provides a high-performance `Uint128` type that supports standard arithmetic
+operations. Unlike `math/big`, operations on `Uint128` values always produce new values
+instead of modifying a pointer receiver. A `Uint128` value is therefore immutable, just
+like `uint64` and friends.
+
+The name `uint128.Uint128` stutters, so I recommend either using a "dot import"
+or aliasing `uint128.Uint128` to give it a project-specific name. Embedding the type
+is not recommended, because methods will still return `uint128.Uint128`; this means that,
+if you want to extend the type with new methods, your best bet is probably to copy the
+source code wholesale and rename the identifier. ¯\\\_(ツ)\_/¯
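To make the aliasing suggestion concrete, a small sketch built only on the API vendored below (the `U128` alias name is just an example):

```go
package main

import (
	"fmt"

	"lukechampine.com/uint128"
)

// U128 is a project-local alias for the stuttering uint128.Uint128 name.
type U128 = uint128.Uint128

func main() {
	var a U128 = uint128.From64(1).Lsh(100) // 2^100
	b := a.Add64(42)                        // Add64 panics on overflow; AddWrap64 wraps

	q, r := b.QuoRem64(1e9)
	fmt.Println(b, q, r) // decimal output via String()
}
```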
+
+
+# Benchmarks
+
+Addition, multiplication, and subtraction are on par with their native 64-bit
+equivalents. Division is slower: ~20x slower when dividing a `Uint128` by a
+`uint64`, and ~100x slower when dividing by a `Uint128`. However, division is
+still faster than with `big.Int` (for the same operands), especially when
+dividing by a `uint64`.
+
+```
+BenchmarkArithmetic/Add-4            2000000000   0.45 ns/op    0 B/op   0 allocs/op
+BenchmarkArithmetic/Sub-4            2000000000   0.67 ns/op    0 B/op   0 allocs/op
+BenchmarkArithmetic/Mul-4            2000000000   0.42 ns/op    0 B/op   0 allocs/op
+BenchmarkArithmetic/Lsh-4            2000000000   1.06 ns/op    0 B/op   0 allocs/op
+BenchmarkArithmetic/Rsh-4            2000000000   1.06 ns/op    0 B/op   0 allocs/op
+
+BenchmarkDivision/native_64/64-4     2000000000   0.39 ns/op    0 B/op   0 allocs/op
+BenchmarkDivision/Div_128/64-4       2000000000   6.28 ns/op    0 B/op   0 allocs/op
+BenchmarkDivision/Div_128/128-4        30000000   45.2 ns/op    0 B/op   0 allocs/op
+BenchmarkDivision/big.Int_128/64-4     20000000   98.2 ns/op    8 B/op   1 allocs/op
+BenchmarkDivision/big.Int_128/128-4    30000000   53.4 ns/op   48 B/op   1 allocs/op
+
+BenchmarkString/Uint128-4              10000000    173 ns/op   48 B/op   1 allocs/op
+BenchmarkString/big.Int-4               5000000    350 ns/op  144 B/op   3 allocs/op
+```
diff --git a/vendor/lukechampine.com/uint128/uint128.go b/vendor/lukechampine.com/uint128/uint128.go
new file mode 100644
index 000000000..04e65783a
--- /dev/null
+++ b/vendor/lukechampine.com/uint128/uint128.go
@@ -0,0 +1,440 @@
+package uint128 // import "lukechampine.com/uint128"
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"math/big"
+	"math/bits"
+)
+
+// Zero is a zero-valued uint128.
+var Zero Uint128
+
+// Max is the largest possible uint128 value.
+var Max = New(math.MaxUint64, math.MaxUint64)
+
+// A Uint128 is an unsigned 128-bit number.
+type Uint128 struct {
+	Lo, Hi uint64
+}
+
+// IsZero returns true if u == 0.
+func (u Uint128) IsZero() bool {
+	// NOTE: we do not compare against Zero, because that is a global variable
+	// that could be modified.
+	return u == Uint128{}
+}
+
+// Equals returns true if u == v.
+//
+// Uint128 values can be compared directly with ==, but use of the Equals method
+// is preferred for consistency.
+func (u Uint128) Equals(v Uint128) bool {
+	return u == v
+}
+
+// Equals64 returns true if u == v.
+func (u Uint128) Equals64(v uint64) bool {
+	return u.Lo == v && u.Hi == 0
+}
+
+// Cmp compares u and v and returns:
+//
+//	-1 if u <  v
+//	 0 if u == v
+//	+1 if u >  v
+//
+func (u Uint128) Cmp(v Uint128) int {
+	if u == v {
+		return 0
+	} else if u.Hi < v.Hi || (u.Hi == v.Hi && u.Lo < v.Lo) {
+		return -1
+	} else {
+		return 1
+	}
+}
+
+// Cmp64 compares u and v and returns:
+//
+//	-1 if u <  v
+//	 0 if u == v
+//	+1 if u >  v
+//
+func (u Uint128) Cmp64(v uint64) int {
+	if u.Hi == 0 && u.Lo == v {
+		return 0
+	} else if u.Hi == 0 && u.Lo < v {
+		return -1
+	} else {
+		return 1
+	}
+}
+
+// And returns u&v.
+func (u Uint128) And(v Uint128) Uint128 {
+	return Uint128{u.Lo & v.Lo, u.Hi & v.Hi}
+}
+
+// And64 returns u&v.
+func (u Uint128) And64(v uint64) Uint128 {
+	return Uint128{u.Lo & v, u.Hi & 0}
+}
+
+// Or returns u|v.
+func (u Uint128) Or(v Uint128) Uint128 {
+	return Uint128{u.Lo | v.Lo, u.Hi | v.Hi}
+}
+
+// Or64 returns u|v.
+func (u Uint128) Or64(v uint64) Uint128 {
+	return Uint128{u.Lo | v, u.Hi | 0}
+}
+
+// Xor returns u^v.
+func (u Uint128) Xor(v Uint128) Uint128 {
+	return Uint128{u.Lo ^ v.Lo, u.Hi ^ v.Hi}
+}
+
+// Xor64 returns u^v.
+func (u Uint128) Xor64(v uint64) Uint128 { + return Uint128{u.Lo ^ v, u.Hi ^ 0} +} + +// Add returns u+v. +func (u Uint128) Add(v Uint128) Uint128 { + lo, carry := bits.Add64(u.Lo, v.Lo, 0) + hi, carry := bits.Add64(u.Hi, v.Hi, carry) + if carry != 0 { + panic("overflow") + } + return Uint128{lo, hi} +} + +// AddWrap returns u+v with wraparound semantics; for example, +// Max.AddWrap(From64(1)) == Zero. +func (u Uint128) AddWrap(v Uint128) Uint128 { + lo, carry := bits.Add64(u.Lo, v.Lo, 0) + hi, _ := bits.Add64(u.Hi, v.Hi, carry) + return Uint128{lo, hi} +} + +// Add64 returns u+v. +func (u Uint128) Add64(v uint64) Uint128 { + lo, carry := bits.Add64(u.Lo, v, 0) + hi, carry := bits.Add64(u.Hi, 0, carry) + if carry != 0 { + panic("overflow") + } + return Uint128{lo, hi} +} + +// AddWrap64 returns u+v with wraparound semantics; for example, +// Max.AddWrap64(1) == Zero. +func (u Uint128) AddWrap64(v uint64) Uint128 { + lo, carry := bits.Add64(u.Lo, v, 0) + hi := u.Hi + carry + return Uint128{lo, hi} +} + +// Sub returns u-v. +func (u Uint128) Sub(v Uint128) Uint128 { + lo, borrow := bits.Sub64(u.Lo, v.Lo, 0) + hi, borrow := bits.Sub64(u.Hi, v.Hi, borrow) + if borrow != 0 { + panic("underflow") + } + return Uint128{lo, hi} +} + +// SubWrap returns u-v with wraparound semantics; for example, +// Zero.SubWrap(From64(1)) == Max. +func (u Uint128) SubWrap(v Uint128) Uint128 { + lo, borrow := bits.Sub64(u.Lo, v.Lo, 0) + hi, _ := bits.Sub64(u.Hi, v.Hi, borrow) + return Uint128{lo, hi} +} + +// Sub64 returns u-v. +func (u Uint128) Sub64(v uint64) Uint128 { + lo, borrow := bits.Sub64(u.Lo, v, 0) + hi, borrow := bits.Sub64(u.Hi, 0, borrow) + if borrow != 0 { + panic("underflow") + } + return Uint128{lo, hi} +} + +// SubWrap64 returns u-v with wraparound semantics; for example, +// Zero.SubWrap64(1) == Max. +func (u Uint128) SubWrap64(v uint64) Uint128 { + lo, borrow := bits.Sub64(u.Lo, v, 0) + hi := u.Hi - borrow + return Uint128{lo, hi} +} + +// Mul returns u*v, panicking on overflow. +func (u Uint128) Mul(v Uint128) Uint128 { + hi, lo := bits.Mul64(u.Lo, v.Lo) + p0, p1 := bits.Mul64(u.Hi, v.Lo) + p2, p3 := bits.Mul64(u.Lo, v.Hi) + hi, c0 := bits.Add64(hi, p1, 0) + hi, c1 := bits.Add64(hi, p3, c0) + if (u.Hi != 0 && v.Hi != 0) || p0 != 0 || p2 != 0 || c1 != 0 { + panic("overflow") + } + return Uint128{lo, hi} +} + +// MulWrap returns u*v with wraparound semantics; for example, +// Max.MulWrap(Max) == 1. +func (u Uint128) MulWrap(v Uint128) Uint128 { + hi, lo := bits.Mul64(u.Lo, v.Lo) + hi += u.Hi*v.Lo + u.Lo*v.Hi + return Uint128{lo, hi} +} + +// Mul64 returns u*v, panicking on overflow. +func (u Uint128) Mul64(v uint64) Uint128 { + hi, lo := bits.Mul64(u.Lo, v) + p0, p1 := bits.Mul64(u.Hi, v) + hi, c0 := bits.Add64(hi, p1, 0) + if p0 != 0 || c0 != 0 { + panic("overflow") + } + return Uint128{lo, hi} +} + +// MulWrap64 returns u*v with wraparound semantics; for example, +// Max.MulWrap64(2) == Max.Sub64(1). +func (u Uint128) MulWrap64(v uint64) Uint128 { + hi, lo := bits.Mul64(u.Lo, v) + hi += u.Hi * v + return Uint128{lo, hi} +} + +// Div returns u/v. +func (u Uint128) Div(v Uint128) Uint128 { + q, _ := u.QuoRem(v) + return q +} + +// Div64 returns u/v. +func (u Uint128) Div64(v uint64) Uint128 { + q, _ := u.QuoRem64(v) + return q +} + +// QuoRem returns q = u/v and r = u%v. 
+func (u Uint128) QuoRem(v Uint128) (q, r Uint128) {
+	if v.Hi == 0 {
+		var r64 uint64
+		q, r64 = u.QuoRem64(v.Lo)
+		r = From64(r64)
+	} else {
+		// generate a "trial quotient," guaranteed to be within 1 of the actual
+		// quotient, then adjust.
+		n := uint(bits.LeadingZeros64(v.Hi))
+		v1 := v.Lsh(n)
+		u1 := u.Rsh(1)
+		tq, _ := bits.Div64(u1.Hi, u1.Lo, v1.Hi)
+		tq >>= 63 - n
+		if tq != 0 {
+			tq--
+		}
+		q = From64(tq)
+		// calculate remainder using trial quotient, then adjust if remainder is
+		// greater than divisor
+		r = u.Sub(v.Mul64(tq))
+		if r.Cmp(v) >= 0 {
+			q = q.Add64(1)
+			r = r.Sub(v)
+		}
+	}
+	return
+}
+
+// QuoRem64 returns q = u/v and r = u%v.
+func (u Uint128) QuoRem64(v uint64) (q Uint128, r uint64) {
+	if u.Hi < v {
+		q.Lo, r = bits.Div64(u.Hi, u.Lo, v)
+	} else {
+		q.Hi, r = bits.Div64(0, u.Hi, v)
+		q.Lo, r = bits.Div64(r, u.Lo, v)
+	}
+	return
+}
+
+// Mod returns r = u%v.
+func (u Uint128) Mod(v Uint128) (r Uint128) {
+	_, r = u.QuoRem(v)
+	return
+}
+
+// Mod64 returns r = u%v.
+func (u Uint128) Mod64(v uint64) (r uint64) {
+	_, r = u.QuoRem64(v)
+	return
+}
+
+// Lsh returns u<<n.
+func (u Uint128) Lsh(n uint) (s Uint128) {
+	if n > 64 {
+		s.Lo = 0
+		s.Hi = u.Lo << (n - 64)
+	} else {
+		s.Lo = u.Lo << n
+		s.Hi = u.Hi<<n | u.Lo>>(64-n)
+	}
+	return
+}
+
+// Rsh returns u>>n.
+func (u Uint128) Rsh(n uint) (s Uint128) {
+	if n > 64 {
+		s.Lo = u.Hi >> (n - 64)
+		s.Hi = 0
+	} else {
+		s.Lo = u.Lo>>n | u.Hi<<(64-n)
+		s.Hi = u.Hi >> n
+	}
+	return
+}
+
+// LeadingZeros returns the number of leading zero bits in u; the result is 128
+// for u == 0.
+func (u Uint128) LeadingZeros() int {
+	if u.Hi > 0 {
+		return bits.LeadingZeros64(u.Hi)
+	}
+	return 64 + bits.LeadingZeros64(u.Lo)
+}
+
+// TrailingZeros returns the number of trailing zero bits in u; the result is
+// 128 for u == 0.
+func (u Uint128) TrailingZeros() int {
+	if u.Lo > 0 {
+		return bits.TrailingZeros64(u.Lo)
+	}
+	return 64 + bits.TrailingZeros64(u.Hi)
+}
+
+// OnesCount returns the number of one bits ("population count") in u.
+func (u Uint128) OnesCount() int {
+	return bits.OnesCount64(u.Hi) + bits.OnesCount64(u.Lo)
+}
+
+// RotateLeft returns the value of u rotated left by (k mod 128) bits.
+func (u Uint128) RotateLeft(k int) Uint128 {
+	const n = 128
+	s := uint(k) & (n - 1)
+	return u.Lsh(s).Or(u.Rsh(n - s))
+}
+
+// RotateRight returns the value of u rotated right by (k mod 128) bits.
+func (u Uint128) RotateRight(k int) Uint128 {
+	return u.RotateLeft(-k)
+}
+
+// Reverse returns the value of u with its bits in reversed order.
+func (u Uint128) Reverse() Uint128 {
+	return Uint128{bits.Reverse64(u.Hi), bits.Reverse64(u.Lo)}
+}
+
+// ReverseBytes returns the value of u with its bytes in reversed order.
+func (u Uint128) ReverseBytes() Uint128 {
+	return Uint128{bits.ReverseBytes64(u.Hi), bits.ReverseBytes64(u.Lo)}
+}
+
+// Len returns the minimum number of bits required to represent u; the result is
+// 0 for u == 0.
+func (u Uint128) Len() int {
+	return 128 - u.LeadingZeros()
+}
+
+// String returns the base-10 representation of u as a string.
+func (u Uint128) String() string {
+	if u.IsZero() {
+		return "0"
+	}
+	buf := []byte("0000000000000000000000000000000000000000") // log10(2^128) < 40
+	for i := len(buf); ; i -= 19 {
+		q, r := u.QuoRem64(1e19) // largest power of 10 that fits in a uint64
+		var n int
+		for ; r != 0; r /= 10 {
+			n++
+			buf[i-n] += byte(r % 10)
+		}
+		if q.IsZero() {
+			return string(buf[i-n:])
+		}
+		u = q
+	}
+}
+
+// PutBytes stores u in b in little-endian order. It panics if len(b) < 16.
+func (u Uint128) PutBytes(b []byte) { + binary.LittleEndian.PutUint64(b[:8], u.Lo) + binary.LittleEndian.PutUint64(b[8:], u.Hi) +} + +// Big returns u as a *big.Int. +func (u Uint128) Big() *big.Int { + i := new(big.Int).SetUint64(u.Hi) + i = i.Lsh(i, 64) + i = i.Xor(i, new(big.Int).SetUint64(u.Lo)) + return i +} + +// Scan implements fmt.Scanner. +func (u *Uint128) Scan(s fmt.ScanState, ch rune) error { + i := new(big.Int) + if err := i.Scan(s, ch); err != nil { + return err + } else if i.Sign() < 0 { + return errors.New("value cannot be negative") + } else if i.BitLen() > 128 { + return errors.New("value overflows Uint128") + } + u.Lo = i.Uint64() + u.Hi = i.Rsh(i, 64).Uint64() + return nil +} + +// New returns the Uint128 value (lo,hi). +func New(lo, hi uint64) Uint128 { + return Uint128{lo, hi} +} + +// From64 converts v to a Uint128 value. +func From64(v uint64) Uint128 { + return New(v, 0) +} + +// FromBytes converts b to a Uint128 value. +func FromBytes(b []byte) Uint128 { + return New( + binary.LittleEndian.Uint64(b[:8]), + binary.LittleEndian.Uint64(b[8:]), + ) +} + +// FromBig converts i to a Uint128 value. It panics if i is negative or +// overflows 128 bits. +func FromBig(i *big.Int) (u Uint128) { + if i.Sign() < 0 { + panic("value cannot be negative") + } else if i.BitLen() > 128 { + panic("value overflows Uint128") + } + u.Lo = i.Uint64() + u.Hi = i.Rsh(i, 64).Uint64() + return u +} + +// FromString parses s as a Uint128 value. +func FromString(s string) (u Uint128, err error) { + _, err = fmt.Sscan(s, &u) + return +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b9c9d838f..a51fade53 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,18 +1,93 @@ +# github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible +## explicit +github.com/Knetic/govaluate +# github.com/agoda-com/opentelemetry-logs-go v0.5.0 +## explicit; go 1.21 +github.com/agoda-com/opentelemetry-logs-go +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/envconfig +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/logstransform +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/otlpconfig +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/internal/retry +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogsgrpc +github.com/agoda-com/opentelemetry-logs-go/exporters/otlp/otlplogs/otlplogshttp +github.com/agoda-com/opentelemetry-logs-go/internal/global +github.com/agoda-com/opentelemetry-logs-go/logs +github.com/agoda-com/opentelemetry-logs-go/sdk/internal/env +github.com/agoda-com/opentelemetry-logs-go/sdk/logs +github.com/agoda-com/opentelemetry-logs-go/semconv +# github.com/benbjohnson/clock v1.3.5 +## explicit; go 1.15 +github.com/benbjohnson/clock # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile # github.com/bpfman/bpfman-operator v0.5.5-0.20241023163832-0bf84bbd3927 ## explicit; go 1.22.0 github.com/bpfman/bpfman-operator/apis/v1alpha1 +# github.com/cenkalti/backoff/v4 v4.3.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 +# github.com/cenkalti/hub v1.0.1 +## explicit +github.com/cenkalti/hub +# github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 +## explicit +github.com/cenkalti/rpc2 +github.com/cenkalti/rpc2/jsonrpc # github.com/cespare/xxhash/v2 v2.3.0 ## 
explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/cilium/ebpf v0.16.0 +## explicit; go 1.21 +github.com/cilium/ebpf +github.com/cilium/ebpf/asm +github.com/cilium/ebpf/btf +github.com/cilium/ebpf/internal +github.com/cilium/ebpf/internal/epoll +github.com/cilium/ebpf/internal/kallsyms +github.com/cilium/ebpf/internal/kconfig +github.com/cilium/ebpf/internal/sys +github.com/cilium/ebpf/internal/sysenc +github.com/cilium/ebpf/internal/tracefs +github.com/cilium/ebpf/internal/unix +github.com/cilium/ebpf/link +github.com/cilium/ebpf/perf +github.com/cilium/ebpf/ringbuf +github.com/cilium/ebpf/rlimit +# github.com/containernetworking/cni v1.1.2 +## explicit; go 1.14 +github.com/containernetworking/cni/libcni +github.com/containernetworking/cni/pkg/invoke +github.com/containernetworking/cni/pkg/types +github.com/containernetworking/cni/pkg/types/020 +github.com/containernetworking/cni/pkg/types/040 +github.com/containernetworking/cni/pkg/types/100 +github.com/containernetworking/cni/pkg/types/create +github.com/containernetworking/cni/pkg/types/internal +github.com/containernetworking/cni/pkg/utils +github.com/containernetworking/cni/pkg/version +# github.com/containernetworking/plugins v1.2.0 +## explicit; go 1.17 +github.com/containernetworking/plugins/pkg/ip +github.com/containernetworking/plugins/pkg/ns +github.com/containernetworking/plugins/pkg/utils/sysctl +# github.com/coreos/go-iptables v0.6.0 +## explicit; go 1.16 +github.com/coreos/go-iptables/iptables # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver +# github.com/cpuguy83/go-md2man/v2 v2.0.4 +## explicit; go 1.11 +github.com/cpuguy83/go-md2man/v2/md2man # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew +# github.com/dustin/go-humanize v1.0.1 +## explicit; go 1.16 +github.com/dustin/go-humanize # github.com/emicklei/go-restful/v3 v3.12.1 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -23,14 +98,38 @@ github.com/emicklei/go-restful/v3/log ## explicit; go 1.18 github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5/internal/json +# github.com/fsnotify/fsnotify v1.7.0 +## explicit; go 1.17 +github.com/fsnotify/fsnotify # github.com/fxamacker/cbor/v2 v2.7.0 ## explicit; go 1.17 github.com/fxamacker/cbor/v2 +# github.com/gavv/monotime v0.0.0-20190418164738-30dba4353424 +## explicit +github.com/gavv/monotime +# github.com/go-ini/ini v1.67.0 +## explicit +github.com/go-ini/ini +# github.com/go-kit/kit v0.13.0 +## explicit; go 1.17 +github.com/go-kit/kit/log +github.com/go-kit/kit/log/level +github.com/go-kit/kit/log/logrus +# github.com/go-kit/log v0.2.1 +## explicit; go 1.17 +github.com/go-kit/log +github.com/go-kit/log/level +# github.com/go-logfmt/logfmt v0.5.1 +## explicit; go 1.17 +github.com/go-logfmt/logfmt # github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr github.com/go-logr/logr/slogr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/go-logr/zapr v1.3.0 ## explicit; go 1.18 github.com/go-logr/zapr @@ -47,10 +146,24 @@ github.com/go-openapi/swag # github.com/go-task/slim-sprig/v3 v3.0.0 ## explicit; go 1.20 github.com/go-task/slim-sprig/v3 +# github.com/goccy/go-json v0.10.3 +## explicit; go 1.19 +github.com/goccy/go-json +github.com/goccy/go-json/internal/decoder +github.com/goccy/go-json/internal/encoder +github.com/goccy/go-json/internal/encoder/vm 
+github.com/goccy/go-json/internal/encoder/vm_color +github.com/goccy/go-json/internal/encoder/vm_color_indent +github.com/goccy/go-json/internal/encoder/vm_indent +github.com/goccy/go-json/internal/errors +github.com/goccy/go-json/internal/runtime # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys +github.com/gogo/protobuf/types # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -61,6 +174,9 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp +# github.com/golang/snappy v0.0.4 +## explicit +github.com/golang/snappy # github.com/google/gnostic-models v0.6.8 ## explicit; go 1.18 github.com/google/gnostic-models/compiler @@ -85,9 +201,24 @@ github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid +# github.com/gopacket/gopacket v1.2.0 +## explicit; go 1.20 +github.com/gopacket/gopacket +github.com/gopacket/gopacket/layers +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 +## explicit; go 1.21 +github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule +github.com/grpc-ecosystem/grpc-gateway/v2/runtime +github.com/grpc-ecosystem/grpc-gateway/v2/utilities +# github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb +## explicit +github.com/heptiolabs/healthcheck # github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo +# github.com/ip2location/ip2location-go/v9 v9.7.0 +## explicit; go 1.14 +github.com/ip2location/ip2location-go/v9 # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern @@ -100,17 +231,51 @@ github.com/json-iterator/go # github.com/klauspost/compress v1.17.9 ## explicit; go 1.20 github.com/klauspost/compress +github.com/klauspost/compress/flate github.com/klauspost/compress/fse +github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/race github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/s2 +github.com/klauspost/compress/snappy github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash +# github.com/klauspost/cpuid/v2 v2.2.8 +## explicit; go 1.15 +github.com/klauspost/cpuid/v2 +# github.com/libp2p/go-reuseport v0.3.0 +## explicit; go 1.19 +github.com/libp2p/go-reuseport # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 +## explicit; go 1.12 +github.com/mdlayher/ethernet +# github.com/minio/md5-simd v1.1.2 +## explicit; go 1.14 +github.com/minio/md5-simd +# github.com/minio/minio-go/v7 v7.0.77 +## explicit; go 1.21 +github.com/minio/minio-go/v7 +github.com/minio/minio-go/v7/pkg/cors +github.com/minio/minio-go/v7/pkg/credentials +github.com/minio/minio-go/v7/pkg/encrypt +github.com/minio/minio-go/v7/pkg/lifecycle +github.com/minio/minio-go/v7/pkg/notification +github.com/minio/minio-go/v7/pkg/replication +github.com/minio/minio-go/v7/pkg/s3utils +github.com/minio/minio-go/v7/pkg/set +github.com/minio/minio-go/v7/pkg/signer +github.com/minio/minio-go/v7/pkg/sse +github.com/minio/minio-go/v7/pkg/tags +# 
github.com/mitchellh/mapstructure v1.5.0 +## explicit; go 1.14 +github.com/mitchellh/mapstructure # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent @@ -127,7 +292,80 @@ github.com/mwitkow/go-conntrack ## explicit; go 1.22.3 github.com/netobserv/flowlogs-pipeline/pkg/api github.com/netobserv/flowlogs-pipeline/pkg/config +github.com/netobserv/flowlogs-pipeline/pkg/operational +github.com/netobserv/flowlogs-pipeline/pkg/pipeline +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/decode +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/metrics +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/encode/opentelemetry +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/aggregate +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/conntrack +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/extract/timebased +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/ingest +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/cni +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/kubernetes/informers +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/location +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/transform/netdb +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/utils +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc +github.com/netobserv/flowlogs-pipeline/pkg/pipeline/write/grpc/genericmap +github.com/netobserv/flowlogs-pipeline/pkg/prometheus +github.com/netobserv/flowlogs-pipeline/pkg/server github.com/netobserv/flowlogs-pipeline/pkg/utils +github.com/netobserv/flowlogs-pipeline/pkg/utils/filters +# github.com/netobserv/gopipes v0.3.0 +## explicit; go 1.18 +github.com/netobserv/gopipes/pkg/node +github.com/netobserv/gopipes/pkg/node/internal/connect +# github.com/netobserv/loki-client-go v0.0.0-20220927092034-f37122a54500 +## explicit; go 1.15 +github.com/netobserv/loki-client-go/loki +github.com/netobserv/loki-client-go/pkg/backoff +github.com/netobserv/loki-client-go/pkg/helpers +github.com/netobserv/loki-client-go/pkg/labelutil +github.com/netobserv/loki-client-go/pkg/logproto +github.com/netobserv/loki-client-go/pkg/metric +github.com/netobserv/loki-client-go/pkg/urlutil +# github.com/netobserv/netobserv-ebpf-agent v1.6.1-crc2.0.20241008130234-a20397fb8f88 => github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b +## explicit; go 1.22.3 +github.com/netobserv/netobserv-ebpf-agent/pkg/agent +github.com/netobserv/netobserv-ebpf-agent/pkg/decode +github.com/netobserv/netobserv-ebpf-agent/pkg/decode/packets +github.com/netobserv/netobserv-ebpf-agent/pkg/ebpf +github.com/netobserv/netobserv-ebpf-agent/pkg/exporter +github.com/netobserv/netobserv-ebpf-agent/pkg/flow +github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/flow +github.com/netobserv/netobserv-ebpf-agent/pkg/grpc/packet +github.com/netobserv/netobserv-ebpf-agent/pkg/ifaces +github.com/netobserv/netobserv-ebpf-agent/pkg/kernel +github.com/netobserv/netobserv-ebpf-agent/pkg/metrics +github.com/netobserv/netobserv-ebpf-agent/pkg/model +github.com/netobserv/netobserv-ebpf-agent/pkg/pbflow +github.com/netobserv/netobserv-ebpf-agent/pkg/pbpacket 
+github.com/netobserv/netobserv-ebpf-agent/pkg/prometheus +github.com/netobserv/netobserv-ebpf-agent/pkg/tracer +github.com/netobserv/netobserv-ebpf-agent/pkg/utils +github.com/netobserv/netobserv-ebpf-agent/pkg/utils/packets +# github.com/netsampler/goflow2 v1.3.7 +## explicit; go 1.18 +github.com/netsampler/goflow2/decoders +github.com/netsampler/goflow2/decoders/netflow +github.com/netsampler/goflow2/decoders/netflow/templates +github.com/netsampler/goflow2/decoders/netflow/templates/memory +github.com/netsampler/goflow2/decoders/netflowlegacy +github.com/netsampler/goflow2/decoders/sflow +github.com/netsampler/goflow2/decoders/utils +github.com/netsampler/goflow2/format +github.com/netsampler/goflow2/format/common +github.com/netsampler/goflow2/format/protobuf +github.com/netsampler/goflow2/pb +github.com/netsampler/goflow2/producer +github.com/netsampler/goflow2/transport +github.com/netsampler/goflow2/utils # github.com/onsi/ginkgo/v2 v2.22.0 ## explicit; go 1.22.0 github.com/onsi/ginkgo/v2 @@ -172,6 +410,69 @@ github.com/openshift/api/operator/v1 github.com/openshift/api/operator/v1/zz_generated.crd-manifests github.com/openshift/api/security/v1 github.com/openshift/api/security/v1/zz_generated.crd-manifests +# github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 +## explicit; go 1.18 +github.com/ovn-org/libovsdb/cache +github.com/ovn-org/libovsdb/client +github.com/ovn-org/libovsdb/database +github.com/ovn-org/libovsdb/mapper +github.com/ovn-org/libovsdb/model +github.com/ovn-org/libovsdb/ovsdb +github.com/ovn-org/libovsdb/ovsdb/serverdb +github.com/ovn-org/libovsdb/updates +# github.com/ovn-org/ovn-kubernetes/go-controller v0.0.0-20241126140656-c95491e46334 +## explicit; go 1.22.0 +github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model +github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb +github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/sampledecoder +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb +github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types +# github.com/pierrec/lz4/v4 v4.1.17 +## explicit; go 1.14 +github.com/pierrec/lz4/v4 +github.com/pierrec/lz4/v4/internal/lz4block +github.com/pierrec/lz4/v4/internal/lz4errors +github.com/pierrec/lz4/v4/internal/lz4stream +github.com/pierrec/lz4/v4/internal/xxh32 +# github.com/pion/dtls/v2 v2.2.4 +## explicit; go 1.13 +github.com/pion/dtls/v2 +github.com/pion/dtls/v2/internal/ciphersuite +github.com/pion/dtls/v2/internal/ciphersuite/types +github.com/pion/dtls/v2/internal/closer +github.com/pion/dtls/v2/internal/util +github.com/pion/dtls/v2/pkg/crypto/ccm +github.com/pion/dtls/v2/pkg/crypto/ciphersuite +github.com/pion/dtls/v2/pkg/crypto/clientcertificate +github.com/pion/dtls/v2/pkg/crypto/elliptic +github.com/pion/dtls/v2/pkg/crypto/hash +github.com/pion/dtls/v2/pkg/crypto/prf +github.com/pion/dtls/v2/pkg/crypto/signature +github.com/pion/dtls/v2/pkg/crypto/signaturehash +github.com/pion/dtls/v2/pkg/protocol +github.com/pion/dtls/v2/pkg/protocol/alert +github.com/pion/dtls/v2/pkg/protocol/extension +github.com/pion/dtls/v2/pkg/protocol/handshake 
+github.com/pion/dtls/v2/pkg/protocol/recordlayer +# github.com/pion/logging v0.2.2 +## explicit; go 1.12 +github.com/pion/logging +# github.com/pion/transport/v2 v2.0.0 +## explicit; go 1.12 +github.com/pion/transport/v2/connctx +github.com/pion/transport/v2/deadline +github.com/pion/transport/v2/packetio +github.com/pion/transport/v2/replaydetector +# github.com/pion/udp v0.1.4 +## explicit; go 1.14 +github.com/pion/udp +github.com/pion/udp/pkg/sync # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -182,7 +483,7 @@ github.com/pmezard/go-difflib/difflib ## explicit; go 1.22.0 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 -# github.com/prometheus/client_golang v1.20.3 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -198,11 +499,86 @@ github.com/prometheus/client_model/go github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/model +github.com/prometheus/common/version # github.com/prometheus/procfs v0.15.1 ## explicit; go 1.20 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/prometheus/prometheus v1.8.2-0.20201028100903-3245b3267b24 +## explicit; go 1.14 +github.com/prometheus/prometheus/pkg/labels +github.com/prometheus/prometheus/pkg/value +github.com/prometheus/prometheus/promql/parser +github.com/prometheus/prometheus/storage +github.com/prometheus/prometheus/tsdb/chunkenc +github.com/prometheus/prometheus/tsdb/chunks +github.com/prometheus/prometheus/tsdb/errors +github.com/prometheus/prometheus/tsdb/fileutil +github.com/prometheus/prometheus/tsdb/tsdbutil +github.com/prometheus/prometheus/util/strutil +# github.com/rs/xid v1.6.0 +## explicit; go 1.16 +github.com/rs/xid +# github.com/russross/blackfriday/v2 v2.1.0 +## explicit +github.com/russross/blackfriday/v2 +# github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 +## explicit; go 1.16 +github.com/safchain/ethtool +# github.com/segmentio/kafka-go v0.4.47 +## explicit; go 1.15 +github.com/segmentio/kafka-go +github.com/segmentio/kafka-go/compress +github.com/segmentio/kafka-go/compress/gzip +github.com/segmentio/kafka-go/compress/lz4 +github.com/segmentio/kafka-go/compress/snappy +github.com/segmentio/kafka-go/compress/zstd +github.com/segmentio/kafka-go/protocol +github.com/segmentio/kafka-go/protocol/addoffsetstotxn +github.com/segmentio/kafka-go/protocol/addpartitionstotxn +github.com/segmentio/kafka-go/protocol/alterclientquotas +github.com/segmentio/kafka-go/protocol/alterconfigs +github.com/segmentio/kafka-go/protocol/alterpartitionreassignments +github.com/segmentio/kafka-go/protocol/alteruserscramcredentials +github.com/segmentio/kafka-go/protocol/apiversions +github.com/segmentio/kafka-go/protocol/consumer +github.com/segmentio/kafka-go/protocol/createacls +github.com/segmentio/kafka-go/protocol/createpartitions +github.com/segmentio/kafka-go/protocol/createtopics +github.com/segmentio/kafka-go/protocol/deleteacls +github.com/segmentio/kafka-go/protocol/deletegroups +github.com/segmentio/kafka-go/protocol/deletetopics +github.com/segmentio/kafka-go/protocol/describeacls +github.com/segmentio/kafka-go/protocol/describeclientquotas +github.com/segmentio/kafka-go/protocol/describeconfigs 
+github.com/segmentio/kafka-go/protocol/describegroups +github.com/segmentio/kafka-go/protocol/describeuserscramcredentials +github.com/segmentio/kafka-go/protocol/electleaders +github.com/segmentio/kafka-go/protocol/endtxn +github.com/segmentio/kafka-go/protocol/fetch +github.com/segmentio/kafka-go/protocol/findcoordinator +github.com/segmentio/kafka-go/protocol/heartbeat +github.com/segmentio/kafka-go/protocol/incrementalalterconfigs +github.com/segmentio/kafka-go/protocol/initproducerid +github.com/segmentio/kafka-go/protocol/joingroup +github.com/segmentio/kafka-go/protocol/leavegroup +github.com/segmentio/kafka-go/protocol/listgroups +github.com/segmentio/kafka-go/protocol/listoffsets +github.com/segmentio/kafka-go/protocol/listpartitionreassignments +github.com/segmentio/kafka-go/protocol/metadata +github.com/segmentio/kafka-go/protocol/offsetcommit +github.com/segmentio/kafka-go/protocol/offsetdelete +github.com/segmentio/kafka-go/protocol/offsetfetch +github.com/segmentio/kafka-go/protocol/produce +github.com/segmentio/kafka-go/protocol/rawproduce +github.com/segmentio/kafka-go/protocol/saslauthenticate +github.com/segmentio/kafka-go/protocol/saslhandshake +github.com/segmentio/kafka-go/protocol/syncgroup +github.com/segmentio/kafka-go/protocol/txnoffsetcommit +github.com/segmentio/kafka-go/sasl +github.com/segmentio/kafka-go/sasl/plain +github.com/segmentio/kafka-go/sasl/scram # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -216,9 +592,122 @@ github.com/stretchr/objx ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/mock +# github.com/urfave/cli/v2 v2.27.2 +## explicit; go 1.18 +github.com/urfave/cli/v2 +# github.com/vishvananda/netlink v1.3.0 +## explicit; go 1.12 +github.com/vishvananda/netlink +github.com/vishvananda/netlink/nl +# github.com/vishvananda/netns v0.0.4 +## explicit; go 1.17 +github.com/vishvananda/netns +# github.com/vmware/go-ipfix v0.9.0 +## explicit; go 1.21 +github.com/vmware/go-ipfix/pkg/entities +github.com/vmware/go-ipfix/pkg/exporter +github.com/vmware/go-ipfix/pkg/registry # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 +# github.com/xdg-go/pbkdf2 v1.0.0 +## explicit; go 1.9 +github.com/xdg-go/pbkdf2 +# github.com/xdg-go/scram v1.1.2 +## explicit; go 1.11 +github.com/xdg-go/scram +# github.com/xdg-go/stringprep v1.0.4 +## explicit; go 1.11 +github.com/xdg-go/stringprep +# github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 +## explicit; go 1.15.0 +github.com/xrash/smetrics +# go.opentelemetry.io/otel v1.31.0 +## explicit; go 1.22 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/attribute +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/v1.21.0 +go.opentelemetry.io/otel/semconv/v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/exporters/otlp/otlptrace +go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry +# go.opentelemetry.io/otel/metric v1.31.0 +## explicit; go 1.22 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded +go.opentelemetry.io/otel/metric/noop +# go.opentelemetry.io/otel/sdk v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/internal/x +go.opentelemetry.io/otel/sdk/resource +go.opentelemetry.io/otel/sdk/trace +# go.opentelemetry.io/otel/sdk/metric v1.29.0 +## explicit; go 1.21 +go.opentelemetry.io/otel/sdk/metric +go.opentelemetry.io/otel/sdk/metric/internal +go.opentelemetry.io/otel/sdk/metric/internal/aggregate +go.opentelemetry.io/otel/sdk/metric/internal/exemplar +go.opentelemetry.io/otel/sdk/metric/internal/x +go.opentelemetry.io/otel/sdk/metric/metricdata +# go.opentelemetry.io/otel/trace v1.31.0 +## explicit; go 1.22 +go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded +go.opentelemetry.io/otel/trace/noop +# go.opentelemetry.io/proto/otlp v1.3.1 +## explicit; go 1.17 +go.opentelemetry.io/proto/otlp/collector/logs/v1 +go.opentelemetry.io/proto/otlp/collector/metrics/v1 +go.opentelemetry.io/proto/otlp/collector/trace/v1 +go.opentelemetry.io/proto/otlp/common/v1 +go.opentelemetry.io/proto/otlp/logs/v1 +go.opentelemetry.io/proto/otlp/metrics/v1 +go.opentelemetry.io/proto/otlp/resource/v1 +go.opentelemetry.io/proto/otlp/trace/v1 +# go.uber.org/atomic v1.9.0 +## explicit; go 1.13 +go.uber.org/atomic # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr @@ -233,8 +722,16 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace 
go.uber.org/zap/zapcore +# golang.org/x/crypto v0.28.0 +## explicit; go 1.20 +golang.org/x/crypto/argon2 +golang.org/x/crypto/blake2b +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/curve25519 # golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 ## explicit; go 1.20 +golang.org/x/exp/constraints golang.org/x/exp/maps # golang.org/x/net v0.30.0 ## explicit; go 1.18 @@ -248,6 +745,7 @@ golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries +golang.org/x/net/publicsuffix golang.org/x/net/trace # golang.org/x/oauth2 v0.22.0 ## explicit; go 1.18 @@ -256,9 +754,11 @@ golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal # golang.org/x/sys v0.26.0 ## explicit; go 1.18 +golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry # golang.org/x/term v0.25.0 ## explicit; go 1.18 golang.org/x/term @@ -294,9 +794,80 @@ golang.org/x/tools/go/ast/inspector # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 +# google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd +## explicit; go 1.21 +google.golang.org/genproto/googleapis/api/httpbody +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd +## explicit; go 1.21 +google.golang.org/genproto/googleapis/rpc/errdetails +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.67.1 +## explicit; go 1.21 +google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/gzip +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats +google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal +google.golang.org/grpc/health/grpc_health_v1 +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/credentials +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpclog +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal +google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix +google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats +google.golang.org/grpc/internal/status +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/keepalive +google.golang.org/grpc/mem 
+google.golang.org/grpc/metadata +google.golang.org/grpc/peer +google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 +google.golang.org/grpc/reflection/grpc_reflection_v1alpha +google.golang.org/grpc/reflection/internal +google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns +google.golang.org/grpc/serviceconfig +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap # google.golang.org/protobuf v1.35.1 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim +google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt @@ -305,6 +876,7 @@ google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/editiondefaults google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text @@ -320,6 +892,7 @@ google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/protoadapt google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry @@ -329,13 +902,28 @@ google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/gofeaturespb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/fieldmaskpb +google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb +google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/evanphx/json-patch.v4 v4.12.0 ## explicit gopkg.in/evanphx/json-patch.v4 +# gopkg.in/gcfg.v1 v1.2.3 +## explicit +gopkg.in/gcfg.v1 +gopkg.in/gcfg.v1/scanner +gopkg.in/gcfg.v1/token +gopkg.in/gcfg.v1/types # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 +# gopkg.in/natefinch/lumberjack.v2 v2.2.1 +## explicit; go 1.13 +gopkg.in/natefinch/lumberjack.v2 +# gopkg.in/warnings.v0 v0.1.2 +## explicit +gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 @@ -762,6 +1350,8 @@ k8s.io/client-go/listers/storage/v1alpha1 k8s.io/client-go/listers/storage/v1beta1 k8s.io/client-go/listers/storagemigration/v1alpha1 k8s.io/client-go/metadata +k8s.io/client-go/metadata/metadatainformer +k8s.io/client-go/metadata/metadatalister k8s.io/client-go/openapi k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install @@ -815,6 +1405,8 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler +k8s.io/klog/v2/internal/verbosity +k8s.io/klog/v2/textlogger # k8s.io/kube-aggregator v0.31.2 ## explicit; go 1.22.0 k8s.io/kube-aggregator/pkg/apis/apiregistration @@ -835,12 +1427,16 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing +k8s.io/utils/exec k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace +# lukechampine.com/uint128 v1.2.0 +## explicit; go 1.12 +lukechampine.com/uint128 # 
sigs.k8s.io/controller-runtime v0.19.3 ## explicit; go 1.22.0 sigs.k8s.io/controller-runtime @@ -908,3 +1504,4 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 +# github.com/netobserv/netobserv-ebpf-agent => github.com/msherif1234/netobserv-ebpf-agent v0.0.0-20241213142900-5c07db3a6c0b